Mirror of https://github.com/rclone/rclone.git (synced 2024-11-22 00:13:49 +01:00)
vendor: update all dependencies
This commit is contained in: parent 61ff7306ae, commit 696d012c05
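The commit message does not record how the refresh was produced. For a Go 1.14 module such as this one, an "update all dependencies" pass is typically something along the lines of the following; the exact commands used for this commit are an assumption:

    # Bump every direct and indirect dependency to its latest release,
    # then drop requirements that are no longer needed.
    go get -u ./...
    go mod tidy

    # Only relevant if the repository still keeps a vendor/ directory.
    go mod vendor

The go.mod and go.sum diffs below are the mechanical output of such a pass.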
go.mod: 88 changed lines
@@ -1,73 +1,77 @@
module github.com/rclone/rclone

go 1.14

require (
	bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc
	cloud.google.com/go v0.53.0 // indirect
	bazil.org/fuse v0.0.0-20200524192727-fb710f7dfd05
	cloud.google.com/go v0.59.0 // indirect
	github.com/Azure/azure-pipeline-go v0.2.2
	github.com/Azure/azure-storage-blob-go v0.8.0
	github.com/Azure/go-autorest/autorest/adal v0.6.0 // indirect
	github.com/Azure/azure-storage-blob-go v0.10.0
	github.com/Unknwon/goconfig v0.0.0-20191126170842-860a72fb44fd
	github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4
	github.com/aalpar/deheap v0.0.0-20191229192855-f837f7a9ba26
	github.com/aalpar/deheap v0.0.0-20200318053559-9a0c2883bd56
	github.com/abbot/go-http-auth v0.4.0
	github.com/anacrolix/dms v1.1.0
	github.com/atotto/clipboard v0.1.2
	github.com/aws/aws-sdk-go v1.29.9
	github.com/billziss-gh/cgofuse v1.2.0
	github.com/aws/aws-sdk-go v1.32.11
	github.com/billziss-gh/cgofuse v1.3.0
	github.com/btcsuite/btcutil v1.0.2 // indirect
	github.com/calebcase/tmpfile v1.0.2 // indirect
	github.com/coreos/go-semver v0.3.0
	github.com/djherbis/times v1.2.0
	github.com/dropbox/dropbox-sdk-go-unofficial v5.6.0+incompatible
	github.com/gogo/protobuf v1.3.1 // indirect
	github.com/google/go-querystring v1.0.0 // indirect
	github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f // indirect
	github.com/hanwen/go-fuse/v2 v2.0.3-0.20191108143333-152e6ac32d54
	github.com/jlaffaye/ftp v0.0.0-20200422224957-b9f3ade29122
	github.com/hanwen/go-fuse/v2 v2.0.3
	github.com/jlaffaye/ftp v0.0.0-20200602180915-5563613968bf
	github.com/jzelinskie/whirlpool v0.0.0-20170603002051-c19460b8caa6
	github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
	github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
	github.com/koofr/go-httpclient v0.0.0-20190818202018-e0dc8fd921dc
	github.com/klauspost/cpuid v1.3.0 // indirect
	github.com/koofr/go-httpclient v0.0.0-20200420163713-93aa7c75b348
	github.com/koofr/go-koofrclient v0.0.0-20190724113126-8e5366da203a
	github.com/mattn/go-colorable v0.1.4
	github.com/mattn/go-ieproxy v0.0.0-20200203040449-2dbc853185d9 // indirect
	github.com/mattn/go-isatty v0.0.12 // indirect
	github.com/mattn/go-runewidth v0.0.8
	github.com/mattn/go-colorable v0.1.7
	github.com/mattn/go-ieproxy v0.0.1 // indirect
	github.com/mattn/go-runewidth v0.0.9
	github.com/minio/minio-go/v6 v6.0.57 // indirect
	github.com/mitchellh/go-homedir v1.1.0
	github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2
	github.com/ncw/swift v1.0.50
	github.com/nsf/termbox-go v0.0.0-20200204031403-4d2b513ad8be
	github.com/ncw/swift v1.0.52
	github.com/nsf/termbox-go v0.0.0-20200418040025-38ba6e5628f1
	github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd
	github.com/onsi/ginkgo v1.9.0 // indirect
	github.com/onsi/gomega v1.6.0 // indirect
	github.com/patrickmn/go-cache v2.1.0+incompatible
	github.com/pkg/errors v0.9.1
	github.com/pkg/sftp v1.11.0
	github.com/prometheus/client_golang v1.4.1
	github.com/prometheus/client_golang v1.7.1
	github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8
	github.com/rfjakob/eme v0.0.0-20171028163933-2222dbd4ba46
	github.com/rfjakob/eme v1.1.1
	github.com/sevlyar/go-daemon v0.1.5
	github.com/sirupsen/logrus v1.4.2
	github.com/sirupsen/logrus v1.6.0
	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966
	github.com/smartystreets/assertions v1.0.1 // indirect
	github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 // indirect
	github.com/spf13/cobra v1.0.0
	github.com/spf13/pflag v1.0.5
	github.com/stretchr/testify v1.5.1
	github.com/t3rm1n4l/go-mega v0.0.0-20200117211730-79a813bb328d
	github.com/stretchr/testify v1.6.1
	github.com/t3rm1n4l/go-mega v0.0.0-20200416171014-ffad7fcb44b8
	github.com/xanzy/ssh-agent v0.2.1
	github.com/youmark/pkcs8 v0.0.0-20191102193632-94c173a94d60
	github.com/youmark/pkcs8 v0.0.0-20200520070018-fad002e585ce
	github.com/yunify/qingstor-sdk-go/v3 v3.2.0
	go.etcd.io/bbolt v1.3.3
	goftp.io/server v0.3.2
	golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d
	golang.org/x/net v0.0.0-20200222125558-5a598a2470a0
	go.etcd.io/bbolt v1.3.5
	go.opencensus.io v0.22.4 // indirect
	go.uber.org/zap v1.15.0 // indirect
	goftp.io/server v0.3.4
	golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
	golang.org/x/net v0.0.0-20200625001655-4c5254603344
	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
	golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
	golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd
	golang.org/x/text v0.3.2
	golang.org/x/time v0.0.0-20191024005414-555d28b269f0
	google.golang.org/api v0.21.1-0.20200411000818-c8cf5cff125e
	google.golang.org/genproto v0.0.0-20200225123651-fc8f55426688 // indirect
	gopkg.in/yaml.v2 v2.2.8
	storj.io/uplink v1.1.1
	golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
	golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae
	golang.org/x/text v0.3.3
	golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1
	google.golang.org/api v0.28.0
	google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5 // indirect
	google.golang.org/grpc v1.30.0 // indirect
	google.golang.org/protobuf v1.25.0 // indirect
	gopkg.in/ini.v1 v1.57.0 // indirect
	gopkg.in/yaml.v2 v2.3.0
	gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 // indirect
	storj.io/common v0.0.0-20200624215549-bf610d22d466 // indirect
	storj.io/uplink v1.1.2
)

go 1.14
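Two markers recur in the requirements above. A line ending in "// indirect" names a module that rclone's own code does not import but that one of its dependencies needs, and a version carrying "+incompatible" comes from a repository that tags v2-or-higher releases without the /vN module-path suffix expected by semantic import versioning. A minimal sketch, using hypothetical module paths:

    require (
        // Needed only by another dependency, not imported directly.
        github.com/example/transitive v1.4.0 // indirect

        // Tagged v5.6.0 but published without a /v5 module path,
        // so the go tool records the version as "incompatible".
        github.com/example/legacy v5.6.0+incompatible
    )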
go.sum: 400 changed lines
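The go.sum changes that follow track the go.mod bump mechanically: for each module version in the build, go.sum normally records two checksum lines, one for the module's file tree and one for its go.mod alone, so every version change swaps a pair of entries. The general shape, with placeholder hashes rather than real ones:

    example.com/some/module v1.2.3 h1:<base64 checksum of the module contents>=
    example.com/some/module v1.2.3/go.mod h1:<base64 checksum of its go.mod file>=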
@@ -1,5 +1,5 @@
|
||||
bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc h1:utDghgcjE8u+EBjHOgYT+dJPcnDF05KqWMBcjuJy510=
|
||||
bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
|
||||
bazil.org/fuse v0.0.0-20200524192727-fb710f7dfd05 h1:UrYe9YkT4Wpm6D+zByEyCJQzDqTPXqTDUI7bZ41i9VE=
|
||||
bazil.org/fuse v0.0.0-20200524192727-fb710f7dfd05/go.mod h1:h0h5FBYpXThbvSfTqthw+0I4nmHnhTHkO5BoOHsBWqg=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
@ -8,26 +8,40 @@ cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxK
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
||||
cloud.google.com/go v0.53.0 h1:MZQCQQaRwOrAcuKjiHWHrgKykt4fZyuwF2dtiG3fGW8=
|
||||
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
||||
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
||||
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
||||
cloud.google.com/go v0.56.0 h1:WRz29PgAsVEyPSDHyk+0fpEkwEFyfhHn+JbksT6gIL4=
|
||||
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.59.0 h1:BM3svUDU3itpc2m5cu5wCyThIYNDlFlts9GASw31GW8=
|
||||
cloud.google.com/go v0.59.0/go.mod h1:qJxNOVCRTxHfwLhvDxxSI9vQc1zI59b9pEglp1Iv60E=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
||||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
|
||||
github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY=
|
||||
github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
|
||||
github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o=
|
||||
github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0=
|
||||
github.com/Azure/azure-storage-blob-go v0.10.0 h1:evCwGreYo3XLeBV4vSxLbLiYb6e0SzsJiXQVRGsRXxs=
|
||||
github.com/Azure/azure-storage-blob-go v0.10.0/go.mod h1:ep1edmW+kNQx4UfWM9heESNmQdijykocJ0YOxmMX8SE=
|
||||
github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
|
||||
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.6.0 h1:UCTq22yE3RPgbU/8u4scfnnzuCW6pwQ9n+uBtV78ouo=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.8.3 h1:O1AGG9Xig71FxdX9HO5pGNyZ7TbSyHaVg+5eJO/jSGw=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
|
||||
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
|
||||
github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
|
||||
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
|
||||
@ -42,14 +56,15 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w=
|
||||
github.com/Unknwon/goconfig v0.0.0-20191126170842-860a72fb44fd h1:+CYOsXi89xOqBkj7CuEJjA2It+j+R3ngUZEydr6mtkw=
|
||||
github.com/Unknwon/goconfig v0.0.0-20191126170842-860a72fb44fd/go.mod h1:wngxua9XCNjvHjDiTiV26DaKDT+0c63QR6H5hjVUUxw=
|
||||
github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4 h1:mK1/QgFPU4osbhjJ26B1w738kjQHaGJcon8uCLMS8fk=
|
||||
github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4/go.mod h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg=
|
||||
github.com/aalpar/deheap v0.0.0-20191229192855-f837f7a9ba26 h1:vn0bf9tSbwrl9aBeG5hqGPpWeZGEybrB3LUJkHP/x6w=
|
||||
github.com/aalpar/deheap v0.0.0-20191229192855-f837f7a9ba26/go.mod h1:EJFoWbcEEVK22GYKONJjtMNamGYe6p+3x1Pr6zV5gFs=
|
||||
github.com/aalpar/deheap v0.0.0-20200318053559-9a0c2883bd56 h1:hJO00l0f92EcQn8Ygc9Y0oP++eESKvcyp+KedtfT5SQ=
|
||||
github.com/aalpar/deheap v0.0.0-20200318053559-9a0c2883bd56/go.mod h1:EJFoWbcEEVK22GYKONJjtMNamGYe6p+3x1Pr6zV5gFs=
|
||||
github.com/abbot/go-http-auth v0.4.0 h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0=
|
||||
github.com/abbot/go-http-auth v0.4.0/go.mod h1:Cz6ARTIzApMJDzh5bRMSUou6UMSp0IEXg9km/ci7TJM=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
@ -61,22 +76,20 @@ github.com/anacrolix/dms v1.1.0 h1:vbBXZS7T5FaZm+9p1pdmVVo9tN3qdc27bKSETdeT3xo=
|
||||
github.com/anacrolix/dms v1.1.0/go.mod h1:msPKAoppoNRfrYplJqx63FZ+VipDZ4Xsj3KzIQxyU7k=
|
||||
github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
|
||||
github.com/anacrolix/envpprof v1.0.0/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
|
||||
github.com/anacrolix/ffprobe v1.0.0 h1:j8fGLBsXejwdXd0pkA9iR3Dt1XwMFv5wjeYWObcue8A=
|
||||
github.com/anacrolix/ffprobe v1.0.0/go.mod h1:BIw+Bjol6CWjm/CRWrVLk2Vy+UYlkgmBZ05vpSYqZPw=
|
||||
github.com/anacrolix/missinggo v1.1.0/go.mod h1:MBJu3Sk/k3ZfGYcS7z18gwfu72Ey/xopPFJJbTi5yIo=
|
||||
github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/atotto/clipboard v0.1.2 h1:YZCtFu5Ie8qX2VmVTBnrqLSiU9XOWwqNRmdT3gIQzbY=
|
||||
github.com/atotto/clipboard v0.1.2/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
|
||||
github.com/aws/aws-sdk-go v1.29.9 h1:PHq9ddjfZYfCOXyqHKiCZ1CHRAk7nXhV7WTqj5l+bmQ=
|
||||
github.com/aws/aws-sdk-go v1.29.9/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg=
|
||||
github.com/aws/aws-sdk-go v1.32.11 h1:1nYF+Tfccn/hnAZsuwPPMSCVUVnx3j6LKOpx/WhgH0A=
|
||||
github.com/aws/aws-sdk-go v1.32.11/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/billziss-gh/cgofuse v1.2.0 h1:FMdQSygSBpD4yEPENJcmvfCdmNWMVkPLlD7wWdl/7IA=
|
||||
github.com/billziss-gh/cgofuse v1.2.0/go.mod h1:LJjoaUojlVjgo5GQoEJTcJNqZJeRU0nCR84CyxKt2YM=
|
||||
github.com/billziss-gh/cgofuse v1.3.0 h1:mFj8XQg/vvxMFywNy1F7IqFYcMeBqceYTh1+iUhpsk8=
|
||||
github.com/billziss-gh/cgofuse v1.3.0/go.mod h1:LJjoaUojlVjgo5GQoEJTcJNqZJeRU0nCR84CyxKt2YM=
|
||||
github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
|
||||
github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
@ -84,6 +97,8 @@ github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufo
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/btcutil v1.0.1 h1:GKOz8BnRjYrb/JTKgaOk+zh26NWNdSNvdvv0xoAZMSA=
|
||||
github.com/btcsuite/btcutil v1.0.1/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts=
|
||||
github.com/btcsuite/btcutil v1.0.2 h1:9iZ1Terx9fMIOtq1VrwdqfsATL9MC2l8ZrUY6YZ2uts=
|
||||
github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
|
||||
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
@ -91,6 +106,9 @@ github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtE
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/calebcase/tmpfile v1.0.1 h1:vD8FSrbsbexhep39/6mvtbIHS3GzIRqiprDNCF6QqSk=
|
||||
github.com/calebcase/tmpfile v1.0.1/go.mod h1:iErLeG/iqJr8LaQ/gYRv4GXdqssi3jg4iSzvrA06/lw=
|
||||
github.com/calebcase/tmpfile v1.0.2-0.20200602150926-3af473ef8439/go.mod h1:iErLeG/iqJr8LaQ/gYRv4GXdqssi3jg4iSzvrA06/lw=
|
||||
github.com/calebcase/tmpfile v1.0.2 h1:1AGuhKiUu4J6wxz6lxuF6ck3f8G2kaV6KSEny0RGCig=
|
||||
github.com/calebcase/tmpfile v1.0.2/go.mod h1:iErLeG/iqJr8LaQ/gYRv4GXdqssi3jg4iSzvrA06/lw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
@ -100,9 +118,9 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
@ -123,7 +141,12 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3
|
||||
github.com/dropbox/dropbox-sdk-go-unofficial v5.6.0+incompatible h1:DtumzkLk2zZ2SeElEr+VNz+zV7l+BTe509cV4sKPXbM=
|
||||
github.com/dropbox/dropbox-sdk-go-unofficial v5.6.0+incompatible/go.mod h1:lr+LhMM3F6Y3lW1T9j2U5l7QeuWm87N9+PPXo3yH4qY=
|
||||
github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
|
||||
github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
@ -132,6 +155,7 @@ github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod
|
||||
github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
@ -141,10 +165,10 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
|
||||
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
|
||||
@ -153,49 +177,62 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/hanwen/go-fuse v1.0.0 h1:GxS9Zrn6c35/BnfiVsZVWmsG803xwE7eVRDvcf/BEVc=
|
||||
github.com/hanwen/go-fuse v1.0.0/go.mod h1:unqXarDXqzAk0rt98O2tVndEPIpUgLD9+rwFisZH3Ok=
|
||||
github.com/hanwen/go-fuse/v2 v2.0.3-0.20191108143333-152e6ac32d54 h1:0JL4/kY3QKTRevfl0IbEncTzA+jczGba+swfDBBluuU=
|
||||
github.com/hanwen/go-fuse/v2 v2.0.3-0.20191108143333-152e6ac32d54/go.mod h1:HH3ygZOoyRbP9y2q7y3+JM6hPL+Epe29IbWaS0UA81o=
|
||||
github.com/hanwen/go-fuse/v2 v2.0.3 h1:kpV28BKeSyVgZREItBLnaVBvOEwv2PuhNdKetwnvNHo=
|
||||
github.com/hanwen/go-fuse/v2 v2.0.3/go.mod h1:0EQM6aH2ctVpvZ6a+onrQ/vaykxh2GH7hy3e13vzTUY=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
@ -206,14 +243,16 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jlaffaye/ftp v0.0.0-20190624084859-c1312a7102bf/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY=
|
||||
github.com/jlaffaye/ftp v0.0.0-20200422224957-b9f3ade29122 h1:dzYWuozdWNaY7mTQh5ZdmoJt2BUMavwhiux0AfGwg90=
|
||||
github.com/jlaffaye/ftp v0.0.0-20200422224957-b9f3ade29122/go.mod h1:PwUeyujmhaGohgOf0kJKxPfk3HcRv8QD/wAUN44go4k=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jlaffaye/ftp v0.0.0-20200602180915-5563613968bf h1:U96JHc+AF5zL3M3q/ljvvwuRgjbCAxlPh0KzpDcwlIE=
|
||||
github.com/jlaffaye/ftp v0.0.0-20200602180915-5563613968bf/go.mod h1:PwUeyujmhaGohgOf0kJKxPfk3HcRv8QD/wAUN44go4k=
|
||||
github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
|
||||
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
@ -225,14 +264,18 @@ github.com/jzelinskie/whirlpool v0.0.0-20170603002051-c19460b8caa6/go.mod h1:KmH
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
|
||||
github.com/klauspost/cpuid v1.2.3 h1:CCtW0xUnWGVINKvE/WWOYKdsPV6mawAtvQuSl8guwQs=
|
||||
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/cpuid v1.3.0 h1:2JqaNE1hGdABW2YbA3TenkO7RiPFRvSWnEnGqWh9sHE=
|
||||
github.com/klauspost/cpuid v1.3.0/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/koofr/go-httpclient v0.0.0-20190818202018-e0dc8fd921dc h1:Y+Dob6xos1WxridFN8sIiWPOlEQzgeF9ij9dXOsXK44=
|
||||
github.com/koofr/go-httpclient v0.0.0-20190818202018-e0dc8fd921dc/go.mod h1:3xszwh+rNrYk1r9SStc4iJ326gne1OaBcrdB1ACsbzI=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/koofr/go-httpclient v0.0.0-20200420163713-93aa7c75b348 h1:Lrn8srO9JDBCf2iPjqy62stl49UDwoOxZ9/NGVi+fnk=
|
||||
github.com/koofr/go-httpclient v0.0.0-20200420163713-93aa7c75b348/go.mod h1:JBLy//Q5jzU3XSMxdONTD5EIj1LhTPktosxG2Bw1iho=
|
||||
github.com/koofr/go-koofrclient v0.0.0-20190724113126-8e5366da203a h1:02cx9xF4W2FQ1oh8CK9dWV5BnZK2mUtcbr9xR+bZiKk=
|
||||
github.com/koofr/go-koofrclient v0.0.0-20190724113126-8e5366da203a/go.mod h1:MRAz4Gsxd+OzrZ0owwrUHc0zLESL+1Y5syqK/sJxK2A=
|
||||
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
|
||||
@ -246,49 +289,55 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4=
|
||||
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
|
||||
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
|
||||
github.com/mattn/go-colorable v0.1.7 h1:bQGKb3vps/j0E9GfJQ03JyhRuxsvdAanXlT9BTw3mdw=
|
||||
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpuU+dXvI6dl9jG2mAWAZLX3r9s0PPiw=
|
||||
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
|
||||
github.com/mattn/go-ieproxy v0.0.0-20200203040449-2dbc853185d9 h1:a6/tH/zhh2tdoUSDgS4gNcY6H2Mae/70R+jEkRv52ws=
|
||||
github.com/mattn/go-ieproxy v0.0.0-20200203040449-2dbc853185d9/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
|
||||
github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI=
|
||||
github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
|
||||
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-runewidth v0.0.8 h1:3tS41NlGYSmhhe/8fhGRzc+z3AYCw1Fe1WAyLuujKs0=
|
||||
github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
|
||||
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4=
|
||||
github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw=
|
||||
github.com/minio/minio-go/v6 v6.0.46 h1:waExJtO53xrnsNX//7cSc1h3478wqTryDx4RVD7o26I=
|
||||
github.com/minio/minio-go/v6 v6.0.46/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg=
|
||||
github.com/minio/minio-go/v6 v6.0.57 h1:ixPkbKkyD7IhnluRgQpGSpHdpvNVaW6OD5R9IAO/9Tw=
|
||||
github.com/minio/minio-go/v6 v6.0.57/go.mod h1:5+R/nM9Pwrh0vqF+HbYYDQ84wdUFPyXHkrdT4AIkifM=
|
||||
github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU=
|
||||
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2 h1:VlXvEx6JbFp7F9iz92zXP2Ew+9VupSpfybr+TxmjdH0=
|
||||
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2/go.mod h1:MLIrzg7gp/kzVBxRE1olT7CWYMCklcUWU+ekoxOD9x0=
|
||||
github.com/ncw/swift v1.0.50 h1:E01b5bVIssNhx2KnzAjMWEXkKrb8ytTqCDWY7lqmWjA=
|
||||
github.com/ncw/swift v1.0.50/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
|
||||
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ=
|
||||
github.com/ncw/swift v1.0.52 h1:ACF3JufDGgeKp/9mrDgQlEgS8kRYC4XKcuzj/8EJjQU=
|
||||
github.com/ncw/swift v1.0.52/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
|
||||
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
|
||||
github.com/nsf/termbox-go v0.0.0-20200204031403-4d2b513ad8be h1:yzmWtPyxEUIKdZg4RcPq64MfS8NA6A5fNOJgYhpR9EQ=
|
||||
github.com/nsf/termbox-go v0.0.0-20200204031403-4d2b513ad8be/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=
|
||||
github.com/nsf/termbox-go v0.0.0-20200418040025-38ba6e5628f1 h1:lh3PyZvY+B9nFliSGTn5uFuqQQJGuNrD0MLCokv09ag=
|
||||
github.com/nsf/termbox-go v0.0.0-20200418040025-38ba6e5628f1/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd h1:+iAPaTbi1gZpcpDwe/BW1fx7Xoesv69hLNGPheoyhBs=
|
||||
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd/go.mod h1:4soZNh0zW0LtYGdQ416i0jO0EIqMGcbtaspRS4BDvRQ=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.9.0 h1:SZjF721BByVj8QH636/8S2DnX4n0Re3SteMmw3N+tzc=
|
||||
github.com/onsi/ginkgo v1.9.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU=
|
||||
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.6.0 h1:8XTW0fcJZEq9q+Upcyws4JSGua2MFysCL5xkaSgHc+M=
|
||||
github.com/onsi/gomega v1.6.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg=
|
||||
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
@ -296,7 +345,6 @@ github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 h1:XeOYlK9W1uC
|
||||
github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14/go.mod h1:jVblp62SafmidSkvWrXyxAme3gaTfEtWwRPGz5cpvHg=
|
||||
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
@ -305,34 +353,31 @@ github.com/pkg/sftp v1.11.0/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZ
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8=
|
||||
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.4.1 h1:FFSuS004yOQEtDdTq+TAOLP5xUq63KqAFYyOi8zA+Y8=
|
||||
github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
|
||||
github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM=
|
||||
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=
|
||||
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
|
||||
github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY=
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 h1:Y258uzXU/potCYnQd1r6wlAnoMB68BiCkCcCnKx1SH8=
|
||||
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8/go.mod h1:bSJjRokAHHOhA+XFxplld8w2R/dXLH7Z3BZ532vhFwU=
|
||||
github.com/rfjakob/eme v0.0.0-20171028163933-2222dbd4ba46 h1:w2CpS5muK+jyydnmlkqpAhzKmHmMBzBkfYUDjQNS1Dk=
|
||||
github.com/rfjakob/eme v0.0.0-20171028163933-2222dbd4ba46/go.mod h1:U2bmx0hDj8EyDdcxmD5t3XHDnBFnyNNc22n1R4008eM=
|
||||
github.com/rfjakob/eme v1.1.1 h1:t+CgvcOn+eDvj2xdglxsSnkgg8LM8jwdxnV7OnsrTn0=
|
||||
github.com/rfjakob/eme v1.1.1/go.mod h1:U2bmx0hDj8EyDdcxmD5t3XHDnBFnyNNc22n1R4008eM=
|
||||
github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481/go.mod h1:C9WhFzY47SzYBIvzFqSvHIR6ROgDo4TtdTuRaOMjF/s=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
|
||||
@ -343,16 +388,17 @@ github.com/sevlyar/go-daemon v0.1.5/go.mod h1:6dJpPatBT9eUwM5VCw9Bt6CdX9Tk6UWvhW
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
|
||||
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w=
|
||||
github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
|
||||
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/spacemonkeygo/monkit/v3 v3.0.4/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes=
|
||||
github.com/spacemonkeygo/monkit/v3 v3.0.5/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes=
|
||||
@ -365,26 +411,23 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU
|
||||
github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
|
||||
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/stephens2424/writerset v1.0.2/go.mod h1:aS2JhsMn6eA7e82oNmW4rfsgAOp9COBTTl8mzkwADnc=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709 h1:Ko2LQMrRU+Oy/+EDBwX7eZ2jp3C47eDBB8EIhKTun+I=
|
||||
github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20200117211730-79a813bb328d h1:GsRmok9VXIEc5B6SmRWuuO9hx4x2YBqFqgOXGt9Xs94=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20200117211730-79a813bb328d/go.mod h1:XWL4vDyd3JKmJx+hZWUVgCNmmhZ2dTBcaNDcxH465s0=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20200416171014-ffad7fcb44b8 h1:IGJQmLBLYBdAknj21W3JsVof0yjEXfy1Q0K3YZebDOg=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20200416171014-ffad7fcb44b8/go.mod h1:XWL4vDyd3JKmJx+hZWUVgCNmmhZ2dTBcaNDcxH465s0=
|
||||
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ=
|
||||
@ -397,8 +440,10 @@ github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70
|
||||
github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/youmark/pkcs8 v0.0.0-20191102193632-94c173a94d60 h1:Ud2neINE1YFEwrcJ4EqnbRZlm9R3T8SuFKeqjIw7k44=
|
||||
github.com/youmark/pkcs8 v0.0.0-20191102193632-94c173a94d60/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
|
||||
github.com/youmark/pkcs8 v0.0.0-20200520070018-fad002e585ce h1:F5MEHq8k6JiE10MNYaQjbKRdF1xWkOavn9aoSrHqGno=
|
||||
github.com/youmark/pkcs8 v0.0.0-20200520070018-fad002e585ce/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yunify/qingstor-sdk-go/v3 v3.2.0 h1:9sB2WZMgjwSUNZhrgvaNGazVltoFUUfuS9f0uCWtTr8=
|
||||
github.com/yunify/qingstor-sdk-go/v3 v3.2.0/go.mod h1:KciFNuMu6F4WLk9nGwwK69sCGKLCdd9f97ac/wfumS4=
|
||||
github.com/zeebo/admission/v3 v3.0.1/go.mod h1:BP3isIv9qa2A7ugEratNq1dnl2oZRXaQUGdU7WXKtbw=
|
||||
@ -409,37 +454,49 @@ github.com/zeebo/errs v1.2.2/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtC
|
||||
github.com/zeebo/float16 v0.1.0/go.mod h1:fssGvvXu+XS8MH57cKmyrLB/cqioYeYX/2mXCN3a5wo=
|
||||
github.com/zeebo/incenc v0.0.0-20180505221441-0d92902eec54/go.mod h1:EI8LcOBDlSL3POyqwC1eJhOYlMBMidES+613EtmmT5w=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
|
||||
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
|
||||
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
|
||||
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
||||
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
goftp.io/server v0.3.2 h1:bcsI4ijbvFZkA4rrUtIE/t1jNhT+0uSkiTQ4ASjZAXQ=
|
||||
goftp.io/server v0.3.2/go.mod h1:wfeAZeQgacupLVl+Ex3ozYFaAGNfCKYZiZNxLzgw6z4=
|
||||
go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM=
|
||||
go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
|
||||
goftp.io/server v0.3.4 h1:Sq0leWKoZFRWtbxNrHRqKpBygVg2kcRPqgbXnLIFRVU=
|
||||
goftp.io/server v0.3.4/go.mod h1:hFZeR656ErRt3ojMKt7H10vQ5nuWV1e0YeUTeorlR6k=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190131182504-b8fe1690c613/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d h1:1ZiEyfaQIg3Qh0EoqpwAakHVhecoE5wlSg5GjnafJGw=
|
||||
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
@@ -447,7 +504,9 @@ golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm0
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
@@ -456,18 +515,20 @@ golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTk
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -483,18 +544,25 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn
|
||||
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0 h1:MsuvTghUPjX762sGLnGsxC3HM0B5r83wEtYcYR8/vRs=
|
||||
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
|
||||
@@ -503,10 +571,11 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -514,7 +583,6 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190415145633-3fd5a3612ccd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -524,29 +592,45 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ=
|
||||
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200107144601-ef85f5a75ddf/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200610111108-226ff32320da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI=
|
||||
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
@@ -562,100 +646,158 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56 h1:DFtSed2q3HtNuVazwVDZ4nSRS/JrZEig0gz2BY4VNrg=
|
||||
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||
golang.org/x/tools v0.0.0-20200423201157-2723c5de0d66/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa h1:mMXQKlWCw9mIWgVLLfiycDZjMHMMYqiuakI4E/l2xcA=
|
||||
golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0 h1:9sdfJOzWlkqPltHAuzT2Cp+yrBeY1KRVYgms8soxMwM=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.21.1-0.20200411000818-c8cf5cff125e h1:apiFYpP/2xKVIDR0xuW1rrDQlSaBrCe62sQBAUthQFE=
|
||||
google.golang.org/api v0.21.1-0.20200411000818-c8cf5cff125e/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.28.0 h1:jMF5hhVfMkTZwHW1SDpKq5CkgWLXOb31Foaca9Zr3oM=
|
||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc=
|
||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a h1:Ob5/580gVHBJZgXnff1cZDbG+xLtMVE5mDRTe+nIsX4=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
|
||||
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200225123651-fc8f55426688 h1:1+0Z5cgv1eDXJD9z2tdQF9PSSQnJXwism490hJydMRI=
|
||||
google.golang.org/genproto v0.0.0-20200225123651-fc8f55426688/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940 h1:MRHtG0U6SnaUb+s+LhNE1qt1FQ1wlhqr5E4usBKC0uA=
|
||||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20200623002339-fbb79eadd5eb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5 h1:a/Sqq5B3dGnmxhuJZIHFsIxhEkqElErr5TaU6IqBAj0=
|
||||
google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4=
|
||||
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.30.0 h1:M5a8xTlYTxwMn5ZFkwhRabsygDY5G8TYLyQDBxJNAxE=
|
||||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk=
|
||||
gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww=
|
||||
gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
storj.io/common v0.0.0-20200611114417-9a3d012fdb62 h1:y8vGNQ0HjtD79G8MfCwbs6hct40tSBoDaOnsxWOZpU4=
|
||||
storj.io/common v0.0.0-20200611114417-9a3d012fdb62/go.mod h1:6S6Ub92/BB+ofU7hbyPcm96b4Q1ayyN0HLog+3u+wGc=
|
||||
storj.io/common v0.0.0-20200624215549-bf610d22d466 h1:XQM2PQMJMoEaRluJOYfVONi/IclVhJgfEEPDVLAt5MU=
|
||||
storj.io/common v0.0.0-20200624215549-bf610d22d466/go.mod h1:vMAnlNbkgW6i+w/OT1h4X8w6TajOHWAT+SvFHUFCpq0=
|
||||
storj.io/drpc v0.0.12 h1:4ei1M4cnWlYxcQheX0Dg4+c12zCD+oJqfweVQVWarsA=
|
||||
storj.io/drpc v0.0.12/go.mod h1:82nfl+6YwRwF6UG31cEWWUqv/FaKvP5SGqUvoqTxCMA=
|
||||
storj.io/uplink v1.1.1 h1:oa5uoDtZDT58e3vy9yp24HKU1EaLs4TRM75DS+ICtqs=
|
||||
storj.io/uplink v1.1.1/go.mod h1:UkdYN/dfSgv+d8fBUoZTrX2oLdj9gzX6Q7tp3CojgKA=
|
||||
storj.io/drpc v0.0.13 h1:EDR3WiwVcIHtg+8M5vqBFmUAuJvmM2erVHIfqPPSAoc=
|
||||
storj.io/drpc v0.0.13/go.mod h1:82nfl+6YwRwF6UG31cEWWUqv/FaKvP5SGqUvoqTxCMA=
|
||||
storj.io/uplink v1.1.2 h1:r0EyoEDlAvqWd6SZDG10w0k2+CjSMi4wAq5J1Sw8y9Y=
|
||||
storj.io/uplink v1.1.2/go.mod h1:UkdYN/dfSgv+d8fBUoZTrX2oLdj9gzX6Q7tp3CojgKA=
|
||||
|
17 vendor/bazil.org/fuse/error_darwin.go (generated, vendored)
@@ -1,17 +0,0 @@
package fuse

import (
	"syscall"
)

const (
	ENOATTR = Errno(syscall.ENOATTR)
)

const (
	errNoXattr = ENOATTR
)

func init() {
	errnoNames[errNoXattr] = "ENOATTR"
}
11 vendor/bazil.org/fuse/error_std.go (generated, vendored)
@@ -4,17 +4,16 @@ package fuse
|
||||
// across platforms.
|
||||
//
|
||||
// getxattr return value for "extended attribute does not exist" is
|
||||
// ENOATTR on OS X, and ENODATA on Linux and apparently at least
|
||||
// NetBSD. There may be a #define ENOATTR on Linux too, but the value
|
||||
// is ENODATA in the actual syscalls. FreeBSD and OpenBSD have no
|
||||
// ENODATA, only ENOATTR. ENOATTR is not in any of the standards,
|
||||
// ENODATA exists but is only used for STREAMs.
|
||||
// ENODATA on Linux and apparently at least NetBSD. There may be a
|
||||
// #define ENOATTR on Linux too, but the value is ENODATA in the
|
||||
// actual syscalls. FreeBSD and OpenBSD have no ENODATA, only ENOATTR.
|
||||
// ENOATTR is not in any of the standards, ENODATA exists but is only
|
||||
// used for STREAMs.
|
||||
//
|
||||
// Each platform will define it a errNoXattr constant, and this file
|
||||
// will enforce that it implements the right interfaces and hide the
|
||||
// implementation.
|
||||
//
|
||||
// https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/getxattr.2.html
|
||||
// http://mail-index.netbsd.org/tech-kern/2012/04/30/msg013090.html
|
||||
// http://mail-index.netbsd.org/tech-kern/2012/04/30/msg013097.html
|
||||
// http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html
|
||||
|
486 vendor/bazil.org/fuse/fs/serve.go (generated, vendored)
@@ -6,6 +6,7 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"io"
|
||||
@@ -19,6 +20,7 @@ import (
|
||||
|
||||
"bazil.org/fuse"
|
||||
"bazil.org/fuse/fuseutil"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -205,7 +207,7 @@ type NodeForgetter interface {
|
||||
// method calls.
|
||||
//
|
||||
// Forget is not necessarily seen on unmount, as all nodes are
|
||||
// implicitly forgotten as part part of the unmount.
|
||||
// implicitly forgotten as part of the unmount.
|
||||
Forget()
|
||||
}
|
||||
|
||||
@@ -256,7 +258,6 @@ func nodeAttr(ctx context.Context, n Node, attr *fuse.Attr) error {
|
||||
attr.Atime = startTime
|
||||
attr.Mtime = startTime
|
||||
attr.Ctime = startTime
|
||||
attr.Crtime = startTime
|
||||
if err := n.Attr(ctx, attr); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -322,6 +323,86 @@ type HandleReleaser interface {
|
||||
Release(ctx context.Context, req *fuse.ReleaseRequest) error
|
||||
}
|
||||
|
||||
type HandlePoller interface {
|
||||
// Poll checks whether the handle is currently ready for I/O, and
|
||||
// may request a wakeup when it is.
|
||||
//
|
||||
// Poll should always return quickly. Clients waiting for
|
||||
// readiness can be woken up by passing the return value of
|
||||
// PollRequest.Wakeup to fs.Server.NotifyPollWakeup or
|
||||
// fuse.Conn.NotifyPollWakeup.
|
||||
//
|
||||
// To allow supporting poll for only some of your Nodes/Handles,
|
||||
// the default behavior is to report immediate readiness. If your
|
||||
// FS does not support polling and you want to minimize needless
|
||||
// requests and log noise, implement NodePoller and return
|
||||
// syscall.ENOSYS.
|
||||
//
|
||||
// The Go runtime uses epoll-based I/O whenever possible, even for
|
||||
// regular files.
|
||||
Poll(ctx context.Context, req *fuse.PollRequest, resp *fuse.PollResponse) error
|
||||
}
|
||||
|
||||
type NodePoller interface {
|
||||
// Poll checks whether the node is currently ready for I/O, and
|
||||
// may request a wakeup when it is. See HandlePoller.
|
||||
Poll(ctx context.Context, req *fuse.PollRequest, resp *fuse.PollResponse) error
|
||||
}
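Aside for readers of this diff, not part of the vendored code: the NodePoller comment above already describes the opt-out path, and a minimal sketch of it could look like the following. The quietNode name and its read-only Attr are assumptions for illustration; only fuse.PollRequest, fuse.PollResponse and the ENOSYS convention come from the interfaces shown here.

// Illustrative sketch only, not part of this commit.
package exampleonly

import (
	"context"
	"syscall"

	"bazil.org/fuse"
)

// quietNode is a read-only file that opts out of poll support: returning
// syscall.ENOSYS from Poll, as the NodePoller comment above recommends,
// stops the kernel from sending further PollRequests for this node.
type quietNode struct{}

// Attr makes quietNode a valid fs.Node.
func (quietNode) Attr(ctx context.Context, a *fuse.Attr) error {
	a.Mode = 0444
	return nil
}

// Poll implements NodePoller.
func (quietNode) Poll(ctx context.Context, req *fuse.PollRequest, resp *fuse.PollResponse) error {
	return syscall.ENOSYS
}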
|
||||
|
||||
// HandleLocker contains the common operations for all kinds of file
|
||||
// locks. See also lock family specific interfaces: HandleFlockLocker,
|
||||
// HandlePOSIXLocker.
|
||||
type HandleLocker interface {
|
||||
// Lock tries to acquire a lock on a byte range of the node. If a
|
||||
// conflicting lock is already held, returns syscall.EAGAIN.
|
||||
//
|
||||
// LockRequest.LockOwner is a file-unique identifier for this
|
||||
// lock, and will be seen in calls releasing this lock
|
||||
// (UnlockRequest, ReleaseRequest, FlushRequest) and also
|
||||
// in e.g. ReadRequest, WriteRequest.
|
||||
Lock(ctx context.Context, req *fuse.LockRequest) error
|
||||
|
||||
// LockWait acquires a lock on a byte range of the node, waiting
|
||||
// until the lock can be obtained (or context is canceled).
|
||||
LockWait(ctx context.Context, req *fuse.LockWaitRequest) error
|
||||
|
||||
// Unlock releases the lock on a byte range of the node. Locks can
|
||||
// be released also implicitly, see HandleFlockLocker and
|
||||
// HandlePOSIXLocker.
|
||||
Unlock(ctx context.Context, req *fuse.UnlockRequest) error
|
||||
|
||||
// QueryLock returns the current state of locks held for the byte
|
||||
// range of the node.
|
||||
//
|
||||
// See QueryLockRequest for details on how to respond.
|
||||
//
|
||||
// To simplify implementing this method, resp.Lock is prefilled to
|
||||
// have Lock.Type F_UNLCK, and the whole struct should be
|
||||
// overwritten for in case of conflicting locks.
|
||||
QueryLock(ctx context.Context, req *fuse.QueryLockRequest, resp *fuse.QueryLockResponse) error
|
||||
}
|
||||
|
||||
// HandleFlockLocker describes locking behavior unique to flock (BSD)
|
||||
// locks. See HandleLocker.
|
||||
type HandleFlockLocker interface {
|
||||
HandleLocker
|
||||
|
||||
// Flock unlocking can also happen implicitly as part of Release,
|
||||
// in which case Unlock is not called, and Release will have
|
||||
// ReleaseFlags bit ReleaseFlockUnlock set.
|
||||
HandleReleaser
|
||||
}
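Aside, again not part of the vendored code: a deliberately small sketch of how the HandleLocker and HandleFlockLocker contracts above could be satisfied for whole-file, exclusive flock-style locks. The flockHandle type, its lock channel and the single-lock semantics are assumptions for illustration; a real implementation would track byte ranges, lock types and the LockOwner values carried in the requests.

// Illustrative sketch only, not part of this commit.
package exampleonly

import (
	"context"
	"syscall"

	"bazil.org/fuse"
)

// flockHandle guards one node with a single exclusive lock. The lock
// channel has capacity 1 and is assumed to be shared by every handle
// opened on that node (for example, created once in the node's Open
// method), so that handles actually contend with each other.
type flockHandle struct {
	lock chan struct{}
}

// Lock fails fast with EAGAIN when a conflicting lock is already held,
// matching the HandleLocker contract above.
func (h *flockHandle) Lock(ctx context.Context, req *fuse.LockRequest) error {
	select {
	case h.lock <- struct{}{}:
		return nil
	default:
		return syscall.EAGAIN
	}
}

// LockWait blocks until the lock is obtained or the context is canceled.
func (h *flockHandle) LockWait(ctx context.Context, req *fuse.LockWaitRequest) error {
	select {
	case h.lock <- struct{}{}:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// Unlock drops the lock if one is held; the sketch tracks no ownership,
// so any handle's Unlock (or Release) releases it.
func (h *flockHandle) Unlock(ctx context.Context, req *fuse.UnlockRequest) error {
	select {
	case <-h.lock:
	default:
	}
	return nil
}

// QueryLock leaves resp.Lock at its prefilled F_UNLCK value, which is
// only correct while nothing is locked; a real implementation would
// describe the conflicting lock here.
func (h *flockHandle) QueryLock(ctx context.Context, req *fuse.QueryLockRequest, resp *fuse.QueryLockResponse) error {
	return nil
}

// Release drops the lock implicitly with the handle, mirroring the
// ReleaseFlockUnlock behaviour described for HandleFlockLocker.
func (h *flockHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
	select {
	case <-h.lock:
	default:
	}
	return nil
}

A channel is used rather than a sync.Mutex only because it gives both a non-blocking attempt (Lock) and a cancelable blocking attempt (LockWait) without extra bookkeeping.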
|
||||
|
||||
// HandlePOSIXLocker describes locking behavior unique to POSIX (fcntl
|
||||
// F_SETLK) locks. See HandleLocker.
|
||||
type HandlePOSIXLocker interface {
|
||||
HandleLocker
|
||||
|
||||
// POSIX unlocking can also happen implicitly as part of Flush,
|
||||
// in which case Unlock is not called.
|
||||
HandleFlusher
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
// Function to send debug log messages to. If nil, use fuse.Debug.
|
||||
// Note that changing this or fuse.Debug may not affect existing
|
||||
@@ -348,6 +429,7 @@ func New(conn *fuse.Conn, config *Config) *Server {
|
||||
conn: conn,
|
||||
req: map[fuse.RequestID]*serveRequest{},
|
||||
nodeRef: map[Node]fuse.NodeID{},
|
||||
notifyWait: map[fuse.RequestID]chan<- *fuse.NotifyReply{},
|
||||
dynamicInode: GenerateDynamicInode,
|
||||
}
|
||||
if config != nil {
|
||||
@@ -380,6 +462,11 @@ type Server struct {
|
||||
freeHandle []fuse.HandleID
|
||||
nodeGen uint64
|
||||
|
||||
// pending notify upcalls to kernel
|
||||
notifyMu sync.Mutex
|
||||
notifySeq fuse.RequestID
|
||||
notifyWait map[fuse.RequestID]chan<- *fuse.NotifyReply
|
||||
|
||||
// Used to ensure worker goroutines finish before Serve returns
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
@@ -470,12 +557,6 @@ type serveHandle struct {
|
||||
readData []byte
|
||||
}
|
||||
|
||||
// NodeRef is deprecated. It remains here to decrease code churn on
|
||||
// FUSE library users. You may remove it from your program now;
|
||||
// returning the same Node values are now recognized automatically,
|
||||
// without needing NodeRef.
|
||||
type NodeRef struct{}
|
||||
|
||||
func (c *Server) saveNode(inode uint64, node Node) (id fuse.NodeID, gen uint64) {
|
||||
c.meta.Lock()
|
||||
defer c.meta.Unlock()
|
||||
@@ -522,11 +603,14 @@ type nodeRefcountDropBug struct {
|
||||
Node fuse.NodeID
|
||||
}
|
||||
|
||||
func (n *nodeRefcountDropBug) String() string {
|
||||
func (n nodeRefcountDropBug) String() string {
|
||||
return fmt.Sprintf("bug: trying to drop %d of %d references to %v", n.N, n.Refs, n.Node)
|
||||
}
|
||||
|
||||
func (c *Server) dropNode(id fuse.NodeID, n uint64) (forget bool) {
|
||||
// dropNode decreases reference count for node with id by n.
|
||||
// If reference count dropped to zero, returns true.
|
||||
// Note that node is not guaranteed to be non-nil.
|
||||
func (c *Server) dropNode(id fuse.NodeID, n uint64) (node Node, forget bool) {
|
||||
c.meta.Lock()
|
||||
defer c.meta.Unlock()
|
||||
snode := c.node[id]
|
||||
@@ -539,7 +623,7 @@ func (c *Server) dropNode(id fuse.NodeID, n uint64) (forget bool) {
|
||||
|
||||
// we may end up triggering Forget twice, but that's better
|
||||
// than not even once, and that's the best we can do
|
||||
return true
|
||||
return nil, true
|
||||
}
|
||||
|
||||
if n > snode.refs {
|
||||
@@ -553,9 +637,9 @@ func (c *Server) dropNode(id fuse.NodeID, n uint64) (forget bool) {
|
||||
c.node[id] = nil
|
||||
delete(c.nodeRef, snode.node)
|
||||
c.freeNode = append(c.freeNode, id)
|
||||
return true
|
||||
return snode.node, true
|
||||
}
|
||||
return false
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (c *Server) dropHandle(id fuse.HandleID) {
|
||||
@@ -591,9 +675,7 @@ func (c *Server) getHandle(id fuse.HandleID) (shandle *serveHandle) {
|
||||
}
|
||||
|
||||
type request struct {
|
||||
Op string
|
||||
Request *fuse.Header
|
||||
In interface{} `json:",omitempty"`
|
||||
In interface{} `json:",omitempty"`
|
||||
}
|
||||
|
||||
func (r request) String() string {
|
||||
@@ -675,6 +757,57 @@ func (n notification) String() string {
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
type notificationRequest struct {
|
||||
ID fuse.RequestID
|
||||
Op string
|
||||
Node fuse.NodeID
|
||||
Out interface{} `json:",omitempty"`
|
||||
}
|
||||
|
||||
func (n notificationRequest) String() string {
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprintf(&buf, ">> %s [ID=%d] %v", n.Op, n.ID, n.Node)
|
||||
if n.Out != nil {
|
||||
// make sure (seemingly) empty values are readable
|
||||
switch n.Out.(type) {
|
||||
case string:
|
||||
fmt.Fprintf(&buf, " %q", n.Out)
|
||||
case []byte:
|
||||
fmt.Fprintf(&buf, " [% x]", n.Out)
|
||||
default:
|
||||
fmt.Fprintf(&buf, " %s", n.Out)
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
type notificationResponse struct {
|
||||
ID fuse.RequestID
|
||||
Op string
|
||||
In interface{} `json:",omitempty"`
|
||||
Err string `json:",omitempty"`
|
||||
}
|
||||
|
||||
func (n notificationResponse) String() string {
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprintf(&buf, "<< [ID=%d] %s", n.ID, n.Op)
|
||||
if n.In != nil {
|
||||
// make sure (seemingly) empty values are readable
|
||||
switch n.In.(type) {
|
||||
case string:
|
||||
fmt.Fprintf(&buf, " %q", n.In)
|
||||
case []byte:
|
||||
fmt.Fprintf(&buf, " [% x]", n.In)
|
||||
default:
|
||||
fmt.Fprintf(&buf, " %s", n.In)
|
||||
}
|
||||
}
|
||||
if n.Err != "" {
|
||||
fmt.Fprintf(&buf, " Err:%v", n.Err)
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
type logMissingNode struct {
|
||||
MaxNode fuse.NodeID
|
||||
}
|
||||
@@ -763,6 +896,15 @@ func initLookupResponse(s *fuse.LookupResponse) {
|
||||
s.EntryValid = entryValidTime
|
||||
}
|
||||
|
||||
type logDuplicateRequestID struct {
|
||||
New fuse.Request
|
||||
Old fuse.Request
|
||||
}
|
||||
|
||||
func (m *logDuplicateRequestID) String() string {
|
||||
return fmt.Sprintf("Duplicate request: new %v, old %v", m.New, m.Old)
|
||||
}
|
||||
|
||||
func (c *Server) serve(r fuse.Request) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
@@ -773,11 +915,15 @@ func (c *Server) serve(r fuse.Request) {
|
||||
|
||||
req := &serveRequest{Request: r, cancel: cancel}
|
||||
|
||||
c.debug(request{
|
||||
Op: opName(r),
|
||||
Request: r.Hdr(),
|
||||
In: r,
|
||||
})
|
||||
switch r.(type) {
|
||||
case *fuse.NotifyReply:
|
||||
// don't log NotifyReply here, they're logged by the recipient
|
||||
// as soon as we have decoded them to the right types
|
||||
default:
|
||||
c.debug(request{
|
||||
In: r,
|
||||
})
|
||||
}
|
||||
var node Node
|
||||
var snode *serveNode
|
||||
c.meta.Lock()
|
||||
@@ -805,15 +951,13 @@ func (c *Server) serve(r fuse.Request) {
|
||||
}
|
||||
node = snode.node
|
||||
}
|
||||
if c.req[hdr.ID] != nil {
|
||||
// This happens with OSXFUSE. Assume it's okay and
|
||||
// that we'll never see an interrupt for this one.
|
||||
// Otherwise everything wedges. TODO: Report to OSXFUSE?
|
||||
//
|
||||
// TODO this might have been because of missing done() calls
|
||||
} else {
|
||||
c.req[hdr.ID] = req
|
||||
if old, found := c.req[hdr.ID]; found {
|
||||
c.debug(logDuplicateRequestID{
|
||||
New: req.Request,
|
||||
Old: old.Request,
|
||||
})
|
||||
}
|
||||
c.req[hdr.ID] = req
|
||||
c.meta.Unlock()
|
||||
|
||||
// Call this before responding.
|
||||
@@ -1170,7 +1314,7 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode,
|
||||
return nil
|
||||
|
||||
case *fuse.ForgetRequest:
|
||||
forget := c.dropNode(r.Hdr().Node, r.N)
|
||||
_, forget := c.dropNode(r.Hdr().Node, r.N)
|
||||
if forget {
|
||||
n, ok := node.(NodeForgetter)
|
||||
if ok {
|
||||
@@ -1181,6 +1325,38 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode,
|
||||
r.Respond()
|
||||
return nil
|
||||
|
||||
case *fuse.BatchForgetRequest:
|
||||
// BatchForgetRequest is hard to unit test, as it
|
||||
// fundamentally relies on something unprivileged userspace
|
||||
// has little control over. A root-only, Linux-only test could
|
||||
// be written with `echo 2 >/proc/sys/vm/drop_caches`, but
|
||||
// that would still rely on timing, the number of batches and
|
||||
// operation spread over them could vary, it wouldn't run in a
|
||||
// typical container regardless of privileges, and it would
|
||||
// degrade performance for the rest of the machine. It would
|
||||
// still probably be worth doing, just not the most fun.
|
||||
|
||||
// node is nil here because BatchForget as a message is not
|
||||
// aimed at a any one node
|
||||
for _, item := range r.Forget {
|
||||
node, forget := c.dropNode(item.NodeID, item.N)
|
||||
// node can be nil here if kernel vs our refcount were out
|
||||
// of sync and multiple Forgets raced each other
|
||||
if node == nil {
|
||||
// nothing we can do about that
|
||||
continue
|
||||
}
|
||||
if forget {
|
||||
n, ok := node.(NodeForgetter)
|
||||
if ok {
|
||||
n.Forget()
|
||||
}
|
||||
}
|
||||
}
|
||||
done(nil)
|
||||
r.Respond()
|
||||
return nil
|
||||
|
||||
// Handle operations.
|
||||
case *fuse.ReadRequest:
|
||||
shandle := c.getHandle(r.Handle)
|
||||
@@ -1376,17 +1552,129 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode,
		r.Respond()
		return nil

	case *fuse.PollRequest:
		shandle := c.getHandle(r.Handle)
		if shandle == nil {
			return syscall.ESTALE
		}
		s := &fuse.PollResponse{}

		if h, ok := shandle.handle.(HandlePoller); ok {
			if err := h.Poll(ctx, r, s); err != nil {
				return err
			}
			done(s)
			r.Respond(s)
			return nil
		}

		if n, ok := node.(NodePoller); ok {
			if err := n.Poll(ctx, r, s); err != nil {
				return err
			}
			done(s)
			r.Respond(s)
			return nil
		}

		// fallback to always claim ready
		s.REvents = fuse.DefaultPollMask
		done(s)
		r.Respond(s)
		return nil

	case *fuse.NotifyReply:
		c.notifyMu.Lock()
		w, ok := c.notifyWait[r.Hdr().ID]
		if ok {
			delete(c.notifyWait, r.Hdr().ID)
		}
		c.notifyMu.Unlock()
		if !ok {
			c.debug(notificationResponse{
				ID:  r.Hdr().ID,
				Op:  "NotifyReply",
				Err: "unknown ID",
			})
			return nil
		}
		w <- r
		return nil

	case *fuse.LockRequest:
		shandle := c.getHandle(r.Handle)
		if shandle == nil {
			return syscall.ESTALE
		}
		h, ok := shandle.handle.(HandleLocker)
		if !ok {
			return syscall.ENOTSUP
		}
		if err := h.Lock(ctx, r); err != nil {
			return err
		}
		done(nil)
		r.Respond()
		return nil

	case *fuse.LockWaitRequest:
		shandle := c.getHandle(r.Handle)
		if shandle == nil {
			return syscall.ESTALE
		}
		h, ok := shandle.handle.(HandleLocker)
		if !ok {
			return syscall.ENOTSUP
		}
		if err := h.LockWait(ctx, r); err != nil {
			return err
		}
		done(nil)
		r.Respond()
		return nil

	case *fuse.UnlockRequest:
		shandle := c.getHandle(r.Handle)
		if shandle == nil {
			return syscall.ESTALE
		}
		h, ok := shandle.handle.(HandleLocker)
		if !ok {
			return syscall.ENOTSUP
		}
		if err := h.Unlock(ctx, r); err != nil {
			return err
		}
		done(nil)
		r.Respond()
		return nil

	case *fuse.QueryLockRequest:
		shandle := c.getHandle(r.Handle)
		if shandle == nil {
			return syscall.ESTALE
		}
		h, ok := shandle.handle.(HandleLocker)
		if !ok {
			return syscall.ENOTSUP
		}
		s := &fuse.QueryLockResponse{
			Lock: fuse.FileLock{
				Type: unix.F_UNLCK,
			},
		}
		if err := h.QueryLock(ctx, r, s); err != nil {
			return err
		}
		done(s)
		r.Respond(s)
		return nil

	/* case *FsyncdirRequest:
		return ENOSYS

	case *GetlkRequest, *SetlkRequest, *SetlkwRequest:
		return ENOSYS

	case *BmapRequest:
		return ENOSYS

	case *SetvolnameRequest, *GetxtimesRequest, *ExchangeRequest:
		return ENOSYS
	*/
	}

@ -1522,6 +1810,132 @@ func (s *Server) InvalidateEntry(parent Node, name string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
type notifyStoreRetrieveDetail struct {
|
||||
Off uint64
|
||||
Size uint64
|
||||
}
|
||||
|
||||
func (i notifyStoreRetrieveDetail) String() string {
|
||||
return fmt.Sprintf("Off:%d Size:%d", i.Off, i.Size)
|
||||
}
|
||||
|
||||
type notifyRetrieveReplyDetail struct {
|
||||
Size uint64
|
||||
}
|
||||
|
||||
func (i notifyRetrieveReplyDetail) String() string {
|
||||
return fmt.Sprintf("Size:%d", i.Size)
|
||||
}
|
||||
|
||||
// NotifyStore puts data into the kernel page cache.
|
||||
//
|
||||
// Returns fuse.ErrNotCached if the kernel is not currently caching
|
||||
// the node.
|
||||
func (s *Server) NotifyStore(node Node, offset uint64, data []byte) error {
|
||||
s.meta.Lock()
|
||||
id, ok := s.nodeRef[node]
|
||||
if ok {
|
||||
snode := s.node[id]
|
||||
snode.wg.Add(1)
|
||||
defer snode.wg.Done()
|
||||
}
|
||||
s.meta.Unlock()
|
||||
if !ok {
|
||||
// This is what the kernel would have said, if we had been
|
||||
// able to send this message; it's not cached.
|
||||
return fuse.ErrNotCached
|
||||
}
|
||||
// Delay logging until after we can record the error too. We
|
||||
// consider a /dev/fuse write to be instantaneous enough to not
|
||||
// need separate before and after messages.
|
||||
err := s.conn.NotifyStore(id, offset, data)
|
||||
s.debug(notification{
|
||||
Op: "NotifyStore",
|
||||
Node: id,
|
||||
Out: notifyStoreRetrieveDetail{
|
||||
Off: offset,
|
||||
Size: uint64(len(data)),
|
||||
},
|
||||
Err: errstr(err),
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// NotifyRetrieve gets data from the kernel page cache.
|
||||
//
|
||||
// Returns fuse.ErrNotCached if the kernel is not currently caching
|
||||
// the node.
|
||||
func (s *Server) NotifyRetrieve(node Node, offset uint64, size uint32) ([]byte, error) {
|
||||
s.meta.Lock()
|
||||
id, ok := s.nodeRef[node]
|
||||
if ok {
|
||||
snode := s.node[id]
|
||||
snode.wg.Add(1)
|
||||
defer snode.wg.Done()
|
||||
}
|
||||
s.meta.Unlock()
|
||||
if !ok {
|
||||
// This is what the kernel would have said, if we had been
|
||||
// able to send this message; it's not cached.
|
||||
return nil, fuse.ErrNotCached
|
||||
}
|
||||
|
||||
ch := make(chan *fuse.NotifyReply, 1)
|
||||
s.notifyMu.Lock()
|
||||
const wraparoundThreshold = 1 << 63
|
||||
if s.notifySeq > wraparoundThreshold {
|
||||
s.notifyMu.Unlock()
|
||||
return nil, errors.New("running out of notify sequence numbers")
|
||||
}
|
||||
s.notifySeq++
|
||||
seq := s.notifySeq
|
||||
s.notifyWait[seq] = ch
|
||||
s.notifyMu.Unlock()
|
||||
|
||||
s.debug(notificationRequest{
|
||||
ID: seq,
|
||||
Op: "NotifyRetrieve",
|
||||
Node: id,
|
||||
Out: notifyStoreRetrieveDetail{
|
||||
Off: offset,
|
||||
Size: uint64(size),
|
||||
},
|
||||
})
|
||||
retrieval, err := s.conn.NotifyRetrieve(seq, id, offset, size)
|
||||
if err != nil {
|
||||
s.debug(notificationResponse{
|
||||
ID: seq,
|
||||
Op: "NotifyRetrieve",
|
||||
Err: errstr(err),
|
||||
})
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reply := <-ch
|
||||
data := retrieval.Finish(reply)
|
||||
s.debug(notificationResponse{
|
||||
ID: seq,
|
||||
Op: "NotifyRetrieve",
|
||||
In: notifyRetrieveReplyDetail{
|
||||
Size: uint64(len(data)),
|
||||
},
|
||||
})
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func (s *Server) NotifyPollWakeup(wakeup fuse.PollWakeup) error {
|
||||
// Delay logging until after we can record the error too. We
|
||||
// consider a /dev/fuse write to be instantaneous enough to not
|
||||
// need separate before and after messages.
|
||||
err := s.conn.NotifyPollWakeup(wakeup)
|
||||
s.debug(notification{
|
||||
Op: "NotifyPollWakeup",
|
||||
Out: wakeup,
|
||||
Err: errstr(err),
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// DataHandle returns a read-only Handle that satisfies reads
|
||||
// using the given data.
|
||||
func DataHandle(data []byte) Handle {
|
||||
|
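As a quick illustration of the two page-cache notification helpers above, here is a minimal sketch (not part of the vendored diff) of how a filesystem built on this package might drive them from outside the request path. The package name fsutil, the pushAndReadBack helper, its srv and node arguments, and the 4 KiB read size are all illustrative assumptions.

package fsutil

import (
	"bazil.org/fuse"
	"bazil.org/fuse/fs"
)

// pushAndReadBack stores payload into the kernel page cache for node and
// then retrieves up to 4 KiB back, treating "not cached" as a non-error.
func pushAndReadBack(srv *fs.Server, node fs.Node, payload []byte) error {
	// NotifyStore overwrites whatever the kernel has cached at offset 0.
	if err := srv.NotifyStore(node, 0, payload); err != nil && err != fuse.ErrNotCached {
		return err
	}
	// NotifyRetrieve asks the kernel for its cached copy of the same range.
	if _, err := srv.NotifyRetrieve(node, 0, 4096); err != nil && err != fuse.ErrNotCached {
		return err
	}
	return nil
}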
2
vendor/bazil.org/fuse/fs/tree.go
generated
vendored
@ -76,7 +76,7 @@ func (t *tree) add(name string, n Node) {
}

func (t *tree) Attr(ctx context.Context, a *fuse.Attr) error {
	a.Mode = os.ModeDir | 0555
	a.Mode = os.ModeDir | 0o555
	return nil
}

777
vendor/bazil.org/fuse/fuse.go
generated
vendored
File diff suppressed because it is too large
9
vendor/bazil.org/fuse/fuse_darwin.go
generated
vendored
@ -1,9 +0,0 @@
package fuse

// Maximum file write size we are prepared to receive from the kernel.
//
// This value has to be >=16MB or OSXFUSE (3.4.0 observed) will
// forcibly close the /dev/fuse file descriptor on a Setxattr with a
// 16MB value. See TestSetxattr16MB and
// https://github.com/bazil/fuse/issues/42
const maxWrite = 16 * 1024 * 1024
349
vendor/bazil.org/fuse/fuse_kernel.go
generated
vendored
@ -39,20 +39,41 @@ import (
	"fmt"
	"syscall"
	"unsafe"

	"golang.org/x/sys/unix"
)

// The FUSE version implemented by the package.
const (
	protoVersionMinMajor = 7
	protoVersionMinMinor = 8
	protoVersionMinMinor = 17
	protoVersionMaxMajor = 7
	protoVersionMaxMinor = 12
	protoVersionMaxMinor = 17
)

const (
	rootID = 1
)

type attr struct {
|
||||
Ino uint64
|
||||
Size uint64
|
||||
Blocks uint64
|
||||
Atime uint64
|
||||
Mtime uint64
|
||||
Ctime uint64
|
||||
AtimeNsec uint32
|
||||
MtimeNsec uint32
|
||||
CtimeNsec uint32
|
||||
Mode uint32
|
||||
Nlink uint32
|
||||
Uid uint32
|
||||
Gid uint32
|
||||
Rdev uint32
|
||||
Blksize uint32
|
||||
_ uint32
|
||||
}
|
||||
|
||||
type kstatfs struct {
|
||||
Blocks uint64
|
||||
Bfree uint64
|
||||
@ -66,13 +87,6 @@ type kstatfs struct {
|
||||
Spare [6]uint32
|
||||
}
|
||||
|
||||
type fileLock struct {
|
||||
Start uint64
|
||||
End uint64
|
||||
Type uint32
|
||||
Pid uint32
|
||||
}
|
||||
|
||||
// GetattrFlags are bit flags that can be seen in GetattrRequest.
|
||||
type GetattrFlags uint32
|
||||
|
||||
@ -94,24 +108,25 @@ func (fl GetattrFlags) String() string {
|
||||
type SetattrValid uint32
|
||||
|
||||
const (
|
||||
SetattrMode SetattrValid = 1 << 0
|
||||
SetattrUid SetattrValid = 1 << 1
|
||||
SetattrGid SetattrValid = 1 << 2
|
||||
SetattrSize SetattrValid = 1 << 3
|
||||
SetattrAtime SetattrValid = 1 << 4
|
||||
SetattrMtime SetattrValid = 1 << 5
|
||||
SetattrHandle SetattrValid = 1 << 6
|
||||
|
||||
// Linux only(?)
|
||||
SetattrMode SetattrValid = 1 << 0
|
||||
SetattrUid SetattrValid = 1 << 1
|
||||
SetattrGid SetattrValid = 1 << 2
|
||||
SetattrSize SetattrValid = 1 << 3
|
||||
SetattrAtime SetattrValid = 1 << 4
|
||||
SetattrMtime SetattrValid = 1 << 5
|
||||
SetattrHandle SetattrValid = 1 << 6
|
||||
SetattrAtimeNow SetattrValid = 1 << 7
|
||||
SetattrMtimeNow SetattrValid = 1 << 8
|
||||
SetattrLockOwner SetattrValid = 1 << 9 // http://www.mail-archive.com/git-commits-head@vger.kernel.org/msg27852.html
|
||||
|
||||
// OS X only
|
||||
SetattrCrtime SetattrValid = 1 << 28
|
||||
SetattrChgtime SetattrValid = 1 << 29
|
||||
// Deprecated: Not used, OS X remnant.
|
||||
SetattrCrtime SetattrValid = 1 << 28
|
||||
// Deprecated: Not used, OS X remnant.
|
||||
SetattrChgtime SetattrValid = 1 << 29
|
||||
// Deprecated: Not used, OS X remnant.
|
||||
SetattrBkuptime SetattrValid = 1 << 30
|
||||
SetattrFlags SetattrValid = 1 << 31
|
||||
// Deprecated: Not used, OS X remnant.
|
||||
SetattrFlags SetattrValid = 1 << 31
|
||||
)
|
||||
|
||||
func (fl SetattrValid) Mode() bool { return fl&SetattrMode != 0 }
|
||||
@ -124,10 +139,18 @@ func (fl SetattrValid) Handle() bool { return fl&SetattrHandle != 0 }
|
||||
func (fl SetattrValid) AtimeNow() bool { return fl&SetattrAtimeNow != 0 }
|
||||
func (fl SetattrValid) MtimeNow() bool { return fl&SetattrMtimeNow != 0 }
|
||||
func (fl SetattrValid) LockOwner() bool { return fl&SetattrLockOwner != 0 }
|
||||
func (fl SetattrValid) Crtime() bool { return fl&SetattrCrtime != 0 }
|
||||
func (fl SetattrValid) Chgtime() bool { return fl&SetattrChgtime != 0 }
|
||||
func (fl SetattrValid) Bkuptime() bool { return fl&SetattrBkuptime != 0 }
|
||||
func (fl SetattrValid) Flags() bool { return fl&SetattrFlags != 0 }
|
||||
|
||||
// Deprecated: Not used, OS X remnant.
|
||||
func (fl SetattrValid) Crtime() bool { return false }
|
||||
|
||||
// Deprecated: Not used, OS X remnant.
|
||||
func (fl SetattrValid) Chgtime() bool { return false }
|
||||
|
||||
// Deprecated: Not used, OS X remnant.
|
||||
func (fl SetattrValid) Bkuptime() bool { return false }
|
||||
|
||||
// Deprecated: Not used, OS X remnant.
|
||||
func (fl SetattrValid) Flags() bool { return false }
|
||||
|
||||
func (fl SetattrValid) String() string {
|
||||
return flagString(uint32(fl), setattrValidNames)
|
||||
@ -144,10 +167,6 @@ var setattrValidNames = []flagName{
|
||||
{uint32(SetattrAtimeNow), "SetattrAtimeNow"},
|
||||
{uint32(SetattrMtimeNow), "SetattrMtimeNow"},
|
||||
{uint32(SetattrLockOwner), "SetattrLockOwner"},
|
||||
{uint32(SetattrCrtime), "SetattrCrtime"},
|
||||
{uint32(SetattrChgtime), "SetattrChgtime"},
|
||||
{uint32(SetattrBkuptime), "SetattrBkuptime"},
|
||||
{uint32(SetattrFlags), "SetattrFlags"},
|
||||
}
|
||||
|
||||
// Flags that can be seen in OpenRequest.Flags.
|
||||
@ -160,7 +179,7 @@ const (
|
||||
OpenReadWrite OpenFlags = syscall.O_RDWR
|
||||
|
||||
// File was opened in append-only mode, all writes will go to end
|
||||
// of file. OS X does not provide this information.
|
||||
// of file. FreeBSD does not provide this information.
|
||||
OpenAppend OpenFlags = syscall.O_APPEND
|
||||
OpenCreate OpenFlags = syscall.O_CREAT
|
||||
OpenDirectory OpenFlags = syscall.O_DIRECTORY
|
||||
@ -232,10 +251,12 @@ type OpenResponseFlags uint32
|
||||
const (
|
||||
OpenDirectIO OpenResponseFlags = 1 << 0 // bypass page cache for this open file
|
||||
OpenKeepCache OpenResponseFlags = 1 << 1 // don't invalidate the data cache on open
|
||||
OpenNonSeekable OpenResponseFlags = 1 << 2 // mark the file as non-seekable (not supported on OS X or FreeBSD)
|
||||
OpenNonSeekable OpenResponseFlags = 1 << 2 // mark the file as non-seekable (not supported on FreeBSD)
|
||||
|
||||
OpenPurgeAttr OpenResponseFlags = 1 << 30 // OS X
|
||||
OpenPurgeUBC OpenResponseFlags = 1 << 31 // OS X
|
||||
// Deprecated: Not used, OS X remnant.
|
||||
OpenPurgeAttr OpenResponseFlags = 1 << 30
|
||||
// Deprecated: Not used, OS X remnant.
|
||||
OpenPurgeUBC OpenResponseFlags = 1 << 31
|
||||
)
|
||||
|
||||
func (fl OpenResponseFlags) String() string {
|
||||
@ -246,8 +267,6 @@ var openResponseFlagNames = []flagName{
|
||||
{uint32(OpenDirectIO), "OpenDirectIO"},
|
||||
{uint32(OpenKeepCache), "OpenKeepCache"},
|
||||
{uint32(OpenNonSeekable), "OpenNonSeekable"},
|
||||
{uint32(OpenPurgeAttr), "OpenPurgeAttr"},
|
||||
{uint32(OpenPurgeUBC), "OpenPurgeUBC"},
|
||||
}
|
||||
|
||||
// The InitFlags are used in the Init exchange.
|
||||
@ -255,12 +274,12 @@ type InitFlags uint32
|
||||
|
||||
const (
|
||||
InitAsyncRead InitFlags = 1 << 0
|
||||
InitPosixLocks InitFlags = 1 << 1
|
||||
InitPOSIXLocks InitFlags = 1 << 1
|
||||
InitFileOps InitFlags = 1 << 2
|
||||
InitAtomicTrunc InitFlags = 1 << 3
|
||||
InitExportSupport InitFlags = 1 << 4
|
||||
InitBigWrites InitFlags = 1 << 5
|
||||
// Do not mask file access modes with umask. Not supported on OS X.
|
||||
// Do not mask file access modes with umask.
|
||||
InitDontMask InitFlags = 1 << 6
|
||||
InitSpliceWrite InitFlags = 1 << 7
|
||||
InitSpliceMove InitFlags = 1 << 8
|
||||
@ -274,9 +293,12 @@ const (
|
||||
InitWritebackCache InitFlags = 1 << 16
|
||||
InitNoOpenSupport InitFlags = 1 << 17
|
||||
|
||||
InitCaseSensitive InitFlags = 1 << 29 // OS X only
|
||||
InitVolRename InitFlags = 1 << 30 // OS X only
|
||||
InitXtimes InitFlags = 1 << 31 // OS X only
|
||||
// Deprecated: Not used, OS X remnant.
|
||||
InitCaseSensitive InitFlags = 1 << 29
|
||||
// Deprecated: Not used, OS X remnant.
|
||||
InitVolRename InitFlags = 1 << 30
|
||||
// Deprecated: Not used, OS X remnant.
|
||||
InitXtimes InitFlags = 1 << 31
|
||||
)
|
||||
|
||||
type flagName struct {
|
||||
@ -286,7 +308,7 @@ type flagName struct {
|
||||
|
||||
var initFlagNames = []flagName{
|
||||
{uint32(InitAsyncRead), "InitAsyncRead"},
|
||||
{uint32(InitPosixLocks), "InitPosixLocks"},
|
||||
{uint32(InitPOSIXLocks), "InitPOSIXLocks"},
|
||||
{uint32(InitFileOps), "InitFileOps"},
|
||||
{uint32(InitAtomicTrunc), "InitAtomicTrunc"},
|
||||
{uint32(InitExportSupport), "InitExportSupport"},
|
||||
@ -303,10 +325,6 @@ var initFlagNames = []flagName{
|
||||
{uint32(InitAsyncDIO), "InitAsyncDIO"},
|
||||
{uint32(InitWritebackCache), "InitWritebackCache"},
|
||||
{uint32(InitNoOpenSupport), "InitNoOpenSupport"},
|
||||
|
||||
{uint32(InitCaseSensitive), "InitCaseSensitive"},
|
||||
{uint32(InitVolRename), "InitVolRename"},
|
||||
{uint32(InitXtimes), "InitXtimes"},
|
||||
}
|
||||
|
||||
func (fl InitFlags) String() string {
|
||||
@ -336,7 +354,8 @@ func flagString(f uint32, names []flagName) string {
|
||||
type ReleaseFlags uint32
|
||||
|
||||
const (
|
||||
ReleaseFlush ReleaseFlags = 1 << 0
|
||||
ReleaseFlush ReleaseFlags = 1 << 0
|
||||
ReleaseFlockUnlock ReleaseFlags = 1 << 1
|
||||
)
|
||||
|
||||
func (fl ReleaseFlags) String() string {
|
||||
@ -345,6 +364,7 @@ func (fl ReleaseFlags) String() string {
|
||||
|
||||
var releaseFlagNames = []flagName{
|
||||
{uint32(ReleaseFlush), "ReleaseFlush"},
|
||||
{uint32(ReleaseFlockUnlock), "ReleaseFlockUnlock"},
|
||||
}
|
||||
|
||||
// Opcodes
|
||||
@ -385,13 +405,10 @@ const (
|
||||
opInterrupt = 36
|
||||
opBmap = 37
|
||||
opDestroy = 38
|
||||
opIoctl = 39 // Linux?
|
||||
opPoll = 40 // Linux?
|
||||
|
||||
// OS X
|
||||
opSetvolname = 61
|
||||
opGetxtimes = 62
|
||||
opExchange = 63
|
||||
opIoctl = 39
|
||||
opPoll = 40
|
||||
opNotifyReply = 41
|
||||
opBatchForget = 42
|
||||
)
|
||||
|
||||
type entryOut struct {
|
||||
@ -417,6 +434,16 @@ type forgetIn struct {
|
||||
Nlookup uint64
|
||||
}
|
||||
|
||||
type forgetOne struct {
|
||||
NodeID uint64
|
||||
Nlookup uint64
|
||||
}
|
||||
|
||||
type batchForgetIn struct {
|
||||
Count uint32
|
||||
_ uint32
|
||||
}
|
||||
|
||||
type getattrIn struct {
|
||||
GetattrFlags uint32
|
||||
_ uint32
|
||||
@ -439,14 +466,6 @@ func attrOutSize(p Protocol) uintptr {
|
||||
}
|
||||
}
|
||||
|
||||
// OS X
|
||||
type getxtimesOut struct {
|
||||
Bkuptime uint64
|
||||
Crtime uint64
|
||||
BkuptimeNsec uint32
|
||||
CrtimeNsec uint32
|
||||
}
|
||||
|
||||
type mknodIn struct {
|
||||
Mode uint32
|
||||
Rdev uint32
|
||||
@ -484,24 +503,16 @@ type renameIn struct {
|
||||
// "oldname\x00newname\x00" follows
|
||||
}
|
||||
|
||||
// OS X
|
||||
type exchangeIn struct {
|
||||
Olddir uint64
|
||||
Newdir uint64
|
||||
Options uint64
|
||||
// "oldname\x00newname\x00" follows
|
||||
}
|
||||
|
||||
type linkIn struct {
|
||||
Oldnodeid uint64
|
||||
}
|
||||
|
||||
type setattrInCommon struct {
|
||||
type setattrIn struct {
|
||||
Valid uint32
|
||||
_ uint32
|
||||
Fh uint64
|
||||
Size uint64
|
||||
LockOwner uint64 // unused on OS X?
|
||||
LockOwner uint64
|
||||
Atime uint64
|
||||
Mtime uint64
|
||||
Unused2 uint64
|
||||
@ -546,14 +557,14 @@ type releaseIn struct {
|
||||
Fh uint64
|
||||
Flags uint32
|
||||
ReleaseFlags uint32
|
||||
LockOwner uint32
|
||||
LockOwner uint64
|
||||
}
|
||||
|
||||
type flushIn struct {
|
||||
Fh uint64
|
||||
FlushFlags uint32
|
||||
_ uint32
|
||||
LockOwner uint64
|
||||
Fh uint64
|
||||
_ uint32
|
||||
_ uint32
|
||||
LockOwner uint64
|
||||
}
|
||||
|
||||
type readIn struct {
|
||||
@ -643,29 +654,70 @@ type fsyncIn struct {
|
||||
_ uint32
|
||||
}
|
||||
|
||||
type setxattrInCommon struct {
|
||||
type setxattrIn struct {
|
||||
Size uint32
|
||||
Flags uint32
|
||||
}
|
||||
|
||||
func (setxattrInCommon) position() uint32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
type getxattrInCommon struct {
|
||||
type getxattrIn struct {
|
||||
Size uint32
|
||||
_ uint32
|
||||
}
|
||||
|
||||
func (getxattrInCommon) position() uint32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
type getxattrOut struct {
|
||||
Size uint32
|
||||
_ uint32
|
||||
}
|
||||
|
||||
// The LockFlags are passed in LockRequest or LockWaitRequest.
|
||||
type LockFlags uint32
|
||||
|
||||
const (
|
||||
// BSD-style flock lock (not POSIX lock)
|
||||
LockFlock LockFlags = 1 << 0
|
||||
)
|
||||
|
||||
var lockFlagNames = []flagName{
|
||||
{uint32(LockFlock), "LockFlock"},
|
||||
}
|
||||
|
||||
func (fl LockFlags) String() string {
|
||||
return flagString(uint32(fl), lockFlagNames)
|
||||
}
|
||||
|
||||
type LockType uint32
|
||||
|
||||
const (
|
||||
// It seems FreeBSD FUSE passes these through using its local
|
||||
// values, not whatever Linux enshrined into the protocol. It's
|
||||
// unclear what the intended behavior is.
|
||||
|
||||
LockRead LockType = unix.F_RDLCK
|
||||
LockWrite LockType = unix.F_WRLCK
|
||||
LockUnlock LockType = unix.F_UNLCK
|
||||
)
|
||||
|
||||
var lockTypeNames = map[LockType]string{
|
||||
LockRead: "LockRead",
|
||||
LockWrite: "LockWrite",
|
||||
LockUnlock: "LockUnlock",
|
||||
}
|
||||
|
||||
func (l LockType) String() string {
|
||||
s, ok := lockTypeNames[l]
|
||||
if ok {
|
||||
return s
|
||||
}
|
||||
return fmt.Sprintf("LockType(%d)", l)
|
||||
}
|
||||
|
||||
type fileLock struct {
|
||||
Start uint64
|
||||
End uint64
|
||||
Type uint32
|
||||
PID uint32
|
||||
}
|
||||
|
||||
type lkIn struct {
|
||||
Fh uint64
|
||||
Owner uint64
|
||||
@ -674,15 +726,6 @@ type lkIn struct {
|
||||
_ uint32
|
||||
}
|
||||
|
||||
func lkInSize(p Protocol) uintptr {
|
||||
switch {
|
||||
case p.LT(Protocol{7, 9}):
|
||||
return unsafe.Offsetof(lkIn{}.LkFlags)
|
||||
default:
|
||||
return unsafe.Sizeof(lkIn{})
|
||||
}
|
||||
}
|
||||
|
||||
type lkOut struct {
|
||||
Lk fileLock
|
||||
}
|
||||
@ -702,12 +745,13 @@ type initIn struct {
|
||||
const initInSize = int(unsafe.Sizeof(initIn{}))
|
||||
|
||||
type initOut struct {
|
||||
Major uint32
|
||||
Minor uint32
|
||||
MaxReadahead uint32
|
||||
Flags uint32
|
||||
Unused uint32
|
||||
MaxWrite uint32
|
||||
Major uint32
|
||||
Minor uint32
|
||||
MaxReadahead uint32
|
||||
Flags uint32
|
||||
MaxBackground uint16
|
||||
CongestionThreshold uint16
|
||||
MaxWrite uint32
|
||||
}
|
||||
|
||||
type interruptIn struct {
|
||||
@ -748,7 +792,6 @@ type dirent struct {
|
||||
Off uint64
|
||||
Namelen uint32
|
||||
Type uint32
|
||||
Name [0]byte
|
||||
}
|
||||
|
||||
const direntSize = 8 + 8 + 4 + 4
|
||||
@ -757,6 +800,8 @@ const (
|
||||
notifyCodePoll int32 = 1
|
||||
notifyCodeInvalInode int32 = 2
|
||||
notifyCodeInvalEntry int32 = 3
|
||||
notifyCodeStore int32 = 4
|
||||
notifyCodeRetrieve int32 = 5
|
||||
)
|
||||
|
||||
type notifyInvalInodeOut struct {
|
||||
@ -770,3 +815,101 @@ type notifyInvalEntryOut struct {
|
||||
Namelen uint32
|
||||
_ uint32
|
||||
}
|
||||
|
||||
type notifyStoreOut struct {
|
||||
Nodeid uint64
|
||||
Offset uint64
|
||||
Size uint32
|
||||
_ uint32
|
||||
}
|
||||
|
||||
type notifyRetrieveOut struct {
|
||||
NotifyUnique uint64
|
||||
Nodeid uint64
|
||||
Offset uint64
|
||||
Size uint32
|
||||
_ uint32
|
||||
}
|
||||
|
||||
type notifyRetrieveIn struct {
|
||||
// matches writeIn
|
||||
|
||||
_ uint64
|
||||
Offset uint64
|
||||
Size uint32
|
||||
_ uint32
|
||||
_ uint64
|
||||
_ uint64
|
||||
}
|
||||
|
||||
// PollFlags are passed in PollRequest.Flags
|
||||
type PollFlags uint32
|
||||
|
||||
const (
|
||||
// PollScheduleNotify requests that a poll notification is done
|
||||
// once the node is ready.
|
||||
PollScheduleNotify PollFlags = 1 << 0
|
||||
)
|
||||
|
||||
var pollFlagNames = []flagName{
|
||||
{uint32(PollScheduleNotify), "PollScheduleNotify"},
|
||||
}
|
||||
|
||||
func (fl PollFlags) String() string {
|
||||
return flagString(uint32(fl), pollFlagNames)
|
||||
}
|
||||
|
||||
type PollEvents uint32
|
||||
|
||||
const (
|
||||
PollIn PollEvents = 0x0000_0001
|
||||
PollPriority PollEvents = 0x0000_0002
|
||||
PollOut PollEvents = 0x0000_0004
|
||||
PollError PollEvents = 0x0000_0008
|
||||
PollHangup PollEvents = 0x0000_0010
|
||||
// PollInvalid doesn't seem to be used in the FUSE protocol.
|
||||
PollInvalid PollEvents = 0x0000_0020
|
||||
PollReadNormal PollEvents = 0x0000_0040
|
||||
PollReadOutOfBand PollEvents = 0x0000_0080
|
||||
PollWriteNormal PollEvents = 0x0000_0100
|
||||
PollWriteOutOfBand PollEvents = 0x0000_0200
|
||||
PollMessage PollEvents = 0x0000_0400
|
||||
PollReadHangup PollEvents = 0x0000_2000
|
||||
|
||||
DefaultPollMask = PollIn | PollOut | PollReadNormal | PollWriteNormal
|
||||
)
|
||||
|
||||
var pollEventNames = []flagName{
|
||||
{uint32(PollIn), "PollIn"},
|
||||
{uint32(PollPriority), "PollPriority"},
|
||||
{uint32(PollOut), "PollOut"},
|
||||
{uint32(PollError), "PollError"},
|
||||
{uint32(PollHangup), "PollHangup"},
|
||||
{uint32(PollInvalid), "PollInvalid"},
|
||||
{uint32(PollReadNormal), "PollReadNormal"},
|
||||
{uint32(PollReadOutOfBand), "PollReadOutOfBand"},
|
||||
{uint32(PollWriteNormal), "PollWriteNormal"},
|
||||
{uint32(PollWriteOutOfBand), "PollWriteOutOfBand"},
|
||||
{uint32(PollMessage), "PollMessage"},
|
||||
{uint32(PollReadHangup), "PollReadHangup"},
|
||||
}
|
||||
|
||||
func (fl PollEvents) String() string {
|
||||
return flagString(uint32(fl), pollEventNames)
|
||||
}
|
||||
|
||||
type pollIn struct {
|
||||
Fh uint64
|
||||
Kh uint64
|
||||
Flags uint32
|
||||
Events uint32
|
||||
}
|
||||
|
||||
type pollOut struct {
|
||||
REvents uint32
|
||||
_ uint32
|
||||
}
|
||||
|
||||
type notifyPollWakeupOut struct {
|
||||
Kh uint64
|
||||
}
|
||||
|
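The PollEvents values above are what a handle reports back through the poll dispatch shown earlier in fs/serve.go. The following is a small sketch, not taken from the diff, of a handle that satisfies that poll path; MyHandle, its ready field, and the fsutil package name are hypothetical.

package fsutil

import (
	"context"

	"bazil.org/fuse"
)

// MyHandle is a hypothetical open-file handle that knows whether data is
// ready to be read without blocking.
type MyHandle struct {
	ready bool
}

// Poll reports readiness to the kernel. Leaving REvents zero means "not
// ready yet"; fuse.DefaultPollMask is what the server falls back to when a
// handle or node does not implement polling at all.
func (h *MyHandle) Poll(ctx context.Context, req *fuse.PollRequest, resp *fuse.PollResponse) error {
	if h.ready {
		resp.REvents = fuse.PollIn
	}
	return nil
}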
88
vendor/bazil.org/fuse/fuse_kernel_darwin.go
generated
vendored
@ -1,88 +0,0 @@
|
||||
package fuse
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type attr struct {
|
||||
Ino uint64
|
||||
Size uint64
|
||||
Blocks uint64
|
||||
Atime uint64
|
||||
Mtime uint64
|
||||
Ctime uint64
|
||||
Crtime_ uint64 // OS X only
|
||||
AtimeNsec uint32
|
||||
MtimeNsec uint32
|
||||
CtimeNsec uint32
|
||||
CrtimeNsec uint32 // OS X only
|
||||
Mode uint32
|
||||
Nlink uint32
|
||||
Uid uint32
|
||||
Gid uint32
|
||||
Rdev uint32
|
||||
Flags_ uint32 // OS X only; see chflags(2)
|
||||
Blksize uint32
|
||||
padding uint32
|
||||
}
|
||||
|
||||
func (a *attr) SetCrtime(s uint64, ns uint32) {
|
||||
a.Crtime_, a.CrtimeNsec = s, ns
|
||||
}
|
||||
|
||||
func (a *attr) SetFlags(f uint32) {
|
||||
a.Flags_ = f
|
||||
}
|
||||
|
||||
type setattrIn struct {
|
||||
setattrInCommon
|
||||
|
||||
// OS X only
|
||||
Bkuptime_ uint64
|
||||
Chgtime_ uint64
|
||||
Crtime uint64
|
||||
BkuptimeNsec uint32
|
||||
ChgtimeNsec uint32
|
||||
CrtimeNsec uint32
|
||||
Flags_ uint32 // see chflags(2)
|
||||
}
|
||||
|
||||
func (in *setattrIn) BkupTime() time.Time {
|
||||
return time.Unix(int64(in.Bkuptime_), int64(in.BkuptimeNsec))
|
||||
}
|
||||
|
||||
func (in *setattrIn) Chgtime() time.Time {
|
||||
return time.Unix(int64(in.Chgtime_), int64(in.ChgtimeNsec))
|
||||
}
|
||||
|
||||
func (in *setattrIn) Flags() uint32 {
|
||||
return in.Flags_
|
||||
}
|
||||
|
||||
func openFlags(flags uint32) OpenFlags {
|
||||
return OpenFlags(flags)
|
||||
}
|
||||
|
||||
type getxattrIn struct {
|
||||
getxattrInCommon
|
||||
|
||||
// OS X only
|
||||
Position uint32
|
||||
Padding uint32
|
||||
}
|
||||
|
||||
func (g *getxattrIn) position() uint32 {
|
||||
return g.Position
|
||||
}
|
||||
|
||||
type setxattrIn struct {
|
||||
setxattrInCommon
|
||||
|
||||
// OS X only
|
||||
Position uint32
|
||||
Padding uint32
|
||||
}
|
||||
|
||||
func (s *setxattrIn) position() uint32 {
|
||||
return s.Position
|
||||
}
|
57
vendor/bazil.org/fuse/fuse_kernel_freebsd.go
generated
vendored
@ -1,62 +1,5 @@
|
||||
package fuse
|
||||
|
||||
import "time"
|
||||
|
||||
type attr struct {
|
||||
Ino uint64
|
||||
Size uint64
|
||||
Blocks uint64
|
||||
Atime uint64
|
||||
Mtime uint64
|
||||
Ctime uint64
|
||||
AtimeNsec uint32
|
||||
MtimeNsec uint32
|
||||
CtimeNsec uint32
|
||||
Mode uint32
|
||||
Nlink uint32
|
||||
Uid uint32
|
||||
Gid uint32
|
||||
Rdev uint32
|
||||
Blksize uint32
|
||||
padding uint32
|
||||
}
|
||||
|
||||
func (a *attr) Crtime() time.Time {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
func (a *attr) SetCrtime(s uint64, ns uint32) {
|
||||
// ignored on freebsd
|
||||
}
|
||||
|
||||
func (a *attr) SetFlags(f uint32) {
|
||||
// ignored on freebsd
|
||||
}
|
||||
|
||||
type setattrIn struct {
|
||||
setattrInCommon
|
||||
}
|
||||
|
||||
func (in *setattrIn) BkupTime() time.Time {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
func (in *setattrIn) Chgtime() time.Time {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
func (in *setattrIn) Flags() uint32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func openFlags(flags uint32) OpenFlags {
|
||||
return OpenFlags(flags)
|
||||
}
|
||||
|
||||
type getxattrIn struct {
|
||||
getxattrInCommon
|
||||
}
|
||||
|
||||
type setxattrIn struct {
|
||||
setxattrInCommon
|
||||
}
|
||||
|
57
vendor/bazil.org/fuse/fuse_kernel_linux.go
generated
vendored
@ -1,54 +1,5 @@
|
||||
package fuse
|
||||
|
||||
import "time"
|
||||
|
||||
type attr struct {
|
||||
Ino uint64
|
||||
Size uint64
|
||||
Blocks uint64
|
||||
Atime uint64
|
||||
Mtime uint64
|
||||
Ctime uint64
|
||||
AtimeNsec uint32
|
||||
MtimeNsec uint32
|
||||
CtimeNsec uint32
|
||||
Mode uint32
|
||||
Nlink uint32
|
||||
Uid uint32
|
||||
Gid uint32
|
||||
Rdev uint32
|
||||
Blksize uint32
|
||||
_ uint32
|
||||
}
|
||||
|
||||
func (a *attr) Crtime() time.Time {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
func (a *attr) SetCrtime(s uint64, ns uint32) {
|
||||
// Ignored on Linux.
|
||||
}
|
||||
|
||||
func (a *attr) SetFlags(f uint32) {
|
||||
// Ignored on Linux.
|
||||
}
|
||||
|
||||
type setattrIn struct {
|
||||
setattrInCommon
|
||||
}
|
||||
|
||||
func (in *setattrIn) BkupTime() time.Time {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
func (in *setattrIn) Chgtime() time.Time {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
func (in *setattrIn) Flags() uint32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func openFlags(flags uint32) OpenFlags {
|
||||
// on amd64, the 32-bit O_LARGEFILE flag is always seen;
|
||||
// on i386, the flag probably depends on the app
|
||||
@ -60,11 +11,3 @@ func openFlags(flags uint32) OpenFlags {
|
||||
|
||||
return OpenFlags(flags)
|
||||
}
|
||||
|
||||
type getxattrIn struct {
|
||||
getxattrInCommon
|
||||
}
|
||||
|
||||
type setxattrIn struct {
|
||||
setxattrInCommon
|
||||
}
|
||||
|
1
vendor/bazil.org/fuse/fuse_kernel_std.go
generated
vendored
@ -1 +0,0 @@
package fuse
4
vendor/bazil.org/fuse/go.mod
generated
vendored
@ -3,6 +3,10 @@ module bazil.org/fuse
go 1.13

require (
	github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813
	github.com/elazarl/go-bindata-assetfs v1.0.0 // indirect
	github.com/stephens2424/writerset v1.0.2 // indirect
	github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c
	golang.org/x/sys v0.0.0-20191210023423-ac6580df4449
	golang.org/x/tools v0.0.0-20200423201157-2723c5de0d66 // indirect
)

35
vendor/bazil.org/fuse/go.sum
generated
vendored
@ -1,4 +1,39 @@
|
||||
github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813 h1:NgO45/5mBLRVfiXerEFzH6ikcZ7DNRPS639xFg3ENzU=
|
||||
github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
|
||||
github.com/elazarl/go-bindata-assetfs v1.0.0 h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3CKKpKinvZLFk=
|
||||
github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481/go.mod h1:C9WhFzY47SzYBIvzFqSvHIR6ROgDo4TtdTuRaOMjF/s=
|
||||
github.com/stephens2424/writerset v1.0.2 h1:znRLgU6g8RS5euYRcy004XeE4W+Tu44kALzy7ghPif8=
|
||||
github.com/stephens2424/writerset v1.0.2/go.mod h1:aS2JhsMn6eA7e82oNmW4rfsgAOp9COBTTl8mzkwADnc=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ=
|
||||
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ=
|
||||
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200423201157-2723c5de0d66 h1:EqVh9e7SxFnv93tWN3j33DpFAMKlFhaqI736Yp4kynk=
|
||||
golang.org/x/tools v0.0.0-20200423201157-2723c5de0d66/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
6
vendor/bazil.org/fuse/mount.go
generated
vendored
@ -9,11 +9,7 @@ import (
)

var (
	// ErrOSXFUSENotFound is returned from Mount when the OSXFUSE
	// installation is not detected.
	//
	// Only happens on OS X. Make sure OSXFUSE is installed, or see
	// OSXFUSELocations for customization.
	// Deprecated: Never used, OS X remnant.
	ErrOSXFUSENotFound = errors.New("cannot locate OSXFUSE")
)

208
vendor/bazil.org/fuse/mount_darwin.go
generated
vendored
@ -1,208 +0,0 @@
|
||||
package fuse
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
var (
|
||||
errNoAvail = errors.New("no available fuse devices")
|
||||
errNotLoaded = errors.New("osxfuse is not loaded")
|
||||
)
|
||||
|
||||
func loadOSXFUSE(bin string) error {
|
||||
cmd := exec.Command(bin)
|
||||
cmd.Dir = "/"
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
err := cmd.Run()
|
||||
return err
|
||||
}
|
||||
|
||||
func openOSXFUSEDev(devPrefix string) (*os.File, error) {
|
||||
var f *os.File
|
||||
var err error
|
||||
for i := uint64(0); ; i++ {
|
||||
path := devPrefix + strconv.FormatUint(i, 10)
|
||||
f, err = os.OpenFile(path, os.O_RDWR, 0000)
|
||||
if os.IsNotExist(err) {
|
||||
if i == 0 {
|
||||
// not even the first device was found -> fuse is not loaded
|
||||
return nil, errNotLoaded
|
||||
}
|
||||
|
||||
// we've run out of kernel-provided devices
|
||||
return nil, errNoAvail
|
||||
}
|
||||
|
||||
if err2, ok := err.(*os.PathError); ok && err2.Err == syscall.EBUSY {
|
||||
// try the next one
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
}
|
||||
|
||||
func handleMountOSXFUSE(helperName string, errCh chan<- error) func(line string) (ignore bool) {
|
||||
var noMountpointPrefix = helperName + `: `
|
||||
const noMountpointSuffix = `: No such file or directory`
|
||||
return func(line string) (ignore bool) {
|
||||
if strings.HasPrefix(line, noMountpointPrefix) && strings.HasSuffix(line, noMountpointSuffix) {
|
||||
// re-extract it from the error message in case some layer
|
||||
// changed the path
|
||||
mountpoint := line[len(noMountpointPrefix) : len(line)-len(noMountpointSuffix)]
|
||||
err := &MountpointDoesNotExistError{
|
||||
Path: mountpoint,
|
||||
}
|
||||
select {
|
||||
case errCh <- err:
|
||||
return true
|
||||
default:
|
||||
// not the first error; fall back to logging it
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// isBoringMountOSXFUSEError returns whether the Wait error is
|
||||
// uninteresting; exit status 64 is.
|
||||
func isBoringMountOSXFUSEError(err error) bool {
|
||||
if err, ok := err.(*exec.ExitError); ok && err.Exited() {
|
||||
if status, ok := err.Sys().(syscall.WaitStatus); ok && status.ExitStatus() == 64 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func callMount(bin string, daemonVar string, dir string, conf *mountConfig, f *os.File, ready chan<- struct{}, errp *error) error {
|
||||
for k, v := range conf.options {
|
||||
if strings.Contains(k, ",") || strings.Contains(v, ",") {
|
||||
// Silly limitation but the mount helper does not
|
||||
// understand any escaping. See TestMountOptionCommaError.
|
||||
return fmt.Errorf("mount options cannot contain commas on darwin: %q=%q", k, v)
|
||||
}
|
||||
}
|
||||
cmd := exec.Command(
|
||||
bin,
|
||||
"-o", conf.getOptions(),
|
||||
// Tell osxfuse-kext how large our buffer is. It must split
|
||||
// writes larger than this into multiple writes.
|
||||
//
|
||||
// OSXFUSE seems to ignore InitResponse.MaxWrite, and uses
|
||||
// this instead.
|
||||
"-o", "iosize="+strconv.FormatUint(maxWrite, 10),
|
||||
// refers to fd passed in cmd.ExtraFiles
|
||||
"3",
|
||||
dir,
|
||||
)
|
||||
cmd.ExtraFiles = []*os.File{f}
|
||||
cmd.Env = os.Environ()
|
||||
// OSXFUSE <3.3.0
|
||||
cmd.Env = append(cmd.Env, "MOUNT_FUSEFS_CALL_BY_LIB=")
|
||||
// OSXFUSE >=3.3.0
|
||||
cmd.Env = append(cmd.Env, "MOUNT_OSXFUSE_CALL_BY_LIB=")
|
||||
|
||||
daemon := os.Args[0]
|
||||
if daemonVar != "" {
|
||||
cmd.Env = append(cmd.Env, daemonVar+"="+daemon)
|
||||
}
|
||||
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return fmt.Errorf("setting up mount_osxfusefs stderr: %v", err)
|
||||
}
|
||||
stderr, err := cmd.StderrPipe()
|
||||
if err != nil {
|
||||
return fmt.Errorf("setting up mount_osxfusefs stderr: %v", err)
|
||||
}
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
return fmt.Errorf("mount_osxfusefs: %v", err)
|
||||
}
|
||||
helperErrCh := make(chan error, 1)
|
||||
go func() {
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2)
|
||||
go lineLogger(&wg, "mount helper output", neverIgnoreLine, stdout)
|
||||
helperName := path.Base(bin)
|
||||
go lineLogger(&wg, "mount helper error", handleMountOSXFUSE(helperName, helperErrCh), stderr)
|
||||
wg.Wait()
|
||||
if err := cmd.Wait(); err != nil {
|
||||
// see if we have a better error to report
|
||||
select {
|
||||
case helperErr := <-helperErrCh:
|
||||
// log the Wait error if it's not what we expected
|
||||
if !isBoringMountOSXFUSEError(err) {
|
||||
log.Printf("mount helper failed: %v", err)
|
||||
}
|
||||
// and now return what we grabbed from stderr as the real
|
||||
// error
|
||||
*errp = helperErr
|
||||
close(ready)
|
||||
return
|
||||
default:
|
||||
// nope, fall back to generic message
|
||||
}
|
||||
|
||||
*errp = fmt.Errorf("mount_osxfusefs: %v", err)
|
||||
close(ready)
|
||||
return
|
||||
}
|
||||
|
||||
*errp = nil
|
||||
close(ready)
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (*os.File, error) {
|
||||
locations := conf.osxfuseLocations
|
||||
if locations == nil {
|
||||
locations = []OSXFUSEPaths{
|
||||
OSXFUSELocationV3,
|
||||
OSXFUSELocationV2,
|
||||
}
|
||||
}
|
||||
for _, loc := range locations {
|
||||
if _, err := os.Stat(loc.Mount); os.IsNotExist(err) {
|
||||
// try the other locations
|
||||
continue
|
||||
}
|
||||
|
||||
f, err := openOSXFUSEDev(loc.DevicePrefix)
|
||||
if err == errNotLoaded {
|
||||
err = loadOSXFUSE(loc.Load)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// try again
|
||||
f, err = openOSXFUSEDev(loc.DevicePrefix)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = callMount(loc.Mount, loc.DaemonVar, dir, conf, f, ready, errp)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, err
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
return nil, ErrOSXFUSENotFound
|
||||
}
|
6
vendor/bazil.org/fuse/mount_freebsd.go
generated
vendored
@ -47,7 +47,7 @@ func isBoringMountFusefsError(err error) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (*os.File, error) {
|
||||
func mount(dir string, conf *mountConfig) (*os.File, error) {
|
||||
for k, v := range conf.options {
|
||||
if strings.Contains(k, ",") || strings.Contains(v, ",") {
|
||||
// Silly limitation but the mount helper does not
|
||||
@ -56,9 +56,8 @@ func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (*
|
||||
}
|
||||
}
|
||||
|
||||
f, err := os.OpenFile("/dev/fuse", os.O_RDWR, 0000)
|
||||
f, err := os.OpenFile("/dev/fuse", os.O_RDWR, 0o000)
|
||||
if err != nil {
|
||||
*errp = err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -106,6 +105,5 @@ func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (*
|
||||
return nil, fmt.Errorf("mount_fusefs: %v", err)
|
||||
}
|
||||
|
||||
close(ready)
|
||||
return f, nil
|
||||
}
|
||||
|
5
vendor/bazil.org/fuse/mount_linux.go
generated
vendored
@ -55,10 +55,7 @@ func isBoringFusermountError(err error) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (fusefd *os.File, err error) {
|
||||
// linux mount is never delayed
|
||||
close(ready)
|
||||
|
||||
func mount(dir string, conf *mountConfig) (fusefd *os.File, err error) {
|
||||
fds, err := syscall.Socketpair(syscall.AF_FILE, syscall.SOCK_STREAM, 0)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("socketpair error: %v", err)
|
||||
|
154
vendor/bazil.org/fuse/options.go
generated
vendored
@ -1,7 +1,6 @@
|
||||
package fuse
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
)
|
||||
|
||||
@ -12,10 +11,11 @@ func dummyOption(conf *mountConfig) error {
|
||||
// mountConfig holds the configuration for a mount operation.
|
||||
// Use it by passing MountOption values to Mount.
|
||||
type mountConfig struct {
|
||||
options map[string]string
|
||||
maxReadahead uint32
|
||||
initFlags InitFlags
|
||||
osxfuseLocations []OSXFUSEPaths
|
||||
options map[string]string
|
||||
maxReadahead uint32
|
||||
initFlags InitFlags
|
||||
maxBackground uint16
|
||||
congestionThreshold uint16
|
||||
}
|
||||
|
||||
func escapeComma(s string) string {
|
||||
@ -59,7 +59,6 @@ func FSName(name string) MountOption {
|
||||
// `fuse`. The type in a list of mounted file systems will look like
|
||||
// `fuse.foo`.
|
||||
//
|
||||
// OS X ignores this option.
|
||||
// FreeBSD ignores this option.
|
||||
func Subtype(fstype string) MountOption {
|
||||
return func(conf *mountConfig) error {
|
||||
@ -68,82 +67,40 @@ func Subtype(fstype string) MountOption {
|
||||
}
|
||||
}
|
||||
|
||||
// LocalVolume sets the volume to be local (instead of network),
|
||||
// changing the behavior of Finder, Spotlight, and such.
|
||||
//
|
||||
// OS X only. Others ignore this option.
|
||||
// Deprecated: Ignored, OS X remnant.
|
||||
func LocalVolume() MountOption {
|
||||
return localVolume
|
||||
return dummyOption
|
||||
}
|
||||
|
||||
// VolumeName sets the volume name shown in Finder.
|
||||
//
|
||||
// OS X only. Others ignore this option.
|
||||
// Deprecated: Ignored, OS X remnant.
|
||||
func VolumeName(name string) MountOption {
|
||||
return volumeName(name)
|
||||
return dummyOption
|
||||
}
|
||||
|
||||
// NoAppleDouble makes OSXFUSE disallow files with names used by OS X
|
||||
// to store extended attributes on file systems that do not support
|
||||
// them natively.
|
||||
//
|
||||
// Such file names are:
|
||||
//
|
||||
// ._*
|
||||
// .DS_Store
|
||||
//
|
||||
// OS X only. Others ignore this option.
|
||||
// Deprecated: Ignored, OS X remnant.
|
||||
func NoAppleDouble() MountOption {
|
||||
return noAppleDouble
|
||||
return dummyOption
|
||||
}
|
||||
|
||||
// NoAppleXattr makes OSXFUSE disallow extended attributes with the
|
||||
// prefix "com.apple.". This disables persistent Finder state and
|
||||
// other such information.
|
||||
//
|
||||
// OS X only. Others ignore this option.
|
||||
// Deprecated: Ignored, OS X remnant.
|
||||
func NoAppleXattr() MountOption {
|
||||
return noAppleXattr
|
||||
return dummyOption
|
||||
}
|
||||
|
||||
// NoBrowse makes OSXFUSE mark the volume as non-browsable, so that
|
||||
// Finder won't automatically browse it.
|
||||
//
|
||||
// OS X only. Others ignore this option.
|
||||
// Deprecated: Ignored, OS X remnant.
|
||||
func NoBrowse() MountOption {
|
||||
return noBrowse
|
||||
return dummyOption
|
||||
}
|
||||
|
||||
// ExclCreate causes O_EXCL flag to be set for only "truly" exclusive creates,
|
||||
// i.e. create calls for which the initiator explicitly set the O_EXCL flag.
|
||||
//
|
||||
// OSXFUSE expects all create calls to return EEXIST in case the file
|
||||
// already exists, regardless of whether O_EXCL was specified or not.
|
||||
// To ensure this behavior, it normally sets OpenExclusive for all
|
||||
// Create calls, regardless of whether the original call had it set.
|
||||
// For distributed filesystems, that may force every file create to be
|
||||
// a distributed consensus action, causing undesirable delays.
|
||||
//
|
||||
// This option makes the FUSE filesystem see the original flag value,
|
||||
// and better decide when to ensure global consensus.
|
||||
//
|
||||
// Note that returning EEXIST on existing file create is still
|
||||
// expected with OSXFUSE, regardless of the presence of the
|
||||
// OpenExclusive flag.
|
||||
//
|
||||
// For more information, see
|
||||
// https://github.com/osxfuse/osxfuse/issues/209
|
||||
//
|
||||
// OS X only. Others ignore this options.
|
||||
// Requires OSXFUSE 3.4.1 or newer.
|
||||
// Deprecated: Ignored, OS X remnant.
|
||||
func ExclCreate() MountOption {
|
||||
return exclCreate
|
||||
return dummyOption
|
||||
}
|
||||
|
||||
// DaemonTimeout sets the time in seconds between a request and a reply before
|
||||
// the FUSE mount is declared dead.
|
||||
//
|
||||
// OS X and FreeBSD only. Others ignore this option.
|
||||
// FreeBSD only. Others ignore this option.
|
||||
func DaemonTimeout(name string) MountOption {
|
||||
return daemonTimeout(name)
|
||||
}
|
||||
@ -231,8 +188,7 @@ func WritebackCache() MountOption {
|
||||
}
|
||||
}
|
||||
|
||||
// OSXFUSEPaths describes the paths used by an installed OSXFUSE
|
||||
// version. See OSXFUSELocationV3 for typical values.
|
||||
// Deprecated: Not used, OS X remnant.
|
||||
type OSXFUSEPaths struct {
|
||||
// Prefix for the device file. At mount time, an incrementing
|
||||
// number is suffixed until a free FUSE device is found.
|
||||
@ -247,7 +203,7 @@ type OSXFUSEPaths struct {
|
||||
DaemonVar string
|
||||
}
|
||||
|
||||
// Default paths for OSXFUSE. See OSXFUSELocations.
|
||||
// Deprecated: Not used, OS X remnant.
|
||||
var (
|
||||
OSXFUSELocationV3 = OSXFUSEPaths{
|
||||
DevicePrefix: "/dev/osxfuse",
|
||||
@ -263,24 +219,9 @@ var (
|
||||
}
|
||||
)
|
||||
|
||||
// OSXFUSELocations sets where to look for OSXFUSE files. The
|
||||
// arguments are all the possible locations. The previous locations
|
||||
// are replaced.
|
||||
//
|
||||
// Without this option, OSXFUSELocationV3 and OSXFUSELocationV2 are
|
||||
// used.
|
||||
//
|
||||
// OS X only. Others ignore this option.
|
||||
// Deprecated: Ignored, OS X remnant.
|
||||
func OSXFUSELocations(paths ...OSXFUSEPaths) MountOption {
|
||||
return func(conf *mountConfig) error {
|
||||
if len(paths) == 0 {
|
||||
return errors.New("must specify at least one location for OSXFUSELocations")
|
||||
}
|
||||
// replace previous values, but make a copy so there's no
|
||||
// worries about caller mutating their slice
|
||||
conf.osxfuseLocations = append(conf.osxfuseLocations[:0], paths...)
|
||||
return nil
|
||||
}
|
||||
return dummyOption
|
||||
}
|
||||
|
||||
// AllowNonEmptyMount allows the mounting over a non-empty directory.
|
||||
@ -294,3 +235,54 @@ func AllowNonEmptyMount() MountOption {
		return nil
	}
}

// MaxBackground sets the maximum number of FUSE requests the kernel
// will submit in the background. Background requests are used when an
// immediate answer is not needed. This may help with request latency.
//
// On Linux, this can be adjusted on the fly with
// /sys/fs/fuse/connections/CONN/max_background
func MaxBackground(n uint16) MountOption {
	return func(conf *mountConfig) error {
		conf.maxBackground = n
		return nil
	}
}

// CongestionThreshold sets the number of outstanding background FUSE
// requests beyond which the kernel considers the filesystem
// congested. This may help with request latency.
//
// On Linux, this can be adjusted on the fly with
// /sys/fs/fuse/connections/CONN/congestion_threshold
func CongestionThreshold(n uint16) MountOption {
	// TODO to test this, we'd have to figure out our connection id
	// and read /sys
	return func(conf *mountConfig) error {
		conf.congestionThreshold = n
		return nil
	}
}

// LockingFlock enables flock-based (BSD) locking. This is mostly
// useful for distributed filesystems with global locking. Without
// this, the kernel manages local locking automatically.
func LockingFlock() MountOption {
	return func(conf *mountConfig) error {
		conf.initFlags |= InitFlockLocks
		return nil
	}
}

// LockingPOSIX enables POSIX-style locking. This is mostly useful
// for distributed filesystems with global locking. Without this,
// the kernel manages local locking automatically.
//
// Beware POSIX locks are a broken API with unintuitive behavior for
// callers.
func LockingPOSIX() MountOption {
	return func(conf *mountConfig) error {
		conf.initFlags |= InitPOSIXLocks
		return nil
	}
}

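To show how the new mount options above fit together, here is a hedged sketch of passing them to fuse.Mount; the mountpoint path, the filesystem name, and the numeric values are made-up examples, not values taken from this change.

package main

import (
	"log"

	"bazil.org/fuse"
)

func main() {
	conn, err := fuse.Mount(
		"/mnt/example", // hypothetical mountpoint
		fuse.FSName("examplefs"),
		fuse.MaxBackground(64),       // allow up to 64 background requests in flight
		fuse.CongestionThreshold(48), // kernel treats the mount as congested past 48
		fuse.LockingFlock(),          // advertise flock (BSD) lock support
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// fs.Serve(conn, filesys) would normally follow here.
}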
40
vendor/bazil.org/fuse/options_darwin.go
generated
vendored
@ -1,40 +0,0 @@
|
||||
package fuse
|
||||
|
||||
func localVolume(conf *mountConfig) error {
|
||||
conf.options["local"] = ""
|
||||
return nil
|
||||
}
|
||||
|
||||
func volumeName(name string) MountOption {
|
||||
return func(conf *mountConfig) error {
|
||||
conf.options["volname"] = name
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func daemonTimeout(name string) MountOption {
|
||||
return func(conf *mountConfig) error {
|
||||
conf.options["daemon_timeout"] = name
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func noAppleXattr(conf *mountConfig) error {
|
||||
conf.options["noapplexattr"] = ""
|
||||
return nil
|
||||
}
|
||||
|
||||
func noAppleDouble(conf *mountConfig) error {
|
||||
conf.options["noappledouble"] = ""
|
||||
return nil
|
||||
}
|
||||
|
||||
func exclCreate(conf *mountConfig) error {
|
||||
conf.options["excl_create"] = ""
|
||||
return nil
|
||||
}
|
||||
|
||||
func noBrowse(conf *mountConfig) error {
|
||||
conf.options["nobrowse"] = ""
|
||||
return nil
|
||||
}
|
24
vendor/bazil.org/fuse/options_freebsd.go
generated
vendored
@ -1,32 +1,8 @@
|
||||
package fuse
|
||||
|
||||
func localVolume(conf *mountConfig) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func volumeName(name string) MountOption {
|
||||
return dummyOption
|
||||
}
|
||||
|
||||
func daemonTimeout(name string) MountOption {
|
||||
return func(conf *mountConfig) error {
|
||||
conf.options["timeout"] = name
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func noAppleXattr(conf *mountConfig) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func noAppleDouble(conf *mountConfig) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func exclCreate(conf *mountConfig) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func noBrowse(conf *mountConfig) error {
|
||||
return nil
|
||||
}
|
||||
|
24
vendor/bazil.org/fuse/options_linux.go
generated
vendored
@ -1,29 +1,5 @@
|
||||
package fuse
|
||||
|
||||
func localVolume(conf *mountConfig) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func volumeName(name string) MountOption {
|
||||
return dummyOption
|
||||
}
|
||||
|
||||
func daemonTimeout(name string) MountOption {
|
||||
return dummyOption
|
||||
}
|
||||
|
||||
func noAppleXattr(conf *mountConfig) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func noAppleDouble(conf *mountConfig) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func exclCreate(conf *mountConfig) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func noBrowse(conf *mountConfig) error {
|
||||
return nil
|
||||
}
|
||||
|
42
vendor/bazil.org/fuse/protocol.go
generated
vendored
@ -26,50 +26,56 @@ func (a Protocol) GE(b Protocol) bool {
|
||||
(a.Major == b.Major && a.Minor >= b.Minor)
|
||||
}
|
||||
|
||||
func (a Protocol) is79() bool {
|
||||
return a.GE(Protocol{7, 9})
|
||||
}
|
||||
|
||||
// HasAttrBlockSize returns whether Attr.BlockSize is respected by the
|
||||
// kernel.
|
||||
//
|
||||
// Deprecated: Guaranteed to be true with our minimum supported
|
||||
// protocol version.
|
||||
func (a Protocol) HasAttrBlockSize() bool {
|
||||
return a.is79()
|
||||
return true
|
||||
}
|
||||
|
||||
// HasReadWriteFlags returns whether ReadRequest/WriteRequest
|
||||
// fields Flags and FileFlags are valid.
|
||||
//
|
||||
// Deprecated: Guaranteed to be true with our minimum supported
|
||||
// protocol version.
|
||||
func (a Protocol) HasReadWriteFlags() bool {
|
||||
return a.is79()
|
||||
return true
|
||||
}
|
||||
|
||||
// HasGetattrFlags returns whether GetattrRequest field Flags is
|
||||
// valid.
|
||||
//
|
||||
// Deprecated: Guaranteed to be true with our minimum supported
|
||||
// protocol version.
|
||||
func (a Protocol) HasGetattrFlags() bool {
|
||||
return a.is79()
|
||||
}
|
||||
|
||||
func (a Protocol) is710() bool {
|
||||
return a.GE(Protocol{7, 10})
|
||||
return true
|
||||
}
|
||||
|
||||
// HasOpenNonSeekable returns whether OpenResponse field Flags flag
|
||||
// OpenNonSeekable is supported.
|
||||
//
|
||||
// Deprecated: Guaranteed to be true with our minimum supported
|
||||
// protocol version.
|
||||
func (a Protocol) HasOpenNonSeekable() bool {
|
||||
return a.is710()
|
||||
}
|
||||
|
||||
func (a Protocol) is712() bool {
|
||||
return a.GE(Protocol{7, 12})
|
||||
return true
|
||||
}
|
||||
|
||||
// HasUmask returns whether CreateRequest/MkdirRequest/MknodRequest
|
||||
// field Umask is valid.
|
||||
//
|
||||
// Deprecated: Guaranteed to be true with our minimum supported
|
||||
// protocol version.
|
||||
func (a Protocol) HasUmask() bool {
|
||||
return a.is712()
|
||||
return true
|
||||
}
|
||||
|
||||
// HasInvalidate returns whether InvalidateNode/InvalidateEntry are
|
||||
// supported.
|
||||
//
|
||||
// Deprecated: Guaranteed to be true with our minimum supported
|
||||
// protocol version.
|
||||
func (a Protocol) HasInvalidate() bool {
|
||||
return a.is712()
|
||||
return true
|
||||
}
|
||||
|
42
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
@ -61,24 +61,14 @@ var (
|
||||
instID = &cachedValue{k: "instance/id", trim: true}
|
||||
)
|
||||
|
||||
var (
|
||||
defaultClient = &Client{hc: &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 2 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).Dial,
|
||||
},
|
||||
}}
|
||||
subscribeClient = &Client{hc: &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 2 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).Dial,
|
||||
},
|
||||
}}
|
||||
)
|
||||
var defaultClient = &Client{hc: &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 2 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).Dial,
|
||||
},
|
||||
}}
|
||||
|
||||
// NotDefinedError is returned when requested metadata is not defined.
|
||||
//
|
||||
@ -150,7 +140,7 @@ func testOnGCE() bool {
|
||||
}()
|
||||
|
||||
go func() {
|
||||
addrs, err := net.LookupHost("metadata.google.internal")
|
||||
addrs, err := net.DefaultResolver.LookupHost(ctx, "metadata.google.internal")
|
||||
if err != nil || len(addrs) == 0 {
|
||||
resc <- false
|
||||
return
|
||||
@ -205,10 +195,9 @@ func systemInfoSuggestsGCE() bool {
|
||||
return name == "Google" || name == "Google Compute Engine"
|
||||
}
|
||||
|
||||
// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
|
||||
// ResponseHeaderTimeout).
|
||||
// Subscribe calls Client.Subscribe on the default client.
|
||||
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
|
||||
return subscribeClient.Subscribe(suffix, fn)
|
||||
return defaultClient.Subscribe(suffix, fn)
|
||||
}
|
||||
|
||||
// Get calls Client.Get on the default client.
|
||||
@ -279,9 +268,14 @@ type Client struct {
|
||||
hc *http.Client
|
||||
}
|
||||
|
||||
// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
|
||||
// will use the given http.Client instead of the default client.
|
||||
// NewClient returns a Client that can be used to fetch metadata.
|
||||
// Returns the client that uses the specified http.Client for HTTP requests.
|
||||
// If nil is specified, returns the default client.
|
||||
func NewClient(c *http.Client) *Client {
|
||||
if c == nil {
|
||||
return defaultClient
|
||||
}
|
||||
|
||||
return &Client{hc: c}
|
||||
}
|
||||
|
||||
|
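A minimal usage sketch of the consolidated client; the import path comes from the vendored directory above, the "instance/id" suffix and the callback body are illustrative, and the calls only do anything useful on a GCE instance:

package main

import (
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// NewClient(nil) now hands back the shared default client instead of
	// constructing a second one.
	c := metadata.NewClient(nil)
	_ = c

	// Package-level Subscribe is now just defaultClient.Subscribe; the former
	// dedicated subscribeClient is gone. It keeps running until fn returns an
	// error or the metadata request fails.
	err := metadata.Subscribe("instance/id", func(v string, ok bool) error {
		log.Printf("value=%q defined=%v", v, ok)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}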
69  vendor/github.com/Azure/azure-storage-blob-go/azblob/atomicmorph.go (generated, vendored)
@ -1,69 +0,0 @@
|
||||
package azblob
|
||||
|
||||
import "sync/atomic"
|
||||
|
||||
// AtomicMorpherInt32 identifies a method passed to and invoked by the AtomicMorphInt32 function.
|
||||
// The AtomicMorpher callback is passed a startValue and based on this value it returns
|
||||
// what the new value should be and the result that AtomicMorph should return to its caller.
|
||||
type atomicMorpherInt32 func(startVal int32) (val int32, morphResult interface{})
|
||||
|
||||
const targetAndMorpherMustNotBeNil = "target and morpher must not be nil"
|
||||
|
||||
// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function.
|
||||
func atomicMorphInt32(target *int32, morpher atomicMorpherInt32) interface{} {
|
||||
for {
|
||||
currentVal := atomic.LoadInt32(target)
|
||||
desiredVal, morphResult := morpher(currentVal)
|
||||
if atomic.CompareAndSwapInt32(target, currentVal, desiredVal) {
|
||||
return morphResult
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// AtomicMorpherUint32 identifies a method passed to and invoked by the AtomicMorph function.
|
||||
// The AtomicMorpher callback is passed a startValue and based on this value it returns
|
||||
// what the new value should be and the result that AtomicMorph should return to its caller.
|
||||
type atomicMorpherUint32 func(startVal uint32) (val uint32, morphResult interface{})
|
||||
|
||||
// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function.
|
||||
func atomicMorphUint32(target *uint32, morpher atomicMorpherUint32) interface{} {
|
||||
for {
|
||||
currentVal := atomic.LoadUint32(target)
|
||||
desiredVal, morphResult := morpher(currentVal)
|
||||
if atomic.CompareAndSwapUint32(target, currentVal, desiredVal) {
|
||||
return morphResult
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// AtomicMorpherUint64 identifies a method passed to and invoked by the AtomicMorphUint64 function.
|
||||
// The AtomicMorpher callback is passed a startValue and based on this value it returns
|
||||
// what the new value should be and the result that AtomicMorph should return to its caller.
|
||||
type atomicMorpherInt64 func(startVal int64) (val int64, morphResult interface{})
|
||||
|
||||
// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function.
|
||||
func atomicMorphInt64(target *int64, morpher atomicMorpherInt64) interface{} {
|
||||
for {
|
||||
currentVal := atomic.LoadInt64(target)
|
||||
desiredVal, morphResult := morpher(currentVal)
|
||||
if atomic.CompareAndSwapInt64(target, currentVal, desiredVal) {
|
||||
return morphResult
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// AtomicMorpherUint64 identifies a method passed to and invoked by the AtomicMorphUint64 function.
|
||||
// The AtomicMorpher callback is passed a startValue and based on this value it returns
|
||||
// what the new value should be and the result that AtomicMorph should return to its caller.
|
||||
type atomicMorpherUint64 func(startVal uint64) (val uint64, morphResult interface{})
|
||||
|
||||
// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function.
|
||||
func atomicMorphUint64(target *uint64, morpher atomicMorpherUint64) interface{} {
|
||||
for {
|
||||
currentVal := atomic.LoadUint64(target)
|
||||
desiredVal, morphResult := morpher(currentVal)
|
||||
if atomic.CompareAndSwapUint64(target, currentVal, desiredVal) {
|
||||
return morphResult
|
||||
}
|
||||
}
|
||||
}
|
238  vendor/github.com/Azure/azure-storage-blob-go/azblob/chunkwriting.go (generated, vendored, new file)
@ -0,0 +1,238 @@
|
||||
package azblob
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
guuid "github.com/google/uuid"
|
||||
)
|
||||
|
||||
// blockWriter provides methods to upload blocks that represent a file to a server and commit them.
|
||||
// This allows us to provide a local implementation that fakes the server for hermetic testing.
|
||||
type blockWriter interface {
|
||||
StageBlock(context.Context, string, io.ReadSeeker, LeaseAccessConditions, []byte) (*BlockBlobStageBlockResponse, error)
|
||||
CommitBlockList(context.Context, []string, BlobHTTPHeaders, Metadata, BlobAccessConditions) (*BlockBlobCommitBlockListResponse, error)
|
||||
}
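Since the comment above calls out hermetic testing, here is a minimal in-memory fake that satisfies blockWriter as a sketch; it assumes the usual context, io, io/ioutil and sync imports, it would live inside the azblob package (the response types are the ones named in the interface), and returning zero-value responses is a simplification of what the real service sends back:

type fakeBlockWriter struct {
	mu     sync.Mutex
	blocks map[string][]byte // staged block bodies keyed by base64 block ID
	order  []string          // block IDs in the order they were committed
}

func (f *fakeBlockWriter) StageBlock(_ context.Context, id string, body io.ReadSeeker, _ LeaseAccessConditions, _ []byte) (*BlockBlobStageBlockResponse, error) {
	data, err := ioutil.ReadAll(body)
	if err != nil {
		return nil, err
	}
	f.mu.Lock()
	defer f.mu.Unlock()
	if f.blocks == nil {
		f.blocks = make(map[string][]byte)
	}
	f.blocks[id] = data
	return &BlockBlobStageBlockResponse{}, nil
}

func (f *fakeBlockWriter) CommitBlockList(_ context.Context, ids []string, _ BlobHTTPHeaders, _ Metadata, _ BlobAccessConditions) (*BlockBlobCommitBlockListResponse, error) {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.order = append([]string(nil), ids...)
	return &BlockBlobCommitBlockListResponse{}, nil
}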
|
||||
|
||||
// copyFromReader copies a source io.Reader to blob storage using concurrent uploads.
|
||||
// TODO(someone): The existing model provides a buffer size and buffer limit as limiting factors. The buffer size is probably
|
||||
// useless other than needing to be above some number, as the network stack is going to hack up the buffer over some size. The
|
||||
// max buffers is providing a cap on how much memory we use (by multiplying it times the buffer size) and how many go routines can upload
|
||||
// at a time. I think having a single max memory dial would be more efficient. We can choose an internal buffer size that works
|
||||
// well, 4 MiB or 8 MiB, and autoscale to as many goroutines within the memory limit. This gives a single dial to tweak and we can
|
||||
// choose a max value for the memory setting based on internal transfers within Azure (which will give us the maximum throughput model).
|
||||
// We can even provide a utility to dial this number in for customer networks to optimize their copies.
|
||||
func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o UploadStreamToBlockBlobOptions) (*BlockBlobCommitBlockListResponse, error) {
|
||||
o.defaults()
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
cp := &copier{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
reader: from,
|
||||
to: to,
|
||||
id: newID(),
|
||||
o: o,
|
||||
ch: make(chan copierChunk, 1),
|
||||
errCh: make(chan error, 1),
|
||||
buffers: sync.Pool{
|
||||
New: func() interface{} {
|
||||
return make([]byte, o.BufferSize)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Starts the pools of concurrent writers.
|
||||
cp.wg.Add(o.MaxBuffers)
|
||||
for i := 0; i < o.MaxBuffers; i++ {
|
||||
go cp.writer()
|
||||
}
|
||||
|
||||
// Send all our chunks until we get an error.
|
||||
var err error
|
||||
for {
|
||||
if err = cp.sendChunk(); err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
// If the error is not EOF, then we have a problem.
|
||||
if err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Close out our upload.
|
||||
if err := cp.close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cp.result, nil
|
||||
}
|
||||
|
||||
// copier streams a file via chunks in parallel from a reader representing a file.
|
||||
// Do not use directly, instead use copyFromReader().
|
||||
type copier struct {
|
||||
// ctx holds the context of a copier. This is normally a faux pas to store a Context in a struct. In this case,
|
||||
// the copier has the lifetime of a function call, so it's fine.
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
// reader is the source to be written to storage.
|
||||
reader io.Reader
|
||||
// to is the location we are writing our chunks to.
|
||||
to blockWriter
|
||||
|
||||
id *id
|
||||
o UploadStreamToBlockBlobOptions
|
||||
|
||||
// num is the current chunk we are on.
|
||||
num int32
|
||||
// ch is used to pass the next chunk of data from our reader to one of the writers.
|
||||
ch chan copierChunk
|
||||
// errCh is used to hold the first error from our concurrent writers.
|
||||
errCh chan error
|
||||
// wg provides a count of how many writers we are waiting to finish.
|
||||
wg sync.WaitGroup
|
||||
// buffers provides a pool of chunks that can be reused.
|
||||
buffers sync.Pool
|
||||
|
||||
// result holds the final result from blob storage after we have submitted all chunks.
|
||||
result *BlockBlobCommitBlockListResponse
|
||||
}
|
||||
|
||||
type copierChunk struct {
|
||||
buffer []byte
|
||||
id string
|
||||
}
|
||||
|
||||
// getErr returns an error by priority. First, if a function set an error, it returns that error. Next, if the Context has an error
|
||||
// it returns that error. Otherwise it is nil. getErr supports only returning an error once per copier.
|
||||
func (c *copier) getErr() error {
|
||||
select {
|
||||
case err := <-c.errCh:
|
||||
return err
|
||||
default:
|
||||
}
|
||||
return c.ctx.Err()
|
||||
}
|
||||
|
||||
// sendChunk reads data from our internal reader, creates a chunk, and sends it to be written via a channel.
|
||||
// sendChunk returns io.EOF when the reader returns an io.EOF or io.ErrUnexpectedEOF.
|
||||
func (c *copier) sendChunk() error {
|
||||
if err := c.getErr(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
buffer := c.buffers.Get().([]byte)
|
||||
n, err := io.ReadFull(c.reader, buffer)
|
||||
switch {
|
||||
case err == nil && n == 0:
|
||||
return nil
|
||||
case err == nil:
|
||||
c.ch <- copierChunk{
|
||||
buffer: buffer[0:n],
|
||||
id: c.id.next(),
|
||||
}
|
||||
return nil
|
||||
case err != nil && (err == io.EOF || err == io.ErrUnexpectedEOF) && n == 0:
|
||||
return io.EOF
|
||||
}
|
||||
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
c.ch <- copierChunk{
|
||||
buffer: buffer[0:n],
|
||||
id: c.id.next(),
|
||||
}
|
||||
return io.EOF
|
||||
}
|
||||
if err := c.getErr(); err != nil {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// writer writes chunks sent on a channel.
|
||||
func (c *copier) writer() {
|
||||
defer c.wg.Done()
|
||||
|
||||
for chunk := range c.ch {
|
||||
if err := c.write(chunk); err != nil {
|
||||
if !errors.Is(err, context.Canceled) {
|
||||
select {
|
||||
case c.errCh <- err:
|
||||
c.cancel()
|
||||
default:
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// write uploads a chunk to blob storage.
|
||||
func (c *copier) write(chunk copierChunk) error {
|
||||
defer c.buffers.Put(chunk.buffer)
|
||||
|
||||
if err := c.ctx.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err := c.to.StageBlock(c.ctx, chunk.id, bytes.NewReader(chunk.buffer), LeaseAccessConditions{}, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("write error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// close commits our blocks to blob storage and closes our writer.
|
||||
func (c *copier) close() error {
|
||||
close(c.ch)
|
||||
c.wg.Wait()
|
||||
|
||||
if err := c.getErr(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var err error
|
||||
c.result, err = c.to.CommitBlockList(c.ctx, c.id.issued(), c.o.BlobHTTPHeaders, c.o.Metadata, c.o.AccessConditions)
|
||||
return err
|
||||
}
|
||||
|
||||
// id allows the creation of unique IDs based on UUID4 + an int32. This autoincrements.
|
||||
type id struct {
|
||||
u [64]byte
|
||||
num uint32
|
||||
all []string
|
||||
}
|
||||
|
||||
// newID constructs a new id.
|
||||
func newID() *id {
|
||||
uu := guuid.New()
|
||||
u := [64]byte{}
|
||||
copy(u[:], uu[:])
|
||||
return &id{u: u}
|
||||
}
|
||||
|
||||
// next returns the next ID. This is not thread-safe.
|
||||
func (id *id) next() string {
|
||||
defer func() { id.num++ }()
|
||||
|
||||
binary.BigEndian.PutUint32((id.u[len(guuid.UUID{}):]), id.num)
|
||||
str := base64.StdEncoding.EncodeToString(id.u[:])
|
||||
id.all = append(id.all, str)
|
||||
|
||||
return str
|
||||
}
|
||||
|
||||
// issued returns all ids that have been issued. This returned value shares the internal slice so it is not safe to modify the return.
|
||||
// The value is only valid until the next time next() is called.
|
||||
func (id *id) issued() []string {
|
||||
return id.all
|
||||
}
|
203  vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go (generated, vendored)
@ -301,6 +301,10 @@ func DoBatchTransfer(ctx context.Context, o BatchTransferOptions) error {
|
||||
return errors.New("ChunkSize cannot be 0")
|
||||
}
|
||||
|
||||
if o.Parallelism == 0 {
|
||||
o.Parallelism = 5 // default Parallelism
|
||||
}
|
||||
|
||||
// Prepare and do parallel operations.
|
||||
numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1)
|
||||
operationChannel := make(chan func() error, o.Parallelism) // Create the channel that releases 'Parallelism' goroutines concurrently
|
||||
@ -309,9 +313,6 @@ func DoBatchTransfer(ctx context.Context, o BatchTransferOptions) error {
|
||||
defer cancel()
|
||||
|
||||
// Create the goroutines that process each operation (in parallel).
|
||||
if o.Parallelism == 0 {
|
||||
o.Parallelism = 5 // default Parallelism
|
||||
}
|
||||
for g := uint16(0); g < o.Parallelism; g++ {
|
||||
//grIndex := g
|
||||
go func() {
|
||||
@ -352,192 +353,44 @@ func DoBatchTransfer(ctx context.Context, o BatchTransferOptions) error {
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
const _1MiB = 1024 * 1024
|
||||
|
||||
type UploadStreamToBlockBlobOptions struct {
|
||||
BufferSize int
|
||||
// BufferSize sizes the buffer used to read data from source. If < 1 MiB, defaults to 1 MiB.
|
||||
BufferSize int
|
||||
// MaxBuffers defines the number of simultaneous uploads that will be performed to upload the file.
|
||||
MaxBuffers int
|
||||
BlobHTTPHeaders BlobHTTPHeaders
|
||||
Metadata Metadata
|
||||
AccessConditions BlobAccessConditions
|
||||
}
|
||||
|
||||
func (u *UploadStreamToBlockBlobOptions) defaults() {
|
||||
if u.MaxBuffers == 0 {
|
||||
u.MaxBuffers = 1
|
||||
}
|
||||
|
||||
if u.BufferSize < _1MiB {
|
||||
u.BufferSize = _1MiB
|
||||
}
|
||||
}
|
||||
|
||||
// UploadStreamToBlockBlob copies the file held in io.Reader to the Blob at blockBlobURL.
|
||||
// A Context deadline or cancellation will cause this to error.
|
||||
func UploadStreamToBlockBlob(ctx context.Context, reader io.Reader, blockBlobURL BlockBlobURL,
|
||||
o UploadStreamToBlockBlobOptions) (CommonResponse, error) {
|
||||
result, err := uploadStream(ctx, reader,
|
||||
UploadStreamOptions{BufferSize: o.BufferSize, MaxBuffers: o.MaxBuffers},
|
||||
&uploadStreamToBlockBlobOptions{b: blockBlobURL, o: o, blockIDPrefix: newUUID()})
|
||||
o.defaults()
|
||||
|
||||
result, err := copyFromReader(ctx, reader, blockBlobURL, o)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result.(CommonResponse), nil
|
||||
}
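A hedged caller-side sketch of the rewritten function; everything other than the azblob identifiers shown above is invented for illustration, and BufferSize values below 1 MiB are raised to 1 MiB by defaults():

func uploadExample(ctx context.Context, src io.Reader, blobURL BlockBlobURL) error {
	_, err := UploadStreamToBlockBlob(ctx, src, blobURL, UploadStreamToBlockBlobOptions{
		BufferSize: 4 * 1024 * 1024, // size of each staged block read from src
		MaxBuffers: 4,               // concurrent writer goroutines / pooled buffers
	})
	return err
}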
|
||||
|
||||
type uploadStreamToBlockBlobOptions struct {
|
||||
b BlockBlobURL
|
||||
o UploadStreamToBlockBlobOptions
|
||||
blockIDPrefix uuid // UUID used with all blockIDs
|
||||
maxBlockNum uint32 // defaults to 0
|
||||
firstBlock []byte // Used only if maxBlockNum is 0
|
||||
}
|
||||
|
||||
func (t *uploadStreamToBlockBlobOptions) start(ctx context.Context) (interface{}, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (t *uploadStreamToBlockBlobOptions) chunk(ctx context.Context, num uint32, buffer []byte) error {
|
||||
if num == 0 {
|
||||
t.firstBlock = buffer
|
||||
|
||||
// If whole payload fits in 1 block, don't stage it; End will upload it with 1 I/O operation
|
||||
// If the payload is exactly the same size as the buffer, there may be more content coming in.
|
||||
if len(buffer) < t.o.BufferSize {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
// Else, upload a staged block...
|
||||
atomicMorphUint32(&t.maxBlockNum, func(startVal uint32) (val uint32, morphResult interface{}) {
|
||||
// Atomically remember (in t.numBlocks) the maximum block num we've ever seen
|
||||
if startVal < num {
|
||||
return num, nil
|
||||
}
|
||||
return startVal, nil
|
||||
})
|
||||
blockID := newUuidBlockID(t.blockIDPrefix).WithBlockNumber(num).ToBase64()
|
||||
_, err := t.b.StageBlock(ctx, blockID, bytes.NewReader(buffer), LeaseAccessConditions{}, nil)
|
||||
return err
|
||||
}
|
||||
|
||||
func (t *uploadStreamToBlockBlobOptions) end(ctx context.Context) (interface{}, error) {
|
||||
// If the first block had the exact same size as the buffer
|
||||
// we would have staged it as a block thinking that there might be more data coming
|
||||
if t.maxBlockNum == 0 && len(t.firstBlock) != t.o.BufferSize {
|
||||
// If whole payload fits in 1 block (block #0), upload it with 1 I/O operation
|
||||
return t.b.Upload(ctx, bytes.NewReader(t.firstBlock),
|
||||
t.o.BlobHTTPHeaders, t.o.Metadata, t.o.AccessConditions)
|
||||
}
|
||||
// Multiple blocks staged, commit them all now
|
||||
blockID := newUuidBlockID(t.blockIDPrefix)
|
||||
blockIDs := make([]string, t.maxBlockNum+1)
|
||||
for bn := uint32(0); bn <= t.maxBlockNum; bn++ {
|
||||
blockIDs[bn] = blockID.WithBlockNumber(bn).ToBase64()
|
||||
}
|
||||
return t.b.CommitBlockList(ctx, blockIDs, t.o.BlobHTTPHeaders, t.o.Metadata, t.o.AccessConditions)
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
type iTransfer interface {
|
||||
start(ctx context.Context) (interface{}, error)
|
||||
chunk(ctx context.Context, num uint32, buffer []byte) error
|
||||
end(ctx context.Context) (interface{}, error)
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// UploadStreamOptions (defunct) was used internally. This will be removed or made private in a future version.
|
||||
type UploadStreamOptions struct {
|
||||
MaxBuffers int
|
||||
BufferSize int
|
||||
}
|
||||
|
||||
type firstErr struct {
|
||||
lock sync.Mutex
|
||||
finalError error
|
||||
}
|
||||
|
||||
func (fe *firstErr) set(err error) {
|
||||
fe.lock.Lock()
|
||||
if fe.finalError == nil {
|
||||
fe.finalError = err
|
||||
}
|
||||
fe.lock.Unlock()
|
||||
}
|
||||
|
||||
func (fe *firstErr) get() (err error) {
|
||||
fe.lock.Lock()
|
||||
err = fe.finalError
|
||||
fe.lock.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
func uploadStream(ctx context.Context, reader io.Reader, o UploadStreamOptions, t iTransfer) (interface{}, error) {
|
||||
firstErr := firstErr{}
|
||||
ctx, cancel := context.WithCancel(ctx) // New context so that any failure cancels everything
|
||||
defer cancel()
|
||||
wg := sync.WaitGroup{} // Used to know when all outgoing messages have finished processing
|
||||
type OutgoingMsg struct {
|
||||
chunkNum uint32
|
||||
buffer []byte
|
||||
}
|
||||
|
||||
// Create a channel to hold the buffers usable for incoming data
|
||||
incoming := make(chan []byte, o.MaxBuffers)
|
||||
outgoing := make(chan OutgoingMsg, o.MaxBuffers) // Channel holding outgoing buffers
|
||||
if result, err := t.start(ctx); err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
||||
numBuffers := 0 // The number of buffers & out going goroutines created so far
|
||||
injectBuffer := func() {
|
||||
// For each Buffer, create it and a goroutine to upload it
|
||||
incoming <- make([]byte, o.BufferSize) // Add the new buffer to the incoming channel so this goroutine can from the reader into it
|
||||
numBuffers++
|
||||
go func() {
|
||||
for outgoingMsg := range outgoing {
|
||||
// Upload the outgoing buffer
|
||||
err := t.chunk(ctx, outgoingMsg.chunkNum, outgoingMsg.buffer)
|
||||
wg.Done() // Indicate this buffer was sent
|
||||
if nil != err {
|
||||
// NOTE: finalErr could be assigned to multiple times here which is OK,
|
||||
// some error will be returned.
|
||||
firstErr.set(err)
|
||||
cancel()
|
||||
}
|
||||
incoming <- outgoingMsg.buffer // The goroutine reading from the stream can reuse this buffer now
|
||||
}
|
||||
}()
|
||||
}
|
||||
injectBuffer() // Create our 1st buffer & outgoing goroutine
|
||||
|
||||
// This goroutine grabs a buffer, reads from the stream into the buffer,
|
||||
// and inserts the buffer into the outgoing channel to be uploaded
|
||||
for c := uint32(0); true; c++ { // Iterate once per chunk
|
||||
var buffer []byte
|
||||
if numBuffers < o.MaxBuffers {
|
||||
select {
|
||||
// We're not at max buffers, see if a previously-created buffer is available
|
||||
case buffer = <-incoming:
|
||||
break
|
||||
default:
|
||||
// No buffer available; inject a new buffer & go routine to process it
|
||||
injectBuffer()
|
||||
buffer = <-incoming // Grab the just-injected buffer
|
||||
}
|
||||
} else {
|
||||
// We are at max buffers, block until we get to reuse one
|
||||
buffer = <-incoming
|
||||
}
|
||||
n, err := io.ReadFull(reader, buffer)
|
||||
if err != nil { // Less than len(buffer) bytes were read
|
||||
buffer = buffer[:n] // Make slice match the # of read bytes
|
||||
}
|
||||
if len(buffer) > 0 {
|
||||
// Buffer not empty, upload it
|
||||
wg.Add(1) // We're posting a buffer to be sent
|
||||
outgoing <- OutgoingMsg{chunkNum: c, buffer: buffer}
|
||||
}
|
||||
if err != nil { // The reader is done, no more outgoing buffers
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
err = nil // This function does NOT return an error if io.ReadFull returns io.EOF or io.ErrUnexpectedEOF
|
||||
} else {
|
||||
firstErr.set(err)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
// NOTE: Don't close the incoming channel because the outgoing goroutines post buffers into it when they are done
|
||||
close(outgoing) // Make all the outgoing goroutines terminate when this channel is empty
|
||||
wg.Wait() // Wait for all pending outgoing messages to complete
|
||||
err := firstErr.get()
|
||||
if err == nil {
|
||||
// If no error, after all blocks uploaded, commit them to the blob & return the result
|
||||
return t.end(ctx)
|
||||
}
|
||||
return nil, err
|
||||
MaxBuffers int
|
||||
}
|
||||
|
14  vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go (generated, vendored)
@ -42,6 +42,10 @@ func (ab AppendBlobURL) WithSnapshot(snapshot string) AppendBlobURL {
|
||||
return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline())
|
||||
}
|
||||
|
||||
func (ab AppendBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
|
||||
return ab.blobClient.GetAccountInfo(ctx)
|
||||
}
|
||||
|
||||
// Create creates a 0-length append blob. Call AppendBlock to append data to an append blob.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
|
||||
func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*AppendBlobCreateResponse, error) {
|
||||
@ -49,6 +53,7 @@ func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata
|
||||
return ab.abClient.Create(ctx, 0, nil,
|
||||
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
|
||||
&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
|
||||
nil, nil, EncryptionAlgorithmNone, // CPK
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, nil)
|
||||
}
|
||||
|
||||
@ -64,8 +69,11 @@ func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac
|
||||
return nil, err
|
||||
}
|
||||
return ab.abClient.AppendBlock(ctx, body, count, nil,
|
||||
transactionalMD5, ac.LeaseAccessConditions.pointers(),
|
||||
transactionalMD5,
|
||||
nil, // CRC
|
||||
ac.LeaseAccessConditions.pointers(),
|
||||
ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
|
||||
nil, nil, EncryptionAlgorithmNone, // CPK
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
|
||||
|
||||
@ -76,7 +84,9 @@ func (ab AppendBlobURL) AppendBlockFromURL(ctx context.Context, sourceURL url.UR
|
||||
sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
|
||||
ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := destinationAccessConditions.AppendPositionAccessConditions.pointers()
|
||||
return ab.abClient.AppendBlockFromURL(ctx, sourceURL.String(), 0, httpRange{offset: offset, count: count}.pointers(),
|
||||
transactionalMD5, nil, destinationAccessConditions.LeaseAccessConditions.pointers(),
|
||||
transactionalMD5, nil, nil, nil,
|
||||
nil, nil, EncryptionAlgorithmNone, // CPK
|
||||
destinationAccessConditions.LeaseAccessConditions.pointers(),
|
||||
ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
|
||||
}
|
||||
|
17  vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go (generated, vendored)
@ -29,6 +29,10 @@ func (b BlobURL) String() string {
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (b BlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
|
||||
return b.blobClient.GetAccountInfo(ctx)
|
||||
}
|
||||
|
||||
// WithPipeline creates a new BlobURL object identical to the source but with the specified request policy pipeline.
|
||||
func (b BlobURL) WithPipeline(p pipeline.Pipeline) BlobURL {
|
||||
return NewBlobURL(b.blobClient.URL(), p)
|
||||
@ -68,7 +72,8 @@ func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac Blo
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
dr, err := b.blobClient.Download(ctx, nil, nil,
|
||||
httpRange{offset: offset, count: count}.pointers(),
|
||||
ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5,
|
||||
ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5, nil,
|
||||
nil, nil, EncryptionAlgorithmNone, // CPK
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -103,7 +108,7 @@ func (b BlobURL) Undelete(ctx context.Context) (*BlobUndeleteResponse, error) {
|
||||
// does not update the blob's ETag.
|
||||
// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
|
||||
func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType, lac LeaseAccessConditions) (*BlobSetTierResponse, error) {
|
||||
return b.blobClient.SetTier(ctx, tier, nil, nil, lac.pointers())
|
||||
return b.blobClient.SetTier(ctx, tier, nil, RehydratePriorityNone, nil, lac.pointers())
|
||||
}
|
||||
|
||||
// GetBlobProperties returns the blob's properties.
|
||||
@ -111,6 +116,7 @@ func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType, lac LeaseAcce
|
||||
func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions) (*BlobGetPropertiesResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
return b.blobClient.GetProperties(ctx, nil, nil, ac.LeaseAccessConditions.pointers(),
|
||||
nil, nil, EncryptionAlgorithmNone, // CPK
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
|
||||
|
||||
@ -129,6 +135,7 @@ func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobA
|
||||
func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobSetMetadataResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
return b.blobClient.SetMetadata(ctx, nil, metadata, ac.LeaseAccessConditions.pointers(),
|
||||
nil, nil, EncryptionAlgorithmNone, // CPK
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
|
||||
|
||||
@ -139,7 +146,9 @@ func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobA
|
||||
// because checking this would be a performance hit for a VERY unusual path and I don't think the common case should suffer this
|
||||
// performance hit.
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
return b.blobClient.CreateSnapshot(ctx, nil, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, ac.LeaseAccessConditions.pointers(), nil)
|
||||
return b.blobClient.CreateSnapshot(ctx, nil, metadata,
|
||||
nil, nil, EncryptionAlgorithmNone, // CPK
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, ac.LeaseAccessConditions.pointers(), nil)
|
||||
}
|
||||
|
||||
// AcquireLease acquires a lease on the blob for write and delete operations. The lease duration must be between
|
||||
@ -202,7 +211,7 @@ func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata
|
||||
dstLeaseID := dstac.LeaseAccessConditions.pointers()
|
||||
|
||||
return b.blobClient.StartCopyFromURL(ctx, source.String(), nil, metadata,
|
||||
srcIfModifiedSince, srcIfUnmodifiedSince,
|
||||
AccessTierNone, RehydratePriorityNone, srcIfModifiedSince, srcIfUnmodifiedSince,
|
||||
srcIfMatchETag, srcIfNoneMatchETag,
|
||||
dstIfModifiedSince, dstIfUnmodifiedSince,
|
||||
dstIfMatchETag, dstIfNoneMatchETag,
|
||||
|
88  vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go (generated, vendored)
@ -5,9 +5,6 @@ import (
|
||||
"io"
|
||||
"net/url"
|
||||
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
@ -48,6 +45,10 @@ func (bb BlockBlobURL) WithSnapshot(snapshot string) BlockBlobURL {
|
||||
return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline())
|
||||
}
|
||||
|
||||
func (bb BlockBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
|
||||
return bb.blobClient.GetAccountInfo(ctx)
|
||||
}
|
||||
|
||||
// Upload creates a new block blob or overwrites an existing block blob.
|
||||
// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not
|
||||
// supported with Upload; the content of the existing blob is overwritten with the new content. To
|
||||
@ -61,10 +62,11 @@ func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTT
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bb.bbClient.Upload(ctx, body, count, nil,
|
||||
return bb.bbClient.Upload(ctx, body, count, nil, nil,
|
||||
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
|
||||
&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(),
|
||||
&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
|
||||
&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
|
||||
nil, nil, EncryptionAlgorithmNone, // CPK
|
||||
AccessTierNone, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
|
||||
nil)
|
||||
}
|
||||
|
||||
@ -76,7 +78,9 @@ func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, bod
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bb.bbClient.StageBlock(ctx, base64BlockID, count, body, transactionalMD5, nil, ac.pointers(), nil)
|
||||
return bb.bbClient.StageBlock(ctx, base64BlockID, count, body, transactionalMD5, nil, nil, ac.pointers(),
|
||||
nil, nil, EncryptionAlgorithmNone, // CPK
|
||||
nil)
|
||||
}
|
||||
|
||||
// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
|
||||
@ -84,7 +88,9 @@ func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, bod
|
||||
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
|
||||
func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL url.URL, offset int64, count int64, destinationAccessConditions LeaseAccessConditions, sourceAccessConditions ModifiedAccessConditions) (*BlockBlobStageBlockFromURLResponse, error) {
|
||||
sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
|
||||
return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, sourceURL.String(), httpRange{offset: offset, count: count}.pointers(), nil, nil, destinationAccessConditions.pointers(), sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
|
||||
return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, sourceURL.String(), httpRange{offset: offset, count: count}.pointers(), nil, nil, nil,
|
||||
nil, nil, EncryptionAlgorithmNone, // CPK
|
||||
destinationAccessConditions.pointers(), sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
|
||||
}
|
||||
|
||||
// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob.
|
||||
@ -97,8 +103,10 @@ func (bb BlockBlobURL) CommitBlockList(ctx context.Context, base64BlockIDs []str
|
||||
metadata Metadata, ac BlobAccessConditions) (*BlockBlobCommitBlockListResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
return bb.bbClient.CommitBlockList(ctx, BlockLookupList{Latest: base64BlockIDs}, nil,
|
||||
&h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
|
||||
&h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, nil, nil,
|
||||
metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
|
||||
nil, nil, EncryptionAlgorithmNone, // CPK
|
||||
AccessTierNone,
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
|
||||
|
||||
@ -108,55 +116,19 @@ func (bb BlockBlobURL) GetBlockList(ctx context.Context, listType BlockListType,
|
||||
return bb.bbClient.GetBlockList(ctx, listType, nil, nil, ac.pointers(), nil)
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB.
|
||||
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url.
|
||||
func (bb BlockBlobURL) CopyFromURL(ctx context.Context, source url.URL, metadata Metadata,
|
||||
srcac ModifiedAccessConditions, dstac BlobAccessConditions, srcContentMD5 []byte) (*BlobCopyFromURLResponse, error) {
|
||||
|
||||
type BlockID [64]byte
|
||||
srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers()
|
||||
dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers()
|
||||
dstLeaseID := dstac.LeaseAccessConditions.pointers()
|
||||
|
||||
func (blockID BlockID) ToBase64() string {
|
||||
return base64.StdEncoding.EncodeToString(blockID[:])
|
||||
}
|
||||
|
||||
func (blockID *BlockID) FromBase64(s string) error {
|
||||
*blockID = BlockID{} // Zero out the block ID
|
||||
_, err := base64.StdEncoding.Decode(blockID[:], ([]byte)(s))
|
||||
return err
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
type uuidBlockID BlockID
|
||||
|
||||
func (ubi uuidBlockID) UUID() uuid {
|
||||
u := uuid{}
|
||||
copy(u[:], ubi[:len(u)])
|
||||
return u
|
||||
}
|
||||
|
||||
func (ubi uuidBlockID) Number() uint32 {
|
||||
return binary.BigEndian.Uint32(ubi[len(uuid{}):])
|
||||
}
|
||||
|
||||
func newUuidBlockID(u uuid) uuidBlockID {
|
||||
ubi := uuidBlockID{} // Create a new uuidBlockID
|
||||
copy(ubi[:len(u)], u[:]) // Copy the specified UUID into it
|
||||
// Block number defaults to 0
|
||||
return ubi
|
||||
}
|
||||
|
||||
func (ubi *uuidBlockID) SetUUID(u uuid) *uuidBlockID {
|
||||
copy(ubi[:len(u)], u[:])
|
||||
return ubi
|
||||
}
|
||||
|
||||
func (ubi uuidBlockID) WithBlockNumber(blockNumber uint32) uuidBlockID {
|
||||
binary.BigEndian.PutUint32(ubi[len(uuid{}):], blockNumber) // Put block number after UUID
|
||||
return ubi // Return the passed-in copy
|
||||
}
|
||||
|
||||
func (ubi uuidBlockID) ToBase64() string {
|
||||
return BlockID(ubi).ToBase64()
|
||||
}
|
||||
|
||||
func (ubi *uuidBlockID) FromBase64(s string) error {
|
||||
return (*BlockID)(ubi).FromBase64(s)
|
||||
return bb.blobClient.CopyFromURL(ctx, source.String(), nil, metadata, AccessTierNone,
|
||||
srcIfModifiedSince, srcIfUnmodifiedSince,
|
||||
srcIfMatchETag, srcIfNoneMatchETag,
|
||||
dstIfModifiedSince, dstIfUnmodifiedSince,
|
||||
dstIfMatchETag, dstIfNoneMatchETag,
|
||||
dstLeaseID, nil, srcContentMD5)
|
||||
}
|
||||
|
4  vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go (generated, vendored)

@@ -32,6 +32,10 @@ func (c ContainerURL) String() string {
	return u.String()
}

func (c ContainerURL) GetAccountInfo(ctx context.Context) (*ContainerGetAccountInfoResponse, error) {
	return c.client.GetAccountInfo(ctx)
}

// WithPipeline creates a new ContainerURL object identical to the source but with the specified request policy pipeline.
func (c ContainerURL) WithPipeline(p pipeline.Pipeline) ContainerURL {
	return NewContainerURL(c.URL(), p)
20  vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go (generated, vendored)
@ -44,14 +44,19 @@ func (pb PageBlobURL) WithSnapshot(snapshot string) PageBlobURL {
|
||||
return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline())
|
||||
}
|
||||
|
||||
func (pb PageBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
|
||||
return pb.blobClient.GetAccountInfo(ctx)
|
||||
}
|
||||
|
||||
// Create creates a page blob of the specified length. Call PutPage to upload data to a page blob.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
|
||||
func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*PageBlobCreateResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
return pb.pbClient.Create(ctx, 0, size, nil,
|
||||
return pb.pbClient.Create(ctx, 0, size, nil, PremiumPageBlobAccessTierNone,
|
||||
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, &h.CacheControl,
|
||||
metadata, ac.LeaseAccessConditions.pointers(),
|
||||
&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, &sequenceNumber, nil)
|
||||
metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
|
||||
nil, nil, EncryptionAlgorithmNone, // CPK
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, &sequenceNumber, nil)
|
||||
}
|
||||
|
||||
// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes.
|
||||
@ -65,9 +70,10 @@ func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.Rea
|
||||
}
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
|
||||
return pb.pbClient.UploadPages(ctx, body, count, transactionalMD5, nil,
|
||||
return pb.pbClient.UploadPages(ctx, body, count, transactionalMD5, nil, nil,
|
||||
PageRange{Start: offset, End: offset + count - 1}.pointers(),
|
||||
ac.LeaseAccessConditions.pointers(),
|
||||
nil, nil, EncryptionAlgorithmNone, // CPK
|
||||
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
|
||||
@ -82,7 +88,9 @@ func (pb PageBlobURL) UploadPagesFromURL(ctx context.Context, sourceURL url.URL,
|
||||
sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
|
||||
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := destinationAccessConditions.SequenceNumberAccessConditions.pointers()
|
||||
return pb.pbClient.UploadPagesFromURL(ctx, sourceURL.String(), *PageRange{Start: sourceOffset, End: sourceOffset + count - 1}.pointers(), 0,
|
||||
*PageRange{Start: destOffset, End: destOffset + count - 1}.pointers(), transactionalMD5, nil, destinationAccessConditions.LeaseAccessConditions.pointers(),
|
||||
*PageRange{Start: destOffset, End: destOffset + count - 1}.pointers(), transactionalMD5, nil, nil,
|
||||
nil, nil, EncryptionAlgorithmNone, // CPK
|
||||
destinationAccessConditions.LeaseAccessConditions.pointers(),
|
||||
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
|
||||
}
|
||||
@ -95,6 +103,7 @@ func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64,
|
||||
return pb.pbClient.ClearPages(ctx, 0, nil,
|
||||
PageRange{Start: offset, End: offset + count - 1}.pointers(),
|
||||
ac.LeaseAccessConditions.pointers(),
|
||||
nil, nil, EncryptionAlgorithmNone, // CPK
|
||||
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan,
|
||||
ifSequenceNumberEqual, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
|
||||
@ -125,6 +134,7 @@ func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, offset int64, count
|
||||
func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessConditions) (*PageBlobResizeResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
return pb.pbClient.Resize(ctx, size, nil, ac.LeaseAccessConditions.pointers(),
|
||||
nil, nil, EncryptionAlgorithmNone, // CPK
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
|
||||
|
||||
|
14  vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go (generated, vendored)
@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
@ -38,6 +39,19 @@ func (s ServiceURL) GetUserDelegationCredential(ctx context.Context, info KeyInf
|
||||
return NewUserDelegationCredential(strings.Split(s.client.url.Host, ".")[0], *udk), nil
|
||||
}
|
||||
|
||||
//TODO this was supposed to be generated
|
||||
//NewKeyInfo creates a new KeyInfo struct with the correct time formatting & conversion
|
||||
func NewKeyInfo(Start, Expiry time.Time) KeyInfo {
|
||||
return KeyInfo{
|
||||
Start: Start.UTC().Format(SASTimeFormat),
|
||||
Expiry: Expiry.UTC().Format(SASTimeFormat),
|
||||
}
|
||||
}
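For illustration, a sketch of building the KeyInfo that GetUserDelegationCredential above expects; the 48-hour validity window is an arbitrary choice:

func keyInfoExample() KeyInfo {
	// NewKeyInfo converts both times to UTC and formats them with SASTimeFormat.
	return NewKeyInfo(time.Now(), time.Now().Add(48*time.Hour))
}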
|
||||
|
||||
func (s ServiceURL) GetAccountInfo(ctx context.Context) (*ServiceGetAccountInfoResponse, error) {
|
||||
return s.client.GetAccountInfo(ctx)
|
||||
}
|
||||
|
||||
// URL returns the URL endpoint used by the ServiceURL object.
|
||||
func (s ServiceURL) URL() url.URL {
|
||||
return s.client.URL()
|
||||
|
2  vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go (generated, vendored)

@@ -1,3 +1,3 @@
package azblob

const serviceLibVersion = "0.7"
const serviceLibVersion = "0.10"
13  vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_unix.go (generated, vendored)

@@ -1,25 +1,26 @@
// +build linux darwin freebsd openbsd netbsd dragonfly
// +build linux darwin freebsd openbsd netbsd dragonfly solaris

package azblob

import (
	"os"
	"syscall"

	"golang.org/x/sys/unix"
)

type mmf []byte

func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) {
	prot, flags := syscall.PROT_READ, syscall.MAP_SHARED // Assume read-only
	prot, flags := unix.PROT_READ, unix.MAP_SHARED // Assume read-only
	if writable {
		prot, flags = syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED
		prot, flags = unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED
	}
	addr, err := syscall.Mmap(int(file.Fd()), offset, length, prot, flags)
	addr, err := unix.Mmap(int(file.Fd()), offset, length, prot, flags)
	return mmf(addr), err
}

func (m *mmf) unmap() {
	err := syscall.Munmap(*m)
	err := unix.Munmap(*m)
	*m = nil
	if err != nil {
		panic("if we are unable to unmap the memory-mapped file, there is serious concern for memory corruption")
2  vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go (generated, vendored)

@@ -240,6 +240,8 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
			} else {
				action = "NoRetry: net.Error and in the non-retriable list"
			}
		} else if err == io.ErrUnexpectedEOF {
			action = "Retry: unexpected EOF"
		} else {
			action = "NoRetry: unrecognized error"
		}
10  vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go (generated, vendored)
@ -41,6 +41,7 @@ type RetryReaderOptions struct {
|
||||
MaxRetryRequests int
|
||||
doInjectError bool
|
||||
doInjectErrorRound int
|
||||
injectedError error
|
||||
|
||||
// NotifyFailedRead is called, if non-nil, after any failure to read. Expected usage is diagnostic logging.
|
||||
NotifyFailedRead FailedReadNotifier
|
||||
@ -117,7 +118,11 @@ func (s *retryReader) Read(p []byte) (n int, err error) {
|
||||
|
||||
// Injection mechanism for testing.
|
||||
if s.o.doInjectError && try == s.o.doInjectErrorRound {
|
||||
err = &net.DNSError{IsTemporary: true}
|
||||
if s.o.injectedError != nil {
|
||||
err = s.o.injectedError
|
||||
} else {
|
||||
err = &net.DNSError{IsTemporary: true}
|
||||
}
|
||||
}
|
||||
|
||||
// We successfully read data or end EOF.
|
||||
@ -134,7 +139,8 @@ func (s *retryReader) Read(p []byte) (n int, err error) {
|
||||
// Check the retry count and error code, and decide whether to retry.
|
||||
retriesExhausted := try >= s.o.MaxRetryRequests
|
||||
_, isNetError := err.(net.Error)
|
||||
willRetry := (isNetError || s.wasRetryableEarlyClose(err)) && !retriesExhausted
|
||||
isUnexpectedEOF := err == io.ErrUnexpectedEOF
|
||||
willRetry := (isNetError || isUnexpectedEOF || s.wasRetryableEarlyClose(err)) && !retriesExhausted
|
||||
|
||||
// Notify, for logging purposes, of any failures
|
||||
if s.o.NotifyFailedRead != nil {
|
||||
|
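An in-package test sketch of the new injection knob; these fields are unexported, so this only compiles inside azblob's own tests, and the values are arbitrary:

func retryInjectionSketch() RetryReaderOptions {
	return RetryReaderOptions{
		MaxRetryRequests:   2,
		doInjectError:      true,
		doInjectErrorRound: 1,
		injectedError:      io.ErrUnexpectedEOF, // now retried via the new isUnexpectedEOF check
	}
}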
48  vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go (generated, vendored)
@ -1,6 +1,7 @@
|
||||
package azblob
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
@ -25,11 +26,11 @@ const (
|
||||
func FormatTimesForSASSigning(startTime, expiryTime, snapshotTime time.Time) (string, string, string) {
|
||||
ss := ""
|
||||
if !startTime.IsZero() {
|
||||
ss = startTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ"
|
||||
ss = formatSASTimeWithDefaultFormat(&startTime)
|
||||
}
|
||||
se := ""
|
||||
if !expiryTime.IsZero() {
|
||||
se = expiryTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ"
|
||||
se = formatSASTimeWithDefaultFormat(&expiryTime)
|
||||
}
|
||||
sh := ""
|
||||
if !snapshotTime.IsZero() {
|
||||
@ -40,6 +41,37 @@ func FormatTimesForSASSigning(startTime, expiryTime, snapshotTime time.Time) (st
|
||||
|
||||
// SASTimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time.
|
||||
const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 8601
|
||||
var SASTimeFormats = []string{"2006-01-02T15:04:05.0000000Z", SASTimeFormat, "2006-01-02T15:04Z", "2006-01-02"} // ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details.
|
||||
|
||||
// formatSASTimeWithDefaultFormat format time with ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ".
|
||||
func formatSASTimeWithDefaultFormat(t *time.Time) string {
|
||||
return formatSASTime(t, SASTimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
|
||||
}
|
||||
|
||||
// formatSASTime format time with given format, use ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ" by default.
|
||||
func formatSASTime(t *time.Time, format string) string {
|
||||
if format != "" {
|
||||
return t.Format(format)
|
||||
}
|
||||
return t.Format(SASTimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
|
||||
}
|
||||
|
||||
// parseSASTimeString try to parse sas time string.
|
||||
func parseSASTimeString(val string) (t time.Time, timeFormat string, err error) {
|
||||
for _, sasTimeFormat := range SASTimeFormats {
|
||||
t, err = time.Parse(sasTimeFormat, val)
|
||||
if err == nil {
|
||||
timeFormat = sasTimeFormat
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
err = errors.New("fail to parse time with IOS 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details")
|
||||
}
|
||||
|
||||
return
|
||||
}
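A small test-style sketch, in-package since parseSASTimeString is unexported, showing that each shape listed in SASTimeFormats is accepted; the sample timestamps are invented and an fmt import is assumed:

func sasTimeFormatsSketch() {
	for _, v := range []string{
		"2017-07-27T00:00:00.0000000Z", // full precision
		"2017-07-27T00:00:00Z",         // SASTimeFormat default
		"2017-07-27T00:00Z",            // minutes precision
		"2017-07-27",                   // date only
	} {
		if _, format, err := parseSASTimeString(v); err == nil {
			fmt.Println(format) // reports which layout matched
		}
	}
}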
|
||||
|
||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
|
||||
|
||||
@ -74,6 +106,10 @@ type SASQueryParameters struct {
|
||||
signedExpiry time.Time `param:"ske"`
|
||||
signedService string `param:"sks"`
|
||||
signedVersion string `param:"skv"`
|
||||
|
||||
// private member used for startTime and expiryTime formatting.
|
||||
stTimeFormat string
|
||||
seTimeFormat string
|
||||
}
|
||||
|
||||
func (p *SASQueryParameters) SignedOid() string {
|
||||
@ -202,9 +238,9 @@ func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool
|
||||
case "snapshot":
|
||||
p.snapshotTime, _ = time.Parse(SnapshotTimeFormat, val)
|
||||
case "st":
|
||||
p.startTime, _ = time.Parse(SASTimeFormat, val)
|
||||
p.startTime, p.stTimeFormat, _ = parseSASTimeString(val)
|
||||
case "se":
|
||||
p.expiryTime, _ = time.Parse(SASTimeFormat, val)
|
||||
p.expiryTime, p.seTimeFormat, _ = parseSASTimeString(val)
|
||||
case "sip":
|
||||
dashIndex := strings.Index(val, "-")
|
||||
if dashIndex == -1 {
|
||||
@ -268,10 +304,10 @@ func (p *SASQueryParameters) addToValues(v url.Values) url.Values {
|
||||
v.Add("spr", string(p.protocol))
|
||||
}
|
||||
if !p.startTime.IsZero() {
|
||||
v.Add("st", p.startTime.Format(SASTimeFormat))
|
||||
v.Add("st", formatSASTime(&(p.startTime), p.stTimeFormat))
|
||||
}
|
||||
if !p.expiryTime.IsZero() {
|
||||
v.Add("se", p.expiryTime.Format(SASTimeFormat))
|
||||
v.Add("se", formatSASTime(&(p.expiryTime), p.seTimeFormat))
|
||||
}
|
||||
if len(p.ipRange.Start) > 0 {
|
||||
v.Add("sip", p.ipRange.String())
|
||||
|
142  vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go (generated, vendored)
@ -34,20 +34,26 @@ func newAppendBlobClient(url url.URL, p pipeline.Pipeline) appendBlobClient {
|
||||
// information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> transactionalContentMD5 is specify the transactional md5 for the body, to
|
||||
// be validated by the service. leaseID is if specified, the operation only succeeds if the resource's lease is active
|
||||
// and matches this ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob.
|
||||
// If the Append Block operation would cause the blob to exceed that limit or if the blob size is already greater than
|
||||
// the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code
|
||||
// 412 - Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation.
|
||||
// A number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to
|
||||
// this number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412
|
||||
// - Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob if it has been
|
||||
// modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if
|
||||
// it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs
|
||||
// with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
||||
// be validated by the service. transactionalContentCrc64 is specify the transactional crc64 for the body, to be
|
||||
// validated by the service. leaseID is if specified, the operation only succeeds if the resource's lease is active and
|
||||
// matches this ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob. If
|
||||
// the Append Block operation would cause the blob to exceed that limit or if the blob size is already greater than the
|
||||
// value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 -
|
||||
// Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation. A
|
||||
// number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to this
|
||||
// number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 -
|
||||
// Precondition Failed). encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in
|
||||
// the request. If not specified, encryption is performed with the root account encryption key. For more information,
|
||||
// see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided
|
||||
// encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm
|
||||
// used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the
|
||||
// x-ms-encryption-key header is provided. ifModifiedSince is specify this header value to operate only on a blob if it
|
||||
// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
|
||||
// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
|
||||
// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
||||
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
|
||||
// logs when storage analytics logging is enabled.
|
||||
func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockResponse, error) {
|
||||
func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, leaseID *string, maxSize *int64, appendPosition *int64, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: body,
|
||||
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
|
||||
@ -56,7 +62,7 @@ func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeek
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.appendBlockPreparer(body, contentLength, timeout, transactionalContentMD5, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
req, err := client.appendBlockPreparer(body, contentLength, timeout, transactionalContentMD5, transactionalContentCrc64, leaseID, maxSize, appendPosition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -68,7 +74,7 @@ func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeek
|
||||
}
|
||||
|
||||
// appendBlockPreparer prepares the AppendBlock request.
|
||||
func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, leaseID *string, maxSize *int64, appendPosition *int64, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, body)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
@ -83,6 +89,9 @@ func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLe
|
||||
if transactionalContentMD5 != nil {
|
||||
req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
|
||||
}
|
||||
if transactionalContentCrc64 != nil {
|
||||
req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64))
|
||||
}
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
@ -92,6 +101,15 @@ func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLe
|
||||
if appendPosition != nil {
|
||||
req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10))
|
||||
}
|
||||
if encryptionKey != nil {
|
||||
req.Header.Set("x-ms-encryption-key", *encryptionKey)
|
||||
}
|
||||
if encryptionKeySha256 != nil {
|
||||
req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
|
||||
}
|
||||
if encryptionAlgorithm != EncryptionAlgorithmNone {
|
||||
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
|
||||
}
|
||||
if ifModifiedSince != nil {
|
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
@ -128,33 +146,40 @@ func (client appendBlobClient) appendBlockResponder(resp pipeline.Response) (pip
|
||||
//
|
||||
// sourceURL is specify a URL to the copy source. contentLength is the length of the request. sourceRange is bytes of
|
||||
// source data in the specified range. sourceContentMD5 is specify the md5 calculated for the range of bytes that must
|
||||
// be read from the copy source. timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||
// be read from the copy source. sourceContentcrc64 is specify the crc64 calculated for the range of bytes that must be
|
||||
// read from the copy source. timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||
// lease is active and matches this ID. maxSize is optional conditional header. The max length in bytes permitted for
|
||||
// the append blob. If the Append Block operation would cause the blob to exceed that limit or if the blob size is
|
||||
// already greater than the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error
|
||||
// (HTTP status code 412 - Precondition Failed). appendPosition is optional conditional header, used only for the
|
||||
// Append Block operation. A number indicating the byte offset to compare. Append Block will succeed only if the append
|
||||
// position is equal to this number. If it is not, the request will fail with the AppendPositionConditionNotMet error
|
||||
// (HTTP status code 412 - Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob
|
||||
// if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate
|
||||
// only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to
|
||||
// operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
|
||||
// matching value. sourceIfModifiedSince is specify this header value to operate only on a blob if it has been modified
|
||||
// since the specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it
|
||||
// has not been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs
|
||||
// with a matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
||||
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
|
||||
// logs when storage analytics logging is enabled.
|
||||
func (client appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockFromURLResponse, error) {
|
||||
// Timeouts for Blob Service Operations.</a> transactionalContentMD5 is specify the transactional md5 for the body, to
|
||||
// be validated by the service. encryptionKey is optional. Specifies the encryption key to use to encrypt the data
|
||||
// provided in the request. If not specified, encryption is performed with the root account encryption key. For more
|
||||
// information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the
|
||||
// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the
|
||||
// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided
|
||||
// if the x-ms-encryption-key header is provided. leaseID is if specified, the operation only succeeds if the
|
||||
// resource's lease is active and matches this ID. maxSize is optional conditional header. The max length in bytes
|
||||
// permitted for the append blob. If the Append Block operation would cause the blob to exceed that limit or if the
|
||||
// blob size is already greater than the value specified in this header, the request will fail with
|
||||
// MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). appendPosition is optional
|
||||
// conditional header, used only for the Append Block operation. A number indicating the byte offset to compare. Append
|
||||
// Block will succeed only if the append position is equal to this number. If it is not, the request will fail with the
|
||||
// AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). ifModifiedSince is specify this
|
||||
// header value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is
|
||||
// specify this header value to operate only on a blob if it has not been modified since the specified date/time.
|
||||
// ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag
|
||||
// value to operate only on blobs without a matching value. sourceIfModifiedSince is specify this header value to
|
||||
// operate only on a blob if it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify
|
||||
// this header value to operate only on a blob if it has not been modified since the specified date/time. sourceIfMatch
|
||||
// is specify an ETag value to operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value
|
||||
// to operate only on blobs without a matching value. requestID is provides a client-generated, opaque value with a 1
|
||||
// KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, transactionalContentMD5 []byte, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockFromURLResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.appendBlockFromURLPreparer(sourceURL, contentLength, sourceRange, sourceContentMD5, timeout, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
|
||||
req, err := client.appendBlockFromURLPreparer(sourceURL, contentLength, sourceRange, sourceContentMD5, sourceContentcrc64, timeout, transactionalContentMD5, encryptionKey, encryptionKeySha256, encryptionAlgorithm, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -166,7 +191,7 @@ func (client appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL
|
||||
}
|
||||
|
||||
// appendBlockFromURLPreparer prepares the AppendBlockFromURL request.
|
||||
func (client appendBlobClient) appendBlockFromURLPreparer(sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
func (client appendBlobClient) appendBlockFromURLPreparer(sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, transactionalContentMD5 []byte, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
@ -184,7 +209,22 @@ func (client appendBlobClient) appendBlockFromURLPreparer(sourceURL string, cont
|
||||
if sourceContentMD5 != nil {
|
||||
req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5))
|
||||
}
|
||||
if sourceContentcrc64 != nil {
|
||||
req.Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(sourceContentcrc64))
|
||||
}
|
||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||
if transactionalContentMD5 != nil {
|
||||
req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
|
||||
}
|
||||
if encryptionKey != nil {
|
||||
req.Header.Set("x-ms-encryption-key", *encryptionKey)
|
||||
}
|
||||
if encryptionKeySha256 != nil {
|
||||
req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
|
||||
}
|
||||
if encryptionAlgorithm != EncryptionAlgorithmNone {
|
||||
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
|
||||
}
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
@ -255,20 +295,25 @@ func (client appendBlobClient) appendBlockFromURLResponder(resp pipeline.Respons
|
||||
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
|
||||
// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is
|
||||
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
|
||||
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
|
||||
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
|
||||
// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
|
||||
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
|
||||
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
|
||||
// analytics logging is enabled.
|
||||
func (client appendBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobCreateResponse, error) {
|
||||
// encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in the request. If not
|
||||
// specified, encryption is performed with the root account encryption key. For more information, see Encryption at
|
||||
// Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be
|
||||
// provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the
|
||||
// encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key
|
||||
// header is provided. ifModifiedSince is specify this header value to operate only on a blob if it has been modified
|
||||
// since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has
|
||||
// not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a
|
||||
// matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is
|
||||
// provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when
|
||||
// storage analytics logging is enabled.
|
||||
func (client appendBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobCreateResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -280,7 +325,7 @@ func (client appendBlobClient) Create(ctx context.Context, contentLength int64,
|
||||
}
|
||||
|
||||
// createPreparer prepares the Create request.
|
||||
func (client appendBlobClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
func (client appendBlobClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
@ -317,6 +362,15 @@ func (client appendBlobClient) createPreparer(contentLength int64, timeout *int3
|
||||
if blobContentDisposition != nil {
|
||||
req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition)
|
||||
}
|
||||
if encryptionKey != nil {
|
||||
req.Header.Set("x-ms-encryption-key", *encryptionKey)
|
||||
}
|
||||
if encryptionKeySha256 != nil {
|
||||
req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
|
||||
}
|
||||
if encryptionAlgorithm != EncryptionAlgorithmNone {
|
||||
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
|
||||
}
|
||||
if ifModifiedSince != nil {
|
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
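The customer-provided key (CPK) parameters threaded through the signatures above always map to the same three request headers. Below is a minimal standalone sketch of that mapping; the helper name, enum type and sample values are illustrative, and only the header names and the EncryptionAlgorithmNone guard are taken from the preparers in this diff.

```go
package main

import (
	"fmt"
	"net/http"
)

// EncryptionAlgorithmType stands in for the generated enum; per the doc
// comments in this diff, "AES256" is currently the only accepted value.
type EncryptionAlgorithmType string

const (
	EncryptionAlgorithmNone   EncryptionAlgorithmType = ""
	EncryptionAlgorithmAES256 EncryptionAlgorithmType = "AES256"
)

// setCPKHeaders is a hypothetical helper mirroring what the generated
// preparers above do with the customer-provided key parameters.
func setCPKHeaders(req *http.Request, key, keySHA256 *string, alg EncryptionAlgorithmType) {
	if key != nil {
		req.Header.Set("x-ms-encryption-key", *key)
	}
	if keySHA256 != nil {
		req.Header.Set("x-ms-encryption-key-sha256", *keySHA256)
	}
	if alg != EncryptionAlgorithmNone {
		req.Header.Set("x-ms-encryption-algorithm", string(alg))
	}
}

func main() {
	req, _ := http.NewRequest("PUT", "https://account.blob.core.windows.net/container/blob", nil)
	key := "<base64-encoded AES-256 key>"
	sha := "<base64-encoded SHA-256 of the key>"
	setCPKHeaders(req, &key, &sha, EncryptionAlgorithmAES256)
	fmt.Println(req.Header)
}
```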
399
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_blob.go
generated
vendored
@ -6,13 +6,14 @@ package azblob
import (
"context"
"encoding/base64"
"github.com/Azure/azure-pipeline-go/pipeline"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"time"

"github.com/Azure/azure-pipeline-go/pipeline"
)

// blobClient is the client for the Blob methods of the Azblob service.
|
||||
@ -339,25 +340,27 @@ func (client blobClient) changeLeaseResponder(resp pipeline.Response) (pipeline.
|
||||
// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with
|
||||
// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version
|
||||
// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing
|
||||
// Containers, Blobs, and Metadata for more information. sourceIfModifiedSince is specify this header value to operate
|
||||
// only on a blob if it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header
|
||||
// value to operate only on a blob if it has not been modified since the specified date/time. sourceIfMatch is specify
|
||||
// an ETag value to operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate
|
||||
// only on blobs without a matching value. ifModifiedSince is specify this header value to operate only on a blob if it
|
||||
// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
|
||||
// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
|
||||
// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
||||
// leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
|
||||
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
|
||||
// logs when storage analytics logging is enabled.
|
||||
func (client blobClient) CopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (*BlobCopyFromURLResponse, error) {
|
||||
// Containers, Blobs, and Metadata for more information. tier is optional. Indicates the tier to be set on the blob.
|
||||
// sourceIfModifiedSince is specify this header value to operate only on a blob if it has been modified since the
|
||||
// specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not
|
||||
// been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a
|
||||
// matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
||||
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
|
||||
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
|
||||
// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
|
||||
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. leaseID is if specified, the
|
||||
// operation only succeeds if the resource's lease is active and matches this ID. requestID is provides a
|
||||
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
|
||||
// analytics logging is enabled. sourceContentMD5 is specify the md5 calculated for the range of bytes that must be
|
||||
// read from the copy source.
|
||||
func (client blobClient) CopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string, sourceContentMD5 []byte) (*BlobCopyFromURLResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.copyFromURLPreparer(copySource, timeout, metadata, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, leaseID, requestID)
|
||||
req, err := client.copyFromURLPreparer(copySource, timeout, metadata, tier, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, leaseID, requestID, sourceContentMD5)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -369,7 +372,7 @@ func (client blobClient) CopyFromURL(ctx context.Context, copySource string, tim
|
||||
}
|
||||
|
||||
// copyFromURLPreparer prepares the CopyFromURL request.
|
||||
func (client blobClient) copyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (pipeline.Request, error) {
|
||||
func (client blobClient) copyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string, sourceContentMD5 []byte) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
@ -384,6 +387,9 @@ func (client blobClient) copyFromURLPreparer(copySource string, timeout *int32,
|
||||
req.Header.Set("x-ms-meta-"+k, v)
|
||||
}
|
||||
}
|
||||
if tier != AccessTierNone {
|
||||
req.Header.Set("x-ms-access-tier", string(tier))
|
||||
}
|
||||
if sourceIfModifiedSince != nil {
|
||||
req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
@ -416,6 +422,9 @@ func (client blobClient) copyFromURLPreparer(copySource string, timeout *int32,
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
if sourceContentMD5 != nil {
|
||||
req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5))
|
||||
}
|
||||
req.Header.Set("x-ms-requires-sync", "true")
|
||||
return req, nil
|
||||
}
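The other two additions to CopyFromURL, an access tier and an MD5 for the copied bytes, follow the same header pattern. A small sketch under local stand-in types: the AccessTierType definition and the sample payload are illustrative, while the x-ms-access-tier and x-ms-source-content-md5 handling mirrors copyFromURLPreparer above.

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"net/http"
)

// AccessTierType stands in for the generated enum used by copyFromURLPreparer
// above; AccessTierNone suppresses the header entirely.
type AccessTierType string

const (
	AccessTierNone AccessTierType = ""
	AccessTierCool AccessTierType = "Cool"
)

func main() {
	req, _ := http.NewRequest("PUT", "https://account.blob.core.windows.net/container/dst", nil)

	// The tier is optional: the header is only sent when a tier was requested.
	tier := AccessTierCool
	if tier != AccessTierNone {
		req.Header.Set("x-ms-access-tier", string(tier))
	}

	// sourceContentMD5 is the raw MD5 of the bytes read from the copy source,
	// transmitted base64-encoded, exactly as the preparer above encodes it.
	sourceBody := []byte("example source payload")
	sum := md5.Sum(sourceBody)
	req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sum[:]))

	fmt.Println(req.Header)
}
```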
@ -440,21 +449,26 @@ func (client blobClient) copyFromURLResponder(resp pipeline.Response) (pipeline.
|
||||
// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with
|
||||
// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version
|
||||
// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing
|
||||
// Containers, Blobs, and Metadata for more information. ifModifiedSince is specify this header value to operate only
|
||||
// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
|
||||
// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
|
||||
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
|
||||
// without a matching value. leaseID is if specified, the operation only succeeds if the resource's lease is active and
|
||||
// matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
|
||||
// in the analytics logs when storage analytics logging is enabled.
|
||||
func (client blobClient) CreateSnapshot(ctx context.Context, timeout *int32, metadata map[string]string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (*BlobCreateSnapshotResponse, error) {
|
||||
// Containers, Blobs, and Metadata for more information. encryptionKey is optional. Specifies the encryption key to use
|
||||
// to encrypt the data provided in the request. If not specified, encryption is performed with the root account
|
||||
// encryption key. For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the
|
||||
// SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided.
|
||||
// encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the only accepted value is
|
||||
// "AES256". Must be provided if the x-ms-encryption-key header is provided. ifModifiedSince is specify this header
|
||||
// value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify
|
||||
// this header value to operate only on a blob if it has not been modified since the specified date/time. ifMatch is
|
||||
// specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to
|
||||
// operate only on blobs without a matching value. leaseID is if specified, the operation only succeeds if the
|
||||
// resource's lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB
|
||||
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client blobClient) CreateSnapshot(ctx context.Context, timeout *int32, metadata map[string]string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (*BlobCreateSnapshotResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.createSnapshotPreparer(timeout, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, leaseID, requestID)
|
||||
req, err := client.createSnapshotPreparer(timeout, metadata, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, leaseID, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -466,7 +480,7 @@ func (client blobClient) CreateSnapshot(ctx context.Context, timeout *int32, met
|
||||
}
|
||||
|
||||
// createSnapshotPreparer prepares the CreateSnapshot request.
|
||||
func (client blobClient) createSnapshotPreparer(timeout *int32, metadata map[string]string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (pipeline.Request, error) {
|
||||
func (client blobClient) createSnapshotPreparer(timeout *int32, metadata map[string]string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
@ -482,6 +496,15 @@ func (client blobClient) createSnapshotPreparer(timeout *int32, metadata map[str
|
||||
req.Header.Set("x-ms-meta-"+k, v)
|
||||
}
|
||||
}
|
||||
if encryptionKey != nil {
|
||||
req.Header.Set("x-ms-encryption-key", *encryptionKey)
|
||||
}
|
||||
if encryptionKeySha256 != nil {
|
||||
req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
|
||||
}
|
||||
if encryptionAlgorithm != EncryptionAlgorithmNone {
|
||||
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
|
||||
}
|
||||
if ifModifiedSince != nil {
|
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
@ -619,20 +642,27 @@ func (client blobClient) deleteResponder(resp pipeline.Response) (pipeline.Respo
|
||||
// Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
|
||||
// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
|
||||
// rangeGetContentMD5 is when set to true and specified together with the Range, the service returns the MD5 hash for
|
||||
// the range, as long as the range is less than or equal to 4 MB in size. ifModifiedSince is specify this header value
|
||||
// to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this
|
||||
// header value to operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify
|
||||
// an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only
|
||||
// on blobs without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character
|
||||
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client blobClient) Download(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*downloadResponse, error) {
|
||||
// the range, as long as the range is less than or equal to 4 MB in size. rangeGetContentCRC64 is when set to true and
|
||||
// specified together with the Range, the service returns the CRC64 hash for the range, as long as the range is less
|
||||
// than or equal to 4 MB in size. encryptionKey is optional. Specifies the encryption key to use to encrypt the data
|
||||
// provided in the request. If not specified, encryption is performed with the root account encryption key. For more
|
||||
// information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the
|
||||
// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the
|
||||
// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided
|
||||
// if the x-ms-encryption-key header is provided. ifModifiedSince is specify this header value to operate only on a
|
||||
// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
|
||||
// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
|
||||
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
|
||||
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
|
||||
// recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client blobClient) Download(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, rangeGetContentCRC64 *bool, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*downloadResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.downloadPreparer(snapshot, timeout, rangeParameter, leaseID, rangeGetContentMD5, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
req, err := client.downloadPreparer(snapshot, timeout, rangeParameter, leaseID, rangeGetContentMD5, rangeGetContentCRC64, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -644,7 +674,7 @@ func (client blobClient) Download(ctx context.Context, snapshot *string, timeout
|
||||
}
|
||||
|
||||
// downloadPreparer prepares the Download request.
|
||||
func (client blobClient) downloadPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
func (client blobClient) downloadPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, rangeGetContentCRC64 *bool, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("GET", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
@ -666,6 +696,18 @@ func (client blobClient) downloadPreparer(snapshot *string, timeout *int32, rang
|
||||
if rangeGetContentMD5 != nil {
|
||||
req.Header.Set("x-ms-range-get-content-md5", strconv.FormatBool(*rangeGetContentMD5))
|
||||
}
|
||||
if rangeGetContentCRC64 != nil {
|
||||
req.Header.Set("x-ms-range-get-content-crc64", strconv.FormatBool(*rangeGetContentCRC64))
|
||||
}
|
||||
if encryptionKey != nil {
|
||||
req.Header.Set("x-ms-encryption-key", *encryptionKey)
|
||||
}
|
||||
if encryptionKeySha256 != nil {
|
||||
req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
|
||||
}
|
||||
if encryptionAlgorithm != EncryptionAlgorithmNone {
|
||||
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
|
||||
}
|
||||
if ifModifiedSince != nil {
|
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
@ -694,6 +736,86 @@ func (client blobClient) downloadResponder(resp pipeline.Response) (pipeline.Res
|
||||
return &downloadResponse{rawResponse: resp.Response()}, err
|
||||
}
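Download gains rangeGetContentCRC64 alongside the existing rangeGetContentMD5 flag. A rough sketch of the headers a ranged download requesting a CRC64 would carry; the x-ms-range header name is an assumption (rangeParameter handling sits outside the hunk shown), while the CRC64 flag and the 4 MB limit come from the code and doc comment above.

```go
package main

import (
	"fmt"
	"net/http"
	"strconv"
)

func main() {
	req, _ := http.NewRequest("GET", "https://account.blob.core.windows.net/container/blob", nil)

	// Request the first 4 MB of the blob; the x-ms-range header name is an
	// assumption here, since the rangeParameter handling is not part of the
	// hunk shown above.
	req.Header.Set("x-ms-range", "bytes=0-4194303")

	// Ask the service to return a CRC64 of that range; per the Download doc
	// comment, this is honoured only for ranges of 4 MB or less.
	rangeGetContentCRC64 := true
	req.Header.Set("x-ms-range-get-content-crc64", strconv.FormatBool(rangeGetContentCRC64))

	fmt.Println(req.Header)
}
```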
// GetAccessControl get the owner, group, permissions, or access control list for a blob.
|
||||
//
|
||||
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> upn is optional. Valid only when Hierarchical Namespace is enabled for the
|
||||
// account. If "true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response headers will
|
||||
// be transformed from Azure Active Directory Object IDs to User Principal Names. If "false", the values will be
|
||||
// returned as Azure Active Directory Object IDs. The default value is false. leaseID is if specified, the operation
|
||||
// only succeeds if the resource's lease is active and matches this ID. ifMatch is specify an ETag value to operate
|
||||
// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
|
||||
// matching value. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since
|
||||
// the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been
|
||||
// modified since the specified date/time. requestID is provides a client-generated, opaque value with a 1 KB character
|
||||
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client blobClient) GetAccessControl(ctx context.Context, timeout *int32, upn *bool, leaseID *string, ifMatch *ETag, ifNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*BlobGetAccessControlResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.getAccessControlPreparer(timeout, upn, leaseID, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccessControlResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*BlobGetAccessControlResponse), err
|
||||
}
|
||||
|
||||
// getAccessControlPreparer prepares the GetAccessControl request.
|
||||
func (client blobClient) getAccessControlPreparer(timeout *int32, upn *bool, leaseID *string, ifMatch *ETag, ifNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("HEAD", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
if upn != nil {
|
||||
params.Set("upn", strconv.FormatBool(*upn))
|
||||
}
|
||||
params.Set("action", "getAccessControl")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
if ifMatch != nil {
|
||||
req.Header.Set("If-Match", string(*ifMatch))
|
||||
}
|
||||
if ifNoneMatch != nil {
|
||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||
}
|
||||
if ifModifiedSince != nil {
|
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifUnmodifiedSince != nil {
|
||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// getAccessControlResponder handles the response to the GetAccessControl request.
|
||||
func (client blobClient) getAccessControlResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||
err := validateResponse(resp, http.StatusOK)
|
||||
if resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
io.Copy(ioutil.Discard, resp.Response().Body)
|
||||
resp.Response().Body.Close()
|
||||
return &BlobGetAccessControlResponse{rawResponse: resp.Response()}, err
|
||||
}
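GetAccessControl is new in this version of the generated client. A brief sketch of how its query string is assembled, mirroring getAccessControlPreparer above; the endpoint URL and timeout value are placeholders.

```go
package main

import (
	"fmt"
	"net/url"
	"strconv"
)

func main() {
	// Mirror of getAccessControlPreparer's query handling: upn=true asks the
	// service to translate AAD object IDs in x-ms-owner, x-ms-group and
	// x-ms-acl into User Principal Names (only meaningful on accounts with
	// Hierarchical Namespace enabled).
	params := url.Values{}
	upn := true
	timeout := int32(30) // placeholder timeout in seconds
	params.Set("timeout", strconv.FormatInt(int64(timeout), 10))
	params.Set("upn", strconv.FormatBool(upn))
	params.Set("action", "getAccessControl")

	// The account, container and blob names here are placeholders.
	fmt.Println("HEAD https://account.blob.core.windows.net/container/blob?" + params.Encode())
}
```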
// GetAccountInfo returns the sku name and account kind
|
||||
func (client blobClient) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
|
||||
req, err := client.getAccountInfoPreparer()
|
||||
@ -741,20 +863,25 @@ func (client blobClient) getAccountInfoResponder(resp pipeline.Response) (pipeli
|
||||
// a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||
// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it
|
||||
// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
|
||||
// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
|
||||
// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
||||
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
|
||||
// logs when storage analytics logging is enabled.
|
||||
func (client blobClient) GetProperties(ctx context.Context, snapshot *string, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobGetPropertiesResponse, error) {
|
||||
// lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to encrypt the
|
||||
// data provided in the request. If not specified, encryption is performed with the root account encryption key. For
|
||||
// more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the
|
||||
// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the
|
||||
// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided
|
||||
// if the x-ms-encryption-key header is provided. ifModifiedSince is specify this header value to operate only on a
|
||||
// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
|
||||
// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
|
||||
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
|
||||
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
|
||||
// recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client blobClient) GetProperties(ctx context.Context, snapshot *string, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobGetPropertiesResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.getPropertiesPreparer(snapshot, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
req, err := client.getPropertiesPreparer(snapshot, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -766,7 +893,7 @@ func (client blobClient) GetProperties(ctx context.Context, snapshot *string, ti
|
||||
}
|
||||
|
||||
// getPropertiesPreparer prepares the GetProperties request.
|
||||
func (client blobClient) getPropertiesPreparer(snapshot *string, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
func (client blobClient) getPropertiesPreparer(snapshot *string, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("HEAD", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
@ -782,6 +909,15 @@ func (client blobClient) getPropertiesPreparer(snapshot *string, timeout *int32,
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
if encryptionKey != nil {
|
||||
req.Header.Set("x-ms-encryption-key", *encryptionKey)
|
||||
}
|
||||
if encryptionKeySha256 != nil {
|
||||
req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
|
||||
}
|
||||
if encryptionAlgorithm != EncryptionAlgorithmNone {
|
||||
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
|
||||
}
|
||||
if ifModifiedSince != nil {
|
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
@ -960,6 +1096,99 @@ func (client blobClient) renewLeaseResponder(resp pipeline.Response) (pipeline.R
|
||||
return &BlobRenewLeaseResponse{rawResponse: resp.Response()}, err
|
||||
}
|
||||
|
||||
// SetAccessControl set the owner, group, permissions, or access control list for a blob.
|
||||
//
|
||||
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||
// lease is active and matches this ID. owner is optional. The owner of the blob or directory. group is optional. The
|
||||
// owning group of the blob or directory. posixPermissions is optional and only valid if Hierarchical Namespace is
|
||||
// enabled for the account. Sets POSIX access permissions for the file owner, the file owning group, and others. Each
|
||||
// class may be granted read, write, or execute permission. The sticky bit is also supported. Both symbolic
|
||||
// (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. posixACL is sets POSIX access control rights on
|
||||
// files and directories. The value is a comma-separated list of access control entries. Each access control entry
|
||||
// (ACE) consists of a scope, a type, a user or group identifier, and permissions in the format
|
||||
// "[scope:][type]:[id]:[permissions]". ifMatch is specify an ETag value to operate only on blobs with a matching
|
||||
// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifModifiedSince is
|
||||
// specify this header value to operate only on a blob if it has been modified since the specified date/time.
|
||||
// ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the
|
||||
// specified date/time. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
// recorded in the analytics logs when storage analytics logging is enabled.
func (client blobClient) SetAccessControl(ctx context.Context, timeout *int32, leaseID *string, owner *string, group *string, posixPermissions *string, posixACL *string, ifMatch *ETag, ifNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*BlobSetAccessControlResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.setAccessControlPreparer(timeout, leaseID, owner, group, posixPermissions, posixACL, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setAccessControlResponder}, req)
if err != nil {
return nil, err
}
return resp.(*BlobSetAccessControlResponse), err
}

// setAccessControlPreparer prepares the SetAccessControl request.
func (client blobClient) setAccessControlPreparer(timeout *int32, leaseID *string, owner *string, group *string, posixPermissions *string, posixACL *string, ifMatch *ETag, ifNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PATCH", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
params.Set("action", "setAccessControl")
req.URL.RawQuery = params.Encode()
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if owner != nil {
req.Header.Set("x-ms-owner", *owner)
}
if group != nil {
req.Header.Set("x-ms-group", *group)
}
if posixPermissions != nil {
req.Header.Set("x-ms-permissions", *posixPermissions)
}
if posixACL != nil {
req.Header.Set("x-ms-acl", *posixACL)
}
if ifMatch != nil {
req.Header.Set("If-Match", string(*ifMatch))
}
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
if ifUnmodifiedSince != nil {
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
}
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
req.Header.Set("x-ms-version", ServiceVersion)
return req, nil
}

// setAccessControlResponder handles the response to the SetAccessControl request.
func (client blobClient) setAccessControlResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &BlobSetAccessControlResponse{rawResponse: resp.Response()}, err
}
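For orientation, a minimal sketch (not taken from this diff) of how the new generated SetAccessControl operation could be driven from inside the azblob package; the identity and permission values are made up and most optional parameters are left nil.

package azblob // hypothetical example file inside the generated package

import "context"

// setOwnerExample is an illustrative sketch only; it shows the call shape of the
// generated SetAccessControl operation, whose preparer maps the non-nil values
// onto the x-ms-owner, x-ms-group and x-ms-permissions headers shown above.
func setOwnerExample(ctx context.Context, client blobClient) error {
	owner := "aad-object-id" // hypothetical owner identity
	group := "aad-group-id"  // hypothetical owning group
	perms := "rwxr-x---"     // POSIX-style permission string for x-ms-permissions
	_, err := client.SetAccessControl(ctx,
		nil,      // timeout: rely on the service default
		nil,      // leaseID
		&owner, &group, &perms,
		nil,      // posixACL (x-ms-acl) left unset here
		nil, nil, // ifMatch, ifNoneMatch
		nil, nil, // ifModifiedSince, ifUnmodifiedSince
		nil)      // requestID
	return err
}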
// SetHTTPHeaders the Set HTTP Headers operation sets system properties on the blob
//
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
@@ -1070,20 +1299,25 @@ func (client blobClient) setHTTPHeadersResponder(resp pipeline.Response) (pipeli
// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version
// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing
// Containers, Blobs, and Metadata for more information. leaseID is if specified, the operation only succeeds if the
// resource's lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a
// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
// recorded in the analytics logs when storage analytics logging is enabled.
func (client blobClient) SetMetadata(ctx context.Context, timeout *int32, metadata map[string]string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobSetMetadataResponse, error) {
// resource's lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to
// encrypt the data provided in the request. If not specified, encryption is performed with the root account encryption
// key. For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256
// hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided.
// encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the only accepted value is
// "AES256". Must be provided if the x-ms-encryption-key header is provided. ifModifiedSince is specify this header
// value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify
// this header value to operate only on a blob if it has not been modified since the specified date/time. ifMatch is
// specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to
// operate only on blobs without a matching value. requestID is provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client blobClient) SetMetadata(ctx context.Context, timeout *int32, metadata map[string]string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobSetMetadataResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.setMetadataPreparer(timeout, metadata, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
req, err := client.setMetadataPreparer(timeout, metadata, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
@@ -1095,7 +1329,7 @@ func (client blobClient) SetMetadata(ctx context.Context, timeout *int32, metada
}

// setMetadataPreparer prepares the SetMetadata request.
func (client blobClient) setMetadataPreparer(timeout *int32, metadata map[string]string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
func (client blobClient) setMetadataPreparer(timeout *int32, metadata map[string]string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -1114,6 +1348,15 @@ func (client blobClient) setMetadataPreparer(timeout *int32, metadata map[string
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if encryptionKey != nil {
req.Header.Set("x-ms-encryption-key", *encryptionKey)
}
if encryptionKeySha256 != nil {
req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
}
if encryptionAlgorithm != EncryptionAlgorithmNone {
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
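The three new parameters added here (encryptionKey, encryptionKeySha256, encryptionAlgorithm) become the x-ms-encryption-* headers in the preparer above. A hedged sketch of supplying a customer-provided key; the base64/SHA-256 handling follows the usual customer-provided-key convention, and the EncryptionAlgorithmAES256 constant name is assumed from the surrounding package rather than shown in this diff.

package azblob // hypothetical example file inside the generated package

import (
	"context"
	"crypto/sha256"
	"encoding/base64"
)

// setMetadataWithCPKExample is an illustrative sketch: it derives the SHA-256 of a
// customer-provided key and passes the CPK triplet introduced in this version.
// rawKey is assumed to be a 32-byte AES-256 key.
func setMetadataWithCPKExample(ctx context.Context, client blobClient, rawKey []byte) error {
	key := base64.StdEncoding.EncodeToString(rawKey) // value for x-ms-encryption-key
	sum := sha256.Sum256(rawKey)
	keySHA := base64.StdEncoding.EncodeToString(sum[:]) // value for x-ms-encryption-key-sha256
	_, err := client.SetMetadata(ctx,
		nil,                              // timeout
		map[string]string{"env": "test"}, // metadata -> x-ms-meta-* headers
		nil,                              // leaseID
		&key, &keySHA, EncryptionAlgorithmAES256, // CPK triplet (constant name assumed)
		nil, nil, nil, nil,               // access conditions
		nil)                              // requestID
	return err
}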
@@ -1152,17 +1395,18 @@ func (client blobClient) setMetadataResponder(resp pipeline.
// tier is indicates the tier to be set on the blob. timeout is the timeout parameter is expressed in seconds. For more
// information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled. leaseID is if
// specified, the operation only succeeds if the resource's lease is active and matches this ID.
func (client blobClient) SetTier(ctx context.Context, tier AccessTierType, timeout *int32, requestID *string, leaseID *string) (*BlobSetTierResponse, error) {
// Timeouts for Blob Service Operations.</a> rehydratePriority is optional: Indicates the priority with which to
// rehydrate an archived blob. requestID is provides a client-generated, opaque value with a 1 KB character limit that
// is recorded in the analytics logs when storage analytics logging is enabled. leaseID is if specified, the operation
// only succeeds if the resource's lease is active and matches this ID.
func (client blobClient) SetTier(ctx context.Context, tier AccessTierType, timeout *int32, rehydratePriority RehydratePriorityType, requestID *string, leaseID *string) (*BlobSetTierResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.setTierPreparer(tier, timeout, requestID, leaseID)
req, err := client.setTierPreparer(tier, timeout, rehydratePriority, requestID, leaseID)
if err != nil {
return nil, err
}
@@ -1174,7 +1418,7 @@ func (client blobClient) SetTier(ctx context.Context, tier AccessTierType, timeo
}

// setTierPreparer prepares the SetTier request.
func (client blobClient) setTierPreparer(tier AccessTierType, timeout *int32, requestID *string, leaseID *string) (pipeline.Request, error) {
func (client blobClient) setTierPreparer(tier AccessTierType, timeout *int32, rehydratePriority RehydratePriorityType, requestID *string, leaseID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -1186,6 +1430,9 @@ func (client blobClient) setTierPreparer(tier AccessTierType, timeout *int32, re
params.Set("comp", "tier")
req.URL.RawQuery = params.Encode()
req.Header.Set("x-ms-access-tier", string(tier))
if rehydratePriority != RehydratePriorityNone {
req.Header.Set("x-ms-rehydrate-priority", string(rehydratePriority))
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
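The new rehydratePriority parameter is forwarded as the x-ms-rehydrate-priority header when it is not RehydratePriorityNone. A hedged sketch of asking for a high-priority rehydration while moving an archived blob back to the Hot tier; AccessTierHot and RehydratePriorityHigh are assumed constant names from the surrounding package, not shown in this diff.

package azblob // hypothetical example file inside the generated package

import "context"

// rehydrateExample is an illustrative sketch only: it sets the tier back to Hot and
// hints that the archived blob should be rehydrated with high priority.
func rehydrateExample(ctx context.Context, client blobClient) error {
	_, err := client.SetTier(ctx,
		AccessTierHot,         // x-ms-access-tier
		nil,                   // timeout
		RehydratePriorityHigh, // x-ms-rehydrate-priority (constant name assumed)
		nil,                   // requestID
		nil)                   // leaseID
	return err
}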
@@ -1219,25 +1466,27 @@ func (client blobClient) setTierResponder(resp pipeline.Resp
// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with
// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version
// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing
// Containers, Blobs, and Metadata for more information. sourceIfModifiedSince is specify this header value to operate
// only on a blob if it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header
// value to operate only on a blob if it has not been modified since the specified date/time. sourceIfMatch is specify
// an ETag value to operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate
// only on blobs without a matching value. ifModifiedSince is specify this header value to operate only on a blob if it
// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
// leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
// logs when storage analytics logging is enabled.
func (client blobClient) StartCopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (*BlobStartCopyFromURLResponse, error) {
// Containers, Blobs, and Metadata for more information. tier is optional. Indicates the tier to be set on the blob.
// rehydratePriority is optional: Indicates the priority with which to rehydrate an archived blob.
// sourceIfModifiedSince is specify this header value to operate only on a blob if it has been modified since the
// specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not
// been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a
// matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value.
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. leaseID is if specified, the
// operation only succeeds if the resource's lease is active and matches this ID. requestID is provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client blobClient) StartCopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, rehydratePriority RehydratePriorityType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (*BlobStartCopyFromURLResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.startCopyFromURLPreparer(copySource, timeout, metadata, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, leaseID, requestID)
req, err := client.startCopyFromURLPreparer(copySource, timeout, metadata, tier, rehydratePriority, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, leaseID, requestID)
if err != nil {
return nil, err
}
@@ -1249,7 +1498,7 @@ func (client blobClient) StartCopyFromURL(ctx context.Context, copySource string
}

// startCopyFromURLPreparer prepares the StartCopyFromURL request.
func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (pipeline.Request, error) {
func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, rehydratePriority RehydratePriorityType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -1264,6 +1513,12 @@ func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *in
req.Header.Set("x-ms-meta-"+k, v)
}
}
if tier != AccessTierNone {
req.Header.Set("x-ms-access-tier", string(tier))
}
if rehydratePriority != RehydratePriorityNone {
req.Header.Set("x-ms-rehydrate-priority", string(rehydratePriority))
}
if sourceIfModifiedSince != nil {
req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123))
}
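StartCopyFromURL likewise gained tier and rehydratePriority parameters, mapped onto the same headers as above. A short hedged sketch of starting a server-side copy while setting the destination tier in the same request; AccessTierCool is an assumed constant name from the surrounding package.

package azblob // hypothetical example file inside the generated package

import "context"

// copyWithTierExample is an illustrative sketch only: it starts a server-side copy
// from srcURL and asks for the destination blob to be created in the Cool tier.
func copyWithTierExample(ctx context.Context, client blobClient, srcURL string) error {
	_, err := client.StartCopyFromURL(ctx,
		srcURL,                // copySource
		nil,                   // timeout
		nil,                   // metadata: copied from the source when nil
		AccessTierCool,        // destination tier -> x-ms-access-tier (constant name assumed)
		RehydratePriorityNone, // no rehydrate hint
		nil, nil, nil, nil,    // source access conditions
		nil, nil, nil, nil,    // destination access conditions
		nil,                   // leaseID
		nil)                   // requestID
	return err
}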
192  vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go  generated  vendored
@@ -43,27 +43,34 @@ func newBlockBlobClient(url url.URL, p pipeline.Pipeline) blockBlobClient {
// blob and returned with a read request. blobContentLanguage is optional. Set the blob's content language. If
// specified, this property is stored with the blob and returned with a read request. blobContentMD5 is optional. An
// MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks were
// validated when each was uploaded. metadata is optional. Specifies a user-defined name-value pair associated with the
// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobCommitBlockListResponse, error) {
// validated when each was uploaded. transactionalContentMD5 is specify the transactional md5 for the body, to be
// validated by the service. transactionalContentCrc64 is specify the transactional crc64 for the body, to be validated
// by the service. metadata is optional. Specifies a user-defined name-value pair associated with the blob. If no
// name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination
// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata,
// and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names
// must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for
// more information. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches
// this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header. encryptionKey is optional.
// Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption is
// performed with the root account encryption key. For more information, see Encryption at Rest for Azure Storage
// Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be provided if the
// x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the encryption key
// hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is
// provided. tier is optional. Indicates the tier to be set on the blob. ifModifiedSince is specify this header value
// to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this
// header value to operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify
// an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only
// on blobs without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobCommitBlockListResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.commitBlockListPreparer(blocks, timeout, blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
req, err := client.commitBlockListPreparer(blocks, timeout, blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, transactionalContentMD5, transactionalContentCrc64, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
@@ -75,7 +82,7 @@ func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockL
}

// commitBlockListPreparer prepares the CommitBlockList request.
func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -101,6 +108,12 @@ func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, ti
if blobContentMD5 != nil {
req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5))
}
if transactionalContentMD5 != nil {
req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
}
if transactionalContentCrc64 != nil {
req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64))
}
if metadata != nil {
for k, v := range metadata {
req.Header.Set("x-ms-meta-"+k, v)
@@ -112,6 +125,18 @@ func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, ti
if blobContentDisposition != nil {
req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition)
}
if encryptionKey != nil {
req.Header.Set("x-ms-encryption-key", *encryptionKey)
}
if encryptionKeySha256 != nil {
req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
}
if encryptionAlgorithm != EncryptionAlgorithmNone {
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
}
if tier != AccessTierNone {
req.Header.Set("x-ms-access-tier", string(tier))
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
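CommitBlockList now also accepts transactional checksums, CPK parameters and a tier. A hedged sketch of committing previously staged block IDs while setting the tier; the BlockLookupList field name and the AccessTierHot constant are assumed from the surrounding package, not shown in this diff.

package azblob // hypothetical example file inside the generated package

import "context"

// commitWithTierExample is an illustrative sketch only: it commits staged block IDs
// and sets the access tier in the same request; checksum and CPK parameters are unset.
func commitWithTierExample(ctx context.Context, client blockBlobClient, blockIDs []string) error {
	blocks := BlockLookupList{Latest: blockIDs} // field name assumed
	_, err := client.CommitBlockList(ctx, blocks,
		nil,                     // timeout
		nil, nil, nil, nil, nil, // blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5
		nil, nil,                // transactionalContentMD5, transactionalContentCrc64
		nil, nil, nil,           // metadata, leaseID, blobContentDisposition
		nil, nil, EncryptionAlgorithmNone, // CPK not used
		AccessTierHot,           // tier -> x-ms-access-tier (constant name assumed)
		nil, nil, nil, nil,      // ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch
		nil)                     // requestID
	return err
}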
@@ -238,13 +263,19 @@ func (client blockBlobClient) getBlockListResponder(resp pipeline.Response) (pip
// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the
// same size for each block. contentLength is the length of the request. body is initial data body will be closed upon
// successful return. Callers should ensure closure when receiving an error.transactionalContentMD5 is specify the
// transactional md5 for the body, to be validated by the service. timeout is the timeout parameter is expressed in
// transactional md5 for the body, to be validated by the service. transactionalContentCrc64 is specify the
// transactional crc64 for the body, to be validated by the service. timeout is the timeout parameter is expressed in
// seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (*BlockBlobStageBlockResponse, error) {
// lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to encrypt the
// data provided in the request. If not specified, encryption is performed with the root account encryption key. For
// more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the
// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the
// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided
// if the x-ms-encryption-key header is provided. requestID is provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, requestID *string) (*BlockBlobStageBlockResponse, error) {
if err := validate([]validation{
{targetValue: body,
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
@@ -253,7 +284,7 @@ func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, co
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.stageBlockPreparer(blockID, contentLength, body, transactionalContentMD5, timeout, leaseID, requestID)
req, err := client.stageBlockPreparer(blockID, contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, requestID)
if err != nil {
return nil, err
}
@@ -265,7 +296,7 @@ func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, co
}

// stageBlockPreparer prepares the StageBlock request.
func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, body)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -281,9 +312,21 @@ func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength i
if transactionalContentMD5 != nil {
req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
}
if transactionalContentCrc64 != nil {
req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64))
}
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if encryptionKey != nil {
req.Header.Set("x-ms-encryption-key", *encryptionKey)
}
if encryptionKeySha256 != nil {
req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
}
if encryptionAlgorithm != EncryptionAlgorithmNone {
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
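The transactionalContentMD5 and transactionalContentCrc64 parameters carry raw digests of the request body; as the preparer above shows, they are base64-encoded into the Content-MD5 and x-ms-content-crc64 headers. A hedged sketch of staging one block with a transactional MD5 (the CRC64 variant uses a storage-specific polynomial and is left unset here):

package azblob // hypothetical example file inside the generated package

import (
	"bytes"
	"context"
	"crypto/md5"
	"encoding/base64"
)

// stageBlockExample is an illustrative sketch only: the block ID must be a base64
// string, and the raw MD5 of the body is passed so the service can verify it.
func stageBlockExample(ctx context.Context, client blockBlobClient, data []byte) error {
	blockID := base64.StdEncoding.EncodeToString([]byte("block-0000")) // hypothetical block ID
	sum := md5.Sum(data)
	_, err := client.StageBlock(ctx,
		blockID,
		int64(len(data)),      // contentLength
		bytes.NewReader(data), // body (io.ReadSeeker)
		sum[:],                // transactionalContentMD5 -> Content-MD5
		nil,                   // transactionalContentCrc64
		nil,                   // timeout
		nil,                   // leaseID
		nil, nil, EncryptionAlgorithmNone, // CPK not used
		nil)                   // requestID
	return err
}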
@@ -309,24 +352,30 @@ func (client blockBlobClient) stageBlockResponder(resp pipel
// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the
// same size for each block. contentLength is the length of the request. sourceURL is specify a URL to the copy source.
// sourceRange is bytes of source data in the specified range. sourceContentMD5 is specify the md5 calculated for the
// range of bytes that must be read from the copy source. sourceContentcrc64 is specify the crc64 calculated for the
// range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in seconds. For
// more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
// lease is active and matches this ID. sourceIfModifiedSince is specify this header value to operate only on a blob if
// it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header value to operate
// only on a blob if it has not been modified since the specified date/time. sourceIfMatch is specify an ETag value to
// operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
// recorded in the analytics logs when storage analytics logging is enabled.
func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*BlockBlobStageBlockFromURLResponse, error) {
// Timeouts for Blob Service Operations.</a> encryptionKey is optional. Specifies the encryption key to use to encrypt
// the data provided in the request. If not specified, encryption is performed with the root account encryption key.
// For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of
// the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is
// the algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be
// provided if the x-ms-encryption-key header is provided. leaseID is if specified, the operation only succeeds if the
// resource's lease is active and matches this ID. sourceIfModifiedSince is specify this header value to operate only
// on a blob if it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header
// value to operate only on a blob if it has not been modified since the specified date/time. sourceIfMatch is specify
// an ETag value to operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate
// only on blobs without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*BlockBlobStageBlockFromURLResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.stageBlockFromURLPreparer(blockID, contentLength, sourceURL, sourceRange, sourceContentMD5, timeout, leaseID, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
req, err := client.stageBlockFromURLPreparer(blockID, contentLength, sourceURL, sourceRange, sourceContentMD5, sourceContentcrc64, timeout, encryptionKey, encryptionKeySha256, encryptionAlgorithm, leaseID, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
if err != nil {
return nil, err
}
@@ -338,7 +387,7 @@ func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID str
}

// stageBlockFromURLPreparer prepares the StageBlockFromURL request.
func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -358,6 +407,18 @@ func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentL
if sourceContentMD5 != nil {
req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5))
}
if sourceContentcrc64 != nil {
req.Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(sourceContentcrc64))
}
if encryptionKey != nil {
req.Header.Set("x-ms-encryption-key", *encryptionKey)
}
if encryptionKeySha256 != nil {
req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
}
if encryptionAlgorithm != EncryptionAlgorithmNone {
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
}
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
@@ -400,27 +461,33 @@ func (client blockBlobClient) stageBlockFromURLResponder(resp pipeline.Response)
// error.contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
// information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> blobContentType is optional. Sets the blob's content type. If specified,
// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the
// blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
// blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the
// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this
// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded.
// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and
// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the
// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobUploadResponse, error) {
// Timeouts for Blob Service Operations.</a> transactionalContentMD5 is specify the transactional md5 for the body, to
// be validated by the service. blobContentType is optional. Sets the blob's content type. If specified, this property
// is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the blob's content
// encoding. If specified, this property is stored with the blob and returned with a read request. blobContentLanguage
// is optional. Set the blob's content language. If specified, this property is stored with the blob and returned with
// a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this hash is not validated,
// as the hashes for the individual blocks were validated when each was uploaded. blobCacheControl is optional. Sets
// the blob's cache control. If specified, this property is stored with the blob and returned with a read request.
// metadata is optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are
// specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more
// name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not
// copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the
// naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information.
// leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
// blobContentDisposition is optional. Sets the blob's Content-Disposition header. encryptionKey is optional. Specifies
// the encryption key to use to encrypt the data provided in the request. If not specified, encryption is performed
// with the root account encryption key. For more information, see Encryption at Rest for Azure Storage Services.
// encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key
// header is provided. encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the
// only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. tier is optional.
// Indicates the tier to be set on the blob. ifModifiedSince is specify this header value to operate only on a blob if
// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only
// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate
// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
// in the analytics logs when storage analytics logging is enabled.
func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobUploadResponse, error) {
if err := validate([]validation{
{targetValue: body,
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
@@ -429,7 +496,7 @@ func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, co
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.uploadPreparer(body, contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
req, err := client.uploadPreparer(body, contentLength, timeout, transactionalContentMD5, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
@@ -441,7 +508,7 @@ func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, co
}

// uploadPreparer prepares the Upload request.
func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, body)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -451,6 +518,9 @@ func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength i
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
req.URL.RawQuery = params.Encode()
if transactionalContentMD5 != nil {
req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
}
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
if blobContentType != nil {
req.Header.Set("x-ms-blob-content-type", *blobContentType)
@@ -478,6 +548,18 @@ func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength i
if blobContentDisposition != nil {
req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition)
}
if encryptionKey != nil {
req.Header.Set("x-ms-encryption-key", *encryptionKey)
}
if encryptionKeySha256 != nil {
req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
}
if encryptionAlgorithm != EncryptionAlgorithmNone {
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
}
if tier != AccessTierNone {
req.Header.Set("x-ms-access-tier", string(tier))
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
2  vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go  generated  vendored
@@ -10,7 +10,7 @@ import (

const (
// ServiceVersion specifies the version of the operations used in this package.
ServiceVersion = "2018-11-09"
ServiceVersion = "2019-02-02"
)

// managementClient is the base client for Azblob.
1327  vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go  generated  vendored
File diff suppressed because it is too large
225  vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go  generated  vendored
@ -33,23 +33,28 @@ func newPageBlobClient(url url.URL, p pipeline.Pipeline) pageBlobClient {
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
|
||||
// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
|
||||
// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number
|
||||
// less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob
|
||||
// if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate
|
||||
// only on a blob if it has the specified sequence number. ifModifiedSince is specify this header value to operate only
|
||||
// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
|
||||
// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
|
||||
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
|
||||
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
|
||||
// recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobClearPagesResponse, error) {
|
||||
// encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in the request. If not
|
||||
// specified, encryption is performed with the root account encryption key. For more information, see Encryption at
|
||||
// Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be
|
||||
// provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the
|
||||
// encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key
|
||||
// header is provided. ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it
|
||||
// has a sequence number less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to
|
||||
// operate only on a blob if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this
|
||||
// header value to operate only on a blob if it has the specified sequence number. ifModifiedSince is specify this
|
||||
// header value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is
|
||||
// specify this header value to operate only on a blob if it has not been modified since the specified date/time.
|
||||
// ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag
|
||||
// value to operate only on blobs without a matching value. requestID is provides a client-generated, opaque value with
|
||||
// a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobClearPagesResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.clearPagesPreparer(contentLength, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
req, err := client.clearPagesPreparer(contentLength, timeout, rangeParameter, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -61,7 +66,7 @@ func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64
|
||||
}
|
||||
|
||||
// clearPagesPreparer prepares the ClearPages request.
|
||||
func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
@ -79,6 +84,15 @@ func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *in
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
if encryptionKey != nil {
|
||||
req.Header.Set("x-ms-encryption-key", *encryptionKey)
|
||||
}
|
||||
if encryptionKeySha256 != nil {
|
||||
req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
|
||||
}
|
||||
if encryptionAlgorithm != EncryptionAlgorithmNone {
|
||||
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
|
||||
}
|
||||
if ifSequenceNumberLessThanOrEqualTo != nil {
|
||||
req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10))
|
||||
}
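
The new x-ms-encryption-key / x-ms-encryption-key-sha256 / x-ms-encryption-algorithm headers above carry a customer-provided key, its digest, and the algorithm name. As a hedged sketch (not part of azblob; cpkHeaders is an invented helper), this is how a caller could derive both header values from a raw 256-bit key, assuming the service expects the key and its SHA-256 digest base64-encoded, as these header setters suggest:

package main

import (
    "crypto/sha256"
    "encoding/base64"
    "fmt"
)

// cpkHeaders derives the two header values from a raw customer-provided key:
// the key itself, base64-encoded, and the base64-encoded SHA-256 of the key.
func cpkHeaders(rawKey []byte) (encryptionKey, encryptionKeySHA256 string) {
    digest := sha256.Sum256(rawKey)
    return base64.StdEncoding.EncodeToString(rawKey),
        base64.StdEncoding.EncodeToString(digest[:])
}

func main() {
    key := make([]byte, 32) // stand-in for a real random 256-bit key
    k, kSHA := cpkHeaders(key)
    fmt.Println("x-ms-encryption-key:", k)
    fmt.Println("x-ms-encryption-key-sha256:", kSHA)
    fmt.Println("x-ms-encryption-algorithm: AES256")
}
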
|
||||
@ -202,35 +216,41 @@ func (client pageBlobClient) copyIncrementalResponder(resp pipeline.Response) (p
|
||||
// blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary. timeout is the timeout parameter is
|
||||
// expressed in seconds. For more information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> blobContentType is optional. Sets the blob's content type. If specified,
|
||||
// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the
|
||||
// blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
|
||||
// blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the
|
||||
// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this
|
||||
// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded.
|
||||
// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and
|
||||
// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the
|
||||
// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
|
||||
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
|
||||
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
|
||||
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
|
||||
// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is
|
||||
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
|
||||
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
|
||||
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
|
||||
// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
|
||||
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. blobSequenceNumber is set
|
||||
// for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of
|
||||
// the sequence number must be between 0 and 2^63 - 1. requestID is provides a client-generated, opaque value with a 1
|
||||
// KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client pageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (*PageBlobCreateResponse, error) {
|
||||
// Timeouts for Blob Service Operations.</a> tier is optional. Indicates the tier to be set on the page blob.
|
||||
// blobContentType is optional. Sets the blob's content type. If specified, this property is stored with the blob and
|
||||
// returned with a read request. blobContentEncoding is optional. Sets the blob's content encoding. If specified, this
|
||||
// property is stored with the blob and returned with a read request. blobContentLanguage is optional. Set the blob's
|
||||
// content language. If specified, this property is stored with the blob and returned with a read request.
|
||||
// blobContentMD5 is optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for
|
||||
// the individual blocks were validated when each was uploaded. blobCacheControl is optional. Sets the blob's cache
|
||||
// control. If specified, this property is stored with the blob and returned with a read request. metadata is optional.
|
||||
// Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
|
||||
// operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value
|
||||
// pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from
|
||||
// the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules
|
||||
// for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. leaseID is if
|
||||
// specified, the operation only succeeds if the resource's lease is active and matches this ID. blobContentDisposition
|
||||
// is optional. Sets the blob's Content-Disposition header. encryptionKey is optional. Specifies the encryption key to
|
||||
// use to encrypt the data provided in the request. If not specified, encryption is performed with the root account
|
||||
// encryption key. For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the
|
||||
// SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided.
|
||||
// encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the only accepted value is
|
||||
// "AES256". Must be provided if the x-ms-encryption-key header is provided. ifModifiedSince is specify this header
|
||||
// value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify
|
||||
// this header value to operate only on a blob if it has not been modified since the specified date/time. ifMatch is
|
||||
// specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to
|
||||
// operate only on blobs without a matching value. blobSequenceNumber is set for page blobs only. The sequence number
|
||||
// is a user-controlled value that you can use to track requests. The value of the sequence number must be between 0
|
||||
// and 2^63 - 1. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in
|
||||
// the analytics logs when storage analytics logging is enabled.
|
||||
func (client pageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, timeout *int32, tier PremiumPageBlobAccessTierType, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (*PageBlobCreateResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.createPreparer(contentLength, blobContentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, blobSequenceNumber, requestID)
|
||||
req, err := client.createPreparer(contentLength, blobContentLength, timeout, tier, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, blobSequenceNumber, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -242,7 +262,7 @@ func (client pageBlobClient) Create(ctx context.Context, contentLength int64, bl
|
||||
}
|
||||
|
||||
// createPreparer prepares the Create request.
|
||||
func (client pageBlobClient) createPreparer(contentLength int64, blobContentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) {
|
||||
func (client pageBlobClient) createPreparer(contentLength int64, blobContentLength int64, timeout *int32, tier PremiumPageBlobAccessTierType, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
@ -253,6 +273,9 @@ func (client pageBlobClient) createPreparer(contentLength int64, blobContentLeng
|
||||
}
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||
if tier != PremiumPageBlobAccessTierNone {
|
||||
req.Header.Set("x-ms-access-tier", string(tier))
|
||||
}
|
||||
if blobContentType != nil {
|
||||
req.Header.Set("x-ms-blob-content-type", *blobContentType)
|
||||
}
|
||||
@ -279,6 +302,15 @@ func (client pageBlobClient) createPreparer(contentLength int64, blobContentLeng
|
||||
if blobContentDisposition != nil {
|
||||
req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition)
|
||||
}
|
||||
if encryptionKey != nil {
|
||||
req.Header.Set("x-ms-encryption-key", *encryptionKey)
|
||||
}
|
||||
if encryptionKeySha256 != nil {
|
||||
req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
|
||||
}
|
||||
if encryptionAlgorithm != EncryptionAlgorithmNone {
|
||||
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
|
||||
}
|
||||
if ifModifiedSince != nil {
|
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
@ -526,20 +558,25 @@ func (client pageBlobClient) getPageRangesDiffResponder(resp pipeline.Response)
|
||||
// see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||
// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it
|
||||
// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
|
||||
// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
|
||||
// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
||||
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
|
||||
// logs when storage analytics logging is enabled.
|
||||
func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobResizeResponse, error) {
|
||||
// lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to encrypt the
|
||||
// data provided in the request. If not specified, encryption is performed with the root account encryption key. For
|
||||
// more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the
|
||||
// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the
|
||||
// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided
|
||||
// if the x-ms-encryption-key header is provided. ifModifiedSince is specify this header value to operate only on a
|
||||
// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
|
||||
// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
|
||||
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
|
||||
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
|
||||
// recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobResizeResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.resizePreparer(blobContentLength, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
req, err := client.resizePreparer(blobContentLength, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -551,7 +588,7 @@ func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64
|
||||
}
|
||||
|
||||
// resizePreparer prepares the Resize request.
|
||||
func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
@ -565,6 +602,15 @@ func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *in
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
if encryptionKey != nil {
|
||||
req.Header.Set("x-ms-encryption-key", *encryptionKey)
|
||||
}
|
||||
if encryptionKeySha256 != nil {
|
||||
req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
|
||||
}
|
||||
if encryptionAlgorithm != EncryptionAlgorithmNone {
|
||||
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
|
||||
}
|
||||
if ifModifiedSince != nil {
|
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
@ -682,21 +728,26 @@ func (client pageBlobClient) updateSequenceNumberResponder(resp pipeline.Respons
|
||||
//
|
||||
// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an
|
||||
// error.contentLength is the length of the request. transactionalContentMD5 is specify the transactional md5 for the
|
||||
// body, to be validated by the service. timeout is the timeout parameter is expressed in seconds. For more
|
||||
// information, see <a
|
||||
// body, to be validated by the service. transactionalContentCrc64 is specify the transactional crc64 for the body, to
|
||||
// be validated by the service. timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
|
||||
// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
|
||||
// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number
|
||||
// less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob
|
||||
// if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate
|
||||
// only on a blob if it has the specified sequence number. ifModifiedSince is specify this header value to operate only
|
||||
// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
|
||||
// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
|
||||
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
|
||||
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
|
||||
// recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesResponse, error) {
|
||||
// encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in the request. If not
|
||||
// specified, encryption is performed with the root account encryption key. For more information, see Encryption at
|
||||
// Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be
|
||||
// provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the
|
||||
// encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key
|
||||
// header is provided. ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it
|
||||
// has a sequence number less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to
|
||||
// operate only on a blob if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this
|
||||
// header value to operate only on a blob if it has the specified sequence number. ifModifiedSince is specify this
|
||||
// header value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is
|
||||
// specify this header value to operate only on a blob if it has not been modified since the specified date/time.
|
||||
// ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag
|
||||
// value to operate only on blobs without a matching value. requestID is provides a client-generated, opaque value with
|
||||
// a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: body,
|
||||
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
|
||||
@ -705,7 +756,7 @@ func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.uploadPagesPreparer(body, contentLength, transactionalContentMD5, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
req, err := client.uploadPagesPreparer(body, contentLength, transactionalContentMD5, transactionalContentCrc64, timeout, rangeParameter, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -717,7 +768,7 @@ func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker
|
||||
}
|
||||
|
||||
// uploadPagesPreparer prepares the UploadPages request.
|
||||
func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, body)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
@ -732,12 +783,24 @@ func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLeng
|
||||
if transactionalContentMD5 != nil {
|
||||
req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
|
||||
}
|
||||
if transactionalContentCrc64 != nil {
|
||||
req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64))
|
||||
}
|
||||
if rangeParameter != nil {
|
||||
req.Header.Set("x-ms-range", *rangeParameter)
|
||||
}
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
if encryptionKey != nil {
|
||||
req.Header.Set("x-ms-encryption-key", *encryptionKey)
|
||||
}
|
||||
if encryptionKeySha256 != nil {
|
||||
req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
|
||||
}
|
||||
if encryptionAlgorithm != EncryptionAlgorithmNone {
|
||||
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
|
||||
}
|
||||
if ifSequenceNumberLessThanOrEqualTo != nil {
|
||||
req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10))
|
||||
}
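
The Content-MD5 and x-ms-content-crc64 headers set above are base64-encoded digests of the uploaded page payload. A minimal sketch of how a caller could compute the transactionalContentMD5 argument from an in-memory page buffer (pageMD5 is an invented helper; the CRC64 variant uses an Azure-specific polynomial and is not shown here):

package main

import (
    "bytes"
    "crypto/md5"
    "fmt"
)

// pageMD5 returns the raw MD5 digest of the page payload; the preparer above
// base64-encodes it into the Content-MD5 header.
func pageMD5(page []byte) []byte {
    sum := md5.Sum(page)
    return sum[:]
}

func main() {
    payload := bytes.Repeat([]byte{0}, 512) // page ranges are 512-byte aligned
    fmt.Printf("%x\n", pageMD5(payload))
}
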
|
||||
@ -785,32 +848,38 @@ func (client pageBlobClient) uploadPagesResponder(resp pipeline.Response) (pipel
|
||||
// length of this range should match the ContentLength header and x-ms-range/Range destination range header.
|
||||
// contentLength is the length of the request. rangeParameter is the range of bytes to which the source range would be
|
||||
// written. The range should be 512 aligned and range-end is required. sourceContentMD5 is specify the md5 calculated
|
||||
// for the range of bytes that must be read from the copy source. sourceContentcrc64 is specify the crc64 calculated
|
||||
// for the range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in
|
||||
// seconds. For more information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||
// lease is active and matches this ID. ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only
|
||||
// on a blob if it has a sequence number less than or equal to the specified. ifSequenceNumberLessThan is specify this
|
||||
// header value to operate only on a blob if it has a sequence number less than the specified. ifSequenceNumberEqualTo
|
||||
// is specify this header value to operate only on a blob if it has the specified sequence number. ifModifiedSince is
|
||||
// specify this header value to operate only on a blob if it has been modified since the specified date/time.
|
||||
// ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the
|
||||
// specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is
|
||||
// specify an ETag value to operate only on blobs without a matching value. sourceIfModifiedSince is specify this
|
||||
// header value to operate only on a blob if it has been modified since the specified date/time.
|
||||
// Timeouts for Blob Service Operations.</a> encryptionKey is optional. Specifies the encryption key to use to encrypt
|
||||
// the data provided in the request. If not specified, encryption is performed with the root account encryption key.
|
||||
// For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of
|
||||
// the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is
|
||||
// the algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be
|
||||
// provided if the x-ms-encryption-key header is provided. leaseID is if specified, the operation only succeeds if the
|
||||
// resource's lease is active and matches this ID. ifSequenceNumberLessThanOrEqualTo is specify this header value to
|
||||
// operate only on a blob if it has a sequence number less than or equal to the specified. ifSequenceNumberLessThan is
|
||||
// specify this header value to operate only on a blob if it has a sequence number less than the specified.
|
||||
// ifSequenceNumberEqualTo is specify this header value to operate only on a blob if it has the specified sequence
|
||||
// number. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the
|
||||
// specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been
|
||||
// modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching
|
||||
// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. sourceIfModifiedSince
|
||||
// is specify this header value to operate only on a blob if it has been modified since the specified date/time.
|
||||
// sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the
|
||||
// specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a matching value.
|
||||
// sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides
|
||||
// a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
|
||||
// analytics logging is enabled.
|
||||
func (client pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, timeout *int32, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesFromURLResponse, error) {
|
||||
func (client pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesFromURLResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.uploadPagesFromURLPreparer(sourceURL, sourceRange, contentLength, rangeParameter, sourceContentMD5, timeout, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
|
||||
req, err := client.uploadPagesFromURLPreparer(sourceURL, sourceRange, contentLength, rangeParameter, sourceContentMD5, sourceContentcrc64, timeout, encryptionKey, encryptionKeySha256, encryptionAlgorithm, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -822,7 +891,7 @@ func (client pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL s
|
||||
}
|
||||
|
||||
// uploadPagesFromURLPreparer prepares the UploadPagesFromURL request.
|
||||
func (client pageBlobClient) uploadPagesFromURLPreparer(sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, timeout *int32, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
func (client pageBlobClient) uploadPagesFromURLPreparer(sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
@ -838,8 +907,20 @@ func (client pageBlobClient) uploadPagesFromURLPreparer(sourceURL string, source
|
||||
if sourceContentMD5 != nil {
|
||||
req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5))
|
||||
}
|
||||
if sourceContentcrc64 != nil {
|
||||
req.Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(sourceContentcrc64))
|
||||
}
|
||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||
req.Header.Set("x-ms-range", rangeParameter)
|
||||
if encryptionKey != nil {
|
||||
req.Header.Set("x-ms-encryption-key", *encryptionKey)
|
||||
}
|
||||
if encryptionKeySha256 != nil {
|
||||
req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
|
||||
}
|
||||
if encryptionAlgorithm != EncryptionAlgorithmNone {
|
||||
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
|
||||
}
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
|
61 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go generated vendored
@ -203,7 +203,7 @@ func (client serviceClient) getStatisticsResponder(resp pipeline.Response) (pipe
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// GetUserDelegationKey retrieves a user delgation key for the Blob service. This is only a valid operation when using
|
||||
// GetUserDelegationKey retrieves a user delegation key for the Blob service. This is only a valid operation when using
|
||||
// bearer token authentication.
|
||||
//
|
||||
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||
@ -465,3 +465,62 @@ func (client serviceClient) setPropertiesResponder(resp pipeline.Response) (pipe
|
||||
resp.Response().Body.Close()
|
||||
return &ServiceSetPropertiesResponse{rawResponse: resp.Response()}, err
|
||||
}
|
||||
|
||||
// SubmitBatch the Batch operation allows multiple API calls to be embedded into a single HTTP request.
|
||||
//
|
||||
// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an
|
||||
// error.contentLength is the length of the request. multipartContentType is required. The value of this header must be
|
||||
// multipart/mixed with a batch boundary. Example header value: multipart/mixed; boundary=batch_<GUID> timeout is the
|
||||
// timeout parameter is expressed in seconds. For more information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
|
||||
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client serviceClient) SubmitBatch(ctx context.Context, body io.ReadSeeker, contentLength int64, multipartContentType string, timeout *int32, requestID *string) (*SubmitBatchResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: body,
|
||||
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.submitBatchPreparer(body, contentLength, multipartContentType, timeout, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.submitBatchResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*SubmitBatchResponse), err
|
||||
}
|
||||
|
||||
// submitBatchPreparer prepares the SubmitBatch request.
|
||||
func (client serviceClient) submitBatchPreparer(body io.ReadSeeker, contentLength int64, multipartContentType string, timeout *int32, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("POST", client.url, body)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
params.Set("comp", "batch")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||
req.Header.Set("Content-Type", multipartContentType)
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// submitBatchResponder handles the response to the SubmitBatch request.
|
||||
func (client serviceClient) submitBatchResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||
err := validateResponse(resp, http.StatusOK)
|
||||
if resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
return &SubmitBatchResponse{rawResponse: resp.Response()}, err
|
||||
}
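
The SubmitBatch doc comment above requires multipartContentType to look like "multipart/mixed; boundary=batch_<GUID>". A hedged sketch of building such a value with the standard library only (the random-hex boundary merely imitates the GUID shape; it is not an azblob API):

package main

import (
    "bytes"
    "crypto/rand"
    "fmt"
    "mime/multipart"
)

func main() {
    var body bytes.Buffer
    w := multipart.NewWriter(&body)

    // Give the boundary the "batch_<GUID>" shape mentioned in the comment;
    // a random hex string stands in for a real GUID here.
    b := make([]byte, 16)
    if _, err := rand.Read(b); err != nil {
        panic(err)
    }
    if err := w.SetBoundary(fmt.Sprintf("batch_%x", b)); err != nil {
        panic(err)
    }

    multipartContentType := "multipart/mixed; boundary=" + w.Boundary()
    fmt.Println(multipartContentType)
}
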
|
||||
|
2 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go generated vendored
@ -5,7 +5,7 @@ package azblob
|
||||
|
||||
// UserAgent returns the UserAgent string to use when sending http.Requests.
func UserAgent() string {
return "Azure-SDK-For-Go/0.0.0 azblob/2018-11-09"
return "Azure-SDK-For-Go/0.0.0 azblob/2019-02-02"
}

// Version returns the semantic version (see http://semver.org) of the client.
2 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_response_helpers.go generated vendored
@ -45,7 +45,7 @@ func (dr downloadResponse) NewHTTPHeaders() BlobHTTPHeaders {

///////////////////////////////////////////////////////////////////////////////

// DownloadResponse wraps AutoRest generated downloadResponse and helps to provide info for retry.
// DownloadResponse wraps AutoRest generated DownloadResponse and helps to provide info for retry.
type DownloadResponse struct {
r *downloadResponse
ctx context.Context
113 vendor/github.com/aalpar/deheap/fuzz.go generated vendored Normal file
@ -0,0 +1,113 @@
|
||||
// Run fuzz tests on the package
|
||||
//
|
||||
// This is done with a byte heap to test and a simple reimplementation
|
||||
// to check correctness against.
|
||||
//
|
||||
// First install go-fuzz
|
||||
//
|
||||
// go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build
|
||||
//
|
||||
// Next build the instrumented package
|
||||
//
|
||||
// go-fuzz-build
|
||||
//
|
||||
// Finally fuzz away
|
||||
//
|
||||
// go-fuzz
|
||||
//
|
||||
// See https://github.com/dvyukov/go-fuzz for more instructions
|
||||
|
||||
//+build gofuzz
|
||||
|
||||
package deheap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// An byteHeap is a double ended heap of bytes
|
||||
type byteDeheap []byte
|
||||
|
||||
func (h byteDeheap) Len() int { return len(h) }
|
||||
func (h byteDeheap) Less(i, j int) bool { return h[i] < h[j] }
|
||||
func (h byteDeheap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
|
||||
|
||||
func (h *byteDeheap) Push(x interface{}) {
|
||||
*h = append(*h, x.(byte))
|
||||
}
|
||||
|
||||
func (h *byteDeheap) Pop() interface{} {
|
||||
old := *h
|
||||
n := len(old)
|
||||
x := old[n-1]
|
||||
*h = old[:n-1]
|
||||
return x
|
||||
}
|
||||
|
||||
// sortedHeap is an inefficient reimplementation for test purposes
|
||||
type sortedHeap []byte
|
||||
|
||||
func (h *sortedHeap) Push(x byte) {
|
||||
data := *h
|
||||
i := sort.Search(len(data), func(i int) bool { return data[i] >= x })
|
||||
// i is the either the position of x or where it should be inserted
|
||||
data = append(data, 0)
|
||||
copy(data[i+1:], data[i:])
|
||||
data[i] = x
|
||||
*h = data
|
||||
}
|
||||
|
||||
func (h *sortedHeap) Pop() (x byte) {
|
||||
data := *h
|
||||
x = data[0]
|
||||
*h = data[1:]
|
||||
return x
|
||||
}
|
||||
|
||||
func (h *sortedHeap) PopMax() (x byte) {
|
||||
data := *h
|
||||
x = data[len(data)-1]
|
||||
*h = data[:len(data)-1]
|
||||
return x
|
||||
}
|
||||
|
||||
// Fuzzer input is a string of bytes.
|
||||
//
|
||||
// If the byte is one of these, then the action is performed
|
||||
// '<' Pop (minimum)
|
||||
// '>' PopMax
|
||||
// Otherwise the bytes is Pushed onto the heap
|
||||
func Fuzz(data []byte) int {
|
||||
h := &byteDeheap{}
|
||||
Init(h)
|
||||
s := sortedHeap{}
|
||||
|
||||
for _, c := range data {
|
||||
switch c {
|
||||
case '<':
|
||||
if h.Len() > 0 {
|
||||
got := Pop(h)
|
||||
want := s.Pop()
|
||||
if got != want {
|
||||
panic(fmt.Sprintf("Pop: want = %d, got = %d", want, got))
|
||||
}
|
||||
}
|
||||
case '>':
|
||||
if h.Len() > 0 {
|
||||
got := PopMax(h)
|
||||
want := s.PopMax()
|
||||
if got != want {
|
||||
panic(fmt.Sprintf("PopMax: want = %d, got = %d", want, got))
|
||||
}
|
||||
}
|
||||
default:
|
||||
Push(h, c)
|
||||
s.Push(c)
|
||||
}
|
||||
if len(s) != h.Len() {
|
||||
panic("wrong length")
|
||||
}
|
||||
}
|
||||
return 1
|
||||
}
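
For context, a small usage sketch of the deheap package this fuzz harness exercises, assuming its exported API mirrors container/heap (Init, Push, Pop, plus PopMax) exactly as the harness above calls it; intDeheap is an invented example type:

package main

import (
    "fmt"

    "github.com/aalpar/deheap"
)

// intDeheap mirrors the byteDeheap above: an ordinary heap.Interface
// implementation that deheap manages as a double-ended (min/max) heap.
type intDeheap []int

func (h intDeheap) Len() int            { return len(h) }
func (h intDeheap) Less(i, j int) bool  { return h[i] < h[j] }
func (h intDeheap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *intDeheap) Push(x interface{}) { *h = append(*h, x.(int)) }
func (h *intDeheap) Pop() interface{} {
    old := *h
    n := len(old)
    x := old[n-1]
    *h = old[:n-1]
    return x
}

func main() {
    h := &intDeheap{}
    deheap.Init(h)
    for _, v := range []int{5, 1, 9, 3} {
        deheap.Push(h, v)
    }
    fmt.Println(deheap.Pop(h))    // smallest element: 1
    fmt.Println(deheap.PopMax(h)) // largest element: 9
}
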
|
3 vendor/github.com/aws/aws-sdk-go/aws/config.go generated vendored
@ -43,7 +43,7 @@ type Config struct {
|
||||
|
||||
// An optional endpoint URL (hostname only or fully qualified URI)
|
||||
// that overrides the default generated endpoint for a client. Set this
|
||||
// to `""` to use the default generated endpoint.
|
||||
// to `nil` to use the default generated endpoint.
|
||||
//
|
||||
// Note: You must still provide a `Region` value when specifying an
|
||||
// endpoint for a client.
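
As a hedged illustration of the documented behaviour (ordinary aws-sdk-go usage, not part of this diff): a client that overrides the generated endpoint still has to supply a region, the way an S3-compatible backend would be configured. The endpoint URL below is a placeholder.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    // Endpoint overrides the generated URL; Region is still required.
    sess := session.Must(session.NewSession(&aws.Config{
        Region:   aws.String("us-east-1"),
        Endpoint: aws.String("https://storage.example.com"), // hypothetical S3-compatible endpoint
    }))
    fmt.Println(aws.StringValue(sess.Config.Endpoint))
}
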
|
||||
@ -238,6 +238,7 @@ type Config struct {
|
||||
|
||||
// EnableEndpointDiscovery will allow for endpoint discovery on operations that
|
||||
// have the definition in its model. By default, endpoint discovery is off.
|
||||
// To use EndpointDiscovery, Endpoint should be unset or set to an empty string.
|
||||
//
|
||||
// Example:
|
||||
// sess := session.Must(session.NewSession(&aws.Config{
|
||||
|
35 vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go generated vendored
@ -107,6 +107,13 @@ type Provider interface {
|
||||
IsExpired() bool
|
||||
}
|
||||
|
||||
// ProviderWithContext is a Provider that can retrieve credentials with a Context
|
||||
type ProviderWithContext interface {
|
||||
Provider
|
||||
|
||||
RetrieveWithContext(Context) (Value, error)
|
||||
}
|
||||
|
||||
// An Expirer is an interface that Providers can implement to expose the expiration
|
||||
// time, if known. If the Provider cannot accurately provide this info,
|
||||
// it should not implement this interface.
|
||||
@ -233,7 +240,9 @@ func (c *Credentials) GetWithContext(ctx Context) (Value, error) {
|
||||
// Cannot pass context down to the actual retrieve, because the first
|
||||
// context would cancel the whole group when there is not direct
|
||||
// association of items in the group.
|
||||
resCh := c.sf.DoChan("", c.singleRetrieve)
|
||||
resCh := c.sf.DoChan("", func() (interface{}, error) {
|
||||
return c.singleRetrieve(&suppressedContext{ctx})
|
||||
})
|
||||
select {
|
||||
case res := <-resCh:
|
||||
return res.Val.(Value), res.Err
|
||||
@ -243,12 +252,16 @@ func (c *Credentials) GetWithContext(ctx Context) (Value, error) {
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Credentials) singleRetrieve() (interface{}, error) {
|
||||
func (c *Credentials) singleRetrieve(ctx Context) (creds interface{}, err error) {
|
||||
if curCreds := c.creds.Load(); !c.isExpired(curCreds) {
|
||||
return curCreds.(Value), nil
|
||||
}
|
||||
|
||||
creds, err := c.provider.Retrieve()
|
||||
if p, ok := c.provider.(ProviderWithContext); ok {
|
||||
creds, err = p.RetrieveWithContext(ctx)
|
||||
} else {
|
||||
creds, err = c.provider.Retrieve()
|
||||
}
|
||||
if err == nil {
|
||||
c.creds.Store(creds)
|
||||
}
|
||||
@ -308,3 +321,19 @@ func (c *Credentials) ExpiresAt() (time.Time, error) {
|
||||
}
|
||||
return expirer.ExpiresAt(), nil
|
||||
}
|
||||
|
||||
type suppressedContext struct {
|
||||
Context
|
||||
}
|
||||
|
||||
func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) {
|
||||
return time.Time{}, false
|
||||
}
|
||||
|
||||
func (s *suppressedContext) Done() <-chan struct{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *suppressedContext) Err() error {
|
||||
return nil
|
||||
}
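
To show how the new interface slots in, here is a minimal sketch of a provider that also implements ProviderWithContext, so GetWithContext can hand its context down; fixedProvider and its values are hypothetical.

package main

import (
    "context"
    "fmt"

    "github.com/aws/aws-sdk-go/aws/credentials"
)

// fixedProvider implements both Provider and the new ProviderWithContext.
type fixedProvider struct{}

func (fixedProvider) Retrieve() (credentials.Value, error) {
    return fixedProvider{}.RetrieveWithContext(context.Background())
}

func (fixedProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
    // A real provider would honour ctx (deadlines, cancellation) while
    // talking to its credential source.
    return credentials.Value{
        AccessKeyID:     "AKIDEXAMPLE",
        SecretAccessKey: "secretexample",
        ProviderName:    "fixedProvider",
    }, nil
}

func (fixedProvider) IsExpired() bool { return false }

func main() {
    creds := credentials.NewCredentials(fixedProvider{})
    v, err := creds.GetWithContext(context.Background())
    fmt.Println(v.ProviderName, err)
}
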
|
||||
|
20 vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go generated vendored
@ -7,6 +7,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
@ -87,7 +88,14 @@ func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*
|
||||
// Error will be returned if the request fails, or unable to extract
|
||||
// the desired credentials.
|
||||
func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
|
||||
credsList, err := requestCredList(m.Client)
|
||||
return m.RetrieveWithContext(aws.BackgroundContext())
|
||||
}
|
||||
|
||||
// RetrieveWithContext retrieves credentials from the EC2 service.
|
||||
// Error will be returned if the request fails, or unable to extract
|
||||
// the desired credentials.
|
||||
func (m *EC2RoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
|
||||
credsList, err := requestCredList(ctx, m.Client)
|
||||
if err != nil {
|
||||
return credentials.Value{ProviderName: ProviderName}, err
|
||||
}
|
||||
@ -97,7 +105,7 @@ func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
|
||||
}
|
||||
credsName := credsList[0]
|
||||
|
||||
roleCreds, err := requestCred(m.Client, credsName)
|
||||
roleCreds, err := requestCred(ctx, m.Client, credsName)
|
||||
if err != nil {
|
||||
return credentials.Value{ProviderName: ProviderName}, err
|
||||
}
|
||||
@ -130,8 +138,8 @@ const iamSecurityCredsPath = "iam/security-credentials/"
|
||||
|
||||
// requestCredList requests a list of credentials from the EC2 service.
|
||||
// If there are no credentials, or there is an error making or receiving the request
|
||||
func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
|
||||
resp, err := client.GetMetadata(iamSecurityCredsPath)
|
||||
func requestCredList(ctx aws.Context, client *ec2metadata.EC2Metadata) ([]string, error) {
|
||||
resp, err := client.GetMetadataWithContext(ctx, iamSecurityCredsPath)
|
||||
if err != nil {
|
||||
return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
|
||||
}
|
||||
@ -154,8 +162,8 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
|
||||
//
|
||||
// If the credentials cannot be found, or there is an error reading the response
|
||||
// and error will be returned.
|
||||
func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
|
||||
resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName))
|
||||
func requestCred(ctx aws.Context, client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
|
||||
resp, err := client.GetMetadataWithContext(ctx, sdkuri.PathJoin(iamSecurityCredsPath, credsName))
|
||||
if err != nil {
|
||||
return ec2RoleCredRespBody{},
|
||||
awserr.New("EC2RoleRequestError",
|
||||
|
11 vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go generated vendored
@ -116,7 +116,13 @@ func (p *Provider) IsExpired() bool {
|
||||
// Retrieve will attempt to request the credentials from the endpoint the Provider
|
||||
// was configured for. And error will be returned if the retrieval fails.
|
||||
func (p *Provider) Retrieve() (credentials.Value, error) {
|
||||
resp, err := p.getCredentials()
|
||||
return p.RetrieveWithContext(aws.BackgroundContext())
|
||||
}
|
||||
|
||||
// RetrieveWithContext will attempt to request the credentials from the endpoint the Provider
|
||||
// was configured for. And error will be returned if the retrieval fails.
|
||||
func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
|
||||
resp, err := p.getCredentials(ctx)
|
||||
if err != nil {
|
||||
return credentials.Value{ProviderName: ProviderName},
|
||||
awserr.New("CredentialsEndpointError", "failed to load credentials", err)
|
||||
@ -148,7 +154,7 @@ type errorOutput struct {
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
|
||||
func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error) {
|
||||
op := &request.Operation{
|
||||
Name: "GetCredentials",
|
||||
HTTPMethod: "GET",
|
||||
@ -156,6 +162,7 @@ func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
|
||||
|
||||
out := &getCredentialsOutput{}
|
||||
req := p.Client.NewRequest(op, nil, out)
|
||||
req.SetContext(ctx)
|
||||
req.HTTPRequest.Header.Set("Accept", "application/json")
|
||||
if authToken := p.AuthorizationToken; len(authToken) != 0 {
|
||||
req.HTTPRequest.Header.Set("Authorization", authToken)
|
||||
|
5 vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go generated vendored
@ -17,8 +17,9 @@ var (
|
||||
ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
|
||||
)
|
||||
|
||||
// A SharedCredentialsProvider retrieves credentials from the current user's home
|
||||
// directory, and keeps track if those credentials are expired.
|
||||
// A SharedCredentialsProvider retrieves access key pair (access key ID,
|
||||
// secret access key, and session token if present) credentials from the current
|
||||
// user's home directory, and keeps track if those credentials are expired.
|
||||
//
|
||||
// Profile ini file example: $HOME/.aws/credentials
|
||||
type SharedCredentialsProvider struct {
|
||||
|
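
A hedged usage sketch of the provider whose doc comment was expanded above (standard aws-sdk-go API; the profile name is a placeholder): an empty filename means the default $HOME/.aws/credentials path.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
    // "" selects the default credentials file; the second argument names the
    // profile whose access key pair (and optional session token) to load.
    creds := credentials.NewSharedCredentials("", "work-profile")
    v, err := creds.Get()
    fmt.Println(v.ProviderName, err)
}
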
4 vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go generated vendored
@ -19,7 +19,9 @@ type StaticProvider struct {
|
||||
}
|
||||
|
||||
// NewStaticCredentials returns a pointer to a new Credentials object
|
||||
// wrapping a static credentials value provider.
|
||||
// wrapping a static credentials value provider. Token is only required
|
||||
// for temporary security credentials retrieved via STS, otherwise an empty
|
||||
// string can be passed for this parameter.
|
||||
func NewStaticCredentials(id, secret, token string) *Credentials {
|
||||
return NewCredentials(&StaticProvider{Value: Value{
|
||||
AccessKeyID: id,
|
||||
|
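
A hedged usage sketch for the clarified NewStaticCredentials doc comment above (standard aws-sdk-go API; key values are placeholders): pass an empty token unless the keys are temporary STS credentials.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    // Long-lived access keys: the third argument (session token) stays empty.
    creds := credentials.NewStaticCredentials("AKIDEXAMPLE", "secretexample", "")

    sess := session.Must(session.NewSession(&aws.Config{
        Region:      aws.String("us-east-1"),
        Credentials: creds,
    }))
    fmt.Println(sess.Config.Credentials != nil)
}
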
44 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go generated vendored
@ -87,6 +87,7 @@ import (
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/internal/sdkrand"
|
||||
"github.com/aws/aws-sdk-go/service/sts"
|
||||
)
|
||||
@ -118,6 +119,10 @@ type AssumeRoler interface {
|
||||
AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
|
||||
}
|
||||
|
||||
type assumeRolerWithContext interface {
|
||||
AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error)
|
||||
}
|
||||
|
||||
// DefaultDuration is the default amount of time in minutes that the credentials
|
||||
// will be valid for.
|
||||
var DefaultDuration = time.Duration(15) * time.Minute
|
||||
@ -164,6 +169,29 @@ type AssumeRoleProvider struct {
|
||||
// size.
|
||||
Policy *string
|
||||
|
||||
// The ARNs of IAM managed policies you want to use as managed session policies.
|
||||
// The policies must exist in the same account as the role.
|
||||
//
|
||||
// This parameter is optional. You can provide up to 10 managed policy ARNs.
|
||||
// However, the plain text that you use for both inline and managed session
|
||||
// policies can't exceed 2,048 characters.
|
||||
//
|
||||
// An AWS conversion compresses the passed session policies and session tags
|
||||
// into a packed binary format that has a separate limit. Your request can fail
|
||||
// for this limit even if your plain text meets the other requirements. The
|
||||
// PackedPolicySize response element indicates by percentage how close the policies
|
||||
// and tags for your request are to the upper size limit.
|
||||
//
|
||||
// Passing policies to this operation returns new temporary credentials. The
|
||||
// resulting session's permissions are the intersection of the role's identity-based
|
||||
// policy and the session policies. You can use the role's temporary credentials
|
||||
// in subsequent AWS API calls to access resources in the account that owns
|
||||
// the role. You cannot use session policies to grant more permissions than
|
||||
// those allowed by the identity-based policy of the role that is being assumed.
|
||||
// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
|
||||
// in the IAM User Guide.
|
||||
PolicyArns []*sts.PolicyDescriptorType
|
||||
|
||||
// The identification number of the MFA device that is associated with the user
|
||||
// who is making the AssumeRole call. Specify this value if the trust policy
|
||||
// of the role being assumed includes a condition that requires MFA authentication.
|
||||
@ -265,6 +293,11 @@ func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*
|
||||
|
||||
// Retrieve generates a new set of temporary credentials using STS.
|
||||
func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
|
||||
return p.RetrieveWithContext(aws.BackgroundContext())
|
||||
}
|
||||
|
||||
// RetrieveWithContext generates a new set of temporary credentials using STS.
|
||||
func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
|
||||
// Apply defaults where parameters are not set.
|
||||
if p.RoleSessionName == "" {
|
||||
// Try to work out a role name that will hopefully end up unique.
|
||||
@ -281,6 +314,7 @@ func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
|
||||
RoleSessionName: aws.String(p.RoleSessionName),
|
||||
ExternalId: p.ExternalID,
|
||||
Tags: p.Tags,
|
||||
PolicyArns: p.PolicyArns,
|
||||
TransitiveTagKeys: p.TransitiveTagKeys,
|
||||
}
|
||||
if p.Policy != nil {
|
||||
@ -304,7 +338,15 @@ func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
|
||||
}
|
||||
}
|
||||
|
||||
roleOutput, err := p.Client.AssumeRole(input)
|
||||
var roleOutput *sts.AssumeRoleOutput
|
||||
var err error
|
||||
|
||||
if c, ok := p.Client.(assumeRolerWithContext); ok {
|
||||
roleOutput, err = c.AssumeRoleWithContext(ctx, input)
|
||||
} else {
|
||||
roleOutput, err = p.Client.AssumeRole(input)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return credentials.Value{ProviderName: ProviderName}, err
|
||||
}
|
||||
|
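For reference, a minimal usage sketch of the updated provider (not part of the diff): it wires an AssumeRoleProvider through stscreds.NewCredentials, and with this vendored version the underlying AssumeRole call is forwarded with a request context when the STS client implements AssumeRoleWithContext. The role ARN below is a placeholder.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Base session using the default credential chain.
	sess := session.Must(session.NewSession())

	// Wrap it in an assume-role provider; the ARN is a placeholder.
	creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/example-role")

	// Get triggers AssumeRole; Retrieve now delegates to RetrieveWithContext
	// internally, so the call can be cancelled via the request context.
	v, err := creds.Get()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("credentials from:", v.ProviderName)
}
```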
45	vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go (generated, vendored)
@@ -28,15 +28,34 @@ const (
// compare test values.
var now = time.Now

// TokenFetcher shuold return WebIdentity token bytes or an error
type TokenFetcher interface {
FetchToken(credentials.Context) ([]byte, error)
}

// FetchTokenPath is a path to a WebIdentity token file
type FetchTokenPath string

// FetchToken returns a token by reading from the filesystem
func (f FetchTokenPath) FetchToken(ctx credentials.Context) ([]byte, error) {
data, err := ioutil.ReadFile(string(f))
if err != nil {
errMsg := fmt.Sprintf("unable to read file at %s", f)
return nil, awserr.New(ErrCodeWebIdentity, errMsg, err)
}
return data, nil
}

// WebIdentityRoleProvider is used to retrieve credentials using
// an OIDC token.
type WebIdentityRoleProvider struct {
credentials.Expiry
PolicyArns []*sts.PolicyDescriptorType

client stsiface.STSAPI
ExpiryWindow time.Duration

tokenFilePath string
tokenFetcher TokenFetcher
roleARN string
roleSessionName string
}
@@ -52,9 +71,15 @@ func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName
// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the
// provided stsiface.STSAPI
func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider {
return NewWebIdentityRoleProviderWithToken(svc, roleARN, roleSessionName, FetchTokenPath(path))
}

// NewWebIdentityRoleProviderWithToken will return a new WebIdentityRoleProvider with the
// provided stsiface.STSAPI and a TokenFetcher
func NewWebIdentityRoleProviderWithToken(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher) *WebIdentityRoleProvider {
return &WebIdentityRoleProvider{
client: svc,
tokenFilePath: path,
tokenFetcher: tokenFetcher,
roleARN: roleARN,
roleSessionName: roleSessionName,
}
@@ -64,10 +89,16 @@ func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, p
// 'WebIdentityTokenFilePath' specified destination and if that is empty an
// error will be returned.
func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) {
b, err := ioutil.ReadFile(p.tokenFilePath)
return p.RetrieveWithContext(aws.BackgroundContext())
}

// RetrieveWithContext will attempt to assume a role from a token which is located at
// 'WebIdentityTokenFilePath' specified destination and if that is empty an
// error will be returned.
func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
b, err := p.tokenFetcher.FetchToken(ctx)
if err != nil {
errMsg := fmt.Sprintf("unable to read file at %s", p.tokenFilePath)
return credentials.Value{}, awserr.New(ErrCodeWebIdentity, errMsg, err)
return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed fetching WebIdentity token: ", err)
}

sessionName := p.roleSessionName
@@ -77,10 +108,14 @@ func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) {
sessionName = strconv.FormatInt(now().UnixNano(), 10)
}
req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
PolicyArns: p.PolicyArns,
RoleArn: &p.roleARN,
RoleSessionName: &sessionName,
WebIdentityToken: aws.String(string(b)),
})

req.SetContext(ctx)

// InvalidIdentityToken error is a temporary error that can occur
// when assuming an Role with a JWT web identity token.
req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException)
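As a usage sketch (not part of the diff), the new constructor accepts any TokenFetcher; FetchTokenPath covers the common file-based case. The role ARN, session name and token path below are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())

	// FetchTokenPath satisfies the TokenFetcher interface by reading the
	// OIDC token from a file; path and ARN are placeholders.
	provider := stscreds.NewWebIdentityRoleProviderWithToken(
		sts.New(sess),
		"arn:aws:iam::123456789012:role/example-role",
		"example-session",
		stscreds.FetchTokenPath("/var/run/secrets/eks.amazonaws.com/serviceaccount/token"),
	)

	v, err := credentials.NewCredentials(provider).Get()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("got credentials:", v.AccessKeyID != "")
}
```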
61	vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go (generated, vendored)
@@ -8,6 +8,7 @@ import (
"strings"
"time"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/sdkuri"
@@ -15,7 +16,7 @@ import (

// getToken uses the duration to return a token for EC2 metadata service,
// or an error if the request failed.
func (c *EC2Metadata) getToken(duration time.Duration) (tokenOutput, error) {
func (c *EC2Metadata) getToken(ctx aws.Context, duration time.Duration) (tokenOutput, error) {
op := &request.Operation{
Name: "GetToken",
HTTPMethod: "PUT",
@@ -24,6 +25,7 @@ func (c *EC2Metadata) getToken(duration time.Duration) (tokenOutput, error) {

var output tokenOutput
req := c.NewRequest(op, nil, &output)
req.SetContext(ctx)

// remove the fetch token handler from the request handlers to avoid infinite recursion
req.Handlers.Sign.RemoveByName(fetchTokenHandlerName)
@@ -50,6 +52,13 @@ func (c *EC2Metadata) getToken(duration time.Duration) (tokenOutput, error) {
// instance metadata service. The content will be returned as a string, or
// error if the request failed.
func (c *EC2Metadata) GetMetadata(p string) (string, error) {
return c.GetMetadataWithContext(aws.BackgroundContext(), p)
}

// GetMetadataWithContext uses the path provided to request information from the EC2
// instance metadata service. The content will be returned as a string, or
// error if the request failed.
func (c *EC2Metadata) GetMetadataWithContext(ctx aws.Context, p string) (string, error) {
op := &request.Operation{
Name: "GetMetadata",
HTTPMethod: "GET",
@@ -59,6 +68,8 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) {

req := c.NewRequest(op, nil, output)

req.SetContext(ctx)

err := req.Send()
return output.Content, err
}
@@ -67,6 +78,13 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) {
// there is no user-data setup for the EC2 instance a "NotFoundError" error
// code will be returned.
func (c *EC2Metadata) GetUserData() (string, error) {
return c.GetUserDataWithContext(aws.BackgroundContext())
}

// GetUserDataWithContext returns the userdata that was configured for the service. If
// there is no user-data setup for the EC2 instance a "NotFoundError" error
// code will be returned.
func (c *EC2Metadata) GetUserDataWithContext(ctx aws.Context) (string, error) {
op := &request.Operation{
Name: "GetUserData",
HTTPMethod: "GET",
@@ -75,6 +93,7 @@ func (c *EC2Metadata) GetUserData() (string, error) {

output := &metadataOutput{}
req := c.NewRequest(op, nil, output)
req.SetContext(ctx)

err := req.Send()
return output.Content, err
@@ -84,6 +103,13 @@ func (c *EC2Metadata) GetUserData() (string, error) {
// instance metadata service for dynamic data. The content will be returned
// as a string, or error if the request failed.
func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
return c.GetDynamicDataWithContext(aws.BackgroundContext(), p)
}

// GetDynamicDataWithContext uses the path provided to request information from the EC2
// instance metadata service for dynamic data. The content will be returned
// as a string, or error if the request failed.
func (c *EC2Metadata) GetDynamicDataWithContext(ctx aws.Context, p string) (string, error) {
op := &request.Operation{
Name: "GetDynamicData",
HTTPMethod: "GET",
@@ -92,6 +118,7 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) {

output := &metadataOutput{}
req := c.NewRequest(op, nil, output)
req.SetContext(ctx)

err := req.Send()
return output.Content, err
@@ -101,7 +128,14 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
// instance. Error is returned if the request fails or is unable to parse
// the response.
func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
resp, err := c.GetDynamicData("instance-identity/document")
return c.GetInstanceIdentityDocumentWithContext(aws.BackgroundContext())
}

// GetInstanceIdentityDocumentWithContext retrieves an identity document describing an
// instance. Error is returned if the request fails or is unable to parse
// the response.
func (c *EC2Metadata) GetInstanceIdentityDocumentWithContext(ctx aws.Context) (EC2InstanceIdentityDocument, error) {
resp, err := c.GetDynamicDataWithContext(ctx, "instance-identity/document")
if err != nil {
return EC2InstanceIdentityDocument{},
awserr.New("EC2MetadataRequestError",
@@ -120,7 +154,12 @@ func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument

// IAMInfo retrieves IAM info from the metadata API
func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
resp, err := c.GetMetadata("iam/info")
return c.IAMInfoWithContext(aws.BackgroundContext())
}

// IAMInfoWithContext retrieves IAM info from the metadata API
func (c *EC2Metadata) IAMInfoWithContext(ctx aws.Context) (EC2IAMInfo, error) {
resp, err := c.GetMetadataWithContext(ctx, "iam/info")
if err != nil {
return EC2IAMInfo{},
awserr.New("EC2MetadataRequestError",
@@ -145,7 +184,12 @@ func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {

// Region returns the region the instance is running in.
func (c *EC2Metadata) Region() (string, error) {
ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocument()
return c.RegionWithContext(aws.BackgroundContext())
}

// RegionWithContext returns the region the instance is running in.
func (c *EC2Metadata) RegionWithContext(ctx aws.Context) (string, error) {
ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocumentWithContext(ctx)
if err != nil {
return "", err
}
@@ -162,7 +206,14 @@ func (c *EC2Metadata) Region() (string, error) {
// Can be used to determine if application is running within an EC2 Instance and
// the metadata service is available.
func (c *EC2Metadata) Available() bool {
if _, err := c.GetMetadata("instance-id"); err != nil {
return c.AvailableWithContext(aws.BackgroundContext())
}

// AvailableWithContext returns if the application has access to the EC2 Metadata service.
// Can be used to determine if application is running within an EC2 Instance and
// the metadata service is available.
func (c *EC2Metadata) AvailableWithContext(ctx aws.Context) bool {
if _, err := c.GetMetadataWithContext(ctx, "instance-id"); err != nil {
return false
}
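A short usage sketch of the new context-aware metadata calls (not part of the diff): aws.Context is an alias for context.Context, so an ordinary timeout bounds the IMDS lookup. The metadata path is the standard "instance-id" key.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := ec2metadata.New(sess)

	// Bound the metadata lookup with a plain context timeout.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	id, err := svc.GetMetadataWithContext(ctx, "instance-id")
	if err != nil {
		log.Fatal(err) // fails when not running on EC2 or IMDS is unreachable
	}
	fmt.Println("instance id:", id)
}
```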
2	vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go (generated, vendored)
@@ -46,7 +46,7 @@ func (t *tokenProvider) fetchTokenHandler(r *request.Request) {
return
}

output, err := t.client.getToken(t.configuredTTL)
output, err := t.client.getToken(r.Context(), t.configuredTTL)

if err != nil {
2	vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go (generated, vendored)
@@ -93,7 +93,7 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol
}

func custAddS3DualStack(p *partition) {
if p.ID != "aws" {
if !(p.ID == "aws" || p.ID == "aws-cn" || p.ID == "aws-us-gov") {
return
}
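This change extends the S3 dualstack endpoint customization to the aws-cn and aws-us-gov partitions. As a sketch (not part of the diff), a dualstack endpoint can be resolved through the public endpoints API; the region here is only illustrative.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Ask the default resolver for a dualstack S3 endpoint; with this
	// update the aws-cn and aws-us-gov partitions gain these endpoints too.
	resolved, err := endpoints.DefaultResolver().EndpointFor(
		"s3", "cn-north-1",
		func(o *endpoints.Options) { o.UseDualStack = true },
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resolved.URL) // resolved dualstack endpoint URL
}
```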
3265	vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go (generated, vendored): file diff suppressed because it is too large
16	vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go (generated, vendored)
@@ -7,6 +7,8 @@ import (
"strings"
)

var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`)

type partitions []partition

func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
@@ -124,7 +126,7 @@ func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (

defs := []endpoint{p.Defaults, s.Defaults}

return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt), nil
return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt)
}

func serviceList(ss services) []string {
@@ -233,7 +235,7 @@ func getByPriority(s []string, p []string, def string) string {
return s[0]
}

func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint {
func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) {
var merged endpoint
for _, def := range defs {
merged.mergeIn(def)
@@ -260,6 +262,10 @@ func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs [
region = signingRegion
}

if !validateInputRegion(region) {
return ResolvedEndpoint{}, fmt.Errorf("invalid region identifier format provided")
}

u := strings.Replace(hostname, "{service}", service, 1)
u = strings.Replace(u, "{region}", region, 1)
u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1)
@@ -274,7 +280,7 @@ func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs [
SigningName: signingName,
SigningNameDerived: signingNameDerived,
SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
}
}, nil
}

func getEndpointScheme(protocols []string, disableSSL bool) string {
@@ -339,3 +345,7 @@ const (
boxedFalse
boxedTrue
)

func validateInputRegion(region string) bool {
return regionValidationRegex.MatchString(region)
}
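The new regionValidationRegex rejects malformed region identifiers before a hostname is built. A standalone illustration of the same pattern (copied from the hunk above):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as regionValidationRegex above: alphanumeric start and
	// end, with hyphens allowed in between.
	re := regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`)

	fmt.Println(re.MatchString("us-east-1"))      // true
	fmt.Println(re.MatchString("bad region/../")) // false: rejected before endpoint resolution
}
```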
10	vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go (generated, vendored)
@@ -3,6 +3,7 @@ package session
import (
"fmt"
"os"
"time"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
@@ -206,7 +207,14 @@ func credsFromAssumeRole(cfg aws.Config,
sharedCfg.RoleARN,
func(opt *stscreds.AssumeRoleProvider) {
opt.RoleSessionName = sharedCfg.RoleSessionName
opt.Duration = sessOpts.AssumeRoleDuration

if sessOpts.AssumeRoleDuration == 0 &&
sharedCfg.AssumeRoleDuration != nil &&
*sharedCfg.AssumeRoleDuration/time.Minute > 15 {
opt.Duration = *sharedCfg.AssumeRoleDuration
} else if sessOpts.AssumeRoleDuration != 0 {
opt.Duration = sessOpts.AssumeRoleDuration
}

// Assume role with external ID
if len(sharedCfg.ExternalID) > 0 {
28	vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go (generated, vendored)
@@ -2,6 +2,7 @@ package session

import (
"fmt"
"time"

"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
@@ -16,12 +17,13 @@ const (
sessionTokenKey = `aws_session_token` // optional

// Assume Role Credentials group
roleArnKey = `role_arn` // group required
sourceProfileKey = `source_profile` // group required (or credential_source)
credentialSourceKey = `credential_source` // group required (or source_profile)
externalIDKey = `external_id` // optional
mfaSerialKey = `mfa_serial` // optional
roleSessionNameKey = `role_session_name` // optional
roleArnKey = `role_arn` // group required
sourceProfileKey = `source_profile` // group required (or credential_source)
credentialSourceKey = `credential_source` // group required (or source_profile)
externalIDKey = `external_id` // optional
mfaSerialKey = `mfa_serial` // optional
roleSessionNameKey = `role_session_name` // optional
roleDurationSecondsKey = "duration_seconds" // optional

// CSM options
csmEnabledKey = `csm_enabled`
@@ -73,10 +75,11 @@ type sharedConfig struct {
CredentialProcess string
WebIdentityTokenFile string

RoleARN string
RoleSessionName string
ExternalID string
MFASerial string
RoleARN string
RoleSessionName string
ExternalID string
MFASerial string
AssumeRoleDuration *time.Duration

SourceProfileName string
SourceProfile *sharedConfig
@@ -274,6 +277,11 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
updateString(&cfg.CredentialSource, section, credentialSourceKey)
updateString(&cfg.Region, section, regionKey)

if section.Has(roleDurationSecondsKey) {
d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second
cfg.AssumeRoleDuration = &d
}

if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 {
sre, err := endpoints.GetSTSRegionalEndpoint(v)
if err != nil {
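The new duration_seconds profile key and the Options.AssumeRoleDuration session option both feed the assume-role duration above. A sketch of the programmatic side (the profile name is a placeholder):

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// AssumeRoleDuration overrides the 15 minute default; the same value
	// can also come from a duration_seconds entry in the shared config.
	sess, err := session.NewSessionWithOptions(session.Options{
		SharedConfigState:  session.SharedConfigEnable,
		Profile:            "assume-role-profile", // placeholder profile name
		AssumeRoleDuration: time.Hour,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("session ready:", sess != nil)
}
```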
23	vendor/github.com/aws/aws-sdk-go/aws/types.go (generated, vendored)
@@ -239,3 +239,26 @@ func (es errors) Error() string {

return strings.Join(parts, "\n")
}

// CopySeekableBody copies the seekable body to an io.Writer
func CopySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) {
curPos, err := src.Seek(0, sdkio.SeekCurrent)
if err != nil {
return 0, err
}

// copy errors may be assumed to be from the body.
n, err := io.Copy(dst, src)
if err != nil {
return n, err
}

// seek back to the first position after reading to reset
// the body for transmission.
_, err = src.Seek(curPos, sdkio.SeekStart)
if err != nil {
return n, err
}

return n, nil
}
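CopySeekableBody is now exported from the aws package (it previously lived as a private helper in the S3 package, removed further down). A small sketch (not part of the diff) hashing a seekable body while leaving its read position intact:

```go
package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	body := bytes.NewReader([]byte("example payload"))

	// Copy the body into the hash; CopySeekableBody seeks back to the
	// starting offset afterwards, so the body can still be transmitted.
	h := md5.New()
	if _, err := aws.CopySeekableBody(h, body); err != nil {
		log.Fatal(err)
	}

	fmt.Println("Content-MD5:", base64.StdEncoding.EncodeToString(h.Sum(nil)))
	fmt.Println("offset preserved:", body.Len() == int(body.Size())) // still at position 0
}
```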
2	vendor/github.com/aws/aws-sdk-go/aws/version.go (generated, vendored)
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
const SDKVersion = "1.29.9"
const SDKVersion = "1.32.11"
53	vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go (generated, vendored, new file)
@@ -0,0 +1,53 @@
package checksum

import (
"crypto/md5"
"encoding/base64"
"fmt"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)

const contentMD5Header = "Content-Md5"

// AddBodyContentMD5Handler computes and sets the HTTP Content-MD5 header for requests that
// require it.
func AddBodyContentMD5Handler(r *request.Request) {
// if Content-MD5 header is already present, return
if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) != 0 {
return
}

// if S3DisableContentMD5Validation flag is set, return
if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
return
}

// if request is presigned, return
if r.IsPresigned() {
return
}

// if body is not seekable, return
if !aws.IsReaderSeekable(r.Body) {
if r.Config.Logger != nil {
r.Config.Logger.Log(fmt.Sprintf(
"Unable to compute Content-MD5 for unseekable body, S3.%s",
r.Operation.Name))
}
return
}

h := md5.New()

if _, err := aws.CopySeekableBody(h, r.Body); err != nil {
r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err)
return
}

// encode the md5 checksum in base64 and set the request header.
v := base64.StdEncoding.EncodeToString(h.Sum(nil))
r.HTTPRequest.Header.Set(contentMD5Header, v)
}
3	vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go (generated, vendored)
@@ -56,7 +56,8 @@ func FormatTime(name string, t time.Time) string {
case ISO8601TimeFormatName:
return t.Format(ISO8601OutputTimeFormat)
case UnixTimeFormatName:
return strconv.FormatInt(t.Unix(), 10)
ms := t.UnixNano() / int64(time.Millisecond)
return strconv.FormatFloat(float64(ms)/1e3, 'f', -1, 64)
default:
panic("unknown timestamp format name, " + name)
}
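The unixTimestamp format now keeps millisecond precision instead of whole seconds. A standalone illustration of the same arithmetic using only the standard library (it does not call the vendored private/protocol package):

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	t := time.Date(2020, 6, 26, 12, 0, 0, 123_000_000, time.UTC)

	// Old behaviour: whole seconds only.
	fmt.Println(strconv.FormatInt(t.Unix(), 10)) // 1593172800

	// New behaviour: millisecond precision, trailing zeros trimmed.
	ms := t.UnixNano() / int64(time.Millisecond)
	fmt.Println(strconv.FormatFloat(float64(ms)/1e3, 'f', -1, 64)) // 1593172800.123
}
```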
9	vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go (generated, vendored)
@@ -8,6 +8,7 @@ import (
"reflect"
"sort"
"strconv"
"strings"
"time"

"github.com/aws/aws-sdk-go/private/protocol"
@@ -60,6 +61,14 @@ func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag refle
return nil
}

xml := tag.Get("xml")
if len(xml) != 0 {
name := strings.SplitAfterN(xml, ",", 2)[0]
if name == "-" {
return nil
}
}

t := tag.Get("type")
if t == "" {
switch value.Kind() {
8	vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go (generated, vendored)
@@ -64,6 +64,14 @@ func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error {
// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect
// will be used to determine the type from r.
func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
xml := tag.Get("xml")
if len(xml) != 0 {
name := strings.SplitAfterN(xml, ",", 2)[0]
if name == "-" {
return nil
}
}

rtype := r.Type()
if rtype.Kind() == reflect.Ptr {
rtype = rtype.Elem() // check kind of actual element type
809	vendor/github.com/aws/aws-sdk-go/service/s3/api.go (generated, vendored): file diff suppressed because it is too large
49
vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go
generated
vendored
49
vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go
generated
vendored
@ -13,7 +13,6 @@ import (
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/internal/sdkio"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -25,30 +24,6 @@ const (
|
||||
appendMD5TxEncoding = "append-md5"
|
||||
)
|
||||
|
||||
// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
|
||||
// require it.
|
||||
func contentMD5(r *request.Request) {
|
||||
h := md5.New()
|
||||
|
||||
if !aws.IsReaderSeekable(r.Body) {
|
||||
if r.Config.Logger != nil {
|
||||
r.Config.Logger.Log(fmt.Sprintf(
|
||||
"Unable to compute Content-MD5 for unseekable body, S3.%s",
|
||||
r.Operation.Name))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if _, err := copySeekableBody(h, r.Body); err != nil {
|
||||
r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err)
|
||||
return
|
||||
}
|
||||
|
||||
// encode the md5 checksum in base64 and set the request header.
|
||||
v := base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
r.HTTPRequest.Header.Set(contentMD5Header, v)
|
||||
}
|
||||
|
||||
// computeBodyHashes will add Content MD5 and Content Sha256 hashes to the
|
||||
// request. If the body is not seekable or S3DisableContentMD5Validation set
|
||||
// this handler will be ignored.
|
||||
@ -90,7 +65,7 @@ func computeBodyHashes(r *request.Request) {
|
||||
dst = io.MultiWriter(hashers...)
|
||||
}
|
||||
|
||||
if _, err := copySeekableBody(dst, r.Body); err != nil {
|
||||
if _, err := aws.CopySeekableBody(dst, r.Body); err != nil {
|
||||
r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err)
|
||||
return
|
||||
}
|
||||
@ -119,28 +94,6 @@ const (
|
||||
sha256HexEncLen = sha256.Size * 2 // hex.EncodedLen
|
||||
)
|
||||
|
||||
func copySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) {
|
||||
curPos, err := src.Seek(0, sdkio.SeekCurrent)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// hash the body. seek back to the first position after reading to reset
|
||||
// the body for transmission. copy errors may be assumed to be from the
|
||||
// body.
|
||||
n, err := io.Copy(dst, src)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
|
||||
_, err = src.Seek(curPos, sdkio.SeekStart)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Adds the x-amz-te: append_md5 header to the request. This requests the service
|
||||
// responds with a trailing MD5 checksum.
|
||||
//
|
||||
|
6	vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go (generated, vendored)
@@ -33,12 +33,6 @@ func defaultInitRequestFn(r *request.Request) {
platformRequestHandlers(r)

switch r.Operation.Name {
case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy,
opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration,
opPutObjectLegalHold, opPutObjectRetention, opPutObjectLockConfiguration,
opPutBucketReplication:
// These S3 operations require Content-MD5 to be set
r.Handlers.Build.PushBack(contentMD5)
case opGetBucketLocation:
// GetBucketLocation has custom parsing logic
r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
16	vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go (generated, vendored)
@@ -2,6 +2,7 @@ package s3

import (
"bytes"
"io"
"io/ioutil"
"net/http"

@@ -24,17 +25,18 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
r.HTTPResponse.Body = ioutil.NopCloser(body)
defer body.Seek(0, sdkio.SeekStart)

if body.Len() == 0 {
// If there is no body don't attempt to parse the body.
return
}

unmarshalError(r)
if err, ok := r.Error.(awserr.Error); ok && err != nil {
if err.Code() == request.ErrCodeSerialization {
if err.Code() == request.ErrCodeSerialization &&
err.OrigErr() != io.EOF {
r.Error = nil
return
}
r.HTTPResponse.StatusCode = http.StatusServiceUnavailable
// if empty payload
if err.OrigErr() == io.EOF {
r.HTTPResponse.StatusCode = http.StatusInternalServerError
} else {
r.HTTPResponse.StatusCode = http.StatusServiceUnavailable
}
}
}
40	vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go (generated, vendored)
@@ -1,6 +1,7 @@
package s3

import (
"bytes"
"encoding/xml"
"fmt"
"io"
@@ -45,17 +46,24 @@ func unmarshalError(r *request.Request) {

// Attempt to parse error from body if it is known
var errResp xmlErrorResponse
err := xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body)
if err == io.EOF {
// Only capture the error if an unmarshal error occurs that is not EOF,
// because S3 might send an error without a error message which causes
// the XML unmarshal to fail with EOF.
err = nil
var err error
if r.HTTPResponse.StatusCode >= 200 && r.HTTPResponse.StatusCode < 300 {
err = s3unmarshalXMLError(&errResp, r.HTTPResponse.Body)
} else {
err = xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body)
}

if err != nil {
var errorMsg string
if err == io.EOF {
errorMsg = "empty response payload"
} else {
errorMsg = "failed to unmarshal error message"
}

r.Error = awserr.NewRequestFailure(
awserr.New(request.ErrCodeSerialization,
"failed to unmarshal error message", err),
errorMsg, err),
r.HTTPResponse.StatusCode,
r.RequestID,
)
@@ -86,3 +94,21 @@ type RequestFailure interface {
// Host ID is the S3 Host ID needed for debug, and contacting support
HostID() string
}

// s3unmarshalXMLError is s3 specific xml error unmarshaler
// for 200 OK errors and response payloads.
// This function differs from the xmlUtil.UnmarshalXMLError
// func. It does not ignore the EOF error and passes it up.
// Related to bug fix for `s3 200 OK response with empty payload`
func s3unmarshalXMLError(v interface{}, stream io.Reader) error {
var errBuf bytes.Buffer
body := io.TeeReader(stream, &errBuf)

err := xml.NewDecoder(body).Decode(v)
if err != nil && err != io.EOF {
return awserr.NewUnmarshalError(err,
"failed to unmarshal error message", errBuf.Bytes())
}

return err
}
4	vendor/github.com/aws/aws-sdk-go/service/sts/api.go (generated, vendored)
@@ -1788,7 +1788,7 @@ type AssumeRoleWithSAMLInput struct {
// in the IAM User Guide.
//
// SAMLAssertion is a required field
SAMLAssertion *string `min:"4" type:"string" required:"true"`
SAMLAssertion *string `min:"4" type:"string" required:"true" sensitive:"true"`
}

// String returns the string representation
@@ -2100,7 +2100,7 @@ type AssumeRoleWithWebIdentityInput struct {
// the application makes an AssumeRoleWithWebIdentity call.
//
// WebIdentityToken is a required field
WebIdentityToken *string `min:"4" type:"string" required:"true"`
WebIdentityToken *string `min:"4" type:"string" required:"true" sensitive:"true"`
}

// String returns the string representation
34	vendor/github.com/billziss-gh/cgofuse/fuse/fsop.go (generated, vendored)
@@ -21,6 +21,11 @@
// In order to expose the user mode file system to the OS, the file system must be hosted
// (mounted) by a FileSystemHost. The FileSystemHost Mount() method is used for this
// purpose.
//
// A note on thread-safety: In general FUSE file systems are expected to protect their
// own data structures. Many FUSE implementations provide a -s command line option that
// when used, it instructs the FUSE implementation to serialize requests. This option
// can be passed to the FileSystemHost Mount() method, when the file system is mounted.
package fuse

import (
@@ -141,6 +146,25 @@ type Stat_t struct {
Flags uint32
}

// FileInfo_t contains open file information.
// This structure is analogous to the FUSE struct fuse_file_info.
type FileInfo_t struct {
// Open flags: a combination of the fuse.O_* constants.
Flags int

// Use direct I/O on this file. [IGNORED on Windows]
DirectIo bool

// Do not invalidate file cache. [IGNORED on Windows]
KeepCache bool

// File is not seekable. [IGNORED on Windows]
NonSeekable bool

// File handle.
Fh uint64
}

/*
// Lock_t contains file locking information.
// This structure is analogous to the POSIX struct flock.
@@ -285,6 +309,16 @@ type FileSystemInterface interface {
Listxattr(path string, fill func(name string) bool) int
}

// FileSystemOpenEx is the interface that wraps the OpenEx and CreateEx methods.
//
// OpenEx and CreateEx are similar to Open and Create except that they allow
// direct manipulation of the FileInfo_t struct (which is analogous to the
// FUSE struct fuse_file_info).
type FileSystemOpenEx interface {
CreateEx(path string, mode uint32, fi *FileInfo_t) int
OpenEx(path string, fi *FileInfo_t) int
}

// FileSystemChflags is the interface that wraps the Chflags method.
//
// Chflags changes the BSD file flags (Windows file attributes). [OSX and Windows only]
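A minimal sketch of opting into the new OpenEx/CreateEx hooks (not part of the diff): a file system embeds fuse.FileSystemBase for default stubs and additionally implements FileSystemOpenEx so it can set FileInfo_t fields per handle. The path, handle value and flag choices below are illustrative.

```go
package exfs

import "github.com/billziss-gh/cgofuse/fuse"

// exFS embeds FileSystemBase for default stubs and adds the new
// OpenEx/CreateEx hooks so it can tune per-handle behaviour.
type exFS struct {
	fuse.FileSystemBase
}

// OpenEx lets the file system manipulate FileInfo_t directly, e.g. to
// request direct I/O for a particular file.
func (fs *exFS) OpenEx(path string, fi *fuse.FileInfo_t) int {
	if path != "/hello" { // single illustrative file
		return -fuse.ENOENT
	}
	fi.DirectIo = true // ignored on Windows, per the docs above
	fi.Fh = 1          // application-chosen file handle
	return 0
}

// CreateEx mirrors OpenEx for newly created files; returning -ENOSYS makes
// the host fall back to Mknod followed by OpenEx, as hostCreate does.
func (fs *exFS) CreateEx(path string, mode uint32, fi *fuse.FileInfo_t) int {
	return -fuse.ENOSYS
}

var _ fuse.FileSystemOpenEx = (*exFS)(nil) // compile-time interface check
```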
58
vendor/github.com/billziss-gh/cgofuse/fuse/host.go
generated
vendored
58
vendor/github.com/billziss-gh/cgofuse/fuse/host.go
generated
vendored
@ -219,9 +219,21 @@ func hostOpen(path0 *c_char, fi0 *c_struct_fuse_file_info) (errc0 c_int) {
|
||||
defer recoverAsErrno(&errc0)
|
||||
fsop := hostHandleGet(c_fuse_get_context().private_data).fsop
|
||||
path := c_GoString(path0)
|
||||
errc, rslt := fsop.Open(path, int(fi0.flags))
|
||||
fi0.fh = c_uint64_t(rslt)
|
||||
return c_int(errc)
|
||||
intf, ok := fsop.(FileSystemOpenEx)
|
||||
if ok {
|
||||
fi := FileInfo_t{Flags: int(fi0.flags)}
|
||||
errc := intf.OpenEx(path, &fi)
|
||||
c_hostAsgnCfileinfo(fi0,
|
||||
c_bool(fi.DirectIo),
|
||||
c_bool(fi.KeepCache),
|
||||
c_bool(fi.NonSeekable),
|
||||
c_uint64_t(fi.Fh))
|
||||
return c_int(errc)
|
||||
} else {
|
||||
errc, rslt := fsop.Open(path, int(fi0.flags))
|
||||
fi0.fh = c_uint64_t(rslt)
|
||||
return c_int(errc)
|
||||
}
|
||||
}
|
||||
|
||||
func hostRead(path0 *c_char, buff0 *c_char, size0 c_size_t, ofst0 c_fuse_off_t,
|
||||
@ -403,7 +415,9 @@ func hostFsyncdir(path0 *c_char, datasync c_int, fi0 *c_struct_fuse_file_info) (
|
||||
}
|
||||
|
||||
func hostInit(conn0 *c_struct_fuse_conn_info) (user_data unsafe.Pointer) {
|
||||
defer recover()
|
||||
defer func() {
|
||||
recover()
|
||||
}()
|
||||
fctx := c_fuse_get_context()
|
||||
user_data = fctx.private_data
|
||||
host := hostHandleGet(user_data)
|
||||
@ -419,7 +433,9 @@ func hostInit(conn0 *c_struct_fuse_conn_info) (user_data unsafe.Pointer) {
|
||||
}
|
||||
|
||||
func hostDestroy(user_data unsafe.Pointer) {
|
||||
defer recover()
|
||||
defer func() {
|
||||
recover()
|
||||
}()
|
||||
if "netbsd" == runtime.GOOS {
|
||||
user_data = c_fuse_get_context().private_data
|
||||
}
|
||||
@ -443,15 +459,33 @@ func hostCreate(path0 *c_char, mode0 c_fuse_mode_t, fi0 *c_struct_fuse_file_info
|
||||
defer recoverAsErrno(&errc0)
|
||||
fsop := hostHandleGet(c_fuse_get_context().private_data).fsop
|
||||
path := c_GoString(path0)
|
||||
errc, rslt := fsop.Create(path, int(fi0.flags), uint32(mode0))
|
||||
if -ENOSYS == errc {
|
||||
errc = fsop.Mknod(path, S_IFREG|uint32(mode0), 0)
|
||||
if 0 == errc {
|
||||
errc, rslt = fsop.Open(path, int(fi0.flags))
|
||||
intf, ok := fsop.(FileSystemOpenEx)
|
||||
if ok {
|
||||
fi := FileInfo_t{Flags: int(fi0.flags)}
|
||||
errc := intf.CreateEx(path, uint32(mode0), &fi)
|
||||
if -ENOSYS == errc {
|
||||
errc = fsop.Mknod(path, S_IFREG|uint32(mode0), 0)
|
||||
if 0 == errc {
|
||||
errc = intf.OpenEx(path, &fi)
|
||||
}
|
||||
}
|
||||
c_hostAsgnCfileinfo(fi0,
|
||||
c_bool(fi.DirectIo),
|
||||
c_bool(fi.KeepCache),
|
||||
c_bool(fi.NonSeekable),
|
||||
c_uint64_t(fi.Fh))
|
||||
return c_int(errc)
|
||||
} else {
|
||||
errc, rslt := fsop.Create(path, int(fi0.flags), uint32(mode0))
|
||||
if -ENOSYS == errc {
|
||||
errc = fsop.Mknod(path, S_IFREG|uint32(mode0), 0)
|
||||
if 0 == errc {
|
||||
errc, rslt = fsop.Open(path, int(fi0.flags))
|
||||
}
|
||||
}
|
||||
fi0.fh = c_uint64_t(rslt)
|
||||
return c_int(errc)
|
||||
}
|
||||
fi0.fh = c_uint64_t(rslt)
|
||||
return c_int(errc)
|
||||
}
|
||||
|
||||
func hostFtruncate(path0 *c_char, size0 c_fuse_off_t, fi0 *c_struct_fuse_file_info) (errc0 c_int) {
|
||||
|
23	vendor/github.com/billziss-gh/cgofuse/fuse/host_cgo.go (generated, vendored)
@@ -379,6 +379,18 @@ static inline void hostCstatFromFusestat(fuse_stat_t *stbuf,
#endif
}

static inline void hostAsgnCfileinfo(struct fuse_file_info *fi,
bool direct_io,
bool keep_cache,
bool nonseekable,
uint64_t fh)
{
fi->direct_io = direct_io;
fi->keep_cache = keep_cache;
fi->nonseekable = nonseekable;
fi->fh = fh;
}

static inline int hostFilldir(fuse_fill_dir_t filler, void *buf,
char *name, fuse_stat_t *stbuf, fuse_off_t off)
{
@@ -687,6 +699,17 @@ func c_hostCstatFromFusestat(stbuf *c_fuse_stat_t,
birthtimNsec,
flags)
}
func c_hostAsgnCfileinfo(fi *c_struct_fuse_file_info,
direct_io c_bool,
keep_cache c_bool,
nonseekable c_bool,
fh c_uint64_t) {
C.hostAsgnCfileinfo(fi,
direct_io,
keep_cache,
nonseekable,
fh)
}
func c_hostFilldir(filler c_fuse_fill_dir_t,
buf unsafe.Pointer, name *c_char, stbuf *c_fuse_stat_t, off c_fuse_off_t) c_int {
return C.hostFilldir(filler, buf, name, stbuf, off)
16	vendor/github.com/billziss-gh/cgofuse/fuse/host_nocgo_windows.go (generated, vendored)
@@ -426,6 +426,22 @@ func c_hostCstatFromFusestat(stbuf *c_fuse_stat_t,
stbuf.st_birthtim.tv_nsec = uintptr(ctimNsec)
}
}
func c_hostAsgnCfileinfo(fi *c_struct_fuse_file_info,
direct_io c_bool,
keep_cache c_bool,
nonseekable c_bool,
fh c_uint64_t) {
if direct_io {
fi.bits |= 1
}
if keep_cache {
fi.bits |= 2
}
if nonseekable {
fi.bits |= 8
}
fi.fh = fh
}
func c_hostFilldir(filler c_fuse_fill_dir_t,
buf unsafe.Pointer, name *c_char, stbuf *c_fuse_stat_t, off c_fuse_off_t) c_int {
var r uintptr
215
vendor/github.com/calebcase/tmpfile/LICENSE
generated
vendored
215
vendor/github.com/calebcase/tmpfile/LICENSE
generated
vendored
@ -1,202 +1,19 @@
|
||||
Copyright 2019 Caleb Case <calebcase@gmail.com>
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
2	vendor/github.com/calebcase/tmpfile/doc.go (generated, vendored)
@@ -1,3 +1,5 @@
// Copyright 2019 Caleb Case

// Package tmpfile provides a cross platform facility for creating temporary
// files that are automatically cleaned up (even in the event of an unexpected
// process exit).
7	vendor/github.com/calebcase/tmpfile/tmpfile.go (generated, vendored)
@@ -1,3 +1,4 @@
// Copyright 2019 Caleb Case
// +build !windows

package tmpfile

@@ -10,6 +11,12 @@ import (
// New creates a new temporary file in the directory dir using ioutil.TempFile
// and then unlinks the file with os.Remove to ensure the file is deleted when
// the calling process exists.
//
// If dir is the empty string it will default to using os.TempDir() as the
// directory for storing the temporary files.
//
// The target directory dir must already exist or an error will result. New
// does not create or remove the directory dir.
func New(dir, pattern string) (f *os.File, err error) {
f, err = ioutil.TempFile(dir, pattern)
if err != nil {
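A short usage sketch of the documented behaviour (not part of the diff): because the file is unlinked as soon as it is created, it disappears automatically even if the process exits unexpectedly. The file name pattern is a placeholder.

```go
package main

import (
	"fmt"
	"log"

	"github.com/calebcase/tmpfile"
)

func main() {
	// Empty dir means os.TempDir(); the file is unlinked right away, so it
	// is cleaned up automatically even on an unexpected exit.
	f, err := tmpfile.New("", "example-*.tmp")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if _, err := f.WriteString("scratch data"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("temporary file in use:", f.Name())
}
```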
2	vendor/github.com/gogo/protobuf/proto/encode.go (generated, vendored)
@@ -189,6 +189,8 @@ type Marshaler interface {
// prefixed by a varint-encoded length.
func (p *Buffer) EncodeMessage(pb Message) error {
siz := Size(pb)
sizVar := SizeVarint(uint64(siz))
p.grow(siz + sizVar)
p.EncodeVarint(uint64(siz))
return p.Marshal(pb)
}
1	vendor/github.com/gogo/protobuf/proto/extensions.go (generated, vendored)
@@ -527,6 +527,7 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
// SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
if epb, ok := pb.(extensionsBytes); ok {
ClearExtension(pb, extension)
newb, err := encodeExtension(extension, value)
if err != nil {
return err
21 vendor/github.com/gogo/protobuf/proto/extensions_gogo.go generated vendored
@@ -154,6 +154,10 @@ func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error)
	return EncodeExtensionMap(m.extensionsWrite(), data)
}

func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) {
	return EncodeExtensionMapBackwards(m.extensionsWrite(), data)
}

func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
	o := 0
	for _, e := range m {
@@ -169,6 +173,23 @@ func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
	return o, nil
}

func EncodeExtensionMapBackwards(m map[int32]Extension, data []byte) (n int, err error) {
	o := 0
	end := len(data)
	for _, e := range m {
		if err := e.Encode(); err != nil {
			return 0, err
		}
		n := copy(data[end-len(e.enc):], e.enc)
		if n != len(e.enc) {
			return 0, io.ErrShortBuffer
		}
		end -= n
		o += n
	}
	return o, nil
}

func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
	e := m[id]
	if err := e.Encode(); err != nil {
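EncodeExtensionMapBackwards fills the destination from the end toward the front, so a caller that already knows the total size can lay extensions out in reverse iteration order without extra copies. A small self-contained sketch of that back-to-front copy pattern, with invented chunk data and no proto extension machinery.

// back-to-front buffer filling sketch (illustrative only)
package main

import "fmt"

// packBackwards copies chunks into the tail of dst, first chunk closest to
// the end, and reports how many bytes were written, mirroring the (n, err)
// shape of the vendored helper.
func packBackwards(dst []byte, chunks [][]byte) int {
	end := len(dst)
	written := 0
	for _, c := range chunks {
		n := copy(dst[end-len(c):], c)
		end -= n
		written += n
	}
	return written
}

func main() {
	dst := make([]byte, 16)
	n := packBackwards(dst, [][]byte{[]byte("cc"), []byte("bbb"), []byte("a")})
	fmt.Printf("wrote %d bytes, tail=%q\n", n, dst[len(dst)-n:])
}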
18 vendor/github.com/gogo/protobuf/proto/lib.go generated vendored
@@ -948,13 +948,19 @@ func isProto3Zero(v reflect.Value) bool {
	return false
}

// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
const GoGoProtoPackageIsVersion2 = true
const (
	// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
	// to assert that that code is compatible with this version of the proto package.
	GoGoProtoPackageIsVersion3 = true

// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
const GoGoProtoPackageIsVersion1 = true
	// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
	// to assert that that code is compatible with this version of the proto package.
	GoGoProtoPackageIsVersion2 = true

	// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
	// to assert that that code is compatible with this version of the proto package.
	GoGoProtoPackageIsVersion1 = true
)

// InternalMessageInfo is a type used internally by generated .pb.go files.
// This type is not intended to be used by non-generated code.
71 vendor/github.com/gogo/protobuf/proto/properties.go generated vendored
@@ -43,7 +43,6 @@ package proto
import (
	"fmt"
	"log"
	"os"
	"reflect"
	"sort"
	"strconv"
@@ -205,7 +204,7 @@ func (p *Properties) Parse(s string) {
	// "bytes,49,opt,name=foo,def=hello!"
	fields := strings.Split(s, ",") // breaks def=, but handled below.
	if len(fields) < 2 {
		fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
		log.Printf("proto: tag has too few fields: %q", s)
		return
	}
@@ -225,7 +224,7 @@ func (p *Properties) Parse(s string) {
		p.WireType = WireBytes
		// no numeric converter for non-numeric types
	default:
		fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
		log.Printf("proto: tag has unknown wire type: %q", s)
		return
	}
@@ -400,6 +399,15 @@ func GetProperties(t reflect.Type) *StructProperties {
	return sprop
}

type (
	oneofFuncsIface interface {
		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
	}
	oneofWrappersIface interface {
		XXX_OneofWrappers() []interface{}
	}
)

// getPropertiesLocked requires that propertiesMu is held.
func getPropertiesLocked(t reflect.Type) *StructProperties {
	if prop, ok := propertiesMap[t]; ok {
@@ -441,37 +449,40 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
	// Re-order prop.order.
	sort.Sort(prop)

	type oneofMessage interface {
		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
	}
	if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok {
	if isOneofMessage {
		var oots []interface{}
		_, _, _, oots = om.XXX_OneofFuncs()

		// Interpret oneof metadata.
		prop.OneofTypes = make(map[string]*OneofProperties)
		for _, oot := range oots {
			oop := &OneofProperties{
				Type: reflect.ValueOf(oot).Type(), // *T
				Prop: new(Properties),
			}
			sft := oop.Type.Elem().Field(0)
			oop.Prop.Name = sft.Name
			oop.Prop.Parse(sft.Tag.Get("protobuf"))
			// There will be exactly one interface field that
			// this new value is assignable to.
			for i := 0; i < t.NumField(); i++ {
				f := t.Field(i)
				if f.Type.Kind() != reflect.Interface {
					continue
		switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
		case oneofFuncsIface:
			_, _, _, oots = m.XXX_OneofFuncs()
		case oneofWrappersIface:
			oots = m.XXX_OneofWrappers()
		}
		if len(oots) > 0 {
			// Interpret oneof metadata.
			prop.OneofTypes = make(map[string]*OneofProperties)
			for _, oot := range oots {
				oop := &OneofProperties{
					Type: reflect.ValueOf(oot).Type(), // *T
					Prop: new(Properties),
				}
				if !oop.Type.AssignableTo(f.Type) {
					continue
				sft := oop.Type.Elem().Field(0)
				oop.Prop.Name = sft.Name
				oop.Prop.Parse(sft.Tag.Get("protobuf"))
				// There will be exactly one interface field that
				// this new value is assignable to.
				for i := 0; i < t.NumField(); i++ {
					f := t.Field(i)
					if f.Type.Kind() != reflect.Interface {
						continue
					}
					if !oop.Type.AssignableTo(f.Type) {
						continue
					}
					oop.Field = i
					break
				}
				oop.Field = i
				break
				prop.OneofTypes[oop.Prop.OrigName] = oop
			}
			prop.OneofTypes[oop.Prop.OrigName] = oop
		}
	}
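The new oneofFuncsIface/oneofWrappersIface pair exists because older gogo-generated code exposes XXX_OneofFuncs while newer protoc-gen-go output exposes XXX_OneofWrappers, and the properties code above now accepts either. A compact sketch of that capability-detection type switch; the interfaces use simplified signatures and the message type is invented for the example.

// capability-detection type switch sketch (illustrative only)
package main

import "fmt"

type oneofFuncsIface interface {
	XXX_OneofFuncs() []interface{} // simplified signature for the sketch
}

type oneofWrappersIface interface {
	XXX_OneofWrappers() []interface{}
}

// newStyleMsg pretends to be a message generated by a recent protoc-gen-go.
type newStyleMsg struct{}

func (newStyleMsg) XXX_OneofWrappers() []interface{} {
	return []interface{}{"wrapper A", "wrapper B"}
}

// oneofImplementers picks whichever of the two APIs the value provides.
func oneofImplementers(m interface{}) []interface{} {
	switch m := m.(type) {
	case oneofFuncsIface:
		return m.XXX_OneofFuncs()
	case oneofWrappersIface:
		return m.XXX_OneofWrappers()
	}
	return nil
}

func main() {
	fmt.Println(oneofImplementers(newStyleMsg{}))
}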
17 vendor/github.com/gogo/protobuf/proto/table_marshal.go generated vendored
@@ -389,8 +389,13 @@ func (u *marshalInfo) computeMarshalInfo() {
	// get oneof implementers
	var oneofImplementers []interface{}
	// gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler
	if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok && isOneofMessage {
		_, _, _, oneofImplementers = m.XXX_OneofFuncs()
	if isOneofMessage {
		switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
		case oneofFuncsIface:
			_, _, _, oneofImplementers = m.XXX_OneofFuncs()
		case oneofWrappersIface:
			oneofImplementers = m.XXX_OneofWrappers()
		}
	}

	// normal fields
@@ -519,10 +524,6 @@ func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofI
	}
}

type oneofMessage interface {
	XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
}

// wiretype returns the wire encoding of the type.
func wiretype(encoding string) uint64 {
	switch encoding {
@@ -2968,7 +2969,9 @@ func (p *Buffer) Marshal(pb Message) error {
	if m, ok := pb.(newMarshaler); ok {
		siz := m.XXX_Size()
		p.grow(siz) // make sure buf has enough capacity
		p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
		pp := p.buf[len(p.buf) : len(p.buf) : len(p.buf)+siz]
		pp, err = m.XXX_Marshal(pp, p.deterministic)
		p.buf = append(p.buf, pp...)
		return err
	}
	if m, ok := pb.(Marshaler); ok {
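The Marshal hunk stops handing XXX_Marshal the whole internal buffer and instead passes a full (three-index) slice expression whose capacity is capped at the expected size, then appends the result back. A short sketch of why capping capacity this way protects the caller's existing bytes; the buffer contents are made up for the example.

// three-index slice expression sketch (illustrative only)
package main

import "fmt"

func main() {
	buf := make([]byte, 3, 16)
	copy(buf, "abc")

	// pp starts at len(buf), has length 0, and its capacity is capped at 8
	// extra bytes, so an append that needs more room must allocate a new
	// backing array instead of writing past the cap into memory the caller
	// may still be using.
	pp := buf[len(buf):len(buf):len(buf)+8]
	pp = append(pp, []byte("0123456789")...) // exceeds the cap, forces a copy

	buf = append(buf, pp...)
	fmt.Printf("len=%d, buf=%q\n", len(buf), buf)
}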
19 vendor/github.com/gogo/protobuf/proto/table_merge.go generated vendored
@@ -530,6 +530,25 @@ func (mi *mergeInfo) computeMergeInfo() {
		}
	case reflect.Struct:
		switch {
		case isSlice && !isPointer: // E.g. []pb.T
			mergeInfo := getMergeInfo(tf)
			zero := reflect.Zero(tf)
			mfi.merge = func(dst, src pointer) {
				// TODO: Make this faster?
				dstsp := dst.asPointerTo(f.Type)
				dsts := dstsp.Elem()
				srcs := src.asPointerTo(f.Type).Elem()
				for i := 0; i < srcs.Len(); i++ {
					dsts = reflect.Append(dsts, zero)
					srcElement := srcs.Index(i).Addr()
					dstElement := dsts.Index(dsts.Len() - 1).Addr()
					mergeInfo.merge(valToPointer(dstElement), valToPointer(srcElement))
				}
				if dsts.IsNil() {
					dsts = reflect.MakeSlice(f.Type, 0, 0)
				}
				dstsp.Elem().Set(dsts)
			}
		case !isPointer:
			mergeInfo := getMergeInfo(tf)
			mfi.merge = func(dst, src pointer) {
22 vendor/github.com/gogo/protobuf/proto/table_unmarshal.go generated vendored
@@ -371,15 +371,18 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
	}

	// Find any types associated with oneof fields.
	// TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it?
	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
	// gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler
	if fn.IsValid() && len(oneofFields) > 0 {
		res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
		for i := res.Len() - 1; i >= 0; i-- {
			v := res.Index(i) // interface{}
			tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
			typ := tptr.Elem() // Msg_X
	if len(oneofFields) > 0 {
		var oneofImplementers []interface{}
		switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
		case oneofFuncsIface:
			_, _, _, oneofImplementers = m.XXX_OneofFuncs()
		case oneofWrappersIface:
			oneofImplementers = m.XXX_OneofWrappers()
		}
		for _, v := range oneofImplementers {
			tptr := reflect.TypeOf(v) // *Msg_X
			typ := tptr.Elem() // Msg_X

			f := typ.Field(0) // oneof implementers have one field
			baseUnmarshal := fieldUnmarshaler(&f)
@@ -407,11 +410,12 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
				u.setTag(fieldNum, of.field, unmarshal, 0, name)
			}
		}

	}
	}

	// Get extension ranges, if any.
	fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
	if fn.IsValid() {
		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() {
			panic("a message with extensions, but no extensions field in " + t.Name())
6 vendor/github.com/gogo/protobuf/proto/text.go generated vendored
@@ -476,6 +476,8 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
	return nil
}

var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()

// writeAny writes an arbitrary field.
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
	v = reflect.Indirect(v)
@@ -589,8 +591,8 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
		// mutating this value.
		v = v.Addr()
	}
	if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
		text, err := etm.MarshalText()
	if v.Type().Implements(textMarshalerType) {
		text, err := v.Interface().(encoding.TextMarshaler).MarshalText()
		if err != nil {
			return err
		}
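The text.go change caches the reflect.Type of encoding.TextMarshaler once and asks v.Type().Implements(...) instead of type-asserting the interface value each time. A standalone illustration of that check; net.IP stands in here only because it is a well-known TextMarshaler from the standard library.

// reflect.Type.Implements sketch (illustrative only)
package main

import (
	"encoding"
	"fmt"
	"net"
	"reflect"
)

var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()

func marshalIfPossible(v reflect.Value) (string, bool) {
	// Ask the type, not the value, whether it satisfies TextMarshaler,
	// then assert only once the call is known to be safe.
	if !v.Type().Implements(textMarshalerType) {
		return "", false
	}
	text, err := v.Interface().(encoding.TextMarshaler).MarshalText()
	if err != nil {
		return "", false
	}
	return string(text), true
}

func main() {
	ip := net.ParseIP("192.0.2.1")
	if s, ok := marshalIfPossible(reflect.ValueOf(ip)); ok {
		fmt.Println("text form:", s)
	}
	if _, ok := marshalIfPossible(reflect.ValueOf(42)); !ok {
		fmt.Println("int has no MarshalText, skipped")
	}
}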
324 vendor/github.com/golang/protobuf/proto/buffer.go generated vendored Normal file
@@ -0,0 +1,324 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/protobuf/encoding/prototext"
|
||||
"google.golang.org/protobuf/encoding/protowire"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
WireVarint = 0
|
||||
WireFixed32 = 5
|
||||
WireFixed64 = 1
|
||||
WireBytes = 2
|
||||
WireStartGroup = 3
|
||||
WireEndGroup = 4
|
||||
)
|
||||
|
||||
// EncodeVarint returns the varint encoded bytes of v.
|
||||
func EncodeVarint(v uint64) []byte {
|
||||
return protowire.AppendVarint(nil, v)
|
||||
}
|
||||
|
||||
// SizeVarint returns the length of the varint encoded bytes of v.
|
||||
// This is equal to len(EncodeVarint(v)).
|
||||
func SizeVarint(v uint64) int {
|
||||
return protowire.SizeVarint(v)
|
||||
}
|
||||
|
||||
// DecodeVarint parses a varint encoded integer from b,
|
||||
// returning the integer value and the length of the varint.
|
||||
// It returns (0, 0) if there is a parse error.
|
||||
func DecodeVarint(b []byte) (uint64, int) {
|
||||
v, n := protowire.ConsumeVarint(b)
|
||||
if n < 0 {
|
||||
return 0, 0
|
||||
}
|
||||
return v, n
|
||||
}
|
||||
|
||||
// Buffer is a buffer for encoding and decoding the protobuf wire format.
|
||||
// It may be reused between invocations to reduce memory usage.
|
||||
type Buffer struct {
|
||||
buf []byte
|
||||
idx int
|
||||
deterministic bool
|
||||
}
|
||||
|
||||
// NewBuffer allocates a new Buffer initialized with buf,
|
||||
// where the contents of buf are considered the unread portion of the buffer.
|
||||
func NewBuffer(buf []byte) *Buffer {
|
||||
return &Buffer{buf: buf}
|
||||
}
|
||||
|
||||
// SetDeterministic specifies whether to use deterministic serialization.
|
||||
//
|
||||
// Deterministic serialization guarantees that for a given binary, equal
|
||||
// messages will always be serialized to the same bytes. This implies:
|
||||
//
|
||||
// - Repeated serialization of a message will return the same bytes.
|
||||
// - Different processes of the same binary (which may be executing on
|
||||
// different machines) will serialize equal messages to the same bytes.
|
||||
//
|
||||
// Note that the deterministic serialization is NOT canonical across
|
||||
// languages. It is not guaranteed to remain stable over time. It is unstable
|
||||
// across different builds with schema changes due to unknown fields.
|
||||
// Users who need canonical serialization (e.g., persistent storage in a
|
||||
// canonical form, fingerprinting, etc.) should define their own
|
||||
// canonicalization specification and implement their own serializer rather
|
||||
// than relying on this API.
|
||||
//
|
||||
// If deterministic serialization is requested, map entries will be sorted
|
||||
// by keys in lexographical order. This is an implementation detail and
|
||||
// subject to change.
|
||||
func (b *Buffer) SetDeterministic(deterministic bool) {
|
||||
b.deterministic = deterministic
|
||||
}
|
||||
|
||||
// SetBuf sets buf as the internal buffer,
|
||||
// where the contents of buf are considered the unread portion of the buffer.
|
||||
func (b *Buffer) SetBuf(buf []byte) {
|
||||
b.buf = buf
|
||||
b.idx = 0
|
||||
}
|
||||
|
||||
// Reset clears the internal buffer of all written and unread data.
|
||||
func (b *Buffer) Reset() {
|
||||
b.buf = b.buf[:0]
|
||||
b.idx = 0
|
||||
}
|
||||
|
||||
// Bytes returns the internal buffer.
|
||||
func (b *Buffer) Bytes() []byte {
|
||||
return b.buf
|
||||
}
|
||||
|
||||
// Unread returns the unread portion of the buffer.
|
||||
func (b *Buffer) Unread() []byte {
|
||||
return b.buf[b.idx:]
|
||||
}
|
||||
|
||||
// Marshal appends the wire-format encoding of m to the buffer.
|
||||
func (b *Buffer) Marshal(m Message) error {
|
||||
var err error
|
||||
b.buf, err = marshalAppend(b.buf, m, b.deterministic)
|
||||
return err
|
||||
}
|
||||
|
||||
// Unmarshal parses the wire-format message in the buffer and
|
||||
// places the decoded results in m.
|
||||
// It does not reset m before unmarshaling.
|
||||
func (b *Buffer) Unmarshal(m Message) error {
|
||||
err := UnmarshalMerge(b.Unread(), m)
|
||||
b.idx = len(b.buf)
|
||||
return err
|
||||
}
|
||||
|
||||
type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
|
||||
|
||||
func (m *unknownFields) String() string { panic("not implemented") }
|
||||
func (m *unknownFields) Reset() { panic("not implemented") }
|
||||
func (m *unknownFields) ProtoMessage() { panic("not implemented") }
|
||||
|
||||
// DebugPrint dumps the encoded bytes of b with a header and footer including s
|
||||
// to stdout. This is only intended for debugging.
|
||||
func (*Buffer) DebugPrint(s string, b []byte) {
|
||||
m := MessageReflect(new(unknownFields))
|
||||
m.SetUnknown(b)
|
||||
b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
|
||||
fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
|
||||
}
|
||||
|
||||
// EncodeVarint appends an unsigned varint encoding to the buffer.
|
||||
func (b *Buffer) EncodeVarint(v uint64) error {
|
||||
b.buf = protowire.AppendVarint(b.buf, v)
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
|
||||
func (b *Buffer) EncodeZigzag32(v uint64) error {
|
||||
return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
|
||||
}
|
||||
|
||||
// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
|
||||
func (b *Buffer) EncodeZigzag64(v uint64) error {
|
||||
return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
|
||||
}
|
||||
|
||||
// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
|
||||
func (b *Buffer) EncodeFixed32(v uint64) error {
|
||||
b.buf = protowire.AppendFixed32(b.buf, uint32(v))
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
|
||||
func (b *Buffer) EncodeFixed64(v uint64) error {
|
||||
b.buf = protowire.AppendFixed64(b.buf, uint64(v))
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeRawBytes appends a length-prefixed raw bytes to the buffer.
|
||||
func (b *Buffer) EncodeRawBytes(v []byte) error {
|
||||
b.buf = protowire.AppendBytes(b.buf, v)
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeStringBytes appends a length-prefixed raw bytes to the buffer.
|
||||
// It does not validate whether v contains valid UTF-8.
|
||||
func (b *Buffer) EncodeStringBytes(v string) error {
|
||||
b.buf = protowire.AppendString(b.buf, v)
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeMessage appends a length-prefixed encoded message to the buffer.
|
||||
func (b *Buffer) EncodeMessage(m Message) error {
|
||||
var err error
|
||||
b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
|
||||
b.buf, err = marshalAppend(b.buf, m, b.deterministic)
|
||||
return err
|
||||
}
|
||||
|
||||
// DecodeVarint consumes an encoded unsigned varint from the buffer.
|
||||
func (b *Buffer) DecodeVarint() (uint64, error) {
|
||||
v, n := protowire.ConsumeVarint(b.buf[b.idx:])
|
||||
if n < 0 {
|
||||
return 0, protowire.ParseError(n)
|
||||
}
|
||||
b.idx += n
|
||||
return uint64(v), nil
|
||||
}
|
||||
|
||||
// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
|
||||
func (b *Buffer) DecodeZigzag32() (uint64, error) {
|
||||
v, err := b.DecodeVarint()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
|
||||
}
|
||||
|
||||
// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
|
||||
func (b *Buffer) DecodeZigzag64() (uint64, error) {
|
||||
v, err := b.DecodeVarint()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
|
||||
}
|
||||
|
||||
// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
|
||||
func (b *Buffer) DecodeFixed32() (uint64, error) {
|
||||
v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
|
||||
if n < 0 {
|
||||
return 0, protowire.ParseError(n)
|
||||
}
|
||||
b.idx += n
|
||||
return uint64(v), nil
|
||||
}
|
||||
|
||||
// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
|
||||
func (b *Buffer) DecodeFixed64() (uint64, error) {
|
||||
v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
|
||||
if n < 0 {
|
||||
return 0, protowire.ParseError(n)
|
||||
}
|
||||
b.idx += n
|
||||
return uint64(v), nil
|
||||
}
|
||||
|
||||
// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer.
|
||||
// If alloc is specified, it returns a copy the raw bytes
|
||||
// rather than a sub-slice of the buffer.
|
||||
func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
|
||||
v, n := protowire.ConsumeBytes(b.buf[b.idx:])
|
||||
if n < 0 {
|
||||
return nil, protowire.ParseError(n)
|
||||
}
|
||||
b.idx += n
|
||||
if alloc {
|
||||
v = append([]byte(nil), v...)
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer.
|
||||
// It does not validate whether the raw bytes contain valid UTF-8.
|
||||
func (b *Buffer) DecodeStringBytes() (string, error) {
|
||||
v, n := protowire.ConsumeString(b.buf[b.idx:])
|
||||
if n < 0 {
|
||||
return "", protowire.ParseError(n)
|
||||
}
|
||||
b.idx += n
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// DecodeMessage consumes a length-prefixed message from the buffer.
|
||||
// It does not reset m before unmarshaling.
|
||||
func (b *Buffer) DecodeMessage(m Message) error {
|
||||
v, err := b.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return UnmarshalMerge(v, m)
|
||||
}
|
||||
|
||||
// DecodeGroup consumes a message group from the buffer.
|
||||
// It assumes that the start group marker has already been consumed and
|
||||
// consumes all bytes until (and including the end group marker).
|
||||
// It does not reset m before unmarshaling.
|
||||
func (b *Buffer) DecodeGroup(m Message) error {
|
||||
v, n, err := consumeGroup(b.buf[b.idx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.idx += n
|
||||
return UnmarshalMerge(v, m)
|
||||
}
|
||||
|
||||
// consumeGroup parses b until it finds an end group marker, returning
|
||||
// the raw bytes of the message (excluding the end group marker) and the
|
||||
// the total length of the message (including the end group marker).
|
||||
func consumeGroup(b []byte) ([]byte, int, error) {
|
||||
b0 := b
|
||||
depth := 1 // assume this follows a start group marker
|
||||
for {
|
||||
_, wtyp, tagLen := protowire.ConsumeTag(b)
|
||||
if tagLen < 0 {
|
||||
return nil, 0, protowire.ParseError(tagLen)
|
||||
}
|
||||
b = b[tagLen:]
|
||||
|
||||
var valLen int
|
||||
switch wtyp {
|
||||
case protowire.VarintType:
|
||||
_, valLen = protowire.ConsumeVarint(b)
|
||||
case protowire.Fixed32Type:
|
||||
_, valLen = protowire.ConsumeFixed32(b)
|
||||
case protowire.Fixed64Type:
|
||||
_, valLen = protowire.ConsumeFixed64(b)
|
||||
case protowire.BytesType:
|
||||
_, valLen = protowire.ConsumeBytes(b)
|
||||
case protowire.StartGroupType:
|
||||
depth++
|
||||
case protowire.EndGroupType:
|
||||
depth--
|
||||
default:
|
||||
return nil, 0, errors.New("proto: cannot parse reserved wire type")
|
||||
}
|
||||
if valLen < 0 {
|
||||
return nil, 0, protowire.ParseError(valLen)
|
||||
}
|
||||
b = b[valLen:]
|
||||
|
||||
if depth == 0 {
|
||||
return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
|
||||
}
|
||||
}
|
||||
}
|
253 vendor/github.com/golang/protobuf/proto/clone.go generated vendored
@@ -1,253 +0,0 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Protocol buffer deep copy and merge.
|
||||
// TODO: RawMessage.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Clone returns a deep copy of a protocol buffer.
|
||||
func Clone(src Message) Message {
|
||||
in := reflect.ValueOf(src)
|
||||
if in.IsNil() {
|
||||
return src
|
||||
}
|
||||
out := reflect.New(in.Type().Elem())
|
||||
dst := out.Interface().(Message)
|
||||
Merge(dst, src)
|
||||
return dst
|
||||
}
|
||||
|
||||
// Merger is the interface representing objects that can merge messages of the same type.
|
||||
type Merger interface {
|
||||
// Merge merges src into this message.
|
||||
// Required and optional fields that are set in src will be set to that value in dst.
|
||||
// Elements of repeated fields will be appended.
|
||||
//
|
||||
// Merge may panic if called with a different argument type than the receiver.
|
||||
Merge(src Message)
|
||||
}
|
||||
|
||||
// generatedMerger is the custom merge method that generated protos will have.
|
||||
// We must add this method since a generate Merge method will conflict with
|
||||
// many existing protos that have a Merge data field already defined.
|
||||
type generatedMerger interface {
|
||||
XXX_Merge(src Message)
|
||||
}
|
||||
|
||||
// Merge merges src into dst.
|
||||
// Required and optional fields that are set in src will be set to that value in dst.
|
||||
// Elements of repeated fields will be appended.
|
||||
// Merge panics if src and dst are not the same type, or if dst is nil.
|
||||
func Merge(dst, src Message) {
|
||||
if m, ok := dst.(Merger); ok {
|
||||
m.Merge(src)
|
||||
return
|
||||
}
|
||||
|
||||
in := reflect.ValueOf(src)
|
||||
out := reflect.ValueOf(dst)
|
||||
if out.IsNil() {
|
||||
panic("proto: nil destination")
|
||||
}
|
||||
if in.Type() != out.Type() {
|
||||
panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
|
||||
}
|
||||
if in.IsNil() {
|
||||
return // Merge from nil src is a noop
|
||||
}
|
||||
if m, ok := dst.(generatedMerger); ok {
|
||||
m.XXX_Merge(src)
|
||||
return
|
||||
}
|
||||
mergeStruct(out.Elem(), in.Elem())
|
||||
}
|
||||
|
||||
func mergeStruct(out, in reflect.Value) {
|
||||
sprop := GetProperties(in.Type())
|
||||
for i := 0; i < in.NumField(); i++ {
|
||||
f := in.Type().Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
|
||||
}
|
||||
|
||||
if emIn, err := extendable(in.Addr().Interface()); err == nil {
|
||||
emOut, _ := extendable(out.Addr().Interface())
|
||||
mIn, muIn := emIn.extensionsRead()
|
||||
if mIn != nil {
|
||||
mOut := emOut.extensionsWrite()
|
||||
muIn.Lock()
|
||||
mergeExtension(mOut, mIn)
|
||||
muIn.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
uf := in.FieldByName("XXX_unrecognized")
|
||||
if !uf.IsValid() {
|
||||
return
|
||||
}
|
||||
uin := uf.Bytes()
|
||||
if len(uin) > 0 {
|
||||
out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
|
||||
}
|
||||
}
|
||||
|
||||
// mergeAny performs a merge between two values of the same type.
|
||||
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
|
||||
// prop is set if this is a struct field (it may be nil).
|
||||
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
|
||||
if in.Type() == protoMessageType {
|
||||
if !in.IsNil() {
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
|
||||
} else {
|
||||
Merge(out.Interface().(Message), in.Interface().(Message))
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
switch in.Kind() {
|
||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
|
||||
reflect.String, reflect.Uint32, reflect.Uint64:
|
||||
if !viaPtr && isProto3Zero(in) {
|
||||
return
|
||||
}
|
||||
out.Set(in)
|
||||
case reflect.Interface:
|
||||
// Probably a oneof field; copy non-nil values.
|
||||
if in.IsNil() {
|
||||
return
|
||||
}
|
||||
// Allocate destination if it is not set, or set to a different type.
|
||||
// Otherwise we will merge as normal.
|
||||
if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
|
||||
out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
|
||||
}
|
||||
mergeAny(out.Elem(), in.Elem(), false, nil)
|
||||
case reflect.Map:
|
||||
if in.Len() == 0 {
|
||||
return
|
||||
}
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.MakeMap(in.Type()))
|
||||
}
|
||||
// For maps with value types of *T or []byte we need to deep copy each value.
|
||||
elemKind := in.Type().Elem().Kind()
|
||||
for _, key := range in.MapKeys() {
|
||||
var val reflect.Value
|
||||
switch elemKind {
|
||||
case reflect.Ptr:
|
||||
val = reflect.New(in.Type().Elem().Elem())
|
||||
mergeAny(val, in.MapIndex(key), false, nil)
|
||||
case reflect.Slice:
|
||||
val = in.MapIndex(key)
|
||||
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
|
||||
default:
|
||||
val = in.MapIndex(key)
|
||||
}
|
||||
out.SetMapIndex(key, val)
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if in.IsNil() {
|
||||
return
|
||||
}
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.New(in.Elem().Type()))
|
||||
}
|
||||
mergeAny(out.Elem(), in.Elem(), true, nil)
|
||||
case reflect.Slice:
|
||||
if in.IsNil() {
|
||||
return
|
||||
}
|
||||
if in.Type().Elem().Kind() == reflect.Uint8 {
|
||||
// []byte is a scalar bytes field, not a repeated field.
|
||||
|
||||
// Edge case: if this is in a proto3 message, a zero length
|
||||
// bytes field is considered the zero value, and should not
|
||||
// be merged.
|
||||
if prop != nil && prop.proto3 && in.Len() == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Make a deep copy.
|
||||
// Append to []byte{} instead of []byte(nil) so that we never end up
|
||||
// with a nil result.
|
||||
out.SetBytes(append([]byte{}, in.Bytes()...))
|
||||
return
|
||||
}
|
||||
n := in.Len()
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.MakeSlice(in.Type(), 0, n))
|
||||
}
|
||||
switch in.Type().Elem().Kind() {
|
||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
|
||||
reflect.String, reflect.Uint32, reflect.Uint64:
|
||||
out.Set(reflect.AppendSlice(out, in))
|
||||
default:
|
||||
for i := 0; i < n; i++ {
|
||||
x := reflect.Indirect(reflect.New(in.Type().Elem()))
|
||||
mergeAny(x, in.Index(i), false, nil)
|
||||
out.Set(reflect.Append(out, x))
|
||||
}
|
||||
}
|
||||
case reflect.Struct:
|
||||
mergeStruct(out, in)
|
||||
default:
|
||||
// unknown type, so not a protocol buffer
|
||||
log.Printf("proto: don't know how to copy %v", in)
|
||||
}
|
||||
}
|
||||
|
||||
func mergeExtension(out, in map[int32]Extension) {
|
||||
for extNum, eIn := range in {
|
||||
eOut := Extension{desc: eIn.desc}
|
||||
if eIn.value != nil {
|
||||
v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
|
||||
mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
|
||||
eOut.value = v.Interface()
|
||||
}
|
||||
if eIn.enc != nil {
|
||||
eOut.enc = make([]byte, len(eIn.enc))
|
||||
copy(eOut.enc, eIn.enc)
|
||||
}
|
||||
|
||||
out[extNum] = eOut
|
||||
}
|
||||
}
|
427 vendor/github.com/golang/protobuf/proto/decode.go generated vendored
@@ -1,427 +0,0 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for decoding protocol buffer data to construct in-memory representations.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// errOverflow is returned when an integer is too large to be represented.
|
||||
var errOverflow = errors.New("proto: integer overflow")
|
||||
|
||||
// ErrInternalBadWireType is returned by generated code when an incorrect
|
||||
// wire type is encountered. It does not get returned to user code.
|
||||
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
|
||||
|
||||
// DecodeVarint reads a varint-encoded integer from the slice.
|
||||
// It returns the integer and the number of bytes consumed, or
|
||||
// zero if there is not enough.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func DecodeVarint(buf []byte) (x uint64, n int) {
|
||||
for shift := uint(0); shift < 64; shift += 7 {
|
||||
if n >= len(buf) {
|
||||
return 0, 0
|
||||
}
|
||||
b := uint64(buf[n])
|
||||
n++
|
||||
x |= (b & 0x7F) << shift
|
||||
if (b & 0x80) == 0 {
|
||||
return x, n
|
||||
}
|
||||
}
|
||||
|
||||
// The number is too large to represent in a 64-bit value.
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
|
||||
i := p.index
|
||||
l := len(p.buf)
|
||||
|
||||
for shift := uint(0); shift < 64; shift += 7 {
|
||||
if i >= l {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
b := p.buf[i]
|
||||
i++
|
||||
x |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
p.index = i
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// The number is too large to represent in a 64-bit value.
|
||||
err = errOverflow
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeVarint reads a varint-encoded integer from the Buffer.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
||||
i := p.index
|
||||
buf := p.buf
|
||||
|
||||
if i >= len(buf) {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
} else if buf[i] < 0x80 {
|
||||
p.index++
|
||||
return uint64(buf[i]), nil
|
||||
} else if len(buf)-i < 10 {
|
||||
return p.decodeVarintSlow()
|
||||
}
|
||||
|
||||
var b uint64
|
||||
// we already checked the first byte
|
||||
x = uint64(buf[i]) - 0x80
|
||||
i++
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 7
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 7
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 14
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 14
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 21
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 21
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 28
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 28
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 35
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 35
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 42
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 42
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 49
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 49
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 56
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 56
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 63
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
|
||||
return 0, errOverflow
|
||||
|
||||
done:
|
||||
p.index = i
|
||||
return x, nil
|
||||
}
|
||||
|
||||
// DecodeFixed64 reads a 64-bit integer from the Buffer.
|
||||
// This is the format for the
|
||||
// fixed64, sfixed64, and double protocol buffer types.
|
||||
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
|
||||
// x, err already 0
|
||||
i := p.index + 8
|
||||
if i < 0 || i > len(p.buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
p.index = i
|
||||
|
||||
x = uint64(p.buf[i-8])
|
||||
x |= uint64(p.buf[i-7]) << 8
|
||||
x |= uint64(p.buf[i-6]) << 16
|
||||
x |= uint64(p.buf[i-5]) << 24
|
||||
x |= uint64(p.buf[i-4]) << 32
|
||||
x |= uint64(p.buf[i-3]) << 40
|
||||
x |= uint64(p.buf[i-2]) << 48
|
||||
x |= uint64(p.buf[i-1]) << 56
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeFixed32 reads a 32-bit integer from the Buffer.
|
||||
// This is the format for the
|
||||
// fixed32, sfixed32, and float protocol buffer types.
|
||||
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
|
||||
// x, err already 0
|
||||
i := p.index + 4
|
||||
if i < 0 || i > len(p.buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
p.index = i
|
||||
|
||||
x = uint64(p.buf[i-4])
|
||||
x |= uint64(p.buf[i-3]) << 8
|
||||
x |= uint64(p.buf[i-2]) << 16
|
||||
x |= uint64(p.buf[i-1]) << 24
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
|
||||
// from the Buffer.
|
||||
// This is the format used for the sint64 protocol buffer type.
|
||||
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
|
||||
x, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
|
||||
// from the Buffer.
|
||||
// This is the format used for the sint32 protocol buffer type.
|
||||
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
|
||||
x, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
|
||||
// This is the format used for the bytes protocol buffer
|
||||
// type and for embedded messages.
|
||||
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
|
||||
n, err := p.DecodeVarint()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nb := int(n)
|
||||
if nb < 0 {
|
||||
return nil, fmt.Errorf("proto: bad byte length %d", nb)
|
||||
}
|
||||
end := p.index + nb
|
||||
if end < p.index || end > len(p.buf) {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
if !alloc {
|
||||
// todo: check if can get more uses of alloc=false
|
||||
buf = p.buf[p.index:end]
|
||||
p.index += nb
|
||||
return
|
||||
}
|
||||
|
||||
buf = make([]byte, nb)
|
||||
copy(buf, p.buf[p.index:])
|
||||
p.index += nb
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeStringBytes reads an encoded string from the Buffer.
|
||||
// This is the format used for the proto2 string type.
|
||||
func (p *Buffer) DecodeStringBytes() (s string, err error) {
|
||||
buf, err := p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
// Unmarshaler is the interface representing objects that can
|
||||
// unmarshal themselves. The argument points to data that may be
|
||||
// overwritten, so implementations should not keep references to the
|
||||
// buffer.
|
||||
// Unmarshal implementations should not clear the receiver.
|
||||
// Any unmarshaled data should be merged into the receiver.
|
||||
// Callers of Unmarshal that do not want to retain existing data
|
||||
// should Reset the receiver before calling Unmarshal.
|
||||
type Unmarshaler interface {
|
||||
Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// newUnmarshaler is the interface representing objects that can
|
||||
// unmarshal themselves. The semantics are identical to Unmarshaler.
|
||||
//
|
||||
// This exists to support protoc-gen-go generated messages.
|
||||
// The proto package will stop type-asserting to this interface in the future.
|
||||
//
|
||||
// DO NOT DEPEND ON THIS.
|
||||
type newUnmarshaler interface {
|
||||
XXX_Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// Unmarshal parses the protocol buffer representation in buf and places the
|
||||
// decoded result in pb. If the struct underlying pb does not match
|
||||
// the data in buf, the results can be unpredictable.
|
||||
//
|
||||
// Unmarshal resets pb before starting to unmarshal, so any
|
||||
// existing data in pb is always removed. Use UnmarshalMerge
|
||||
// to preserve and append to existing data.
|
||||
func Unmarshal(buf []byte, pb Message) error {
|
||||
pb.Reset()
|
||||
if u, ok := pb.(newUnmarshaler); ok {
|
||||
return u.XXX_Unmarshal(buf)
|
||||
}
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
return u.Unmarshal(buf)
|
||||
}
|
||||
return NewBuffer(buf).Unmarshal(pb)
|
||||
}
|
||||
|
||||
// UnmarshalMerge parses the protocol buffer representation in buf and
|
||||
// writes the decoded result to pb. If the struct underlying pb does not match
|
||||
// the data in buf, the results can be unpredictable.
|
||||
//
|
||||
// UnmarshalMerge merges into existing data in pb.
|
||||
// Most code should use Unmarshal instead.
|
||||
func UnmarshalMerge(buf []byte, pb Message) error {
|
||||
if u, ok := pb.(newUnmarshaler); ok {
|
||||
return u.XXX_Unmarshal(buf)
|
||||
}
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
// NOTE: The history of proto have unfortunately been inconsistent
|
||||
// whether Unmarshaler should or should not implicitly clear itself.
|
||||
// Some implementations do, most do not.
|
||||
// Thus, calling this here may or may not do what people want.
|
||||
//
|
||||
// See https://github.com/golang/protobuf/issues/424
|
||||
return u.Unmarshal(buf)
|
||||
}
|
||||
return NewBuffer(buf).Unmarshal(pb)
|
||||
}
|
||||
|
||||
// DecodeMessage reads a count-delimited message from the Buffer.
|
||||
func (p *Buffer) DecodeMessage(pb Message) error {
|
||||
enc, err := p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return NewBuffer(enc).Unmarshal(pb)
|
||||
}
|
||||
|
||||
// DecodeGroup reads a tag-delimited group from the Buffer.
|
||||
// StartGroup tag is already consumed. This function consumes
|
||||
// EndGroup tag.
|
||||
func (p *Buffer) DecodeGroup(pb Message) error {
|
||||
b := p.buf[p.index:]
|
||||
x, y := findEndGroup(b)
|
||||
if x < 0 {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
err := Unmarshal(b[:x], pb)
|
||||
p.index += y
|
||||
return err
|
||||
}
|
||||
|
||||
// Unmarshal parses the protocol buffer representation in the
|
||||
// Buffer and places the decoded result in pb. If the struct
|
||||
// underlying pb does not match the data in the buffer, the results can be
|
||||
// unpredictable.
|
||||
//
|
||||
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
|
||||
func (p *Buffer) Unmarshal(pb Message) error {
|
||||
// If the object can unmarshal itself, let it.
|
||||
if u, ok := pb.(newUnmarshaler); ok {
|
||||
err := u.XXX_Unmarshal(p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
// NOTE: The history of proto have unfortunately been inconsistent
|
||||
// whether Unmarshaler should or should not implicitly clear itself.
|
||||
// Some implementations do, most do not.
|
||||
// Thus, calling this here may or may not do what people want.
|
||||
//
|
||||
// See https://github.com/golang/protobuf/issues/424
|
||||
err := u.Unmarshal(p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
||||
|
||||
// Slow workaround for messages that aren't Unmarshalers.
|
||||
// This includes some hand-coded .pb.go files and
|
||||
// bootstrap protos.
|
||||
// TODO: fix all of those and then add Unmarshal to
|
||||
// the Message interface. Then:
|
||||
// The cast above and code below can be deleted.
|
||||
// The old unmarshaler can be deleted.
|
||||
// Clients can call Unmarshal directly (can already do that, actually).
|
||||
var info InternalMessageInfo
|
||||
err := info.Unmarshal(pb, p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
63 vendor/github.com/golang/protobuf/proto/defaults.go generated vendored Normal file
@@ -0,0 +1,63 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
)
|
||||
|
||||
// SetDefaults sets unpopulated scalar fields to their default values.
|
||||
// Fields within a oneof are not set even if they have a default value.
|
||||
// SetDefaults is recursively called upon any populated message fields.
|
||||
func SetDefaults(m Message) {
|
||||
if m != nil {
|
||||
setDefaults(MessageReflect(m))
|
||||
}
|
||||
}
|
||||
|
||||
func setDefaults(m protoreflect.Message) {
|
||||
fds := m.Descriptor().Fields()
|
||||
for i := 0; i < fds.Len(); i++ {
|
||||
fd := fds.Get(i)
|
||||
if !m.Has(fd) {
|
||||
if fd.HasDefault() && fd.ContainingOneof() == nil {
|
||||
v := fd.Default()
|
||||
if fd.Kind() == protoreflect.BytesKind {
|
||||
v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
|
||||
}
|
||||
m.Set(fd, v)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
|
||||
switch {
|
||||
// Handle singular message.
|
||||
case fd.Cardinality() != protoreflect.Repeated:
|
||||
if fd.Message() != nil {
|
||||
setDefaults(m.Get(fd).Message())
|
||||
}
|
||||
// Handle list of messages.
|
||||
case fd.IsList():
|
||||
if fd.Message() != nil {
|
||||
ls := m.Get(fd).List()
|
||||
for i := 0; i < ls.Len(); i++ {
|
||||
setDefaults(ls.Get(i).Message())
|
||||
}
|
||||
}
|
||||
// Handle map of messages.
|
||||
case fd.IsMap():
|
||||
if fd.MapValue().Message() != nil {
|
||||
ms := m.Get(fd).Map()
|
||||
ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
|
||||
setDefaults(v.Message())
|
||||
return true
|
||||
})
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
126 vendor/github.com/golang/protobuf/proto/deprecated.go generated vendored
@@ -1,63 +1,113 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import "errors"
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
// Deprecated: do not use.
|
||||
protoV2 "google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var (
|
||||
// Deprecated: No longer returned.
|
||||
ErrNil = errors.New("proto: Marshal called with nil")
|
||||
|
||||
// Deprecated: No longer returned.
|
||||
ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
|
||||
|
||||
// Deprecated: No longer returned.
|
||||
ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
|
||||
)
|
||||
|
||||
// Deprecated: Do not use.
|
||||
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
|
||||
|
||||
// Deprecated: do not use.
|
||||
// Deprecated: Do not use.
|
||||
func GetStats() Stats { return Stats{} }
|
||||
|
||||
// Deprecated: do not use.
|
||||
// Deprecated: Do not use.
|
||||
func MarshalMessageSet(interface{}) ([]byte, error) {
|
||||
return nil, errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
// Deprecated: Do not use.
|
||||
func UnmarshalMessageSet([]byte, interface{}) error {
|
||||
return errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
// Deprecated: Do not use.
|
||||
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
|
||||
return nil, errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
// Deprecated: Do not use.
|
||||
func UnmarshalMessageSetJSON([]byte, interface{}) error {
|
||||
return errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
// Deprecated: Do not use.
|
||||
func RegisterMessageSetType(Message, int32, string) {}
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func EnumName(m map[int32]string, v int32) string {
|
||||
s, ok := m[v]
|
||||
if ok {
|
||||
return s
|
||||
}
|
||||
return strconv.Itoa(int(v))
|
||||
}
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
|
||||
if data[0] == '"' {
|
||||
// New style: enums are strings.
|
||||
var repr string
|
||||
if err := json.Unmarshal(data, &repr); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
val, ok := m[repr]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
// Old style: enums are ints.
|
||||
var val int32
|
||||
if err := json.Unmarshal(data, &val); err != nil {
|
||||
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// Deprecated: Do not use; this type existed for intenal-use only.
|
||||
type InternalMessageInfo struct{}
|
||||
|
||||
// Deprecated: Do not use; this method existed for intenal-use only.
|
||||
func (*InternalMessageInfo) DiscardUnknown(m Message) {
|
||||
DiscardUnknown(m)
|
||||
}
|
||||
|
||||
// Deprecated: Do not use; this method existed for intenal-use only.
|
||||
func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
|
||||
return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
|
||||
}
|
||||
|
||||
// Deprecated: Do not use; this method existed for intenal-use only.
|
||||
func (*InternalMessageInfo) Merge(dst, src Message) {
|
||||
protoV2.Merge(MessageV2(dst), MessageV2(src))
|
||||
}
|
||||
|
||||
// Deprecated: Do not use; this method existed for intenal-use only.
|
||||
func (*InternalMessageInfo) Size(m Message) int {
|
||||
return protoV2.Size(MessageV2(m))
|
||||
}
|
||||
|
||||
// Deprecated: Do not use; this method existed for intenal-use only.
|
||||
func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
|
||||
return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
|
||||
}
|
||||
|
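The reworked UnmarshalJSONEnum above accepts both the new string spelling and the old numeric spelling of an enum value. A self-contained sketch of the same two-branch decoding, with an invented Color enum map rather than generated proto code.

// string-or-int enum decoding sketch (illustrative only)
package main

import (
	"encoding/json"
	"fmt"
)

var colorValue = map[string]int32{"RED": 0, "GREEN": 1, "BLUE": 2}

// decodeEnum accepts either the new-style string form ("GREEN") or the
// old-style numeric form (1), mirroring the branch structure of
// UnmarshalJSONEnum.
func decodeEnum(data []byte) (int32, error) {
	if len(data) > 0 && data[0] == '"' {
		var name string
		if err := json.Unmarshal(data, &name); err != nil {
			return 0, err
		}
		v, ok := colorValue[name]
		if !ok {
			return 0, fmt.Errorf("unrecognized Color value %q", name)
		}
		return v, nil
	}
	var v int32
	if err := json.Unmarshal(data, &v); err != nil {
		return 0, fmt.Errorf("cannot unmarshal %#q into Color", data)
	}
	return v, nil
}

func main() {
	for _, raw := range []string{`"GREEN"`, `2`} {
		v, err := decodeEnum([]byte(raw))
		fmt.Println(raw, "->", v, err)
	}
}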
356 vendor/github.com/golang/protobuf/proto/discard.go generated vendored
@@ -1,48 +1,13 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
)
|
||||
|
||||
type generatedDiscarder interface {
|
||||
XXX_DiscardUnknown()
|
||||
}
|
||||
|
||||
// DiscardUnknown recursively discards all unknown fields from this message
|
||||
// and all embedded messages.
|
||||
//
|
||||
@ -51,300 +16,43 @@ type generatedDiscarder interface {
|
||||
// marshal to be able to produce a message that continues to have those
|
||||
// unrecognized fields. To avoid this, DiscardUnknown is used to
|
||||
// explicitly clear the unknown fields after unmarshaling.
|
||||
//
|
||||
// For proto2 messages, the unknown fields of message extensions are only
|
||||
// discarded from messages that have been accessed via GetExtension.
|
||||
func DiscardUnknown(m Message) {
|
||||
if m, ok := m.(generatedDiscarder); ok {
|
||||
m.XXX_DiscardUnknown()
|
||||
return
|
||||
if m != nil {
|
||||
discardUnknown(MessageReflect(m))
|
||||
}
|
||||
// TODO: Dynamically populate a InternalMessageInfo for legacy messages,
|
||||
// but the master branch has no implementation for InternalMessageInfo,
|
||||
// so it would be more work to replicate that approach.
|
||||
discardLegacy(m)
|
||||
}
|
||||
|
||||
// DiscardUnknown recursively discards all unknown fields.
|
||||
func (a *InternalMessageInfo) DiscardUnknown(m Message) {
|
||||
di := atomicLoadDiscardInfo(&a.discard)
|
||||
if di == nil {
|
||||
di = getDiscardInfo(reflect.TypeOf(m).Elem())
|
||||
atomicStoreDiscardInfo(&a.discard, di)
|
||||
}
|
||||
di.discard(toPointer(&m))
|
||||
}
|
||||
|
||||
type discardInfo struct {
|
||||
typ reflect.Type
|
||||
|
||||
initialized int32 // 0: only typ is valid, 1: everything is valid
|
||||
lock sync.Mutex
|
||||
|
||||
fields []discardFieldInfo
|
||||
unrecognized field
|
||||
}
|
||||
|
||||
type discardFieldInfo struct {
|
||||
field field // Offset of field, guaranteed to be valid
|
||||
discard func(src pointer)
|
||||
}
|
||||
|
||||
var (
|
||||
discardInfoMap = map[reflect.Type]*discardInfo{}
|
||||
discardInfoLock sync.Mutex
|
||||
)
|
||||
|
||||
func getDiscardInfo(t reflect.Type) *discardInfo {
|
||||
discardInfoLock.Lock()
|
||||
defer discardInfoLock.Unlock()
|
||||
di := discardInfoMap[t]
|
||||
if di == nil {
|
||||
di = &discardInfo{typ: t}
|
||||
discardInfoMap[t] = di
|
||||
}
|
||||
return di
|
||||
}
|
||||
|
||||
func (di *discardInfo) discard(src pointer) {
|
||||
if src.isNil() {
|
||||
return // Nothing to do.
|
||||
}
|
||||
|
||||
if atomic.LoadInt32(&di.initialized) == 0 {
|
||||
di.computeDiscardInfo()
|
||||
}
|
||||
|
||||
for _, fi := range di.fields {
|
||||
sfp := src.offset(fi.field)
|
||||
fi.discard(sfp)
|
||||
}
|
||||
|
||||
// For proto2 messages, only discard unknown fields in message extensions
|
||||
// that have been accessed via GetExtension.
|
||||
if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
|
||||
// Ignore lock since DiscardUnknown is not concurrency safe.
|
||||
emm, _ := em.extensionsRead()
|
||||
for _, mx := range emm {
|
||||
if m, ok := mx.value.(Message); ok {
|
||||
DiscardUnknown(m)
|
||||
func discardUnknown(m protoreflect.Message) {
|
||||
m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
|
||||
switch {
|
||||
// Handle singular message.
|
||||
case fd.Cardinality() != protoreflect.Repeated:
|
||||
if fd.Message() != nil {
|
||||
discardUnknown(m.Get(fd).Message())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if di.unrecognized.IsValid() {
|
||||
*src.offset(di.unrecognized).toBytes() = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (di *discardInfo) computeDiscardInfo() {
|
||||
di.lock.Lock()
|
||||
defer di.lock.Unlock()
|
||||
if di.initialized != 0 {
|
||||
return
|
||||
}
|
||||
t := di.typ
|
||||
n := t.NumField()
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
f := t.Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
|
||||
dfi := discardFieldInfo{field: toField(&f)}
|
||||
tf := f.Type
|
||||
|
||||
// Unwrap tf to get its most basic type.
|
||||
var isPointer, isSlice bool
|
||||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
||||
isSlice = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if tf.Kind() == reflect.Ptr {
|
||||
isPointer = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
|
||||
}
|
||||
|
||||
switch tf.Kind() {
|
||||
case reflect.Struct:
|
||||
switch {
|
||||
case !isPointer:
|
||||
panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
|
||||
case isSlice: // E.g., []*pb.T
|
||||
di := getDiscardInfo(tf)
|
||||
dfi.discard = func(src pointer) {
|
||||
sps := src.getPointerSlice()
|
||||
for _, sp := range sps {
|
||||
if !sp.isNil() {
|
||||
di.discard(sp)
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., *pb.T
|
||||
di := getDiscardInfo(tf)
|
||||
dfi.discard = func(src pointer) {
|
||||
sp := src.getPointer()
|
||||
if !sp.isNil() {
|
||||
di.discard(sp)
|
||||
}
|
||||
// Handle list of messages.
|
||||
case fd.IsList():
|
||||
if fd.Message() != nil {
|
||||
ls := m.Get(fd).List()
|
||||
for i := 0; i < ls.Len(); i++ {
|
||||
discardUnknown(ls.Get(i).Message())
|
||||
}
|
||||
}
|
||||
case reflect.Map:
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
|
||||
default: // E.g., map[K]V
|
||||
if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
|
||||
dfi.discard = func(src pointer) {
|
||||
sm := src.asPointerTo(tf).Elem()
|
||||
if sm.Len() == 0 {
|
||||
return
|
||||
}
|
||||
for _, key := range sm.MapKeys() {
|
||||
val := sm.MapIndex(key)
|
||||
DiscardUnknown(val.Interface().(Message))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
dfi.discard = func(pointer) {} // Noop
|
||||
}
|
||||
}
|
||||
case reflect.Interface:
|
||||
// Must be oneof field.
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
|
||||
default: // E.g., interface{}
|
||||
// TODO: Make this faster?
|
||||
dfi.discard = func(src pointer) {
|
||||
su := src.asPointerTo(tf).Elem()
|
||||
if !su.IsNil() {
|
||||
sv := su.Elem().Elem().Field(0)
|
||||
if sv.Kind() == reflect.Ptr && sv.IsNil() {
|
||||
return
|
||||
}
|
||||
switch sv.Type().Kind() {
|
||||
case reflect.Ptr: // Proto struct (e.g., *T)
|
||||
DiscardUnknown(sv.Interface().(Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
continue
|
||||
}
|
||||
di.fields = append(di.fields, dfi)
|
||||
}
|
||||
|
||||
di.unrecognized = invalidField
|
||||
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
|
||||
if f.Type != reflect.TypeOf([]byte{}) {
|
||||
panic("expected XXX_unrecognized to be of type []byte")
|
||||
}
|
||||
di.unrecognized = toField(&f)
|
||||
}
|
||||
|
||||
atomic.StoreInt32(&di.initialized, 1)
|
||||
}
|
||||
|
||||
func discardLegacy(m Message) {
|
||||
v := reflect.ValueOf(m)
|
||||
if v.Kind() != reflect.Ptr || v.IsNil() {
|
||||
return
|
||||
}
|
||||
v = v.Elem()
|
||||
if v.Kind() != reflect.Struct {
|
||||
return
|
||||
}
|
||||
t := v.Type()
|
||||
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
vf := v.Field(i)
|
||||
tf := f.Type
|
||||
|
||||
// Unwrap tf to get its most basic type.
|
||||
var isPointer, isSlice bool
|
||||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
||||
isSlice = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if tf.Kind() == reflect.Ptr {
|
||||
isPointer = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
|
||||
}
|
||||
|
||||
switch tf.Kind() {
|
||||
case reflect.Struct:
|
||||
switch {
|
||||
case !isPointer:
|
||||
panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
|
||||
case isSlice: // E.g., []*pb.T
|
||||
for j := 0; j < vf.Len(); j++ {
|
||||
discardLegacy(vf.Index(j).Interface().(Message))
|
||||
}
|
||||
default: // E.g., *pb.T
|
||||
discardLegacy(vf.Interface().(Message))
|
||||
}
|
||||
case reflect.Map:
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
|
||||
default: // E.g., map[K]V
|
||||
tv := vf.Type().Elem()
|
||||
if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
|
||||
for _, key := range vf.MapKeys() {
|
||||
val := vf.MapIndex(key)
|
||||
discardLegacy(val.Interface().(Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Interface:
|
||||
// Must be oneof field.
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
|
||||
default: // E.g., test_proto.isCommunique_Union interface
|
||||
if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
|
||||
vf = vf.Elem() // E.g., *test_proto.Communique_Msg
|
||||
if !vf.IsNil() {
|
||||
vf = vf.Elem() // E.g., test_proto.Communique_Msg
|
||||
vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
|
||||
if vf.Kind() == reflect.Ptr {
|
||||
discardLegacy(vf.Interface().(Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
// Handle map of messages.
|
||||
case fd.IsMap():
|
||||
if fd.MapValue().Message() != nil {
|
||||
ms := m.Get(fd).Map()
|
||||
ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
|
||||
discardUnknown(v.Message())
|
||||
return true
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
|
||||
if vf.Type() != reflect.TypeOf([]byte{}) {
|
||||
panic("expected XXX_unrecognized to be of type []byte")
|
||||
}
|
||||
vf.Set(reflect.ValueOf([]byte(nil)))
|
||||
}
|
||||
|
||||
// For proto2 messages, only discard unknown fields in message extensions
|
||||
// that have been accessed via GetExtension.
|
||||
if em, err := extendable(m); err == nil {
|
||||
// Ignore lock since discardLegacy is not concurrency safe.
|
||||
emm, _ := em.extensionsRead()
|
||||
for _, mx := range emm {
|
||||
if m, ok := mx.value.(Message); ok {
|
||||
discardLegacy(m)
|
||||
}
|
||||
}
|
||||
// Discard unknown fields.
|
||||
if len(m.GetUnknown()) > 0 {
|
||||
m.SetUnknown(nil)
|
||||
}
|
||||
}
|
||||
|
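The replacement DiscardUnknown/discardUnknown above walks the message through protoreflect instead of the old per-type discardInfo tables. A minimal usage sketch (not from this diff), relying only on the public proto API:

// stripUnknowns unmarshals data and clears any unknown fields, so that a
// later re-marshal does not forward fields this binary does not understand.
func stripUnknowns(data []byte, msg proto.Message) error {
	if err := proto.Unmarshal(data, msg); err != nil {
		return err
	}
	proto.DiscardUnknown(msg)
	return nil
}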
203 vendor/github.com/golang/protobuf/proto/encode.go generated vendored
@ -1,203 +0,0 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for encoding data into the wire format for protocol buffers.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
var (
|
||||
// errRepeatedHasNil is the error returned if Marshal is called with
|
||||
// a struct with a repeated field containing a nil element.
|
||||
errRepeatedHasNil = errors.New("proto: repeated field has nil element")
|
||||
|
||||
// errOneofHasNil is the error returned if Marshal is called with
|
||||
// a struct with a oneof field containing a nil element.
|
||||
errOneofHasNil = errors.New("proto: oneof field has nil value")
|
||||
|
||||
// ErrNil is the error returned if Marshal is called with nil.
|
||||
ErrNil = errors.New("proto: Marshal called with nil")
|
||||
|
||||
// ErrTooLarge is the error returned if Marshal is called with a
|
||||
// message that encodes to >2GB.
|
||||
ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
|
||||
)
|
||||
|
||||
// The fundamental encoders that put bytes on the wire.
|
||||
// Those that take integer types all accept uint64 and are
|
||||
// therefore of type valueEncoder.
|
||||
|
||||
const maxVarintBytes = 10 // maximum length of a varint
|
||||
|
||||
// EncodeVarint returns the varint encoding of x.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
// Not used by the package itself, but helpful to clients
|
||||
// wishing to use the same encoding.
|
||||
func EncodeVarint(x uint64) []byte {
|
||||
var buf [maxVarintBytes]byte
|
||||
var n int
|
||||
for n = 0; x > 127; n++ {
|
||||
buf[n] = 0x80 | uint8(x&0x7F)
|
||||
x >>= 7
|
||||
}
|
||||
buf[n] = uint8(x)
|
||||
n++
|
||||
return buf[0:n]
|
||||
}
|
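As a worked example of the varint format EncodeVarint implements (illustration only, not part of this diff): 300 is 0b1_0010_1100, so the low seven bits 0101100 go out first with the continuation bit set, followed by the remaining bits 0000010.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.EncodeVarint(300)
	fmt.Printf("% x\n", b) // prints "ac 02"
}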
||||
|
||||
// EncodeVarint writes a varint-encoded integer to the Buffer.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func (p *Buffer) EncodeVarint(x uint64) error {
|
||||
for x >= 1<<7 {
|
||||
p.buf = append(p.buf, uint8(x&0x7f|0x80))
|
||||
x >>= 7
|
||||
}
|
||||
p.buf = append(p.buf, uint8(x))
|
||||
return nil
|
||||
}
|
||||
|
||||
// SizeVarint returns the varint encoding size of an integer.
|
||||
func SizeVarint(x uint64) int {
|
||||
switch {
|
||||
case x < 1<<7:
|
||||
return 1
|
||||
case x < 1<<14:
|
||||
return 2
|
||||
case x < 1<<21:
|
||||
return 3
|
||||
case x < 1<<28:
|
||||
return 4
|
||||
case x < 1<<35:
|
||||
return 5
|
||||
case x < 1<<42:
|
||||
return 6
|
||||
case x < 1<<49:
|
||||
return 7
|
||||
case x < 1<<56:
|
||||
return 8
|
||||
case x < 1<<63:
|
||||
return 9
|
||||
}
|
||||
return 10
|
||||
}
|
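SizeVarint mirrors EncodeVarint without allocating: for any x, SizeVarint(x) equals len(EncodeVarint(x)), since both follow the same seven-bits-per-byte rule. A small sanity-check sketch, assuming the same proto import as above:

for _, x := range []uint64{0, 127, 128, 300, 1<<35 - 1, 1 << 63} {
	if proto.SizeVarint(x) != len(proto.EncodeVarint(x)) {
		panic("varint size mismatch") // not expected to be reached
	}
}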
||||
|
||||
// EncodeFixed64 writes a 64-bit integer to the Buffer.
|
||||
// This is the format for the
|
||||
// fixed64, sfixed64, and double protocol buffer types.
|
||||
func (p *Buffer) EncodeFixed64(x uint64) error {
|
||||
p.buf = append(p.buf,
|
||||
uint8(x),
|
||||
uint8(x>>8),
|
||||
uint8(x>>16),
|
||||
uint8(x>>24),
|
||||
uint8(x>>32),
|
||||
uint8(x>>40),
|
||||
uint8(x>>48),
|
||||
uint8(x>>56))
|
||||
return nil
|
||||
}
|
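EncodeFixed64 appends the eight bytes least-significant first, which is the wire layout for fixed64, sfixed64 and double. A sketch using the Buffer type from this file (the value is arbitrary, illustration only):

buf := proto.NewBuffer(nil)
_ = buf.EncodeFixed64(0x0102030405060708)
fmt.Printf("% x\n", buf.Bytes()) // 08 07 06 05 04 03 02 01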
||||
|
||||
// EncodeFixed32 writes a 32-bit integer to the Buffer.
|
||||
// This is the format for the
|
||||
// fixed32, sfixed32, and float protocol buffer types.
|
||||
func (p *Buffer) EncodeFixed32(x uint64) error {
|
||||
p.buf = append(p.buf,
|
||||
uint8(x),
|
||||
uint8(x>>8),
|
||||
uint8(x>>16),
|
||||
uint8(x>>24))
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
|
||||
// to the Buffer.
|
||||
// This is the format used for the sint64 protocol buffer type.
|
||||
func (p *Buffer) EncodeZigzag64(x uint64) error {
|
||||
// use signed number to get arithmetic right shift.
|
||||
return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
|
||||
// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
|
||||
// to the Buffer.
|
||||
// This is the format used for the sint32 protocol buffer type.
|
||||
func (p *Buffer) EncodeZigzag32(x uint64) error {
|
||||
// use signed number to get arithmetic right shift.
|
||||
return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
|
||||
}
|
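The zigzag transforms interleave negative and non-negative values so that small magnitudes stay small on the wire: 0 maps to 0, -1 to 1, 1 to 2, -2 to 3, and so on. A standalone sketch of the 32-bit mapping (illustration only, not part of this diff):

func zigzag32(v int32) uint32 {
	// Same (v << 1) ^ (v >> 31) trick as EncodeZigzag32; the arithmetic right
	// shift smears the sign bit across the result for negative inputs.
	return uint32((v << 1) ^ (v >> 31))
}

// zigzag32(0) == 0, zigzag32(-1) == 1, zigzag32(1) == 2, zigzag32(-2) == 3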
||||
|
||||
// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
|
||||
// This is the format used for the bytes protocol buffer
|
||||
// type and for embedded messages.
|
||||
func (p *Buffer) EncodeRawBytes(b []byte) error {
|
||||
p.EncodeVarint(uint64(len(b)))
|
||||
p.buf = append(p.buf, b...)
|
||||
return nil
|
||||
}
|
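EncodeRawBytes is the generic length-delimited form used for bytes, string and embedded message fields: a varint byte count followed by the payload. Sketch, with the same Buffer usage as above:

buf := proto.NewBuffer(nil)
_ = buf.EncodeRawBytes([]byte("hi"))
fmt.Printf("% x\n", buf.Bytes()) // 02 68 69: length 2, then the payload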
||||
|
||||
// EncodeStringBytes writes an encoded string to the Buffer.
|
||||
// This is the format used for the proto2 string type.
|
||||
func (p *Buffer) EncodeStringBytes(s string) error {
|
||||
p.EncodeVarint(uint64(len(s)))
|
||||
p.buf = append(p.buf, s...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Marshaler is the interface representing objects that can marshal themselves.
|
||||
type Marshaler interface {
|
||||
Marshal() ([]byte, error)
|
||||
}
|
||||
|
||||
// EncodeMessage writes the protocol buffer to the Buffer,
|
||||
// prefixed by a varint-encoded length.
|
||||
func (p *Buffer) EncodeMessage(pb Message) error {
|
||||
siz := Size(pb)
|
||||
p.EncodeVarint(uint64(siz))
|
||||
return p.Marshal(pb)
|
||||
}
|
||||
|
||||
// All protocol buffer fields are nillable, but be careful.
|
||||
func isNil(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return v.IsNil()
|
||||
}
|
||||
return false
|
||||
}
|
301 vendor/github.com/golang/protobuf/proto/equal.go generated vendored
@ -1,301 +0,0 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Protocol buffer comparison.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
/*
|
||||
Equal returns true iff protocol buffers a and b are equal.
|
||||
The arguments must both be pointers to protocol buffer structs.
|
||||
|
||||
Equality is defined in this way:
|
||||
- Two messages are equal iff they are the same type,
|
||||
corresponding fields are equal, unknown field sets
|
||||
are equal, and extensions sets are equal.
|
||||
- Two set scalar fields are equal iff their values are equal.
|
||||
If the fields are of a floating-point type, remember that
|
||||
NaN != x for all x, including NaN. If the message is defined
|
||||
in a proto3 .proto file, fields are not "set"; specifically,
|
||||
zero length proto3 "bytes" fields are equal (nil == {}).
|
||||
- Two repeated fields are equal iff their lengths are the same,
|
||||
and their corresponding elements are equal. Note a "bytes" field,
|
||||
although represented by []byte, is not a repeated field and the
|
||||
rule for the scalar fields described above applies.
|
||||
- Two unset fields are equal.
|
||||
- Two unknown field sets are equal if their current
|
||||
encoded state is equal.
|
||||
- Two extension sets are equal iff they have corresponding
|
||||
elements that are pairwise equal.
|
||||
- Two map fields are equal iff their lengths are the same,
|
||||
and they contain the same set of elements. Zero-length map
|
||||
fields are equal.
|
||||
- Every other combination of things is not equal.
|
||||
|
||||
The return value is undefined if a and b are not protocol buffers.
|
||||
*/
|
||||
func Equal(a, b Message) bool {
|
||||
if a == nil || b == nil {
|
||||
return a == b
|
||||
}
|
||||
v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
|
||||
if v1.Type() != v2.Type() {
|
||||
return false
|
||||
}
|
||||
if v1.Kind() == reflect.Ptr {
|
||||
if v1.IsNil() {
|
||||
return v2.IsNil()
|
||||
}
|
||||
if v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
v1, v2 = v1.Elem(), v2.Elem()
|
||||
}
|
||||
if v1.Kind() != reflect.Struct {
|
||||
return false
|
||||
}
|
||||
return equalStruct(v1, v2)
|
||||
}
|
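Equal is the field-aware comparison to prefer over reflect.DeepEqual, which can disagree on internal bookkeeping fields. A hedged sketch; *pb.Item and its Name field stand in for any generated message type and are not part of this diff:

a := &pb.Item{Name: "x"} // pb.Item is hypothetical
b := &pb.Item{Name: "x"}
if proto.Equal(a, b) {
	// Equal compares set fields, unknown fields and extensions, so two
	// semantically identical messages compare equal even when their in-memory
	// representation differs.
	fmt.Println("messages are equal")
}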
||||
|
||||
// v1 and v2 are known to have the same type.
|
||||
func equalStruct(v1, v2 reflect.Value) bool {
|
||||
sprop := GetProperties(v1.Type())
|
||||
for i := 0; i < v1.NumField(); i++ {
|
||||
f := v1.Type().Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
f1, f2 := v1.Field(i), v2.Field(i)
|
||||
if f.Type.Kind() == reflect.Ptr {
|
||||
if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
|
||||
// both unset
|
||||
continue
|
||||
} else if n1 != n2 {
|
||||
// set/unset mismatch
|
||||
return false
|
||||
}
|
||||
f1, f2 = f1.Elem(), f2.Elem()
|
||||
}
|
||||
if !equalAny(f1, f2, sprop.Prop[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
|
||||
em2 := v2.FieldByName("XXX_InternalExtensions")
|
||||
if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
|
||||
em2 := v2.FieldByName("XXX_extensions")
|
||||
if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
uf := v1.FieldByName("XXX_unrecognized")
|
||||
if !uf.IsValid() {
|
||||
return true
|
||||
}
|
||||
|
||||
u1 := uf.Bytes()
|
||||
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
|
||||
return bytes.Equal(u1, u2)
|
||||
}
|
||||
|
||||
// v1 and v2 are known to have the same type.
|
||||
// prop may be nil.
|
||||
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
|
||||
if v1.Type() == protoMessageType {
|
||||
m1, _ := v1.Interface().(Message)
|
||||
m2, _ := v2.Interface().(Message)
|
||||
return Equal(m1, m2)
|
||||
}
|
||||
switch v1.Kind() {
|
||||
case reflect.Bool:
|
||||
return v1.Bool() == v2.Bool()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v1.Float() == v2.Float()
|
||||
case reflect.Int32, reflect.Int64:
|
||||
return v1.Int() == v2.Int()
|
||||
case reflect.Interface:
|
||||
// Probably a oneof field; compare the inner values.
|
||||
n1, n2 := v1.IsNil(), v2.IsNil()
|
||||
if n1 || n2 {
|
||||
return n1 == n2
|
||||
}
|
||||
e1, e2 := v1.Elem(), v2.Elem()
|
||||
if e1.Type() != e2.Type() {
|
||||
return false
|
||||
}
|
||||
return equalAny(e1, e2, nil)
|
||||
case reflect.Map:
|
||||
if v1.Len() != v2.Len() {
|
||||
return false
|
||||
}
|
||||
for _, key := range v1.MapKeys() {
|
||||
val2 := v2.MapIndex(key)
|
||||
if !val2.IsValid() {
|
||||
// This key was not found in the second map.
|
||||
return false
|
||||
}
|
||||
if !equalAny(v1.MapIndex(key), val2, nil) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.Ptr:
|
||||
// Maps may have nil values in them, so check for nil.
|
||||
if v1.IsNil() && v2.IsNil() {
|
||||
return true
|
||||
}
|
||||
if v1.IsNil() != v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
return equalAny(v1.Elem(), v2.Elem(), prop)
|
||||
case reflect.Slice:
|
||||
if v1.Type().Elem().Kind() == reflect.Uint8 {
|
||||
// short circuit: []byte
|
||||
|
||||
// Edge case: if this is in a proto3 message, a zero length
|
||||
// bytes field is considered the zero value.
|
||||
if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
|
||||
return true
|
||||
}
|
||||
if v1.IsNil() != v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
|
||||
}
|
||||
|
||||
if v1.Len() != v2.Len() {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < v1.Len(); i++ {
|
||||
if !equalAny(v1.Index(i), v2.Index(i), prop) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.String:
|
||||
return v1.Interface().(string) == v2.Interface().(string)
|
||||
case reflect.Struct:
|
||||
return equalStruct(v1, v2)
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
return v1.Uint() == v2.Uint()
|
||||
}
|
||||
|
||||
// unknown type, so not a protocol buffer
|
||||
log.Printf("proto: don't know how to compare %v", v1)
|
||||
return false
|
||||
}
|
||||
|
||||
// base is the struct type that the extensions are based on.
|
||||
// x1 and x2 are InternalExtensions.
|
||||
func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
|
||||
em1, _ := x1.extensionsRead()
|
||||
em2, _ := x2.extensionsRead()
|
||||
return equalExtMap(base, em1, em2)
|
||||
}
|
||||
|
||||
func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
|
||||
if len(em1) != len(em2) {
|
||||
return false
|
||||
}
|
||||
|
||||
for extNum, e1 := range em1 {
|
||||
e2, ok := em2[extNum]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
m1 := extensionAsLegacyType(e1.value)
|
||||
m2 := extensionAsLegacyType(e2.value)
|
||||
|
||||
if m1 == nil && m2 == nil {
|
||||
// Both have only encoded form.
|
||||
if bytes.Equal(e1.enc, e2.enc) {
|
||||
continue
|
||||
}
|
||||
// The bytes are different, but the extensions might still be
|
||||
// equal. We need to decode them to compare.
|
||||
}
|
||||
|
||||
if m1 != nil && m2 != nil {
|
||||
// Both are unencoded.
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
|
||||
return false
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// At least one is encoded. To do a semantically correct comparison
|
||||
// we need to unmarshal them first.
|
||||
var desc *ExtensionDesc
|
||||
if m := extensionMaps[base]; m != nil {
|
||||
desc = m[extNum]
|
||||
}
|
||||
if desc == nil {
|
||||
// If both have only encoded form and the bytes are the same,
|
||||
// it is handled above. We get here when the bytes are different.
|
||||
// We don't know how to decode it, so just compare them as byte
|
||||
// slices.
|
||||
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
|
||||
return false
|
||||
}
|
||||
var err error
|
||||
if m1 == nil {
|
||||
m1, err = decodeExtension(e1.enc, desc)
|
||||
}
|
||||
if m2 == nil && err == nil {
|
||||
m2, err = decodeExtension(e2.enc, desc)
|
||||
}
|
||||
if err != nil {
|
||||
// The encoded form is invalid.
|
||||
log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
|
||||
return false
|
||||
}
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|