Mirror of https://github.com/rclone/rclone.git (synced 2024-11-22 16:34:30 +01:00)
vendor: Update AzureSDK version to latest one, fixes failing integration tests

commit 233507bfe0
parent 5b27702b61
go.mod (6 lines changed)

@@ -4,7 +4,7 @@ require (
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669
cloud.google.com/go v0.23.0 // indirect
github.com/Azure/azure-pipeline-go v0.0.0-20180607212504-7571e8eb0876 // indirect
github.com/Azure/azure-storage-blob-go v0.0.0-20180712005634-eaae161d9d5e
github.com/Azure/azure-storage-blob-go v0.0.0-20180906215025-bb46532f68b7
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78
github.com/Unknwon/goconfig v0.0.0-20180308125533-ef1e4c783f8f
github.com/a8m/tree v0.0.0-20180321023834-3cf936ce15d6
@@ -17,6 +17,7 @@ require (
github.com/djherbis/times v1.0.1
github.com/dropbox/dropbox-sdk-go-unofficial v4.1.0+incompatible
github.com/go-ini/ini v1.37.0 // indirect
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7 // indirect
github.com/golang/protobuf v1.1.0 // indirect
github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135 // indirect
github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c // indirect
@@ -25,6 +26,7 @@ require (
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 // indirect
github.com/jtolds/gls v4.2.1+incompatible // indirect
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 // indirect
github.com/kisielk/errcheck v1.1.0 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/mattn/go-runewidth v0.0.2 // indirect
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2
@@ -49,12 +51,14 @@ require (
github.com/xanzy/ssh-agent v0.2.0
github.com/yunify/qingstor-sdk-go v2.2.14+incompatible
golang.org/x/crypto v0.0.0-20180617042118-027cca12c2d6
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7 // indirect
golang.org/x/net v0.0.0-20180611182652-db08ff08e862
golang.org/x/oauth2 v0.0.0-20180603041954-1e0a3fa8ba9a
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect
golang.org/x/sys v0.0.0-20180616030259-6c888cc515d3
golang.org/x/text v0.3.0
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2
golang.org/x/tools v0.0.0-20180904205237-0aa4b8830f48 // indirect
google.golang.org/api v0.0.0-20180614000435-2eea9ba0a3d9
google.golang.org/appengine v1.1.0 // indirect
gopkg.in/ini.v1 v1.38.2 // indirect
go.sum (13 lines changed)

@@ -6,6 +6,8 @@ github.com/Azure/azure-pipeline-go v0.0.0-20180607212504-7571e8eb0876 h1:3c3mGlh
github.com/Azure/azure-pipeline-go v0.0.0-20180607212504-7571e8eb0876/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
github.com/Azure/azure-storage-blob-go v0.0.0-20180712005634-eaae161d9d5e h1:Ix5oKbq0MlolI+T4EPCL9sddfEw6LgRMpC+qx0Kz5/E=
github.com/Azure/azure-storage-blob-go v0.0.0-20180712005634-eaae161d9d5e/go.mod h1:x2mtS6O3mnMEZOJp7d7oldh8IvatBrMfReiyQ+cKgKY=
github.com/Azure/azure-storage-blob-go v0.0.0-20180906215025-bb46532f68b7 h1:/dp3cg/uwEn3nCIvapCpvwMLMCW34Z9U/UOeTroS6p0=
github.com/Azure/azure-storage-blob-go v0.0.0-20180906215025-bb46532f68b7/go.mod h1:x2mtS6O3mnMEZOJp7d7oldh8IvatBrMfReiyQ+cKgKY=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Unknwon/goconfig v0.0.0-20180308125533-ef1e4c783f8f h1:2h0zBHX3qNDltgnb2FiMJWYTcqJVDdUeHQU2/5CTc5w=
@@ -30,6 +32,8 @@ github.com/dropbox/dropbox-sdk-go-unofficial v4.1.0+incompatible h1:ZFvUIiBbGhDY
github.com/dropbox/dropbox-sdk-go-unofficial v4.1.0+incompatible/go.mod h1:lr+LhMM3F6Y3lW1T9j2U5l7QeuWm87N9+PPXo3yH4qY=
github.com/go-ini/ini v1.37.0 h1:/FpMfveJbc7ExTTDgT5nL9Vw+aZdst/c2dOxC931U+M=
github.com/go-ini/ini v1.37.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7 h1:2hRPrmiwPrp3fQX967rNJIhQPtiGXdlQWAxKbKw3VHA=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/protobuf v1.1.0 h1:0iH4Ffd/meGoXqF2lSAhZHt8X+cPgkfn/cb6Cce5Vpc=
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135 h1:zLTLjkaOFEFIOxY5BWLFLwh+cL8vOBW4XJ2aqLE/Tf0=
@@ -46,6 +50,10 @@ github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpR
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 h1:PJPDf8OUfOK1bb/NeTKd4f1QXZItOX389VN3B6qC8ro=
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/kisielk/errcheck v1.1.0 h1:ZqfnKyx9KGpRcW04j5nnPDgRgoXUeLh2YFBeFzphcA0=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/mattn/go-runewidth v0.0.2 h1:UnlwIPBGaTZfPQ6T1IGzPI0EkYAQmT9fAEJ/poFC63o=
@@ -94,6 +102,8 @@ github.com/yunify/qingstor-sdk-go v2.2.14+incompatible h1:FKJYNihc8AXfaSm1R8Rpco
github.com/yunify/qingstor-sdk-go v2.2.14+incompatible/go.mod h1:w6wqLDQ5bBTzxGJ55581UrSwLrsTAsdo9N6yX/8d9RY=
golang.org/x/crypto v0.0.0-20180617042118-027cca12c2d6 h1:Y9MTpro8EV2sz/pZRxSgNsvSfMXLmIHhQO4BGv2My/Q=
golang.org/x/crypto v0.0.0-20180617042118-027cca12c2d6/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7 h1:00BeQWmeaGazuOrq8Q5K5d3/cHaGuFrZzpaHBXfrsUA=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/net v0.0.0-20180611182652-db08ff08e862 h1:JZi6BqOZ+iSgmLWe6llhGrNnEnK+YB/MRkStwnEfbqM=
golang.org/x/net v0.0.0-20180611182652-db08ff08e862/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/oauth2 v0.0.0-20180603041954-1e0a3fa8ba9a h1:1Fy38jwe/QZhQfFQBy6dMj9F/WU1C+jo3/zLNr/WhW4=
@@ -106,6 +116,9 @@ golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180904205237-0aa4b8830f48 h1:PIz+xUHW4G/jqfFWeKhQ96ZV/t2HDsXfWj923rV0bZY=
golang.org/x/tools v0.0.0-20180904205237-0aa4b8830f48/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
google.golang.org/api v0.0.0-20180614000435-2eea9ba0a3d9 h1:HcrS+AkAU4PJZAVkIazs99Rc+swHNe1NnV4Gm3RSDCo=
google.golang.org/api v0.0.0-20180614000435-2eea9ba0a3d9/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs=
vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/access_conditions.go (generated, vendored, 12 lines changed)

@@ -4,8 +4,8 @@ import (
"time"
)

// HTTPAccessConditions identifies standard HTTP access conditions which you optionally set.
type HTTPAccessConditions struct {
// ModifiedAccessConditions identifies standard HTTP access conditions which you optionally set.
type ModifiedAccessConditions struct {
IfModifiedSince time.Time
IfUnmodifiedSince time.Time
IfMatch ETag
@@ -13,7 +13,7 @@ type HTTPAccessConditions struct {
}

// pointers is for internal infrastructure. It returns the fields as pointers.
func (ac HTTPAccessConditions) pointers() (ims *time.Time, ius *time.Time, ime *ETag, inme *ETag) {
func (ac ModifiedAccessConditions) pointers() (ims *time.Time, ius *time.Time, ime *ETag, inme *ETag) {
if !ac.IfModifiedSince.IsZero() {
ims = &ac.IfModifiedSince
}
@@ -31,16 +31,14 @@ func (ac HTTPAccessConditions) pointers() (ims *time.Time, ius *time.Time, ime *

// ContainerAccessConditions identifies container-specific access conditions which you optionally set.
type ContainerAccessConditions struct {
HTTPAccessConditions
ModifiedAccessConditions
LeaseAccessConditions
}

// BlobAccessConditions identifies blob-specific access conditions which you optionally set.
type BlobAccessConditions struct {
HTTPAccessConditions
ModifiedAccessConditions
LeaseAccessConditions
AppendBlobAccessConditions
PageBlobAccessConditions
}

// LeaseAccessConditions identifies lease access conditions for a container or blob which you optionally set.
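For illustration only (not part of the diff): a minimal sketch of how a caller populates access conditions after the HTTPAccessConditions to ModifiedAccessConditions rename shown above, assuming the SDK version pinned in go.mod; the etag parameter and package name are placeholders.

package example

import (
	"time"

	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
)

// exampleAccessConditions builds BlobAccessConditions using the renamed
// embedded field (ModifiedAccessConditions, formerly HTTPAccessConditions).
func exampleAccessConditions(etag azblob.ETag) azblob.BlobAccessConditions {
	return azblob.BlobAccessConditions{
		ModifiedAccessConditions: azblob.ModifiedAccessConditions{
			IfUnmodifiedSince: time.Now().Add(-time.Hour),
			IfMatch:           etag,
		},
		LeaseAccessConditions: azblob.LeaseAccessConditions{}, // zero value = no lease condition
	}
}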
vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/atomicmorph.go (generated, vendored, 26 lines changed)

@@ -5,12 +5,14 @@ import "sync/atomic"
// AtomicMorpherInt32 identifies a method passed to and invoked by the AtomicMorphInt32 function.
// The AtomicMorpher callback is passed a startValue and based on this value it returns
// what the new value should be and the result that AtomicMorph should return to its caller.
type AtomicMorpherInt32 func(startVal int32) (val int32, morphResult interface{})
type atomicMorpherInt32 func(startVal int32) (val int32, morphResult interface{})

const targetAndMorpherMustNotBeNil = "target and morpher must not be nil"

// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function.
func AtomicMorphInt32(target *int32, morpher AtomicMorpherInt32) interface{} {
func atomicMorphInt32(target *int32, morpher atomicMorpherInt32) interface{} {
if target == nil || morpher == nil {
panic("target and morpher mut not be nil")
panic(targetAndMorpherMustNotBeNil)
}
for {
currentVal := atomic.LoadInt32(target)
@@ -24,12 +26,12 @@ func AtomicMorphInt32(target *int32, morpher AtomicMorpherInt32) interface{} {
// AtomicMorpherUint32 identifies a method passed to and invoked by the AtomicMorph function.
// The AtomicMorpher callback is passed a startValue and based on this value it returns
// what the new value should be and the result that AtomicMorph should return to its caller.
type AtomicMorpherUint32 func(startVal uint32) (val uint32, morphResult interface{})
type atomicMorpherUint32 func(startVal uint32) (val uint32, morphResult interface{})

// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function.
func AtomicMorphUint32(target *uint32, morpher AtomicMorpherUint32) interface{} {
func atomicMorphUint32(target *uint32, morpher atomicMorpherUint32) interface{} {
if target == nil || morpher == nil {
panic("target and morpher mut not be nil")
panic(targetAndMorpherMustNotBeNil)
}
for {
currentVal := atomic.LoadUint32(target)
@@ -43,12 +45,12 @@ func AtomicMorphUint32(target *uint32, morpher AtomicMorpherUint32) interface{}
// AtomicMorpherUint64 identifies a method passed to and invoked by the AtomicMorphUint64 function.
// The AtomicMorpher callback is passed a startValue and based on this value it returns
// what the new value should be and the result that AtomicMorph should return to its caller.
type AtomicMorpherInt64 func(startVal int64) (val int64, morphResult interface{})
type atomicMorpherInt64 func(startVal int64) (val int64, morphResult interface{})

// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function.
func AtomicMorphInt64(target *int64, morpher AtomicMorpherInt64) interface{} {
func atomicMorphInt64(target *int64, morpher atomicMorpherInt64) interface{} {
if target == nil || morpher == nil {
panic("target and morpher mut not be nil")
panic(targetAndMorpherMustNotBeNil)
}
for {
currentVal := atomic.LoadInt64(target)
@@ -62,12 +64,12 @@ func AtomicMorphInt64(target *int64, morpher AtomicMorpherInt64) interface{} {
// AtomicMorpherUint64 identifies a method passed to and invoked by the AtomicMorphUint64 function.
// The AtomicMorpher callback is passed a startValue and based on this value it returns
// what the new value should be and the result that AtomicMorph should return to its caller.
type AtomicMorpherUint64 func(startVal uint64) (val uint64, morphResult interface{})
type atomicMorpherUint64 func(startVal uint64) (val uint64, morphResult interface{})

// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function.
func AtomicMorphUint64(target *uint64, morpher AtomicMorpherUint64) interface{} {
func atomicMorphUint64(target *uint64, morpher atomicMorpherUint64) interface{} {
if target == nil || morpher == nil {
panic("target and morpher mut not be nil")
panic(targetAndMorpherMustNotBeNil)
}
for {
currentVal := atomic.LoadUint64(target)
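For illustration only (not part of the diff): the retry loop that the now-unexported atomicMorph helpers implement, sketched as a standalone function. The diff only shows the start of the loop, so the compare-and-swap step below is an assumption about the rest of the implementation.

package example

import "sync/atomic"

// morphUint32 applies morpher to the current value of *target and retries
// until no other goroutine has modified *target in the meantime.
func morphUint32(target *uint32, morpher func(startVal uint32) (val uint32, morphResult interface{})) interface{} {
	for {
		currentVal := atomic.LoadUint32(target)
		desiredVal, morphResult := morpher(currentVal)
		if atomic.CompareAndSwapUint32(target, currentVal, desiredVal) {
			return morphResult
		}
	}
}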
vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/highlevel.go (generated, vendored, 122 lines changed)

@@ -12,10 +12,12 @@ import (
"sync"
"time"

"errors"

"github.com/Azure/azure-pipeline-go/pipeline"
)

// CommonResponseHeaders returns the headers common to all blob REST API responses.
// CommonResponse returns the headers common to all blob REST API responses.
type CommonResponse interface {
// ETag returns the value for header ETag.
ETag() ETag
@@ -42,6 +44,7 @@ type UploadToBlockBlobOptions struct {
BlockSize int64

// Progress is a function that is invoked periodically as bytes are sent to the BlockBlobURL.
// Note that the progress reporting is not always increasing; it can go down when retrying a request.
Progress pipeline.ProgressReceiver

// BlobHTTPHeaders indicates the HTTP headers to be associated with the blob.
@@ -65,12 +68,26 @@ func UploadBufferToBlockBlob(ctx context.Context, b []byte,
if o.BlockSize < 0 || o.BlockSize > BlockBlobMaxUploadBlobBytes {
panic(fmt.Sprintf("BlockSize option must be > 0 and <= %d", BlockBlobMaxUploadBlobBytes))
}
if o.BlockSize == 0 {
o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified
}
size := int64(len(b))

if size <= BlockBlobMaxUploadBlobBytes {
bufferSize := int64(len(b))
if o.BlockSize == 0 {
// If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error
if bufferSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks {
return nil, errors.New("Buffer is too large to upload to a block blob")
}
// If bufferSize <= BlockBlobMaxUploadBlobBytes, then Upload should be used with just 1 I/O request
if bufferSize <= BlockBlobMaxUploadBlobBytes {
o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified
} else {
o.BlockSize = bufferSize / BlockBlobMaxBlocks // buffer / max blocks = block size to use all 50,000 blocks
if o.BlockSize < BlobDefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB
o.BlockSize = BlobDefaultDownloadBlockSize
}
// StageBlock will be called with blockSize blocks and a parallelism of (BufferSize / BlockSize).
}
}

if bufferSize <= BlockBlobMaxUploadBlobBytes {
// If the size can fit in 1 Upload call, do it this way
var body io.ReadSeeker = bytes.NewReader(b)
if o.Progress != nil {
@@ -79,7 +96,7 @@ func UploadBufferToBlockBlob(ctx context.Context, b []byte,
return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions)
}

var numBlocks = uint16(((size - 1) / o.BlockSize) + 1)
var numBlocks = uint16(((bufferSize - 1) / o.BlockSize) + 1)
if numBlocks > BlockBlobMaxBlocks {
panic(fmt.Sprintf("The buffer's size is too big or the BlockSize is too small; the number of blocks must be <= %d", BlockBlobMaxBlocks))
}
@@ -90,7 +107,7 @@ func UploadBufferToBlockBlob(ctx context.Context, b []byte,

err := doBatchTransfer(ctx, batchTransferOptions{
operationName: "UploadBufferToBlockBlob",
transferSize: size,
transferSize: bufferSize,
chunkSize: o.BlockSize,
parallelism: o.Parallelism,
operation: func(offset int64, count int64) error {
@@ -115,7 +132,7 @@ func UploadBufferToBlockBlob(ctx context.Context, b []byte,
// Block IDs are unique values to avoid issue if 2+ clients are uploading blocks
// at the same time causing PutBlockList to get a mix of blocks from all the clients.
blockIDList[blockNum] = base64.StdEncoding.EncodeToString(newUUID().bytes())
_, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions)
_, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions, nil)
return err
},
})
@@ -147,10 +164,9 @@ func UploadFileToBlockBlob(ctx context.Context, file *os.File,

///////////////////////////////////////////////////////////////////////////////

const BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB

// DownloadFromAzureFileOptions identifies options used by the DownloadAzureFileToBuffer and DownloadAzureFileToFile functions.
// DownloadFromBlobOptions identifies options used by the DownloadBlobToBuffer and DownloadBlobToFile functions.
type DownloadFromBlobOptions struct {
// BlockSize specifies the block size to use for each parallel download; the default size is BlobDefaultDownloadBlockSize.
BlockSize int64
@@ -168,10 +184,9 @@ type DownloadFromBlobOptions struct {
RetryReaderOptionsPerBlock RetryReaderOptions
}

// downloadAzureFileToBuffer downloads an Azure file to a buffer with parallel.
// downloadBlobToBuffer downloads an Azure blob to a buffer with parallel.
func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
ac BlobAccessConditions, b []byte, o DownloadFromBlobOptions,
initialDownloadResponse *DownloadResponse) error {
b []byte, o DownloadFromBlobOptions, initialDownloadResponse *DownloadResponse) error {
// Validate parameters, and set defaults.
if o.BlockSize < 0 {
panic("BlockSize option must be >= 0")
@@ -193,7 +208,7 @@ func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, co
count = initialDownloadResponse.ContentLength() - offset // if we have the length, use it
} else {
// If we don't have the length at all, get it
dr, err := blobURL.Download(ctx, 0, CountToEnd, ac, false)
dr, err := blobURL.Download(ctx, 0, CountToEnd, o.AccessConditions, false)
if err != nil {
return err
}
@@ -211,11 +226,11 @@ func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, co

err := doBatchTransfer(ctx, batchTransferOptions{
operationName: "downloadBlobToBuffer",
transferSize: count,
transferSize: count,
chunkSize: o.BlockSize,
parallelism: o.Parallelism,
operation: func(chunkStart int64, count int64) error {
dr, err := blobURL.Download(ctx, chunkStart+ offset, count, ac, false)
dr, err := blobURL.Download(ctx, chunkStart+offset, count, o.AccessConditions, false)
body := dr.Body(o.RetryReaderOptionsPerBlock)
if o.Progress != nil {
rangeProgress := int64(0)
@@ -241,18 +256,18 @@ func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, co
return nil
}

// DownloadAzureFileToBuffer downloads an Azure file to a buffer with parallel.
// DownloadBlobToBuffer downloads an Azure blob to a buffer with parallel.
// Offset and count are optional, pass 0 for both to download the entire blob.
func DownloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
ac BlobAccessConditions, b []byte, o DownloadFromBlobOptions) error {
return downloadBlobToBuffer(ctx, blobURL, offset, count, ac, b, o, nil)
b []byte, o DownloadFromBlobOptions) error {
return downloadBlobToBuffer(ctx, blobURL, offset, count, b, o, nil)
}

// DownloadBlobToFile downloads an Azure file to a local file.
// DownloadBlobToFile downloads an Azure blob to a local file.
// The file would be truncated if the size doesn't match.
// Offset and count are optional, pass 0 for both to download the entire blob.
func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, count int64,
ac BlobAccessConditions, file *os.File, o DownloadFromBlobOptions) error {
file *os.File, o DownloadFromBlobOptions) error {
// 1. Validate parameters.
if file == nil {
panic("file must not be nil")
@@ -262,8 +277,8 @@ func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, coun
var size int64

if count == CountToEnd {
// Try to get Azure file's size
props, err := blobURL.GetProperties(ctx, ac)
// Try to get Azure blob's size
props, err := blobURL.GetProperties(ctx, o.AccessConditions)
if err != nil {
return err
}
@@ -272,7 +287,7 @@ func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, coun
size = count
}

// 3. Compare and try to resize local file's size if it doesn't match Azure file's size.
// 3. Compare and try to resize local file's size if it doesn't match Azure blob's size.
stat, err := file.Stat()
if err != nil {
return err
@@ -284,19 +299,18 @@ func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, coun
}

if size > 0 {
// 4. Set mmap and call DownloadAzureFileToBuffer.
// 4. Set mmap and call downloadBlobToBuffer.
m, err := newMMF(file, true, 0, int(size))
if err != nil {
return err
}
defer m.unmap()
return downloadBlobToBuffer(ctx, blobURL, offset, size, ac, m, o, nil)
return downloadBlobToBuffer(ctx, blobURL, offset, size, m, o, nil)
} else { // if the blob's size is 0, there is no need in downloading it
return nil
}
}

///////////////////////////////////////////////////////////////////////////////

// BatchTransferOptions identifies options used by doBatchTransfer.
@@ -374,7 +388,10 @@ func UploadStreamToBlockBlob(ctx context.Context, reader io.Reader, blockBlobURL
result, err := uploadStream(ctx, reader,
UploadStreamOptions{BufferSize: o.BufferSize, MaxBuffers: o.MaxBuffers},
&uploadStreamToBlockBlobOptions{b: blockBlobURL, o: o, blockIDPrefix: newUUID()})
return result.(CommonResponse), err
if err != nil {
return nil, err
}
return result.(CommonResponse), nil
}

type uploadStreamToBlockBlobOptions struct {
@@ -396,7 +413,7 @@ func (t *uploadStreamToBlockBlobOptions) chunk(ctx context.Context, num uint32,
return nil
}
// Else, upload a staged block...
AtomicMorphUint32(&t.maxBlockNum, func(startVal uint32) (val uint32, morphResult interface{}) {
atomicMorphUint32(&t.maxBlockNum, func(startVal uint32) (val uint32, morphResult interface{}) {
// Atomically remember (in t.numBlocks) the maximum block num we've ever seen
if startVal < num {
return num, nil
@@ -404,7 +421,7 @@ func (t *uploadStreamToBlockBlobOptions) chunk(ctx context.Context, num uint32,
return startVal, nil
})
blockID := newUuidBlockID(t.blockIDPrefix).WithBlockNumber(num).ToBase64()
_, err := t.b.StageBlock(ctx, blockID, bytes.NewReader(buffer), LeaseAccessConditions{})
_, err := t.b.StageBlock(ctx, blockID, bytes.NewReader(buffer), LeaseAccessConditions{}, nil)
return err
}

@@ -416,7 +433,7 @@ func (t *uploadStreamToBlockBlobOptions) end(ctx context.Context) (interface{},
}
// Multiple blocks staged, commit them all now
blockID := newUuidBlockID(t.blockIDPrefix)
blockIDs := make([]string, t.maxBlockNum + 1)
blockIDs := make([]string, t.maxBlockNum+1)
for bn := uint32(0); bn <= t.maxBlockNum; bn++ {
blockIDs[bn] = blockID.WithBlockNumber(bn).ToBase64()
}
@@ -436,7 +453,28 @@ type UploadStreamOptions struct {
BufferSize int
}

type firstErr struct {
lock sync.Mutex
finalError error
}

func (fe *firstErr) set(err error) {
fe.lock.Lock()
if fe.finalError == nil {
fe.finalError = err
}
fe.lock.Unlock()
}

func (fe *firstErr) get() (err error) {
fe.lock.Lock()
err = fe.finalError
fe.lock.Unlock()
return
}

func uploadStream(ctx context.Context, reader io.Reader, o UploadStreamOptions, t iTransfer) (interface{}, error) {
firstErr := firstErr{}
ctx, cancel := context.WithCancel(ctx) // New context so that any failure cancels everything
defer cancel()
wg := sync.WaitGroup{} // Used to know when all outgoing messages have finished processing
@@ -463,9 +501,12 @@ func uploadStream(ctx context.Context, reader io.Reader, o UploadStreamOptions,
err := t.chunk(ctx, outgoingMsg.chunkNum, outgoingMsg.buffer)
wg.Done() // Indicate this buffer was sent
if nil != err {
// NOTE: finalErr could be assigned to multiple times here which is OK,
// some error will be returned.
firstErr.set(err)
cancel()
}
incoming <- outgoingMsg.buffer // The goroutine reading from the stream can use reuse this buffer now
incoming <- outgoingMsg.buffer // The goroutine reading from the stream can reuse this buffer now
}
}()
}
@@ -490,7 +531,7 @@ func uploadStream(ctx context.Context, reader io.Reader, o UploadStreamOptions,
buffer = <-incoming
}
n, err := io.ReadFull(reader, buffer)
if err != nil {
if err != nil { // Less than len(buffer) bytes were read
buffer = buffer[:n] // Make slice match the # of read bytes
}
if len(buffer) > 0 {
@@ -499,12 +540,21 @@ func uploadStream(ctx context.Context, reader io.Reader, o UploadStreamOptions,
outgoing <- OutgoingMsg{chunkNum: c, buffer: buffer}
}
if err != nil { // The reader is done, no more outgoing buffers
if err == io.EOF || err == io.ErrUnexpectedEOF {
err = nil // This function does NOT return an error if io.ReadFull returns io.EOF or io.ErrUnexpectedEOF
} else {
firstErr.set(err)
}
break
}
}
// NOTE: Don't close the incoming channel because the outgoing goroutines post buffers into it when they are done
close(outgoing) // Make all the outgoing goroutines terminate when this channel is empty
wg.Wait() // Wait for all pending outgoing messages to complete
// After all blocks uploaded, commit them to the blob & return the result
return t.end(ctx)
err := firstErr.get()
if err == nil {
// If no error, after all blocks uploaded, commit them to the blob & return the result
return t.end(ctx)
}
return nil, err
}
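For illustration only (not part of the diff): a sketch of calling the reworked high-level download helper, assuming the SDK version pinned in go.mod. The separate BlobAccessConditions parameter is gone from DownloadBlobToBuffer/DownloadBlobToFile; conditional access now travels in DownloadFromBlobOptions.AccessConditions. The block size and parallelism values are placeholders.

package example

import (
	"context"
	"os"

	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
)

func downloadToFile(ctx context.Context, blobURL azblob.BlobURL, f *os.File) error {
	// Old call shape: DownloadBlobToFile(ctx, blobURL, 0, 0, azblob.BlobAccessConditions{}, f, options)
	// New call shape: no access-conditions argument; set options.AccessConditions if needed.
	return azblob.DownloadBlobToFile(ctx, blobURL, 0, azblob.CountToEnd, f,
		azblob.DownloadFromBlobOptions{
			BlockSize:   4 * 1024 * 1024, // 4MB chunks
			Parallelism: 16,              // concurrent range downloads
		})
}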
vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/parsing_urls.go (generated, vendored, 61 lines changed)

@@ -1,6 +1,7 @@
package azblob

import (
"net"
"net/url"
"strings"
)
@@ -14,21 +15,47 @@ const (
// existing URL into its parts by calling NewBlobURLParts(). You construct a URL from parts by calling URL().
// NOTE: Changing any SAS-related field requires computing a new SAS signature.
type BlobURLParts struct {
Scheme string // Ex: "https://"
Host string // Ex: "account.blob.core.windows.net"
ContainerName string // "" if no container
BlobName string // "" if no blob
Snapshot string // "" if not a snapshot
SAS SASQueryParameters
UnparsedParams string
Scheme string // Ex: "https://"
Host string // Ex: "account.blob.core.windows.net", "10.132.141.33", "10.132.141.33:80"
IPEndpointStyleInfo IPEndpointStyleInfo
ContainerName string // "" if no container
BlobName string // "" if no blob
Snapshot string // "" if not a snapshot
SAS SASQueryParameters
UnparsedParams string
}

// IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator.
// Ex: "https://10.132.141.33/accountname/containername"
type IPEndpointStyleInfo struct {
AccountName string // "" if not using IP endpoint style
}

// isIPEndpointStyle checkes if URL's host is IP, in this case the storage account endpoint will be composed as:
// http(s)://IP(:port)/storageaccount/container/...
// As url's Host property, host could be both host or host:port
func isIPEndpointStyle(host string) bool {
if host == "" {
return false
}
if h, _, err := net.SplitHostPort(host); err == nil {
host = h
}
// For IPv6, there could be case where SplitHostPort fails for cannot finding port.
// In this case, eliminate the '[' and ']' in the URL.
// For details about IPv6 URL, please refer to https://tools.ietf.org/html/rfc2732
if host[0] == '[' && host[len(host)-1] == ']' {
host = host[1 : len(host)-1]
}
return net.ParseIP(host) != nil
}

// NewBlobURLParts parses a URL initializing BlobURLParts' fields including any SAS-related & snapshot query parameters. Any other
// query parameters remain in the UnparsedParams field. This method overwrites all fields in the BlobURLParts object.
func NewBlobURLParts(u url.URL) BlobURLParts {
up := BlobURLParts{
Scheme: u.Scheme,
Host: u.Host,
Scheme: u.Scheme,
Host: u.Host,
}

// Find the container & blob names (if any)
@@ -37,10 +64,17 @@ func NewBlobURLParts(u url.URL) BlobURLParts {
if path[0] == '/' {
path = path[1:] // If path starts with a slash, remove it
}
if isIPEndpointStyle(up.Host) {
if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob
up.IPEndpointStyleInfo.AccountName = path
} else {
up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes
path = path[accountEndIndex+1:] // path refers to portion after the account name now (container & blob names)
}
}

// Find the next slash (if it exists)
containerEndIndex := strings.Index(path, "/")
if containerEndIndex == -1 { // Slash not found; path has container name & no blob name
containerEndIndex := strings.Index(path, "/") // Find the next slash (if it exists)
if containerEndIndex == -1 { // Slash not found; path has container name & no blob name
up.ContainerName = path
} else {
up.ContainerName = path[:containerEndIndex] // The container name is the part between the slashes
@@ -77,6 +111,9 @@ func (values caseInsensitiveValues) Get(key string) ([]string, bool) {
// field contains the SAS, snapshot, and unparsed query parameters.
func (up BlobURLParts) URL() url.URL {
path := ""
if isIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" {
path += "/" + up.IPEndpointStyleInfo.AccountName
}
// Concatenate container & blob names (if they exist)
if up.ContainerName != "" {
path += "/" + up.ContainerName
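For illustration only (not part of the diff): a sketch of parsing an IP-endpoint-style (storage emulator) URL with the new IPEndpointStyleInfo support, assuming the SDK version pinned in go.mod; the address, account, container and blob names are placeholders.

package example

import (
	"fmt"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
)

func exampleParseEmulatorURL() {
	u, _ := url.Parse("http://127.0.0.1:10000/devstoreaccount1/mycontainer/dir/blob.txt")
	parts := azblob.NewBlobURLParts(*u)
	// With an IP host, the first path segment is the account name and the
	// container and blob names follow it.
	fmt.Println(parts.IPEndpointStyleInfo.AccountName) // devstoreaccount1
	fmt.Println(parts.ContainerName, parts.BlobName)   // mycontainer dir/blob.txt
}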
vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/sas_service.go (generated, vendored, 1 line changed)

@@ -8,6 +8,7 @@ import (
)

// BlobSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas
type BlobSASSignatureValues struct {
Version string `param:"sv"` // If not specified, this defaults to SASVersion
Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants
vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_append_blob.go (generated, vendored, 20 lines changed)

@@ -45,7 +45,7 @@ func (ab AppendBlobURL) WithSnapshot(snapshot string) AppendBlobURL {
// Create creates a 0-length append blob. Call AppendBlock to append data to an append blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*AppendBlobCreateResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers()
return ab.abClient.Create(ctx, 0, nil,
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
@@ -56,17 +56,23 @@ func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block.
func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac BlobAccessConditions) (*AppendBlobAppendBlockResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendBlobAccessConditions.pointers()
func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac AppendBlobAccessConditions, transactionalMD5 []byte) (*AppendBlobAppendBlockResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendPositionAccessConditions.pointers()
return ab.abClient.AppendBlock(ctx, body, validateSeekableStreamAt0AndGetCount(body), nil,
ac.LeaseAccessConditions.pointers(),
transactionalMD5, ac.LeaseAccessConditions.pointers(),
ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// AppendBlobAccessConditions identifies append blob-specific access conditions which you optionally set.
type AppendBlobAccessConditions struct {
ModifiedAccessConditions
LeaseAccessConditions
AppendPositionAccessConditions
}

// AppendPositionAccessConditions identifies append blob-specific access conditions which you optionally set.
type AppendPositionAccessConditions struct {
// IfAppendPositionEqual ensures that the AppendBlock operation succeeds
// only if the append position is equal to a value.
// IfAppendPositionEqual=0 means no 'IfAppendPositionEqual' header specified.
@@ -83,7 +89,7 @@ type AppendBlobAccessConditions struct {
}

// pointers is for internal infrastructure. It returns the fields as pointers.
func (ac AppendBlobAccessConditions) pointers() (iape *int64, imsltoe *int64) {
func (ac AppendPositionAccessConditions) pointers() (iape *int64, imsltoe *int64) {
if ac.IfAppendPositionEqual < -1 {
panic("IfAppendPositionEqual can't be less than -1")
}
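For illustration only (not part of the diff): a sketch of the new AppendBlock call shape, which now takes AppendBlobAccessConditions plus an optional transactional MD5 of the block body, assuming the SDK version pinned in go.mod; the data and helper name are placeholders.

package example

import (
	"bytes"
	"context"
	"crypto/md5"

	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
)

func appendWithMD5(ctx context.Context, abURL azblob.AppendBlobURL, data []byte) error {
	sum := md5.Sum(data)
	_, err := abURL.AppendBlock(ctx, bytes.NewReader(data),
		azblob.AppendBlobAccessConditions{}, // zero value = no access conditions
		sum[:],                              // transactionalMD5; pass nil to skip the content check
	)
	return err
}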
vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_blob.go (generated, vendored, 32 lines changed)

@@ -64,13 +64,14 @@ func (b BlobURL) ToPageBlobURL() PageBlobURL {
}

// DownloadBlob reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
// Passing azblob.CountToEnd (0) for count will download the blob from the offset to the end.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac BlobAccessConditions, rangeGetContentMD5 bool) (*DownloadResponse, error) {
var xRangeGetContentMD5 *bool
if rangeGetContentMD5 {
xRangeGetContentMD5 = &rangeGetContentMD5
}
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
dr, err := b.blobClient.Download(ctx, nil, nil,
httpRange{offset: offset, count: count}.pointers(),
ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5,
@@ -90,7 +91,7 @@ func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac Blo
// Note that deleting a blob also deletes all its snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
func (b BlobURL) Delete(ctx context.Context, deleteOptions DeleteSnapshotsOptionType, ac BlobAccessConditions) (*BlobDeleteResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return b.blobClient.Delete(ctx, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
@@ -114,7 +115,7 @@ func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType) (*BlobSetTier
// GetBlobProperties returns the blob's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions) (*BlobGetPropertiesResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return b.blobClient.GetProperties(ctx, nil, nil, ac.LeaseAccessConditions.pointers(),
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
@@ -122,7 +123,7 @@ func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions) (*B
// SetBlobHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobAccessConditions) (*BlobSetHTTPHeadersResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return b.blobClient.SetHTTPHeaders(ctx, nil,
&h.CacheControl, &h.ContentType, h.ContentMD5, &h.ContentEncoding, &h.ContentLanguage,
ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
@@ -132,7 +133,7 @@ func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobA
// SetBlobMetadata changes a blob's metadata.
// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobSetMetadataResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return b.blobClient.SetMetadata(ctx, nil, metadata, ac.LeaseAccessConditions.pointers(),
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
@@ -143,14 +144,14 @@ func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobA
// CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter
// because checking this would be a performance hit for a VERY unusual path and I don't think the common case should suffer this
// performance hit.
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return b.blobClient.CreateSnapshot(ctx, nil, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, ac.LeaseAccessConditions.pointers(), nil)
}

// AcquireLease acquires a lease on the blob for write and delete operations. The lease duration must be between
// 15 to 60 seconds, or infinite (-1).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac HTTPAccessConditions) (*BlobAcquireLeaseResponse, error) {
func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*BlobAcquireLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.AcquireLease(ctx, nil, &duration, &proposedID,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
@@ -158,7 +159,7 @@ func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration i

// RenewLease renews the blob's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*BlobRenewLeaseResponse, error) {
func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobRenewLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.RenewLease(ctx, leaseID, nil,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
@@ -166,7 +167,7 @@ func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac HTTPAccessCo

// ReleaseLease releases the blob's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*BlobReleaseLeaseResponse, error) {
func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobReleaseLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.ReleaseLease(ctx, leaseID, nil,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
@@ -175,7 +176,7 @@ func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac HTTPAccess
// BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1)
// constant to break a fixed-duration lease when it expires or an infinite lease immediately.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac HTTPAccessConditions) (*BlobBreakLeaseResponse, error) {
func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac ModifiedAccessConditions) (*BlobBreakLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.BreakLease(ctx, nil, leasePeriodPointer(breakPeriodInSeconds),
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
@@ -183,7 +184,7 @@ func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac

// ChangeLease changes the blob's lease ID.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac HTTPAccessConditions) (*BlobChangeLeaseResponse, error) {
func (b BlobURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*BlobChangeLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.ChangeLease(ctx, leaseID, proposedID,
nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
@@ -201,10 +202,9 @@ func leasePeriodPointer(period int32) (p *int32) {

// StartCopyFromURL copies the data at the source URL to a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac BlobAccessConditions, dstac BlobAccessConditions) (*BlobStartCopyFromURLResponse, error) {
srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.HTTPAccessConditions.pointers()
dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.HTTPAccessConditions.pointers()
srcLeaseID := srcac.LeaseAccessConditions.pointers()
func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions) (*BlobStartCopyFromURLResponse, error) {
srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers()
dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers()
dstLeaseID := dstac.LeaseAccessConditions.pointers()

return b.blobClient.StartCopyFromURL(ctx, source.String(), nil, metadata,
@@ -212,7 +212,7 @@ func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata
srcIfMatchETag, srcIfNoneMatchETag,
dstIfModifiedSince, dstIfUnmodifiedSince,
dstIfMatchETag, dstIfNoneMatchETag,
dstLeaseID, srcLeaseID, nil)
dstLeaseID, nil)
}

// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
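For illustration only (not part of the diff): a sketch of lease and copy call sites after the switch from HTTPAccessConditions to ModifiedAccessConditions, assuming the SDK version pinned in go.mod; the proposed lease ID, duration and URLs are placeholders, and whether the service accepts that particular ID is not checked here.

package example

import (
	"context"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
)

func leaseAndCopy(ctx context.Context, dst azblob.BlobURL, src url.URL) error {
	// AcquireLease and the other lease helpers now take ModifiedAccessConditions
	// (zero value = unconditional).
	leaseID := "00000000-0000-0000-0000-000000000000" // placeholder proposed lease ID
	if _, err := dst.AcquireLease(ctx, leaseID, 60, azblob.ModifiedAccessConditions{}); err != nil {
		return err
	}
	defer dst.ReleaseLease(ctx, leaseID, azblob.ModifiedAccessConditions{})

	// StartCopyFromURL: the source conditions are now ModifiedAccessConditions,
	// and the source lease ID argument was dropped from the generated client call.
	_, err := dst.StartCopyFromURL(ctx, src, azblob.Metadata{},
		azblob.ModifiedAccessConditions{}, azblob.BlobAccessConditions{})
	return err
}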
vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_block_blob.go (generated, vendored, 13 lines changed)

@@ -12,7 +12,7 @@ import (
)

const (
// BlockBlobMaxPutBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload.
// BlockBlobMaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload.
BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB

// BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock.
@@ -59,7 +59,7 @@ func (bb BlockBlobURL) WithSnapshot(snapshot string) BlockBlobURL {
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*BlockBlobUploadResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return bb.bbClient.Upload(ctx, body, validateSeekableStreamAt0AndGetCount(body), nil,
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(),
@@ -70,16 +70,15 @@ func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTT
// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions) (*BlockBlobStageBlockResponse, error) {
return bb.bbClient.StageBlock(ctx, base64BlockID, validateSeekableStreamAt0AndGetCount(body), body, nil, ac.pointers(), nil)
func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions, transactionalMD5 []byte) (*BlockBlobStageBlockResponse, error) {
return bb.bbClient.StageBlock(ctx, base64BlockID, validateSeekableStreamAt0AndGetCount(body), body, transactionalMD5, nil, ac.pointers(), nil)
}

// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// If count is CountToEnd (0), then data is read from specified offset to the end.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL url.URL, offset int64, count int64, ac LeaseAccessConditions) (*BlockBlobStageBlockFromURLResponse, error) {
sourceURLStr := sourceURL.String()
return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, &sourceURLStr, httpRange{offset: offset, count: count}.pointers(), nil, nil, ac.pointers(), nil)
return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, sourceURL.String(), httpRange{offset: offset, count: count}.pointers(), nil, nil, ac.pointers(), nil)
}

// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob.
@@ -90,7 +89,7 @@ func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID stri
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
func (bb BlockBlobURL) CommitBlockList(ctx context.Context, base64BlockIDs []string, h BlobHTTPHeaders,
metadata Metadata, ac BlobAccessConditions) (*BlockBlobCommitBlockListResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return bb.bbClient.CommitBlockList(ctx, BlockLookupList{Latest: base64BlockIDs}, nil,
&h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
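For illustration only (not part of the diff): a sketch of StageBlock with the new transactionalMD5 parameter followed by CommitBlockList, assuming the SDK version pinned in go.mod; the block ID and data are placeholders.

package example

import (
	"bytes"
	"context"
	"encoding/base64"

	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
)

func stageAndCommit(ctx context.Context, bb azblob.BlockBlobURL, data []byte) error {
	blockID := base64.StdEncoding.EncodeToString([]byte("block-0000"))
	// Pass nil for transactionalMD5 to skip the per-block content check.
	if _, err := bb.StageBlock(ctx, blockID, bytes.NewReader(data),
		azblob.LeaseAccessConditions{}, nil); err != nil {
		return err
	}
	_, err := bb.CommitBlockList(ctx, []string{blockID},
		azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
	return err
}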
16
vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_container.go
generated
vendored
16
vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_container.go
generated
vendored
@ -92,7 +92,7 @@ func (c ContainerURL) Delete(ctx context.Context, ac ContainerAccessConditions)
|
||||
panic("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
|
||||
}
|
||||
|
||||
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.HTTPAccessConditions.pointers()
|
||||
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers()
|
||||
return c.client.Delete(ctx, nil, ac.LeaseAccessConditions.pointers(),
|
||||
ifModifiedSince, ifUnmodifiedSince, nil)
|
||||
}
|
||||
@ -111,7 +111,7 @@ func (c ContainerURL) SetMetadata(ctx context.Context, metadata Metadata, ac Con
|
||||
if !ac.IfUnmodifiedSince.IsZero() || ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
|
||||
panic("the IfUnmodifiedSince, IfMatch, and IfNoneMatch must have their default values because they are ignored by the blob service")
|
||||
}
|
||||
ifModifiedSince, _, _, _ := ac.HTTPAccessConditions.pointers()
|
||||
ifModifiedSince, _, _, _ := ac.ModifiedAccessConditions.pointers()
|
||||
return c.client.SetMetadata(ctx, nil, ac.LeaseAccessConditions.pointers(), metadata, ifModifiedSince, nil)
|
||||
}
|
||||
|
||||
@ -183,14 +183,14 @@ func (c ContainerURL) SetAccessPolicy(ctx context.Context, accessType PublicAcce
|
||||
if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
|
||||
panic("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
|
||||
}
|
||||
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.HTTPAccessConditions.pointers()
|
||||
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers()
|
||||
return c.client.SetAccessPolicy(ctx, si, nil, ac.LeaseAccessConditions.pointers(),
|
||||
accessType, ifModifiedSince, ifUnmodifiedSince, nil)
|
||||
}
|
||||
|
||||
// AcquireLease acquires a lease on the container for delete operations. The lease duration must be between 15 to 60 seconds, or infinite (-1).
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
|
||||
func (c ContainerURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac HTTPAccessConditions) (*ContainerAcquireLeaseResponse, error) {
|
||||
func (c ContainerURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*ContainerAcquireLeaseResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
|
||||
return c.client.AcquireLease(ctx, nil, &duration, &proposedID,
|
||||
ifModifiedSince, ifUnmodifiedSince, nil)
|
||||
@ -198,28 +198,28 @@ func (c ContainerURL) AcquireLease(ctx context.Context, proposedID string, durat
|
||||
|
||||
// RenewLease renews the container's previously-acquired lease.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
|
||||
func (c ContainerURL) RenewLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*ContainerRenewLeaseResponse, error) {
|
||||
func (c ContainerURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerRenewLeaseResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
|
||||
return c.client.RenewLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
|
||||
}
|
||||
|
||||
// ReleaseLease releases the container's previously-acquired lease.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
|
||||
func (c ContainerURL) ReleaseLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*ContainerReleaseLeaseResponse, error) {
|
||||
func (c ContainerURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerReleaseLeaseResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
|
||||
return c.client.ReleaseLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
|
||||
}
|
||||
|
||||
// BreakLease breaks the container's previously-acquired lease (if it exists).
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
|
||||
func (c ContainerURL) BreakLease(ctx context.Context, period int32, ac HTTPAccessConditions) (*ContainerBreakLeaseResponse, error) {
|
||||
func (c ContainerURL) BreakLease(ctx context.Context, period int32, ac ModifiedAccessConditions) (*ContainerBreakLeaseResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
|
||||
return c.client.BreakLease(ctx, nil, leasePeriodPointer(period), ifModifiedSince, ifUnmodifiedSince, nil)
|
||||
}
|
||||
|
||||
// ChangeLease changes the container's lease ID.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
|
||||
func (c ContainerURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac HTTPAccessConditions) (*ContainerChangeLeaseResponse, error) {
|
||||
func (c ContainerURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*ContainerChangeLeaseResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
|
||||
return c.client.ChangeLease(ctx, leaseID, proposedID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
|
||||
}
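The container lease helpers now take ModifiedAccessConditions directly instead of HTTPAccessConditions. A sketch of acquiring and releasing a lease under the new signatures; containerURL and proposedLeaseID are assumed:

    // a 60-second lease with no modified-since conditions
    _, err := containerURL.AcquireLease(ctx, proposedLeaseID, 60, azblob.ModifiedAccessConditions{})
    if err != nil {
        return err
    }
    // ... work on the container ...
    _, err = containerURL.ReleaseLease(ctx, proposedLeaseID, azblob.ModifiedAccessConditions{})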
42
vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_page_blob.go
generated
vendored
@ -47,28 +47,28 @@ func (pb PageBlobURL) WithSnapshot(snapshot string) PageBlobURL {
|
||||
return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline())
|
||||
}
|
||||
|
||||
// CreatePageBlob creates a page blob of the specified length. Call PutPage to upload data data to a page blob.
|
||||
// Create creates a page blob of the specified length. Call PutPage to upload data data to a page blob.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
|
||||
func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*PageBlobCreateResponse, error) {
|
||||
if sequenceNumber < 0 {
|
||||
panic("sequenceNumber must be greater than or equal to 0")
|
||||
}
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
|
||||
return pb.pbClient.Create(ctx, 0, nil,
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
return pb.pbClient.Create(ctx, 0, size, nil,
|
||||
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, &h.CacheControl,
|
||||
metadata, ac.LeaseAccessConditions.pointers(),
|
||||
&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, &size, &sequenceNumber, nil)
|
||||
&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, &sequenceNumber, nil)
|
||||
}
|
||||
|
||||
// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes.
|
||||
// This method panics if the stream is not at position 0.
|
||||
// Note that the http client closes the body stream after the request is sent to the service.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
|
||||
func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.ReadSeeker, ac BlobAccessConditions) (*PageBlobUploadPagesResponse, error) {
|
||||
func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.ReadSeeker, ac PageBlobAccessConditions, transactionalMD5 []byte) (*PageBlobUploadPagesResponse, error) {
|
||||
count := validateSeekableStreamAt0AndGetCount(body)
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
|
||||
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.PageBlobAccessConditions.pointers()
|
||||
return pb.pbClient.UploadPages(ctx, body, count, nil,
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
|
||||
return pb.pbClient.UploadPages(ctx, body, count, transactionalMD5, nil,
|
||||
PageRange{Start: offset, End: offset + count - 1}.pointers(),
|
||||
ac.LeaseAccessConditions.pointers(),
|
||||
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
|
||||
@ -77,9 +77,9 @@ func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.Rea
|
||||
|
||||
// ClearPages frees the specified pages from the page blob.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
|
||||
func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64, ac BlobAccessConditions) (*PageBlobClearPagesResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
|
||||
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.PageBlobAccessConditions.pointers()
|
||||
func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64, ac PageBlobAccessConditions) (*PageBlobClearPagesResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
|
||||
return pb.pbClient.ClearPages(ctx, 0, nil,
|
||||
PageRange{Start: offset, End: offset + count - 1}.pointers(),
|
||||
ac.LeaseAccessConditions.pointers(),
|
||||
@ -90,7 +90,7 @@ func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64,
|
||||
// GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
|
||||
func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int64, ac BlobAccessConditions) (*PageList, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
return pb.pbClient.GetPageRanges(ctx, nil, nil,
|
||||
httpRange{offset: offset, count: count}.pointers(),
|
||||
ac.LeaseAccessConditions.pointers(),
|
||||
@ -100,7 +100,7 @@ func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int
|
||||
// GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
|
||||
func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot string, ac BlobAccessConditions) (*PageList, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, &prevSnapshot,
|
||||
httpRange{offset: offset, count: count}.pointers(),
|
||||
ac.LeaseAccessConditions.pointers(),
|
||||
@ -114,7 +114,7 @@ func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessCondi
|
||||
if size%PageBlobPageBytes != 0 {
|
||||
panic("Size must be a multiple of PageBlobPageBytes (512)")
|
||||
}
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
return pb.pbClient.Resize(ctx, size, nil, ac.LeaseAccessConditions.pointers(),
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
|
||||
@ -129,7 +129,7 @@ func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceN
|
||||
if action == SequenceNumberActionIncrement {
|
||||
sn = nil
|
||||
}
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.HTTPAccessConditions.pointers()
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers()
|
||||
return pb.pbClient.UpdateSequenceNumber(ctx, action, nil,
|
||||
ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch,
|
||||
sn, nil)
|
||||
@ -141,7 +141,7 @@ func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceN
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and
|
||||
// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots.
|
||||
func (pb PageBlobURL) StartCopyIncremental(ctx context.Context, source url.URL, snapshot string, ac BlobAccessConditions) (*PageBlobCopyIncrementalResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
qp := source.Query()
|
||||
qp.Set("snapshot", snapshot)
|
||||
source.RawQuery = qp.Encode()
|
||||
@ -170,8 +170,14 @@ func (pr PageRange) pointers() *string {
|
||||
return &asString
|
||||
}
|
||||

// PageBlobAccessConditions identifies page blob-specific access conditions which you optionally set.
type PageBlobAccessConditions struct {
ModifiedAccessConditions
LeaseAccessConditions
SequenceNumberAccessConditions
}

// SequenceNumberAccessConditions identifies page blob-specific access conditions which you optionally set.
type SequenceNumberAccessConditions struct {
// IfSequenceNumberLessThan ensures that the page blob operation succeeds
// only if the blob's sequence number is less than a value.
// IfSequenceNumberLessThan=0 means no 'IfSequenceNumberLessThan' header specified.
@ -195,7 +201,7 @@ type PageBlobAccessConditions struct {
}

// pointers is for internal infrastructure. It returns the fields as pointers.
func (ac PageBlobAccessConditions) pointers() (snltoe *int64, snlt *int64, sne *int64) {
func (ac SequenceNumberAccessConditions) pointers() (snltoe *int64, snlt *int64, sne *int64) {
if ac.IfSequenceNumberLessThan < -1 {
panic("Ifsequencenumberlessthan can't be less than -1")
}
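Taken together, the page-blob changes mean Create now takes the blob size as its second argument, while UploadPages and ClearPages take the new PageBlobAccessConditions, with UploadPages also accepting an optional transactional MD5. A sketch, assuming pageBlobURL and a 512-byte-aligned page buffer named page:

    size := int64(4 * azblob.PageBlobPageBytes)
    _, err := pageBlobURL.Create(ctx, size, 0, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
    if err != nil {
        return err
    }
    // nil transactional MD5: the service skips body verification for this page
    _, err = pageBlobURL.UploadPages(ctx, 0, bytes.NewReader(page), azblob.PageBlobAccessConditions{}, nil)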
6
vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/url_service.go
generated
vendored
@ -81,7 +81,7 @@ func appendToURLPath(u url.URL, name string) url.URL {
// After getting a segment, process it, and then call ListContainersFlatSegment again (passing the the
// previously-returned Marker) to get the next segment. For more information, see
// https://docs.microsoft.com/rest/api/storageservices/list-containers2.
func (s ServiceURL) ListContainersSegment(ctx context.Context, marker Marker, o ListContainersSegmentOptions) (*ListContainersResponse, error) {
func (s ServiceURL) ListContainersSegment(ctx context.Context, marker Marker, o ListContainersSegmentOptions) (*ListContainersSegmentResponse, error) {
prefix, include, maxResults := o.pointers()
return s.client.ListContainersSegment(ctx, prefix, marker.val, maxResults, include, nil, nil)
}
@ -89,8 +89,8 @@ func (s ServiceURL) ListContainersSegment(ctx context.Context, marker Marker, o
// ListContainersOptions defines options available when calling ListContainers.
type ListContainersSegmentOptions struct {
Detail ListContainersDetail // No IncludeType header is produced if ""
Prefix string // No Prefix header is produced if ""
MaxResults int32 // 0 means unspecified
Prefix string // No Prefix header is produced if ""
MaxResults int32 // 0 means unspecified
// TODO: update swagger to generate this type?
}
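For callers this is only a type rename: the segment call now returns *ListContainersSegmentResponse. A sketch of listing with a prefix; serviceURL is assumed, and the option fields are the ones shown in the struct above:

    resp, err := serviceURL.ListContainersSegment(ctx, azblob.Marker{}, azblob.ListContainersSegmentOptions{
        Prefix:     "backup-",
        MaxResults: 100,
    })
    if err != nil {
        return err
    }
    // resp is now a *azblob.ListContainersSegmentResponse rather than a *ListContainersResponse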
@ -17,12 +17,12 @@ import (

// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
// storage account's name and either its primary or secondary key.
func NewSharedKeyCredential(accountName, accountKey string) *SharedKeyCredential {
func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) {
bytes, err := base64.StdEncoding.DecodeString(accountKey)
if err != nil {
panic(err)
return &SharedKeyCredential{}, err
}
return &SharedKeyCredential{accountName: accountName, accountKey: bytes}
return &SharedKeyCredential{accountName: accountName, accountKey: bytes}, nil
}

// SharedKeyCredential contains an account's name and its primary or secondary key.
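This is the change that ripples out to callers: the constructor no longer panics on a malformed key, it returns the error. A sketch of the new call site, assuming accountName and accountKey come from configuration; the NewPipeline call is the usual next step in this SDK:

    credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
    if err != nil {
        return fmt.Errorf("failed to parse storage account key: %v", err)
    }
    p := azblob.NewPipeline(credential, azblob.PipelineOptions{})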
29
vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_credential_token.go
generated
vendored
@ -11,6 +11,10 @@ import (
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// TokenRefresher represents a callback method that you write; this method is called periodically
|
||||
// so you can refresh the token credential's value.
|
||||
type TokenRefresher func(credential TokenCredential) time.Duration
|
||||
|
||||
// TokenCredential represents a token credential (which is also a pipeline.Factory).
|
||||
type TokenCredential interface {
|
||||
Credential
|
||||
@ -20,12 +24,15 @@ type TokenCredential interface {
|
||||
|
||||
// NewTokenCredential creates a token credential for use with role-based access control (RBAC) access to Azure Storage
|
||||
// resources. You initialize the TokenCredential with an initial token value. If you pass a non-nil value for
|
||||
// tokenRefresher, then the function you pass will be called immediately (so it can refresh and change the
|
||||
// TokenCredential's token value by calling SetToken; your tokenRefresher function must return a time.Duration
|
||||
// tokenRefresher, then the function you pass will be called immediately so it can refresh and change the
|
||||
// TokenCredential's token value by calling SetToken. Your tokenRefresher function must return a time.Duration
|
||||
// indicating how long the TokenCredential object should wait before calling your tokenRefresher function again.
|
||||
func NewTokenCredential(initialToken string, tokenRefresher func(credential TokenCredential) time.Duration) TokenCredential {
|
||||
// If your tokenRefresher callback fails to refresh the token, you can return a duration of 0 to stop your
|
||||
// TokenCredential object from ever invoking tokenRefresher again. Also, oen way to deal with failing to refresh a
|
||||
// token is to cancel a context.Context object used by requests that have the TokenCredential object in their pipeline.
|
||||
func NewTokenCredential(initialToken string, tokenRefresher TokenRefresher) TokenCredential {
|
||||
tc := &tokenCredential{}
|
||||
tc.SetToken(initialToken) // We dont' set it above to guarantee atomicity
|
||||
tc.SetToken(initialToken) // We don't set it above to guarantee atomicity
|
||||
if tokenRefresher == nil {
|
||||
return tc // If no callback specified, return the simple tokenCredential
|
||||
}
|
||||
@ -68,7 +75,7 @@ type tokenCredential struct {
|
||||
|
||||
// The members below are only used if the user specified a tokenRefresher callback function.
|
||||
timer *time.Timer
|
||||
tokenRefresher func(c TokenCredential) time.Duration
|
||||
tokenRefresher TokenRefresher
|
||||
lock sync.Mutex
|
||||
stopped bool
|
||||
}
|
||||
@ -84,7 +91,7 @@ func (f *tokenCredential) SetToken(token string) { f.token.Store(token) }
|
||||
|
||||
// startRefresh calls refresh which immediately calls tokenRefresher
|
||||
// and then starts a timer to call tokenRefresher in the future.
|
||||
func (f *tokenCredential) startRefresh(tokenRefresher func(c TokenCredential) time.Duration) {
|
||||
func (f *tokenCredential) startRefresh(tokenRefresher TokenRefresher) {
|
||||
f.tokenRefresher = tokenRefresher
|
||||
f.stopped = false // In case user calls StartRefresh, StopRefresh, & then StartRefresh again
|
||||
f.refresh()
|
||||
@ -95,11 +102,13 @@ func (f *tokenCredential) startRefresh(tokenRefresher func(c TokenCredential) ti
|
||||
// in order to refresh the token again in the future.
|
||||
func (f *tokenCredential) refresh() {
|
||||
d := f.tokenRefresher(f) // Invoke the user's refresh callback outside of the lock
|
||||
f.lock.Lock()
|
||||
if !f.stopped {
|
||||
f.timer = time.AfterFunc(d, f.refresh)
|
||||
if d > 0 { // If duration is 0 or negative, refresher wants to not be called again
|
||||
f.lock.Lock()
|
||||
if !f.stopped {
|
||||
f.timer = time.AfterFunc(d, f.refresh)
|
||||
}
|
||||
f.lock.Unlock()
|
||||
}
|
||||
f.lock.Unlock()
|
||||
}
|
||||
|
||||
// stopRefresh stops any pending timer and sets stopped field to true to prevent
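The named TokenRefresher type and the zero-duration escape hatch change how refresh loops are written. A sketch of a refresher that stops itself on failure; fetchOAuthToken is a hypothetical helper standing in for whatever OAuth flow the caller uses:

    var refresher azblob.TokenRefresher = func(tc azblob.TokenCredential) time.Duration {
        token, expiresIn, err := fetchOAuthToken() // hypothetical: returns (string, time.Duration, error)
        if err != nil {
            return 0 // 0 (or negative) stops the TokenCredential from calling the refresher again
        }
        tc.SetToken(token)
        return expiresIn - time.Minute // refresh slightly before expiry
    }
    tokenCredential := azblob.NewTokenCredential("", refresher)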
34
vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_policy_request_log.go
generated
vendored
@ -109,6 +109,7 @@ func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory {
|
||||
})
|
||||
}
|
||||
|
||||
// redactSigQueryParam redacts the 'sig' query parameter in URL's raw query to protect secret.
|
||||
func redactSigQueryParam(rawQuery string) (bool, string) {
|
||||
rawQuery = strings.ToLower(rawQuery) // lowercase the string so we can look for ?sig= and &sig=
|
||||
sigFound := strings.Contains(rawQuery, "?sig=")
|
||||
@ -135,7 +136,8 @@ func prepareRequestForLogging(request pipeline.Request) *http.Request {
|
||||
req = request.Copy()
|
||||
req.Request.URL.RawQuery = rawQuery
|
||||
}
|
||||
return req.Request
|
||||
|
||||
return prepareRequestForServiceLogging(req)
|
||||
}
|
||||
|
||||
func stack() []byte {
|
||||
@ -148,3 +150,33 @@ func stack() []byte {
|
||||
buf = make([]byte, 2*len(buf))
|
||||
}
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////
|
||||
// Redact phase useful for blob and file service only. For other services,
|
||||
// this method can directly return request.Request.
|
||||
///////////////////////////////////////////////////////////////////////////////////////
|
||||
func prepareRequestForServiceLogging(request pipeline.Request) *http.Request {
|
||||
req := request
|
||||
if exist, key := doesHeaderExistCaseInsensitive(req.Header, xMsCopySourceHeader); exist {
|
||||
req = request.Copy()
|
||||
url, err := url.Parse(req.Header.Get(key))
|
||||
if err == nil {
|
||||
if sigFound, rawQuery := redactSigQueryParam(url.RawQuery); sigFound {
|
||||
url.RawQuery = rawQuery
|
||||
req.Header.Set(xMsCopySourceHeader, url.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
return req.Request
|
||||
}
|
||||
|
||||
const xMsCopySourceHeader = "x-ms-copy-source"
|
||||
|
||||
func doesHeaderExistCaseInsensitive(header http.Header, key string) (bool, string) {
|
||||
for keyInHeader := range header {
|
||||
if strings.EqualFold(keyInHeader, key) {
|
||||
return true, keyInHeader
|
||||
}
|
||||
}
|
||||
return false, ""
|
||||
}
|
||||
|
118
vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_policy_retry.go
generated
vendored
@ -2,15 +2,16 @@ package azblob
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
)
|
||||
|
||||
// RetryPolicy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
|
||||
@ -57,11 +58,11 @@ type RetryOptions struct {
|
||||
// If RetryReadsFromSecondaryHost is "" (the default) then operations are not retried against another host.
|
||||
// NOTE: Before setting this field, make sure you understand the issues around reading stale & potentially-inconsistent
|
||||
// data at this webpage: https://docs.microsoft.com/en-us/azure/storage/common/storage-designing-ha-apps-with-ragrs
|
||||
RetryReadsFromSecondaryHost string // Comment this our for non-Blob SDKs
|
||||
RetryReadsFromSecondaryHost string // Comment this our for non-Blob SDKs
|
||||
}
|
||||
|
||||
func (o RetryOptions) retryReadsFromSecondaryHost() string {
|
||||
return o.RetryReadsFromSecondaryHost // This is for the Blob SDK only
|
||||
return o.RetryReadsFromSecondaryHost // This is for the Blob SDK only
|
||||
//return "" // This is for non-blob SDKs
|
||||
}
|
||||
|
||||
@ -221,9 +222,27 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
|
||||
considerSecondary = false
|
||||
action = "Retry: Secondary URL returned 404"
|
||||
case err != nil:
|
||||
// NOTE: Protocol Responder returns non-nil if REST API returns invalid status code for the invoked operation
|
||||
if netErr, ok := err.(net.Error); ok && (netErr.Temporary() || netErr.Timeout()) {
|
||||
action = "Retry: net.Error and Temporary() or Timeout()"
|
||||
// NOTE: Protocol Responder returns non-nil if REST API returns invalid status code for the invoked operation.
|
||||
// Use ServiceCode to verify if the error is related to storage service-side,
|
||||
// ServiceCode is set only when error related to storage service happened.
|
||||
if stErr, ok := err.(StorageError); ok {
|
||||
if stErr.Temporary() {
|
||||
action = "Retry: StorageError with error service code and Temporary()"
|
||||
} else if stErr.Response() != nil && isSuccessStatusCode(stErr.Response()) { // TODO: This is a temporarily work around, remove this after protocol layer fix the issue that net.Error is wrapped as storageError
|
||||
action = "Retry: StorageError with success status code"
|
||||
} else {
|
||||
action = "NoRetry: StorageError not Temporary() and without retriable status code"
|
||||
}
|
||||
} else if netErr, ok := err.(net.Error); ok {
|
||||
// Use non-retriable net.Error list, but not retriable list.
|
||||
// As there are errors without Temporary() implementation,
|
||||
// while need be retried, like 'connection reset by peer', 'transport connection broken' and etc.
|
||||
// So the SDK do retry for most of the case, unless the error should not be retried for sure.
|
||||
if !isNotRetriable(netErr) {
|
||||
action = "Retry: net.Error and not in the non-retriable list"
|
||||
} else {
|
||||
action = "NoRetry: net.Error and in the non-retriable list"
|
||||
}
|
||||
} else {
|
||||
action = "NoRetry: unrecognized error"
|
||||
}
|
||||
@ -237,11 +256,18 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
|
||||
if err != nil {
|
||||
tryCancel() // If we're returning an error, cancel this current/last per-retry timeout context
|
||||
} else {
|
||||
// TODO: Right now, we've decided to leak the per-try Context until the user's Context is canceled.
|
||||
// Another option is that we wrap the last per-try context in a body and overwrite the Response's Body field with our wrapper.
|
||||
// We wrap the last per-try context in a body and overwrite the Response's Body field with our wrapper.
|
||||
// So, when the user closes the Body, the our per-try context gets closed too.
|
||||
// Another option, is that the Last Policy do this wrapping for a per-retry context (not for the user's context)
|
||||
_ = tryCancel // So, for now, we don't call cancel: cancel()
|
||||
if response == nil || response.Response() == nil {
|
||||
// We do panic in the case response or response.Response() is nil,
|
||||
// as for client, the response should not be nil if request is sent and the operations is executed successfully.
|
||||
// Another option, is that execute the cancel function when response or response.Response() is nil,
|
||||
// as in this case, current per-try has nothing to do in future.
|
||||
panic("invalid state, response should not be nil when the operation is executed successfully")
|
||||
}
|
||||
|
||||
response.Response().Body = &contextCancelReadCloser{cf: tryCancel, body: response.Response().Body}
|
||||
}
|
||||
break // Don't retry
|
||||
}
|
||||
@ -259,6 +285,78 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
|
||||
})
|
||||
}
|
||||
|
||||
// contextCancelReadCloser helps to invoke context's cancelFunc properly when the ReadCloser is closed.
|
||||
type contextCancelReadCloser struct {
|
||||
cf context.CancelFunc
|
||||
body io.ReadCloser
|
||||
}
|
||||
|
||||
func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) {
|
||||
return rc.body.Read(p)
|
||||
}
|
||||
|
||||
func (rc *contextCancelReadCloser) Close() error {
|
||||
err := rc.body.Close()
|
||||
if rc.cf != nil {
|
||||
rc.cf()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// isNotRetriable checks if the provided net.Error isn't retriable.
|
||||
func isNotRetriable(errToParse net.Error) bool {
|
||||
// No error, so this is NOT retriable.
|
||||
if errToParse == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
// The error is either temporary or a timeout so it IS retriable (not not retriable).
|
||||
if errToParse.Temporary() || errToParse.Timeout() {
|
||||
return false
|
||||
}
|
||||
|
||||
genericErr := error(errToParse)
|
||||
|
||||
// From here all the error are neither Temporary() nor Timeout().
|
||||
switch err := errToParse.(type) {
|
||||
case *net.OpError:
|
||||
// The net.Error is also a net.OpError but the inner error is nil, so this is not retriable.
|
||||
if err.Err == nil {
|
||||
return true
|
||||
}
|
||||
genericErr = err.Err
|
||||
}
|
||||
|
||||
switch genericErr.(type) {
|
||||
case *net.AddrError, net.UnknownNetworkError, *net.DNSError, net.InvalidAddrError, *net.ParseError, *net.DNSConfigError:
|
||||
// If the error is one of the ones listed, then it is NOT retriable.
|
||||
return true
|
||||
}
|
||||
|
||||
// If it's invalid header field name/value error thrown by http module, then it is NOT retriable.
|
||||
// This could happen when metadata's key or value is invalid. (RoundTrip in transport.go)
|
||||
if strings.Contains(genericErr.Error(), "invalid header field") {
|
||||
return true
|
||||
}
|
||||
|
||||
// Assume the error is retriable.
|
||||
return false
|
||||
}
|
||||
|
||||
var successStatusCodes = []int{http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent, http.StatusPartialContent}
|
||||
|
||||
func isSuccessStatusCode(resp *http.Response) bool {
|
||||
if resp == nil {
|
||||
return false
|
||||
}
|
||||
for _, i := range successStatusCodes {
|
||||
if i == resp.StatusCode {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// According to https://github.com/golang/go/wiki/CompilerOptimizations, the compiler will inline this method and hopefully optimize all calls to it away
|
||||
var logf = func(format string, a ...interface{}) {}
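The retry policy now keys off the azblob StorageError interface (Temporary(), Response(), ServiceCode()) rather than only net.Error. The same interface is what surfaces to callers; a sketch of inspecting it, where ServiceCodeContainerNotFound is assumed to be one of this SDK's generated ServiceCode constants:

    if stErr, ok := err.(azblob.StorageError); ok {
        switch stErr.ServiceCode() {
        case azblob.ServiceCodeContainerNotFound:
            // the container was deleted underneath us; treat as not found
        default:
            // some other storage-service error; stErr.Response() has the raw HTTP response
        }
    }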
2
vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_retry_reader.go
generated
vendored
@ -106,7 +106,7 @@ func (s *retryReader) Read(p []byte) (n int, err error) {
return n, err // All retries exhausted
}

if netErr, ok := err.(net.Error); ok && (netErr.Timeout() || netErr.Temporary()) {
if _, ok := err.(net.Error); ok {
continue
// Loop around and try to get and read from new stream.
}
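Because the retry reader now retries on any net.Error rather than only temporary/timeout ones, raising the retry count on downloads is usually all that is needed to survive dropped connections. A sketch, assuming a DownloadResponse in hand and this SDK's Body/RetryReaderOptions helpers:

    body := downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 3})
    defer body.Close()
    _, err := io.Copy(dst, body)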
2
vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_sas_account.go
generated
vendored
@ -205,7 +205,7 @@ func (rt *AccountSASResourceTypes) Parse(s string) error {
switch r {
case 's':
rt.Service = true
case 'q':
case 'c':
rt.Container = true
case 'o':
rt.Object = true
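With the stray 'q' case dropped, only 's', 'c' and 'o' remain meaningful resource-type letters. A short usage sketch of the Parse method whose signature appears in the hunk header above:

    var rt azblob.AccountSASResourceTypes
    if err := rt.Parse("sco"); err != nil { // service, container and object access
        return err
    }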
7
vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_storage_error.go
generated
vendored
@ -43,11 +43,14 @@ func newStorageError(cause error, response *http.Response, description string) e
response: response,
description: description,
},
serviceCode: ServiceCodeType(response.Header.Get("x-ms-error-code")),
}
}

// ServiceCode returns service-error information. The caller may examine these values but should not modify any of them.
func (e *storageError) ServiceCode() ServiceCodeType { return e.serviceCode }
func (e *storageError) ServiceCode() ServiceCodeType {
return e.serviceCode
}

// Error implements the error interface's Error method to return a string representation of the error.
func (e *storageError) Error() string {
@ -94,8 +97,6 @@ func (e *storageError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err
break
case xml.CharData:
switch tokName {
case "Code":
e.serviceCode = ServiceCodeType(tt)
case "Message":
e.description = string(tt)
default:
6
vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zc_util_validate.go
generated
vendored
@ -16,8 +16,8 @@ type httpRange struct {
}

func (r httpRange) pointers() *string {
if r.offset == 0 && r.count == 0 { // Do common case first for performance
return nil // No specified range
if r.offset == 0 && r.count == CountToEnd { // Do common case first for performance
return nil // No specified range
}
if r.offset < 0 {
panic("The range offset must be >= 0")
@ -25,7 +25,7 @@ func (r httpRange) pointers() *string {
if r.count < 0 {
panic("The range count must be >= 0")
}
endOffset := "" // if count == 0
endOffset := "" // if count == CountToEnd (0)
if r.count > 0 {
endOffset = strconv.FormatInt((r.offset+r.count)-1, 10)
}
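CountToEnd (0) now has an exported name for "read to the end of the blob", so a whole-blob download no longer needs the size up front. A sketch, assuming this SDK's BlobURL.Download helper (whose exact shape is outside this diff):

    resp, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false)
    if err != nil {
        return err
    }
    defer resp.Response().Body.Close()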
@ -33,20 +33,21 @@ func newAppendBlobClient(url url.URL, p pipeline.Pipeline) appendBlobClient {
|
||||
// error.contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
|
||||
// information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
|
||||
// lease is active and matches this ID. maxSize is optional conditional header. The max length in bytes permitted for
|
||||
// the append blob. If the Append Block operation would cause the blob to exceed that limit or if the blob size is
|
||||
// already greater than the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error
|
||||
// (HTTP status code 412 - Precondition Failed). appendPosition is optional conditional header, used only for the
|
||||
// Append Block operation. A number indicating the byte offset to compare. Append Block will succeed only if the append
|
||||
// position is equal to this number. If it is not, the request will fail with the AppendPositionConditionNotMet error
|
||||
// (HTTP status code 412 - Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob
|
||||
// if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate
|
||||
// only on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to
|
||||
// operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
|
||||
// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
|
||||
// in the analytics logs when storage analytics logging is enabled.
|
||||
func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockResponse, error) {
|
||||
// Timeouts for Blob Service Operations.</a> transactionalContentMD5 is specify the transactional md5 for the body, to
|
||||
// be validated by the service. leaseID is if specified, the operation only succeeds if the container's lease is active
|
||||
// and matches this ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob.
|
||||
// If the Append Block operation would cause the blob to exceed that limit or if the blob size is already greater than
|
||||
// the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code
|
||||
// 412 - Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation.
|
||||
// A number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to
|
||||
// this number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412
|
||||
// - Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob if it has been
|
||||
// modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if
|
||||
// it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate only on blobs
|
||||
// with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
||||
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
|
||||
// logs when storage analytics logging is enabled.
|
||||
func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: body,
|
||||
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
|
||||
@ -55,7 +56,7 @@ func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeek
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.appendBlockPreparer(body, contentLength, timeout, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
||||
req, err := client.appendBlockPreparer(body, contentLength, timeout, transactionalContentMD5, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -67,7 +68,7 @@ func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeek
|
||||
}
|
||||
|
||||
// appendBlockPreparer prepares the AppendBlock request.
|
||||
func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, body)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
@ -79,6 +80,9 @@ func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLe
|
||||
params.Set("comp", "appendblock")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||
if transactionalContentMD5 != nil {
|
||||
req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
|
||||
}
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
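The new transactionalContentMD5 parameter is simply base64-encoded into a Content-MD5 header, so the value passed in is the raw 16-byte digest. For illustration, using the generated-client signature shown above (internal to the package; the public AppendBlobURL wrapper is expected to forward the same parameter):

    sum := md5.Sum(chunk) // chunk is the []byte being appended
    _, err := client.AppendBlock(ctx, bytes.NewReader(chunk), int64(len(chunk)), nil, sum[:],
        nil, nil, nil, nil, nil, nil, nil, nil)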
@ -147,10 +151,7 @@ func (client appendBlobClient) Create(ctx context.Context, contentLength int64,
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
|
||||
{targetValue: metadata,
|
||||
constraints: []constraint{{target: "metadata", name: null, rule: false,
|
||||
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
||||
|
70
vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_blob.go
generated
vendored
@ -6,14 +6,13 @@ package azblob
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// blobClient is the client for the Blob methods of the Azblob service.
|
||||
@ -347,10 +346,7 @@ func (client blobClient) CreateSnapshot(ctx context.Context, timeout *int32, met
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
|
||||
{targetValue: metadata,
|
||||
constraints: []constraint{{target: "metadata", name: null, rule: false,
|
||||
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.createSnapshotPreparer(timeout, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, leaseID, requestID)
|
||||
@ -593,6 +589,44 @@ func (client blobClient) downloadResponder(resp pipeline.Response) (pipeline.Res
|
||||
return &downloadResponse{rawResponse: resp.Response()}, err
|
||||
}
|
||||
|
||||
// GetAccountInfo returns the sku name and account kind
|
||||
func (client blobClient) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
|
||||
req, err := client.getAccountInfoPreparer()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*BlobGetAccountInfoResponse), err
|
||||
}
|
||||
|
||||
// getAccountInfoPreparer prepares the GetAccountInfo request.
|
||||
func (client blobClient) getAccountInfoPreparer() (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("GET", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
params.Set("restype", "account")
|
||||
params.Set("comp", "properties")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// getAccountInfoResponder handles the response to the GetAccountInfo request.
|
||||
func (client blobClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||
err := validateResponse(resp, http.StatusOK)
|
||||
if resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
io.Copy(ioutil.Discard, resp.Response().Body)
|
||||
resp.Response().Body.Close()
|
||||
return &BlobGetAccountInfoResponse{rawResponse: resp.Response()}, err
|
||||
}
|
||||
|
||||
// GetProperties the Get Properties operation returns all user-defined metadata, standard HTTP properties, and system
|
||||
// properties for the blob. It does not return the content of the blob.
|
||||
//
|
||||
@ -942,10 +976,7 @@ func (client blobClient) SetMetadata(ctx context.Context, timeout *int32, metada
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
|
||||
{targetValue: metadata,
|
||||
constraints: []constraint{{target: "metadata", name: null, rule: false,
|
||||
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.setMetadataPreparer(timeout, metadata, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
||||
@ -1089,20 +1120,16 @@ func (client blobClient) setTierResponder(resp pipeline.Response) (pipeline.Resp
|
||||
// operate only on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value
|
||||
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
|
||||
// without a matching value. leaseID is if specified, the operation only succeeds if the container's lease is active
|
||||
// and matches this ID. sourceLeaseID is specify this header to perform the operation only if the lease ID given
|
||||
// matches the active lease ID of the source blob. requestID is provides a client-generated, opaque value with a 1 KB
|
||||
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client blobClient) StartCopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatches *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, leaseID *string, sourceLeaseID *string, requestID *string) (*BlobStartCopyFromURLResponse, error) {
|
||||
// and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
|
||||
// recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client blobClient) StartCopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatches *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (*BlobStartCopyFromURLResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
|
||||
{targetValue: metadata,
|
||||
constraints: []constraint{{target: "metadata", name: null, rule: false,
|
||||
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.startCopyFromURLPreparer(copySource, timeout, metadata, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatches, sourceIfNoneMatch, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, leaseID, sourceLeaseID, requestID)
|
||||
req, err := client.startCopyFromURLPreparer(copySource, timeout, metadata, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatches, sourceIfNoneMatch, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, leaseID, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -1114,7 +1141,7 @@ func (client blobClient) StartCopyFromURL(ctx context.Context, copySource string
|
||||
}
|
||||
|
||||
// startCopyFromURLPreparer prepares the StartCopyFromURL request.
|
||||
func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatches *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, leaseID *string, sourceLeaseID *string, requestID *string) (pipeline.Request, error) {
|
||||
func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatches *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
@ -1157,9 +1184,6 @@ func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *in
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
if sourceLeaseID != nil {
|
||||
req.Header.Set("x-ms-source-lease-id", *sourceLeaseID)
|
||||
}
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
|
@ -60,10 +60,7 @@ func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockL
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
|
||||
{targetValue: metadata,
|
||||
constraints: []constraint{{target: "metadata", name: null, rule: false,
|
||||
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.commitBlockListPreparer(blocks, timeout, blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
||||
@ -240,13 +237,14 @@ func (client blockBlobClient) getBlockListResponder(resp pipeline.Response) (pip
|
||||
// blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or
|
||||
// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the
|
||||
// same size for each block. contentLength is the length of the request. body is initial data body will be closed upon
|
||||
// successful return. Callers should ensure closure when receiving an error.timeout is the timeout parameter is
|
||||
// expressed in seconds. For more information, see <a
|
||||
// successful return. Callers should ensure closure when receiving an error.transactionalContentMD5 is specify the
|
||||
// transactional md5 for the body, to be validated by the service. timeout is the timeout parameter is expressed in
|
||||
// seconds. For more information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
|
||||
// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
|
||||
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeeker, timeout *int32, leaseID *string, requestID *string) (*BlockBlobStageBlockResponse, error) {
|
||||
func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (*BlockBlobStageBlockResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: body,
|
||||
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
|
||||
@ -255,7 +253,7 @@ func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, co
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.stageBlockPreparer(blockID, contentLength, body, timeout, leaseID, requestID)
|
||||
req, err := client.stageBlockPreparer(blockID, contentLength, body, transactionalContentMD5, timeout, leaseID, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -267,7 +265,7 @@ func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, co
|
||||
}
|
||||
|
||||
// stageBlockPreparer prepares the StageBlock request.
|
||||
func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength int64, body io.ReadSeeker, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
|
||||
func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, body)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
@ -280,6 +278,9 @@ func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength i
|
||||
params.Set("comp", "block")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||
if transactionalContentMD5 != nil {
|
||||
req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
|
||||
}
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
@ -314,7 +315,7 @@ func (client blockBlobClient) stageBlockResponder(resp pipeline.Response) (pipel
|
||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
|
||||
// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
|
||||
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL *string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (*BlockBlobStageBlockFromURLResponse, error) {
|
||||
func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (*BlockBlobStageBlockFromURLResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
@ -333,7 +334,7 @@ func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID str
|
||||
}
|
||||
|
||||
// stageBlockFromURLPreparer prepares the StageBlockFromURL request.
|
||||
func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentLength int64, sourceURL *string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
|
||||
func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
@ -346,9 +347,7 @@ func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentL
|
||||
params.Set("comp", "block")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||
if sourceURL != nil {
|
||||
req.Header.Set("x-ms-copy-source", *sourceURL)
|
||||
}
|
||||
req.Header.Set("x-ms-copy-source", sourceURL)
|
||||
if sourceRange != nil {
|
||||
req.Header.Set("x-ms-source-range", *sourceRange)
|
||||
}
|
||||
@ -411,10 +410,7 @@ func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, co
|
||||
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
|
||||
{targetValue: metadata,
|
||||
constraints: []constraint{{target: "metadata", name: null, rule: false,
|
||||
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.uploadPreparer(body, contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
||||
|
@ -7,14 +7,13 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// containerClient is the client for the Container methods of the Azblob service.
|
||||
@ -264,10 +263,7 @@ func (client containerClient) Create(ctx context.Context, timeout *int32, metada
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
|
||||
{targetValue: metadata,
|
||||
constraints: []constraint{{target: "metadata", name: null, rule: false,
|
||||
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.createPreparer(timeout, metadata, access, requestID)
|
||||
@ -460,6 +456,44 @@ func (client containerClient) getAccessPolicyResponder(resp pipeline.Response) (
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// GetAccountInfo returns the sku name and account kind
|
||||
func (client containerClient) GetAccountInfo(ctx context.Context) (*ContainerGetAccountInfoResponse, error) {
|
||||
req, err := client.getAccountInfoPreparer()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*ContainerGetAccountInfoResponse), err
|
||||
}
|
||||
|
||||
// getAccountInfoPreparer prepares the GetAccountInfo request.
|
||||
func (client containerClient) getAccountInfoPreparer() (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("GET", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
params.Set("restype", "account")
|
||||
params.Set("comp", "properties")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// getAccountInfoResponder handles the response to the GetAccountInfo request.
|
||||
func (client containerClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||
err := validateResponse(resp, http.StatusOK)
|
||||
if resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
io.Copy(ioutil.Discard, resp.Response().Body)
|
||||
resp.Response().Body.Close()
|
||||
return &ContainerGetAccountInfoResponse{rawResponse: resp.Response()}, err
|
||||
}
|
||||
|
||||
// GetProperties returns all user-defined metadata and system properties for the specified container. The data returned
|
||||
// does not include the container's list of blobs
|
||||
//
|
||||
@ -946,10 +980,7 @@ func (client containerClient) SetMetadata(ctx context.Context, timeout *int32, l
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
|
||||
{targetValue: metadata,
|
||||
constraints: []constraint{{target: "metadata", name: null, rule: false,
|
||||
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.setMetadataPreparer(timeout, leaseID, metadata, ifModifiedSince, requestID)
|
||||
|
421
vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_models.go
generated
vendored
@ -119,6 +119,25 @@ func PossibleAccessTierTypeValues() []AccessTierType {
|
||||
return []AccessTierType{AccessTierArchive, AccessTierCool, AccessTierHot, AccessTierNone, AccessTierP10, AccessTierP20, AccessTierP30, AccessTierP4, AccessTierP40, AccessTierP50, AccessTierP6}
|
||||
}
|
||||
|
||||
// AccountKindType enumerates the values for account kind type.
|
||||
type AccountKindType string
|
||||
|
||||
const (
|
||||
// AccountKindBlobStorage ...
|
||||
AccountKindBlobStorage AccountKindType = "BlobStorage"
|
||||
// AccountKindNone represents an empty AccountKindType.
|
||||
AccountKindNone AccountKindType = ""
|
||||
// AccountKindStorage ...
|
||||
AccountKindStorage AccountKindType = "Storage"
|
||||
// AccountKindStorageV2 ...
|
||||
AccountKindStorageV2 AccountKindType = "StorageV2"
|
||||
)
|
||||
|
||||
// PossibleAccountKindTypeValues returns an array of possible values for the AccountKindType const type.
|
||||
func PossibleAccountKindTypeValues() []AccountKindType {
|
||||
return []AccountKindType{AccountKindBlobStorage, AccountKindNone, AccountKindStorage, AccountKindStorageV2}
|
||||
}
|
||||
|
||||
// ArchiveStatusType enumerates the values for archive status type.
|
||||
type ArchiveStatusType string
|
||||
|
||||
@ -362,6 +381,29 @@ func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType {
|
||||
return []SequenceNumberActionType{SequenceNumberActionIncrement, SequenceNumberActionMax, SequenceNumberActionNone, SequenceNumberActionUpdate}
|
||||
}
|
||||
|
||||
// SkuNameType enumerates the values for sku name type.
|
||||
type SkuNameType string
|
||||
|
||||
const (
|
||||
// SkuNameNone represents an empty SkuNameType.
|
||||
SkuNameNone SkuNameType = ""
|
||||
// SkuNamePremiumLRS ...
|
||||
SkuNamePremiumLRS SkuNameType = "Premium_LRS"
|
||||
// SkuNameStandardGRS ...
|
||||
SkuNameStandardGRS SkuNameType = "Standard_GRS"
|
||||
// SkuNameStandardLRS ...
|
||||
SkuNameStandardLRS SkuNameType = "Standard_LRS"
|
||||
// SkuNameStandardRAGRS ...
|
||||
SkuNameStandardRAGRS SkuNameType = "Standard_RAGRS"
|
||||
// SkuNameStandardZRS ...
|
||||
SkuNameStandardZRS SkuNameType = "Standard_ZRS"
|
||||
)
|
||||
|
||||
// PossibleSkuNameTypeValues returns an array of possible values for the SkuNameType const type.
|
||||
func PossibleSkuNameTypeValues() []SkuNameType {
|
||||
return []SkuNameType{SkuNameNone, SkuNamePremiumLRS, SkuNameStandardGRS, SkuNameStandardLRS, SkuNameStandardRAGRS, SkuNameStandardZRS}
|
||||
}
|
||||
|
||||
// StorageErrorCodeType enumerates the values for storage error code type.
|
||||
type StorageErrorCodeType string
|
||||
|
||||
@ -1180,11 +1222,71 @@ func (bdr BlobDeleteResponse) Version() string {
|
||||
return bdr.rawResponse.Header.Get("x-ms-version")
|
||||
}
|
||||
|
||||
// BlobFlatList ...
|
||||
type BlobFlatList struct {
|
||||
// BlobFlatListSegment ...
|
||||
type BlobFlatListSegment struct {
|
||||
// XMLName is used for marshalling and is subject to removal in a future release.
|
||||
XMLName xml.Name `xml:"Blobs"`
|
||||
BlobItems []BlobItem `xml:"Blob"`
|
||||
}
|
||||
|
||||
// BlobGetAccountInfoResponse ...
|
||||
type BlobGetAccountInfoResponse struct {
|
||||
rawResponse *http.Response
|
||||
}
|
||||
|
||||
// Response returns the raw HTTP response object.
|
||||
func (bgair BlobGetAccountInfoResponse) Response() *http.Response {
|
||||
return bgair.rawResponse
|
||||
}
|
||||
|
||||
// StatusCode returns the HTTP status code of the response, e.g. 200.
|
||||
func (bgair BlobGetAccountInfoResponse) StatusCode() int {
|
||||
return bgair.rawResponse.StatusCode
|
||||
}
|
||||
|
||||
// Status returns the HTTP status message of the response, e.g. "200 OK".
|
||||
func (bgair BlobGetAccountInfoResponse) Status() string {
|
||||
return bgair.rawResponse.Status
|
||||
}
|
||||
|
||||
// AccountKind returns the value for header x-ms-account-kind.
|
||||
func (bgair BlobGetAccountInfoResponse) AccountKind() AccountKindType {
|
||||
return AccountKindType(bgair.rawResponse.Header.Get("x-ms-account-kind"))
|
||||
}
|
||||
|
||||
// Date returns the value for header Date.
|
||||
func (bgair BlobGetAccountInfoResponse) Date() time.Time {
|
||||
s := bgair.rawResponse.Header.Get("Date")
|
||||
if s == "" {
|
||||
return time.Time{}
|
||||
}
|
||||
t, err := time.Parse(time.RFC1123, s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// ErrorCode returns the value for header x-ms-error-code.
|
||||
func (bgair BlobGetAccountInfoResponse) ErrorCode() string {
|
||||
return bgair.rawResponse.Header.Get("x-ms-error-code")
|
||||
}
|
||||
|
||||
// RequestID returns the value for header x-ms-request-id.
|
||||
func (bgair BlobGetAccountInfoResponse) RequestID() string {
|
||||
return bgair.rawResponse.Header.Get("x-ms-request-id")
|
||||
}
|
||||
|
||||
// SkuName returns the value for header x-ms-sku-name.
|
||||
func (bgair BlobGetAccountInfoResponse) SkuName() SkuNameType {
|
||||
return SkuNameType(bgair.rawResponse.Header.Get("x-ms-sku-name"))
|
||||
}
|
||||
|
||||
// Version returns the value for header x-ms-version.
|
||||
func (bgair BlobGetAccountInfoResponse) Version() string {
|
||||
return bgair.rawResponse.Header.Get("x-ms-version")
|
||||
}
|
||||
|
||||
// BlobGetPropertiesResponse ...
|
||||
type BlobGetPropertiesResponse struct {
|
||||
rawResponse *http.Response
|
||||
@ -1228,6 +1330,19 @@ func (bgpr BlobGetPropertiesResponse) AccessTier() string {
|
||||
return bgpr.rawResponse.Header.Get("x-ms-access-tier")
|
||||
}
|
||||
|
||||
// AccessTierChangeTime returns the value for header x-ms-access-tier-change-time.
|
||||
func (bgpr BlobGetPropertiesResponse) AccessTierChangeTime() time.Time {
|
||||
s := bgpr.rawResponse.Header.Get("x-ms-access-tier-change-time")
|
||||
if s == "" {
|
||||
return time.Time{}
|
||||
}
|
||||
t, err := time.Parse(time.RFC1123, s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// AccessTierInferred returns the value for header x-ms-access-tier-inferred.
|
||||
func (bgpr BlobGetPropertiesResponse) AccessTierInferred() string {
|
||||
return bgpr.rawResponse.Header.Get("x-ms-access-tier-inferred")
|
||||
@ -1358,6 +1473,19 @@ func (bgpr BlobGetPropertiesResponse) CopyStatusDescription() string {
|
||||
return bgpr.rawResponse.Header.Get("x-ms-copy-status-description")
|
||||
}
|
||||
|
||||
// CreationTime returns the value for header x-ms-creation-time.
|
||||
func (bgpr BlobGetPropertiesResponse) CreationTime() time.Time {
|
||||
s := bgpr.rawResponse.Header.Get("x-ms-creation-time")
|
||||
if s == "" {
|
||||
return time.Time{}
|
||||
}
|
||||
t, err := time.Parse(time.RFC1123, s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// Date returns the value for header Date.
|
||||
func (bgpr BlobGetPropertiesResponse) Date() time.Time {
|
||||
s := bgpr.rawResponse.Header.Get("Date")
|
||||
@ -1434,14 +1562,18 @@ func (bgpr BlobGetPropertiesResponse) Version() string {
|
||||
return bgpr.rawResponse.Header.Get("x-ms-version")
|
||||
}
|
||||
|
||||
// BlobHierarchyList ...
|
||||
type BlobHierarchyList struct {
|
||||
// BlobHierarchyListSegment ...
|
||||
type BlobHierarchyListSegment struct {
|
||||
// XMLName is used for marshalling and is subject to removal in a future release.
|
||||
XMLName xml.Name `xml:"Blobs"`
|
||||
BlobPrefixes []BlobPrefix `xml:"BlobPrefix"`
|
||||
BlobItems []BlobItem `xml:"Blob"`
|
||||
}
|
||||
|
||||
// BlobItem - An Azure Storage blob
|
||||
type BlobItem struct {
|
||||
// XMLName is used for marshalling and is subject to removal in a future release.
|
||||
XMLName xml.Name `xml:"Blob"`
|
||||
Name string `xml:"Name"`
|
||||
Deleted bool `xml:"Deleted"`
|
||||
Snapshot string `xml:"Snapshot"`
|
||||
@ -1456,8 +1588,11 @@ type BlobPrefix struct {
|
||||
|
||||
// BlobProperties - Properties of a blob
|
||||
type BlobProperties struct {
|
||||
LastModified time.Time `xml:"Last-Modified"`
|
||||
Etag ETag `xml:"Etag"`
|
||||
// XMLName is used for marshalling and is subject to removal in a future release.
|
||||
XMLName xml.Name `xml:"Properties"`
|
||||
CreationTime *time.Time `xml:"Creation-Time"`
|
||||
LastModified time.Time `xml:"Last-Modified"`
|
||||
Etag ETag `xml:"Etag"`
|
||||
// ContentLength - Size in bytes
|
||||
ContentLength *int64 `xml:"Content-Length"`
|
||||
ContentType *string `xml:"Content-Type"`
|
||||
@ -1491,7 +1626,8 @@ type BlobProperties struct {
|
||||
AccessTier AccessTierType `xml:"AccessTier"`
|
||||
AccessTierInferred *bool `xml:"AccessTierInferred"`
|
||||
// ArchiveStatus - Possible values include: 'ArchiveStatusRehydratePendingToHot', 'ArchiveStatusRehydratePendingToCool', 'ArchiveStatusNone'
|
||||
ArchiveStatus ArchiveStatusType `xml:"ArchiveStatus"`
|
||||
ArchiveStatus ArchiveStatusType `xml:"ArchiveStatus"`
|
||||
AccessTierChangeTime *time.Time `xml:"AccessTierChangeTime"`
|
||||
}
|
||||
|
||||
// MarshalXML implements the xml.Marshaler interface for BlobProperties.
|
||||
@ -2702,6 +2838,64 @@ func (cdr ContainerDeleteResponse) Version() string {
|
||||
return cdr.rawResponse.Header.Get("x-ms-version")
|
||||
}
|
||||
|
||||
// ContainerGetAccountInfoResponse ...
|
||||
type ContainerGetAccountInfoResponse struct {
|
||||
rawResponse *http.Response
|
||||
}
|
||||
|
||||
// Response returns the raw HTTP response object.
|
||||
func (cgair ContainerGetAccountInfoResponse) Response() *http.Response {
|
||||
return cgair.rawResponse
|
||||
}
|
||||
|
||||
// StatusCode returns the HTTP status code of the response, e.g. 200.
|
||||
func (cgair ContainerGetAccountInfoResponse) StatusCode() int {
|
||||
return cgair.rawResponse.StatusCode
|
||||
}
|
||||
|
||||
// Status returns the HTTP status message of the response, e.g. "200 OK".
|
||||
func (cgair ContainerGetAccountInfoResponse) Status() string {
|
||||
return cgair.rawResponse.Status
|
||||
}
|
||||
|
||||
// AccountKind returns the value for header x-ms-account-kind.
|
||||
func (cgair ContainerGetAccountInfoResponse) AccountKind() AccountKindType {
|
||||
return AccountKindType(cgair.rawResponse.Header.Get("x-ms-account-kind"))
|
||||
}
|
||||
|
||||
// Date returns the value for header Date.
|
||||
func (cgair ContainerGetAccountInfoResponse) Date() time.Time {
|
||||
s := cgair.rawResponse.Header.Get("Date")
|
||||
if s == "" {
|
||||
return time.Time{}
|
||||
}
|
||||
t, err := time.Parse(time.RFC1123, s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// ErrorCode returns the value for header x-ms-error-code.
|
||||
func (cgair ContainerGetAccountInfoResponse) ErrorCode() string {
|
||||
return cgair.rawResponse.Header.Get("x-ms-error-code")
|
||||
}
|
||||
|
||||
// RequestID returns the value for header x-ms-request-id.
|
||||
func (cgair ContainerGetAccountInfoResponse) RequestID() string {
|
||||
return cgair.rawResponse.Header.Get("x-ms-request-id")
|
||||
}
|
||||
|
||||
// SkuName returns the value for header x-ms-sku-name.
|
||||
func (cgair ContainerGetAccountInfoResponse) SkuName() SkuNameType {
|
||||
return SkuNameType(cgair.rawResponse.Header.Get("x-ms-sku-name"))
|
||||
}
|
||||
|
||||
// Version returns the value for header x-ms-version.
|
||||
func (cgair ContainerGetAccountInfoResponse) Version() string {
|
||||
return cgair.rawResponse.Header.Get("x-ms-version")
|
||||
}
|
||||
|
||||
// ContainerGetPropertiesResponse ...
|
||||
type ContainerGetPropertiesResponse struct {
|
||||
rawResponse *http.Response
|
||||
@ -2763,6 +2957,16 @@ func (cgpr ContainerGetPropertiesResponse) ETag() ETag {
|
||||
return ETag(cgpr.rawResponse.Header.Get("ETag"))
|
||||
}
|
||||
|
||||
// HasImmutabilityPolicy returns the value for header x-ms-has-immutability-policy.
|
||||
func (cgpr ContainerGetPropertiesResponse) HasImmutabilityPolicy() string {
|
||||
return cgpr.rawResponse.Header.Get("x-ms-has-immutability-policy")
|
||||
}
|
||||
|
||||
// HasLegalHold returns the value for header x-ms-has-legal-hold.
|
||||
func (cgpr ContainerGetPropertiesResponse) HasLegalHold() string {
|
||||
return cgpr.rawResponse.Header.Get("x-ms-has-legal-hold")
|
||||
}
|
||||
|
||||
// LastModified returns the value for header Last-Modified.
|
||||
func (cgpr ContainerGetPropertiesResponse) LastModified() time.Time {
|
||||
s := cgpr.rawResponse.Header.Get("Last-Modified")
|
||||
@ -2803,6 +3007,8 @@ func (cgpr ContainerGetPropertiesResponse) Version() string {
|
||||
|
||||
// ContainerItem - An Azure Storage container
|
||||
type ContainerItem struct {
|
||||
// XMLName is used for marshalling and is subject to removal in a future release.
|
||||
XMLName xml.Name `xml:"Container"`
|
||||
Name string `xml:"Name"`
|
||||
Properties ContainerProperties `xml:"Properties"`
|
||||
Metadata Metadata `xml:"Metadata"`
|
||||
@ -2819,7 +3025,9 @@ type ContainerProperties struct {
|
||||
// LeaseDuration - Possible values include: 'LeaseDurationInfinite', 'LeaseDurationFixed', 'LeaseDurationNone'
|
||||
LeaseDuration LeaseDurationType `xml:"LeaseDuration"`
|
||||
// PublicAccess - Possible values include: 'PublicAccessContainer', 'PublicAccessBlob', 'PublicAccessNone'
|
||||
PublicAccess PublicAccessType `xml:"PublicAccess"`
|
||||
PublicAccess PublicAccessType `xml:"PublicAccess"`
|
||||
HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"`
|
||||
HasLegalHold *bool `xml:"HasLegalHold"`
|
||||
}
|
||||
|
||||
// MarshalXML implements the xml.Marshaler interface for ContainerProperties.
|
||||
@ -3373,7 +3581,7 @@ func (dr downloadResponse) Version() string {
|
||||
return dr.rawResponse.Header.Get("x-ms-version")
|
||||
}
|
||||
|
||||
// GeoReplication ...
|
||||
// GeoReplication - Geo-Replication information for the Secondary Storage Service
|
||||
type GeoReplication struct {
|
||||
// Status - The status of the secondary location. Possible values include: 'GeoReplicationStatusLive', 'GeoReplicationStatusBootstrap', 'GeoReplicationStatusUnavailable', 'GeoReplicationStatusNone'
|
||||
Status GeoReplicationStatusType `xml:"Status"`
|
||||
@ -3403,15 +3611,15 @@ func (gr *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) e
|
||||
type ListBlobsFlatSegmentResponse struct {
|
||||
rawResponse *http.Response
|
||||
// XMLName is used for marshalling and is subject to removal in a future release.
|
||||
XMLName xml.Name `xml:"EnumerationResults"`
|
||||
ServiceEndpoint string `xml:"ServiceEndpoint,attr"`
|
||||
ContainerName string `xml:"ContainerName,attr"`
|
||||
Prefix string `xml:"Prefix"`
|
||||
Marker string `xml:"Marker"`
|
||||
MaxResults int32 `xml:"MaxResults"`
|
||||
Delimiter string `xml:"Delimiter"`
|
||||
Segment BlobFlatList `xml:"Blobs"`
|
||||
NextMarker Marker `xml:"NextMarker"`
|
||||
XMLName xml.Name `xml:"EnumerationResults"`
|
||||
ServiceEndpoint string `xml:"ServiceEndpoint,attr"`
|
||||
ContainerName string `xml:"ContainerName,attr"`
|
||||
Prefix string `xml:"Prefix"`
|
||||
Marker string `xml:"Marker"`
|
||||
MaxResults int32 `xml:"MaxResults"`
|
||||
Delimiter string `xml:"Delimiter"`
|
||||
Segment BlobFlatListSegment `xml:"Blobs"`
|
||||
NextMarker Marker `xml:"NextMarker"`
|
||||
}
|
||||
|
||||
// Response returns the raw HTTP response object.
|
||||
@ -3466,15 +3674,15 @@ func (lbfsr ListBlobsFlatSegmentResponse) Version() string {
|
||||
type ListBlobsHierarchySegmentResponse struct {
|
||||
rawResponse *http.Response
|
||||
// XMLName is used for marshalling and is subject to removal in a future release.
|
||||
XMLName xml.Name `xml:"EnumerationResults"`
|
||||
ServiceEndpoint string `xml:"ServiceEndpoint,attr"`
|
||||
ContainerName string `xml:"ContainerName,attr"`
|
||||
Prefix string `xml:"Prefix"`
|
||||
Marker string `xml:"Marker"`
|
||||
MaxResults int32 `xml:"MaxResults"`
|
||||
Delimiter string `xml:"Delimiter"`
|
||||
Segment BlobHierarchyList `xml:"Blobs"`
|
||||
NextMarker Marker `xml:"NextMarker"`
|
||||
XMLName xml.Name `xml:"EnumerationResults"`
|
||||
ServiceEndpoint string `xml:"ServiceEndpoint,attr"`
|
||||
ContainerName string `xml:"ContainerName,attr"`
|
||||
Prefix string `xml:"Prefix"`
|
||||
Marker string `xml:"Marker"`
|
||||
MaxResults int32 `xml:"MaxResults"`
|
||||
Delimiter string `xml:"Delimiter"`
|
||||
Segment BlobHierarchyListSegment `xml:"Blobs"`
|
||||
NextMarker Marker `xml:"NextMarker"`
|
||||
}
|
||||
|
||||
// Response returns the raw HTTP response object.
|
||||
@ -3525,8 +3733,8 @@ func (lbhsr ListBlobsHierarchySegmentResponse) Version() string {
|
||||
return lbhsr.rawResponse.Header.Get("x-ms-version")
|
||||
}
|
||||
|
||||
// ListContainersResponse - An enumeration of containers
|
||||
type ListContainersResponse struct {
|
||||
// ListContainersSegmentResponse - An enumeration of containers
|
||||
type ListContainersSegmentResponse struct {
|
||||
rawResponse *http.Response
|
||||
// XMLName is used for marshalling and is subject to removal in a future release.
|
||||
XMLName xml.Name `xml:"EnumerationResults"`
|
||||
@ -3539,33 +3747,33 @@ type ListContainersResponse struct {
|
||||
}
|
||||
|
||||
// Response returns the raw HTTP response object.
|
||||
func (lcr ListContainersResponse) Response() *http.Response {
|
||||
return lcr.rawResponse
|
||||
func (lcsr ListContainersSegmentResponse) Response() *http.Response {
|
||||
return lcsr.rawResponse
|
||||
}
|
||||
|
||||
// StatusCode returns the HTTP status code of the response, e.g. 200.
|
||||
func (lcr ListContainersResponse) StatusCode() int {
|
||||
return lcr.rawResponse.StatusCode
|
||||
func (lcsr ListContainersSegmentResponse) StatusCode() int {
|
||||
return lcsr.rawResponse.StatusCode
|
||||
}
|
||||
|
||||
// Status returns the HTTP status message of the response, e.g. "200 OK".
|
||||
func (lcr ListContainersResponse) Status() string {
|
||||
return lcr.rawResponse.Status
|
||||
func (lcsr ListContainersSegmentResponse) Status() string {
|
||||
return lcsr.rawResponse.Status
|
||||
}
|
||||
|
||||
// ErrorCode returns the value for header x-ms-error-code.
|
||||
func (lcr ListContainersResponse) ErrorCode() string {
|
||||
return lcr.rawResponse.Header.Get("x-ms-error-code")
|
||||
func (lcsr ListContainersSegmentResponse) ErrorCode() string {
|
||||
return lcsr.rawResponse.Header.Get("x-ms-error-code")
|
||||
}
|
||||
|
||||
// RequestID returns the value for header x-ms-request-id.
|
||||
func (lcr ListContainersResponse) RequestID() string {
|
||||
return lcr.rawResponse.Header.Get("x-ms-request-id")
|
||||
func (lcsr ListContainersSegmentResponse) RequestID() string {
|
||||
return lcsr.rawResponse.Header.Get("x-ms-request-id")
|
||||
}
|
||||
|
||||
// Version returns the value for header x-ms-version.
|
||||
func (lcr ListContainersResponse) Version() string {
|
||||
return lcr.rawResponse.Header.Get("x-ms-version")
|
||||
func (lcsr ListContainersSegmentResponse) Version() string {
|
||||
return lcsr.rawResponse.Header.Get("x-ms-version")
|
||||
}
|
||||
|
||||
// Logging - Azure Analytics Logging settings.
|
||||
@ -3581,7 +3789,7 @@ type Logging struct {
|
||||
RetentionPolicy RetentionPolicy `xml:"RetentionPolicy"`
|
||||
}
|
||||
|
||||
// Metrics ...
|
||||
// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs
|
||||
type Metrics struct {
|
||||
// Version - The version of Storage Analytics to configure.
|
||||
Version *string `xml:"Version"`
|
||||
@ -4186,7 +4394,7 @@ type PageRange struct {
|
||||
End int64 `xml:"End"`
|
||||
}
|
||||
|
||||
// RetentionPolicy - the retention policy
|
||||
// RetentionPolicy - the retention policy which determines how long the associated data should persist
|
||||
type RetentionPolicy struct {
|
||||
// Enabled - Indicates whether a retention policy is enabled for the storage service
|
||||
Enabled bool `xml:"Enabled"`
|
||||
@ -4194,6 +4402,64 @@ type RetentionPolicy struct {
|
||||
Days *int32 `xml:"Days"`
|
||||
}
|
||||
|
||||
// ServiceGetAccountInfoResponse ...
|
||||
type ServiceGetAccountInfoResponse struct {
|
||||
rawResponse *http.Response
|
||||
}
|
||||
|
||||
// Response returns the raw HTTP response object.
|
||||
func (sgair ServiceGetAccountInfoResponse) Response() *http.Response {
|
||||
return sgair.rawResponse
|
||||
}
|
||||
|
||||
// StatusCode returns the HTTP status code of the response, e.g. 200.
|
||||
func (sgair ServiceGetAccountInfoResponse) StatusCode() int {
|
||||
return sgair.rawResponse.StatusCode
|
||||
}
|
||||
|
||||
// Status returns the HTTP status message of the response, e.g. "200 OK".
|
||||
func (sgair ServiceGetAccountInfoResponse) Status() string {
|
||||
return sgair.rawResponse.Status
|
||||
}
|
||||
|
||||
// AccountKind returns the value for header x-ms-account-kind.
|
||||
func (sgair ServiceGetAccountInfoResponse) AccountKind() AccountKindType {
|
||||
return AccountKindType(sgair.rawResponse.Header.Get("x-ms-account-kind"))
|
||||
}
|
||||
|
||||
// Date returns the value for header Date.
|
||||
func (sgair ServiceGetAccountInfoResponse) Date() time.Time {
|
||||
s := sgair.rawResponse.Header.Get("Date")
|
||||
if s == "" {
|
||||
return time.Time{}
|
||||
}
|
||||
t, err := time.Parse(time.RFC1123, s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// ErrorCode returns the value for header x-ms-error-code.
|
||||
func (sgair ServiceGetAccountInfoResponse) ErrorCode() string {
|
||||
return sgair.rawResponse.Header.Get("x-ms-error-code")
|
||||
}
|
||||
|
||||
// RequestID returns the value for header x-ms-request-id.
|
||||
func (sgair ServiceGetAccountInfoResponse) RequestID() string {
|
||||
return sgair.rawResponse.Header.Get("x-ms-request-id")
|
||||
}
|
||||
|
||||
// SkuName returns the value for header x-ms-sku-name.
|
||||
func (sgair ServiceGetAccountInfoResponse) SkuName() SkuNameType {
|
||||
return SkuNameType(sgair.rawResponse.Header.Get("x-ms-sku-name"))
|
||||
}
|
||||
|
||||
// Version returns the value for header x-ms-version.
|
||||
func (sgair ServiceGetAccountInfoResponse) Version() string {
|
||||
return sgair.rawResponse.Header.Get("x-ms-version")
|
||||
}
|
||||
|
||||
// ServiceSetPropertiesResponse ...
|
||||
type ServiceSetPropertiesResponse struct {
|
||||
rawResponse *http.Response
|
||||
@ -4232,8 +4498,7 @@ func (sspr ServiceSetPropertiesResponse) Version() string {
|
||||
// SignedIdentifier - signed identifier
|
||||
type SignedIdentifier struct {
|
||||
// ID - a unique id
|
||||
ID string `xml:"Id"`
|
||||
// AccessPolicy - The access policy
|
||||
ID string `xml:"Id"`
|
||||
AccessPolicy AccessPolicy `xml:"AccessPolicy"`
|
||||
}
|
||||
|
||||
@ -4309,21 +4574,28 @@ func (si SignedIdentifiers) Version() string {
|
||||
return si.rawResponse.Header.Get("x-ms-version")
|
||||
}
|
||||
|
||||
// StaticWebsite - The properties that enable an account to host a static website
|
||||
type StaticWebsite struct {
|
||||
// Enabled - Indicates whether this account is hosting a static website
|
||||
Enabled bool `xml:"Enabled"`
|
||||
// IndexDocument - The default name of the index page under each directory
|
||||
IndexDocument *string `xml:"IndexDocument"`
|
||||
// ErrorDocument404Path - The absolute path of the custom 404 page
|
||||
ErrorDocument404Path *string `xml:"ErrorDocument404Path"`
|
||||
}
|
||||
|
||||
// StorageServiceProperties - Storage Service Properties.
|
||||
type StorageServiceProperties struct {
|
||||
rawResponse *http.Response
|
||||
// Logging - Azure Analytics Logging settings
|
||||
Logging *Logging `xml:"Logging"`
|
||||
// HourMetrics - A summary of request statistics grouped by API in hourly aggregates for blobs
|
||||
HourMetrics *Metrics `xml:"HourMetrics"`
|
||||
// MinuteMetrics - a summary of request statistics grouped by API in minute aggregates for blobs
|
||||
rawResponse *http.Response
|
||||
Logging *Logging `xml:"Logging"`
|
||||
HourMetrics *Metrics `xml:"HourMetrics"`
|
||||
MinuteMetrics *Metrics `xml:"MinuteMetrics"`
|
||||
// Cors - The set of CORS rules.
|
||||
Cors []CorsRule `xml:"Cors>CorsRule"`
|
||||
// DefaultServiceVersion - The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible values include version 2008-10-27 and all more recent versions
|
||||
DefaultServiceVersion *string `xml:"DefaultServiceVersion"`
|
||||
// DeleteRetentionPolicy - The Delete Retention Policy for the service
|
||||
DefaultServiceVersion *string `xml:"DefaultServiceVersion"`
|
||||
DeleteRetentionPolicy *RetentionPolicy `xml:"DeleteRetentionPolicy"`
|
||||
StaticWebsite *StaticWebsite `xml:"StaticWebsite"`
|
||||
}
|
||||
|
||||
// Response returns the raw HTTP response object.
|
||||
@ -4358,8 +4630,7 @@ func (ssp StorageServiceProperties) Version() string {
|
||||
|
||||
// StorageServiceStats - Stats for the storage service.
|
||||
type StorageServiceStats struct {
|
||||
rawResponse *http.Response
|
||||
// GeoReplication - Geo-Replication information for the Secondary Storage Service
|
||||
rawResponse *http.Response
|
||||
GeoReplication *GeoReplication `xml:"GeoReplication"`
|
||||
}
|
||||
|
||||
@ -4445,6 +4716,26 @@ func (t *timeRFC3339) UnmarshalText(data []byte) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// internal type used for marshalling base64 encoded strings
|
||||
type base64Encoded struct {
|
||||
b []byte
|
||||
}
|
||||
|
||||
// MarshalText implements the encoding.TextMarshaler interface for base64Encoded.
|
||||
func (c base64Encoded) MarshalText() ([]byte, error) {
|
||||
return []byte(base64.StdEncoding.EncodeToString(c.b)), nil
|
||||
}
|
||||
|
||||
// UnmarshalText implements the encoding.TextUnmarshaler interface for base64Encoded.
|
||||
func (c *base64Encoded) UnmarshalText(data []byte) error {
|
||||
b, err := base64.StdEncoding.DecodeString(string(data))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.b = b
|
||||
return nil
|
||||
}
|
||||
|
||||
// internal type used for marshalling
|
||||
type accessPolicy struct {
|
||||
Start timeRFC3339 `xml:"Start"`
|
||||
@ -4454,13 +4745,16 @@ type accessPolicy struct {
|
||||
|
||||
// internal type used for marshalling
|
||||
type blobProperties struct {
|
||||
// XMLName is used for marshalling and is subject to removal in a future release.
|
||||
XMLName xml.Name `xml:"Properties"`
|
||||
CreationTime *timeRFC1123 `xml:"Creation-Time"`
|
||||
LastModified timeRFC1123 `xml:"Last-Modified"`
|
||||
Etag ETag `xml:"Etag"`
|
||||
ContentLength *int64 `xml:"Content-Length"`
|
||||
ContentType *string `xml:"Content-Type"`
|
||||
ContentEncoding *string `xml:"Content-Encoding"`
|
||||
ContentLanguage *string `xml:"Content-Language"`
|
||||
ContentMD5 []byte `xml:"Content-MD5"`
|
||||
ContentMD5 base64Encoded `xml:"Content-MD5"`
|
||||
ContentDisposition *string `xml:"Content-Disposition"`
|
||||
CacheControl *string `xml:"Cache-Control"`
|
||||
BlobSequenceNumber *int64 `xml:"x-ms-blob-sequence-number"`
|
||||
@ -4482,16 +4776,19 @@ type blobProperties struct {
|
||||
AccessTier AccessTierType `xml:"AccessTier"`
|
||||
AccessTierInferred *bool `xml:"AccessTierInferred"`
|
||||
ArchiveStatus ArchiveStatusType `xml:"ArchiveStatus"`
|
||||
AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"`
|
||||
}
|
||||
|
||||
// internal type used for marshalling
|
||||
type containerProperties struct {
|
||||
LastModified timeRFC1123 `xml:"Last-Modified"`
|
||||
Etag ETag `xml:"Etag"`
|
||||
LeaseStatus LeaseStatusType `xml:"LeaseStatus"`
|
||||
LeaseState LeaseStateType `xml:"LeaseState"`
|
||||
LeaseDuration LeaseDurationType `xml:"LeaseDuration"`
|
||||
PublicAccess PublicAccessType `xml:"PublicAccess"`
|
||||
LastModified timeRFC1123 `xml:"Last-Modified"`
|
||||
Etag ETag `xml:"Etag"`
|
||||
LeaseStatus LeaseStatusType `xml:"LeaseStatus"`
|
||||
LeaseState LeaseStateType `xml:"LeaseState"`
|
||||
LeaseDuration LeaseDurationType `xml:"LeaseDuration"`
|
||||
PublicAccess PublicAccessType `xml:"PublicAccess"`
|
||||
HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"`
|
||||
HasLegalHold *bool `xml:"HasLegalHold"`
|
||||
}
|
||||
|
||||
// internal type used for marshalling
|
||||
|
@ -144,10 +144,7 @@ func (client pageBlobClient) CopyIncremental(ctx context.Context, copySource str
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
|
||||
{targetValue: metadata,
|
||||
constraints: []constraint{{target: "metadata", name: null, rule: false,
|
||||
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.copyIncrementalPreparer(copySource, timeout, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
||||
@ -211,8 +208,9 @@ func (client pageBlobClient) copyIncrementalResponder(resp pipeline.Response) (p
|
||||
|
||||
// Create the Create operation creates a new page blob.
|
||||
//
|
||||
// contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
|
||||
// information, see <a
|
||||
// contentLength is the length of the request. blobContentLength is this header specifies the maximum size for the page
|
||||
// blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary. timeout is the timeout parameter is
|
||||
// expressed in seconds. For more information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> blobContentType is optional. Sets the blob's content type. If specified,
|
||||
// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the
|
||||
@ -231,23 +229,18 @@ func (client pageBlobClient) copyIncrementalResponder(resp pipeline.Response) (p
|
||||
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
|
||||
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
|
||||
// since the specified date/time. ifMatches is specify an ETag value to operate only on blobs with a matching value.
|
||||
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. blobContentLength is this
|
||||
// header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned to a 512-byte
|
||||
// boundary. blobSequenceNumber is set for page blobs only. The sequence number is a user-controlled value that you can
|
||||
// use to track requests. The value of the sequence number must be between 0 and 2^63 - 1. requestID is provides a
|
||||
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
|
||||
// analytics logging is enabled.
|
||||
func (client pageBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, blobContentLength *int64, blobSequenceNumber *int64, requestID *string) (*PageBlobCreateResponse, error) {
|
||||
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. blobSequenceNumber is set
|
||||
// for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of
|
||||
// the sequence number must be between 0 and 2^63 - 1. requestID is provides a client-generated, opaque value with a 1
|
||||
// KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client pageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (*PageBlobCreateResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
|
||||
{targetValue: metadata,
|
||||
constraints: []constraint{{target: "metadata", name: null, rule: false,
|
||||
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, blobContentLength, blobSequenceNumber, requestID)
|
||||
req, err := client.createPreparer(contentLength, blobContentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, blobSequenceNumber, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -259,7 +252,7 @@ func (client pageBlobClient) Create(ctx context.Context, contentLength int64, ti
|
||||
}
|
||||
|
||||
// createPreparer prepares the Create request.
|
||||
func (client pageBlobClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, blobContentLength *int64, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) {
|
||||
func (client pageBlobClient) createPreparer(contentLength int64, blobContentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
@ -308,9 +301,7 @@ func (client pageBlobClient) createPreparer(contentLength int64, timeout *int32,
|
||||
if ifNoneMatch != nil {
|
||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||
}
|
||||
if blobContentLength != nil {
|
||||
req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(*blobContentLength, 10))
|
||||
}
|
||||
req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10))
|
||||
if blobSequenceNumber != nil {
|
||||
req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10))
|
||||
}
|
||||
@ -700,7 +691,8 @@ func (client pageBlobClient) updateSequenceNumberResponder(resp pipeline.Respons
|
||||
// UploadPages the Upload Pages operation writes a range of pages to a page blob
|
||||
//
|
||||
// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an
|
||||
// error.contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
|
||||
// error.contentLength is the length of the request. transactionalContentMD5 is specify the transactional md5 for the
|
||||
// body, to be validated by the service. timeout is the timeout parameter is expressed in seconds. For more
|
||||
// information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
|
||||
@ -714,7 +706,7 @@ func (client pageBlobClient) updateSequenceNumberResponder(resp pipeline.Respons
|
||||
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
|
||||
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
|
||||
// recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesResponse, error) {
|
||||
func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: body,
|
||||
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
|
||||
@ -723,7 +715,7 @@ func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.uploadPagesPreparer(body, contentLength, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
||||
req, err := client.uploadPagesPreparer(body, contentLength, transactionalContentMD5, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -735,7 +727,7 @@ func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker
|
||||
}
|
||||
|
||||
// uploadPagesPreparer prepares the UploadPages request.
|
||||
func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, body)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
@ -747,6 +739,9 @@ func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLeng
|
||||
params.Set("comp", "page")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||
if transactionalContentMD5 != nil {
|
||||
req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
|
||||
}
|
||||
if rangeParameter != nil {
|
||||
req.Header.Set("x-ms-range", *rangeParameter)
|
||||
}
|
||||
|
44
vendor/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob/zz_generated_service.go
generated
vendored
@ -25,6 +25,44 @@ func newServiceClient(url url.URL, p pipeline.Pipeline) serviceClient {
|
||||
return serviceClient{newManagementClient(url, p)}
|
||||
}
|
||||
|
||||
// GetAccountInfo returns the sku name and account kind
|
||||
func (client serviceClient) GetAccountInfo(ctx context.Context) (*ServiceGetAccountInfoResponse, error) {
|
||||
req, err := client.getAccountInfoPreparer()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*ServiceGetAccountInfoResponse), err
|
||||
}
|
||||
|
||||
// getAccountInfoPreparer prepares the GetAccountInfo request.
|
||||
func (client serviceClient) getAccountInfoPreparer() (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("GET", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
params.Set("restype", "account")
|
||||
params.Set("comp", "properties")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// getAccountInfoResponder handles the response to the GetAccountInfo request.
|
||||
func (client serviceClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||
err := validateResponse(resp, http.StatusOK)
|
||||
if resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
io.Copy(ioutil.Discard, resp.Response().Body)
|
||||
resp.Response().Body.Close()
|
||||
return &ServiceGetAccountInfoResponse{rawResponse: resp.Response()}, err
|
||||
}
|
||||
|
||||
// GetProperties gets the properties of a storage account's Blob service, including properties for Storage Analytics
|
||||
// and CORS (Cross-Origin Resource Sharing) rules.
|
||||
//
|
||||
@ -183,7 +221,7 @@ func (client serviceClient) getStatisticsResponder(resp pipeline.Response) (pipe
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
|
||||
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (*ListContainersResponse, error) {
|
||||
func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (*ListContainersSegmentResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: maxresults,
|
||||
constraints: []constraint{{target: "maxresults", name: null, rule: false,
|
||||
@ -201,7 +239,7 @@ func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *s
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*ListContainersResponse), err
|
||||
return resp.(*ListContainersSegmentResponse), err
|
||||
}
|
||||
|
||||
// listContainersSegmentPreparer prepares the ListContainersSegment request.
|
||||
@ -241,7 +279,7 @@ func (client serviceClient) listContainersSegmentResponder(resp pipeline.Respons
|
||||
if resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
result := &ListContainersResponse{rawResponse: resp.Response()}
|
||||
result := &ListContainersSegmentResponse{rawResponse: resp.Response()}
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
@ -65,7 +65,7 @@ func (r *DownloadResponse) Body(o RetryReaderOptions) io.ReadCloser {
|
||||
func(ctx context.Context, getInfo HTTPGetterInfo) (*http.Response, error) {
|
||||
resp, err := r.b.Download(ctx, getInfo.Offset, getInfo.Count,
|
||||
BlobAccessConditions{
|
||||
HTTPAccessConditions: HTTPAccessConditions{IfMatch: getInfo.ETag},
|
||||
ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: getInfo.ETag},
|
||||
},
|
||||
false)
|
||||
if err != nil {
|
||||
|
2
vendor/modules.txt
vendored
@ -6,7 +6,7 @@ bazil.org/fuse/fuseutil
|
||||
cloud.google.com/go/compute/metadata
|
||||
# github.com/Azure/azure-pipeline-go v0.0.0-20180607212504-7571e8eb0876
|
||||
github.com/Azure/azure-pipeline-go/pipeline
|
||||
# github.com/Azure/azure-storage-blob-go v0.0.0-20180712005634-eaae161d9d5e
|
||||
# github.com/Azure/azure-storage-blob-go v0.0.0-20180906215025-bb46532f68b7
|
||||
github.com/Azure/azure-storage-blob-go/2018-03-28/azblob
|
||||
# github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78
|
||||
github.com/Azure/go-ansiterm
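
The vendored update above renames the container-listing segment type (ListContainersResponse becomes ListContainersSegmentResponse) and adds the GetAccountInfo surface. The sketch below is illustrative only and is not part of this commit: it shows where the renamed type appears in a typical pagination loop against the re-vendored package. The account URL is a placeholder, and an anonymous credential is used purely to keep the sketch self-contained; a real caller would use a shared-key or token credential.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
)

func main() {
	// Placeholder account endpoint; substitute a real storage account.
	u, err := url.Parse("https://myaccount.blob.core.windows.net")
	if err != nil {
		log.Fatal(err)
	}
	// Anonymous credential keeps the sketch self-contained.
	p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})
	serviceURL := azblob.NewServiceURL(*u, p)

	ctx := context.Background()
	// After this update the segment type is ListContainersSegmentResponse
	// (previously ListContainersResponse); the pagination loop is unchanged.
	for marker := (azblob.Marker{}); marker.NotDone(); {
		segment, err := serviceURL.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{})
		if err != nil {
			log.Fatal(err)
		}
		for _, container := range segment.ContainerItems {
			fmt.Println(container.Name)
		}
		marker = segment.NextMarker
	}
}
```

Nothing here changes rclone itself; the sketch only exists to make the renamed response type and the pagination contract of the updated SDK concrete.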
|
||||
|