b2: fix multipart upload retries #1733

Prior to this fix we were uploading zero-length bodies if a retry was
needed on a multipart upload chunk, which produced the error
`http: ContentLength=268435496 with Body length 0`.

Fix by remaking the hash appending reader inside the Call loop: the
reader is drained by the first attempt, so any retry that reused it
sent an empty body. Rebuilding it per attempt is inefficient in the
face of retries, but these are uncommon.
Nick Craig-Wood 2017-12-13 10:11:20 +00:00
parent 2cf808c825
commit 0914ec316c
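
The failure mode is easy to reproduce outside rclone: net/http drains the
request body when it sends a request, so a retry that reuses the same
bytes.Reader sends zero bytes while ContentLength still advertises the full
chunk size. A minimal sketch of the broken and fixed shapes, where doAttempt
is a stand-in for a single upload attempt (the real code wraps this in
up.f.pacer.Call):

package main

import (
	"bytes"
	"fmt"
	"io"
)

// doAttempt stands in for one HTTP upload attempt: net/http drains
// the request body when it sends the request.
func doAttempt(body io.Reader) int64 {
	n, _ := io.Copy(io.Discard, body)
	return n
}

func main() {
	payload := []byte("chunk data")

	// Broken shape: the reader is built once, outside the retry loop.
	in := bytes.NewReader(payload)
	fmt.Println(doAttempt(in)) // 10 -- first attempt sends the whole chunk
	fmt.Println(doAttempt(in)) // 0  -- the retry sends an empty body

	// Fixed shape: build a fresh reader on every attempt, which is what
	// moving newHashAppendingReader inside the Call closure achieves.
	for attempt := 0; attempt < 2; attempt++ {
		in := bytes.NewReader(payload)
		fmt.Println(doAttempt(in)) // 10 on every attempt
	}
}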

@@ -182,9 +182,6 @@ func (up *largeUpload) clearUploadURL() {
 }
 
 // Transfer a chunk
 func (up *largeUpload) transferChunk(part int64, body []byte) error {
-	in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
-	size := int64(len(body)) + int64(in.AdditionalLength())
-
 	err := up.f.pacer.Call(func() (bool, error) {
 		fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
@@ -194,6 +191,9 @@ func (up *largeUpload) transferChunk(part int64, body []byte) error {
 			return false, err
 		}
 
+		in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
+		size := int64(len(body)) + int64(in.AdditionalLength())
+
 		// Authorization
 		//
 		// An upload authorization token, from b2_get_upload_part_url.
@@ -238,6 +238,7 @@ func (up *largeUpload) transferChunk(part int64, body []byte) error {
 			upload = nil
 		}
 		up.returnUploadURL(upload)
+		up.sha1s[part-1] = in.HexSum()
 		return retry, err
 	})
 	if err != nil {
@@ -245,7 +246,6 @@ func (up *largeUpload) transferChunk(part int64, body []byte) error {
 	} else {
 		fs.Debugf(up.o, "Done sending chunk %d", part)
 	}
-	up.sha1s[part-1] = in.HexSum()
 	return err
 }
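
newHashAppendingReader, AdditionalLength and HexSum are rclone internals
built around B2's hex_digits_at_end convention: a part's SHA-1 can be
streamed as 40 hex characters after the body instead of being computed up
front, which is why size is the body length plus the digest length, and why
the digest is only available once the reader has been fully consumed. A
minimal sketch of a reader with that shape, as an illustration rather than
rclone's actual implementation:

package main

import (
	"bytes"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"hash"
	"io"
	"strings"
)

// hashAppendingReader streams the wrapped reader through a hash, then
// yields the hex digest as trailing bytes once the body is exhausted.
type hashAppendingReader struct {
	h      hash.Hash
	in     io.Reader
	tail   io.Reader // set to the hex digest after in returns EOF
	hexLen int
}

func newHashAppendingReader(in io.Reader, h hash.Hash) *hashAppendingReader {
	return &hashAppendingReader{
		h:      h,
		in:     io.TeeReader(in, h),
		hexLen: hex.EncodedLen(h.Size()),
	}
}

// AdditionalLength reports how many extra bytes the trailing digest adds.
func (r *hashAppendingReader) AdditionalLength() int { return r.hexLen }

// HexSum returns the hex digest of everything read so far.
func (r *hashAppendingReader) HexSum() string {
	return hex.EncodeToString(r.h.Sum(nil))
}

func (r *hashAppendingReader) Read(p []byte) (int, error) {
	if r.tail == nil {
		n, err := r.in.Read(p)
		if err != io.EOF {
			return n, err
		}
		// Body finished: switch over to emitting the digest.
		r.tail = strings.NewReader(r.HexSum())
		if n > 0 {
			return n, nil
		}
	}
	return r.tail.Read(p)
}

func main() {
	body := []byte("chunk data")
	in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
	size := int64(len(body)) + int64(in.AdditionalLength())

	out, _ := io.ReadAll(in)
	fmt.Println(size == int64(len(out)))                 // true: 10 body bytes + 40 hex
	fmt.Println(in.HexSum() == string(out[len(body):])) // true: digest trails the body
}

Moving the up.sha1s[part-1] assignment inside the closure follows from the
same change: each attempt now has its own reader, so the digest has to be
recorded by the attempt that actually produced it.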