2015-11-27 19:25:52 +01:00
// Package b2 provides an interface to the Backblaze B2 object storage system
package b2
2016-01-18 18:53:03 +01:00
// FIXME should we remove sha1 checks from here as rclone now supports
// checking SHA1s?
2015-11-27 19:25:52 +01:00
import (
2017-09-16 22:43:48 +02:00
"bufio"
2015-11-27 19:25:52 +01:00
"bytes"
"crypto/sha1"
"fmt"
2018-01-12 17:30:54 +01:00
gohash "hash"
2015-11-27 19:25:52 +01:00
"io"
"net/http"
"path"
"regexp"
"strconv"
"strings"
"sync"
"time"
2018-01-11 17:05:41 +01:00
"github.com/ncw/rclone/backend/b2/api"
2015-11-27 19:25:52 +01:00
"github.com/ncw/rclone/fs"
2018-01-12 17:30:54 +01:00
"github.com/ncw/rclone/fs/accounting"
2018-05-14 19:06:57 +02:00
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
2018-01-12 17:30:54 +01:00
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
2018-01-11 17:29:20 +01:00
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
2016-06-12 16:06:02 +02:00
"github.com/pkg/errors"
2015-11-27 19:25:52 +01:00
)
const (
2018-05-14 19:06:57 +02:00
defaultEndpoint = "https://api.backblazeb2.com"
headerPrefix = "x-bz-info-" // lower case as that is what the server returns
timeKey = "src_last_modified_millis"
timeHeader = headerPrefix + timeKey
sha1Key = "large_file_sha1"
sha1Header = "X-Bz-Content-Sha1"
sha1InfoHeader = headerPrefix + sha1Key
testModeHeader = "X-Bz-Test-Mode"
retryAfterHeader = "Retry-After"
minSleep = 10 * time . Millisecond
maxSleep = 5 * time . Minute
decayConstant = 1 // bigger for slower decay, exponential
maxParts = 10000
maxVersions = 100 // maximum number of versions we search in --b2-versions mode
2018-09-07 13:02:27 +02:00
minChunkSize = 5 * fs . MebiByte
defaultChunkSize = 96 * fs . MebiByte
defaultUploadCutoff = 200 * fs . MebiByte
2016-06-15 19:49:11 +02:00
)
// Globals
var (
2016-07-05 12:26:02 +02:00
errNotWithVersions = errors . New ( "can't modify or delete files in --b2-versions mode" )
2015-11-27 19:25:52 +01:00
)
// Register with Fs
func init ( ) {
2016-02-18 12:35:25 +01:00
fs . Register ( & fs . RegInfo {
2016-02-15 19:11:53 +01:00
Name : "b2" ,
Description : "Backblaze B2" ,
NewFs : NewFs ,
2015-11-27 19:25:52 +01:00
Options : [ ] fs . Option { {
2018-05-14 19:06:57 +02:00
Name : "account" ,
2018-08-01 15:33:01 +02:00
Help : "Account ID or Application Key ID" ,
2018-05-14 19:06:57 +02:00
Required : true ,
2015-11-27 19:25:52 +01:00
} , {
2018-05-14 19:06:57 +02:00
Name : "key" ,
Help : "Application Key" ,
Required : true ,
2015-11-27 19:25:52 +01:00
} , {
2018-05-14 19:06:57 +02:00
Name : "endpoint" ,
Help : "Endpoint for the service.\nLeave blank normally." ,
Advanced : true ,
} , {
2018-10-01 19:36:15 +02:00
Name : "test_mode" ,
Help : ` A flag string for X - Bz - Test - Mode header for debugging .
This is for debugging purposes only . Setting it to one of the strings
below will cause b2 to return specific errors :
* "fail_some_uploads"
* "expire_some_account_authorization_tokens"
* "force_cap_exceeded"
These will be set in the "X-Bz-Test-Mode" header which is documented
in the [ b2 integrations checklist ] ( https : //www.backblaze.com/b2/docs/integration_checklist.html).`,
2018-05-14 19:06:57 +02:00
Default : "" ,
Hide : fs . OptionHideConfigurator ,
Advanced : true ,
} , {
Name : "versions" ,
2018-10-01 19:36:15 +02:00
Help : "Include old versions in directory listings.\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them." ,
2018-05-14 19:06:57 +02:00
Default : false ,
Advanced : true ,
} , {
Name : "hard_delete" ,
Help : "Permanently delete files on remote removal, otherwise hide files." ,
Default : false ,
} , {
2018-10-01 19:36:15 +02:00
Name : "upload_cutoff" ,
Help : ` Cutoff for switching to chunked upload .
Files above this size will be uploaded in chunks of "--b2-chunk-size" .
This value should be set no larger than 4.657 GiB ( == 5 GB ) . ` ,
2019-01-11 18:17:46 +01:00
Default : defaultUploadCutoff ,
2018-05-14 19:06:57 +02:00
Advanced : true ,
} , {
2018-10-01 19:36:15 +02:00
Name : "chunk_size" ,
Help : ` Upload chunk size . Must fit in memory .
When uploading large files , chunk the file into this size . Note that
these chunks are buffered in memory and there might a maximum of
"--transfers" chunks in progress at once . 5 , 000 , 000 Bytes is the
2019-02-07 18:41:17 +01:00
minimum size . ` ,
2019-01-11 18:17:46 +01:00
Default : defaultChunkSize ,
2018-05-14 19:06:57 +02:00
Advanced : true ,
2019-01-20 16:33:42 +01:00
} , {
Name : "disable_checksum" ,
Help : ` Disable checksums for large (> upload cutoff) files ` ,
Default : false ,
Advanced : true ,
2019-02-09 22:56:24 +01:00
} , {
Name : "download_url" ,
Help : ` Custom endpoint for downloads .
This is usually set to a Cloudflare CDN URL as Backblaze offers
free egress for data downloaded through the Cloudflare network .
Leave blank if you want to use the endpoint provided by Backblaze . ` ,
Advanced : true ,
2018-05-14 19:06:57 +02:00
} } ,
2015-11-27 19:25:52 +01:00
} )
2018-05-14 19:06:57 +02:00
}
// Options defines the configuration for this backend
type Options struct {
2019-01-20 16:33:42 +01:00
Account string ` config:"account" `
Key string ` config:"key" `
Endpoint string ` config:"endpoint" `
TestMode string ` config:"test_mode" `
Versions bool ` config:"versions" `
HardDelete bool ` config:"hard_delete" `
UploadCutoff fs . SizeSuffix ` config:"upload_cutoff" `
ChunkSize fs . SizeSuffix ` config:"chunk_size" `
DisableCheckSum bool ` config:"disable_checksum" `
2019-02-09 22:56:24 +01:00
DownloadURL string ` config:"download_url" `
2015-11-27 19:25:52 +01:00
}
// Fs represents a remote b2 server
type Fs struct {
name string // name of this remote
2017-01-13 18:21:47 +01:00
root string // the path we are working on if any
2018-05-14 19:06:57 +02:00
opt Options // parsed config options
2017-01-13 18:21:47 +01:00
features * fs . Features // optional features
2015-11-27 19:25:52 +01:00
srv * rest . Client // the connection to the b2 server
bucket string // the bucket we are working on
2017-06-07 15:16:50 +02:00
bucketOKMu sync . Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
2015-11-27 19:25:52 +01:00
bucketIDMutex sync . Mutex // mutex to protect _bucketID
_bucketID string // the ID of the bucket we are working on
info api . AuthorizeAccountResponse // result of authorize call
uploadMu sync . Mutex // lock for upload variable
2016-02-27 14:00:35 +01:00
uploads [ ] * api . GetUploadURLResponse // result of get upload URL calls
2016-02-23 22:19:33 +01:00
authMu sync . Mutex // lock for authorizing the account
2016-02-23 23:15:20 +01:00
pacer * pacer . Pacer // To pace and retry the API calls
2017-01-29 23:21:39 +01:00
bufferTokens chan [ ] byte // control concurrency of multipart uploads
2015-11-27 19:25:52 +01:00
}
// Object describes a b2 object
type Object struct {
2016-09-21 23:13:24 +02:00
fs * Fs // what this object is part of
remote string // The remote path
id string // b2 id of the file
modTime time . Time // The modified time of the object if known
sha1 string // SHA-1 hash if known
size int64 // Size of the object
mimeType string // Content-Type of the object
2015-11-27 19:25:52 +01:00
}
// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	// At the top of a bucket the root is just the bucket name
	if f.root == "" {
		return f.bucket
	}
	return f.bucket + "/" + f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
	if f.root == "" {
		return fmt.Sprintf("B2 bucket %s", f.bucket)
	}
	return fmt.Sprintf("B2 bucket %s path %s", f.bucket, f.root)
}
2017-01-13 18:21:47 +01:00
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}
2015-11-27 19:25:52 +01:00
// Pattern to match a b2 path
2018-08-27 00:19:28 +02:00
var matcher = regexp . MustCompile ( ` ^/*([^/]*)(.*)$ ` )
2015-11-27 19:25:52 +01:00
// parseParse parses a b2 'url'
func parsePath ( path string ) ( bucket , directory string , err error ) {
parts := matcher . FindStringSubmatch ( path )
if parts == nil {
2016-06-12 16:06:02 +02:00
err = errors . Errorf ( "couldn't find bucket in b2 path %q" , path )
2015-11-27 19:25:52 +01:00
} else {
bucket , directory = parts [ 1 ] , parts [ 2 ]
directory = strings . Trim ( directory , "/" )
}
return
}
2016-02-23 23:15:20 +01:00
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	401, // Unauthorized (eg "Token has expired")
	408, // Request Timeout
	429, // Rate exceeded.
	500, // Get occasional 500 Internal Server Error
	503, // Service Unavailable
	504, // Gateway Time-out
}
// shouldRetryNoAuth returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func ( f * Fs ) shouldRetryNoReauth ( resp * http . Response , err error ) ( bool , error ) {
2016-07-01 17:23:23 +02:00
// For 429 or 503 errors look at the Retry-After: header and
// set the retry appropriately, starting with a minimum of 1
// second if it isn't set.
if resp != nil && ( resp . StatusCode == 429 || resp . StatusCode == 503 ) {
var retryAfter = 1
retryAfterString := resp . Header . Get ( retryAfterHeader )
if retryAfterString != "" {
var err error
retryAfter , err = strconv . Atoi ( retryAfterString )
if err != nil {
2017-02-09 12:01:20 +01:00
fs . Errorf ( f , "Malformed %s header %q: %v" , retryAfterHeader , retryAfterString , err )
2016-07-01 17:23:23 +02:00
}
}
retryAfterDuration := time . Duration ( retryAfter ) * time . Second
if f . pacer . GetSleep ( ) < retryAfterDuration {
2017-02-09 12:01:20 +01:00
fs . Debugf ( f , "Setting sleep to %v after error: %v" , retryAfterDuration , err )
2016-07-01 17:23:23 +02:00
// We set 1/2 the value here because the pacer will double it immediately
f . pacer . SetSleep ( retryAfterDuration / 2 )
}
return true , err
}
2018-01-12 17:30:54 +01:00
return fserrors . ShouldRetry ( err ) || fserrors . ShouldRetryHTTP ( resp , retryErrorCodes ) , err
2016-02-23 23:15:20 +01:00
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func ( f * Fs ) shouldRetry ( resp * http . Response , err error ) ( bool , error ) {
2016-07-01 12:47:42 +02:00
if resp != nil && resp . StatusCode == 401 {
2017-02-09 12:01:20 +01:00
fs . Debugf ( f , "Unauthorized: %v" , err )
2016-02-23 23:15:20 +01:00
// Reauth
authErr := f . authorizeAccount ( )
if authErr != nil {
err = authErr
}
return true , err
}
return f . shouldRetryNoReauth ( resp , err )
}
2015-11-27 19:25:52 +01:00
// errorHandler parses a non 2xx error response into an error
func errorHandler ( resp * http . Response ) error {
// Decode error response
errResponse := new ( api . Error )
err := rest . DecodeJSON ( resp , & errResponse )
if err != nil {
2017-02-09 12:01:20 +01:00
fs . Debugf ( nil , "Couldn't decode error response: %v" , err )
2015-11-27 19:25:52 +01:00
}
if errResponse . Code == "" {
errResponse . Code = "unknown"
}
if errResponse . Status == 0 {
errResponse . Status = resp . StatusCode
}
if errResponse . Message == "" {
errResponse . Message = "Unknown " + resp . Status
}
return errResponse
}
2018-09-07 13:02:27 +02:00
// checkUploadChunkSize checks that cs is at least the minimum B2 chunk size
func checkUploadChunkSize(cs fs.SizeSuffix) error {
	if cs >= minChunkSize {
		return nil
	}
	return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
func ( f * Fs ) setUploadChunkSize ( cs fs . SizeSuffix ) ( old fs . SizeSuffix , err error ) {
err = checkUploadChunkSize ( cs )
if err == nil {
old , f . opt . ChunkSize = f . opt . ChunkSize , cs
2018-10-13 23:45:17 +02:00
f . fillBufferTokens ( ) // reset the buffer tokens
}
return
}
// checkUploadCutoff checks that cs is no smaller than the configured chunk size
func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error {
	if cs >= opt.ChunkSize {
		return nil
	}
	return errors.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
}
func ( f * Fs ) setUploadCutoff ( cs fs . SizeSuffix ) ( old fs . SizeSuffix , err error ) {
err = checkUploadCutoff ( & f . opt , cs )
if err == nil {
old , f . opt . UploadCutoff = f . opt . UploadCutoff , cs
2018-09-07 13:02:27 +02:00
}
return
}
2019-02-07 18:41:17 +01:00
// NewFs constructs an Fs from the path, bucket:path
2018-05-14 19:06:57 +02:00
func NewFs ( name , root string , m configmap . Mapper ) ( fs . Fs , error ) {
// Parse config into Options struct
opt := new ( Options )
err := configstruct . Set ( m , opt )
if err != nil {
return nil , err
}
2018-10-13 23:45:17 +02:00
err = checkUploadCutoff ( opt , opt . UploadCutoff )
if err != nil {
return nil , errors . Wrap ( err , "b2: upload cutoff" )
2016-06-15 19:49:11 +02:00
}
2018-09-07 13:02:27 +02:00
err = checkUploadChunkSize ( opt . ChunkSize )
if err != nil {
return nil , errors . Wrap ( err , "b2: chunk size" )
2016-06-15 19:49:11 +02:00
}
2015-11-27 19:25:52 +01:00
bucket , directory , err := parsePath ( root )
if err != nil {
return nil , err
}
2018-05-14 19:06:57 +02:00
if opt . Account == "" {
2015-11-27 19:25:52 +01:00
return nil , errors . New ( "account not found" )
}
2018-05-14 19:06:57 +02:00
if opt . Key == "" {
2015-11-27 19:25:52 +01:00
return nil , errors . New ( "key not found" )
}
2018-05-14 19:06:57 +02:00
if opt . Endpoint == "" {
opt . Endpoint = defaultEndpoint
}
2016-02-23 22:19:33 +01:00
f := & Fs {
2018-10-13 23:45:17 +02:00
name : name ,
opt : * opt ,
bucket : bucket ,
root : directory ,
srv : rest . NewClient ( fshttp . NewClient ( fs . Config ) ) . SetErrorHandler ( errorHandler ) ,
pacer : pacer . New ( ) . SetMinSleep ( minSleep ) . SetMaxSleep ( maxSleep ) . SetDecayConstant ( decayConstant ) ,
2016-07-01 11:04:52 +02:00
}
2017-08-09 16:27:43 +02:00
f . features = ( & fs . Features {
ReadMimeType : true ,
WriteMimeType : true ,
BucketBased : true ,
} ) . Fill ( f )
2016-07-01 12:30:09 +02:00
// Set the test flag if required
2018-05-14 19:06:57 +02:00
if opt . TestMode != "" {
testMode := strings . TrimSpace ( opt . TestMode )
2016-07-01 12:30:09 +02:00
f . srv . SetHeader ( testModeHeader , testMode )
2017-02-09 12:01:20 +01:00
fs . Debugf ( f , "Setting test header \"%s: %s\"" , testModeHeader , testMode )
2016-07-01 12:30:09 +02:00
}
2018-10-13 23:45:17 +02:00
f . fillBufferTokens ( )
2016-02-23 23:15:20 +01:00
err = f . authorizeAccount ( )
if err != nil {
2016-06-12 16:06:02 +02:00
return nil , errors . Wrap ( err , "failed to authorize account" )
2015-11-27 19:25:52 +01:00
}
2018-08-18 20:05:32 +02:00
// If this is a key limited to a single bucket, it must exist already
if f . bucket != "" && f . info . Allowed . BucketID != "" {
2018-12-14 11:10:13 +01:00
allowedBucket := f . info . Allowed . BucketName
if allowedBucket == "" {
return nil , errors . New ( "bucket that application key is restricted to no longer exists" )
}
if allowedBucket != f . bucket {
return nil , errors . Errorf ( "you must use bucket %q with this application key" , allowedBucket )
}
2018-08-18 20:05:32 +02:00
f . markBucketOK ( )
f . setBucketID ( f . info . Allowed . BucketID )
}
2015-11-27 19:25:52 +01:00
if f . root != "" {
f . root += "/"
// Check to see if the (bucket,directory) is actually an existing file
oldRoot := f . root
remote := path . Base ( directory )
f . root = path . Dir ( directory )
if f . root == "." {
f . root = ""
} else {
f . root += "/"
}
2016-06-25 22:23:20 +02:00
_ , err := f . NewObject ( remote )
if err != nil {
if err == fs . ErrorObjectNotFound {
// File doesn't exist so return old f
f . root = oldRoot
return f , nil
}
return nil , err
2015-11-27 19:25:52 +01:00
}
2016-06-25 22:23:20 +02:00
// return an error with an fs which points to the parent
return f , fs . ErrorIsFile
2015-11-27 19:25:52 +01:00
}
return f , nil
}
2016-02-23 22:19:33 +01:00
// authorizeAccount gets the API endpoint and auth token. Can be used
// for reauthentication too.
func ( f * Fs ) authorizeAccount ( ) error {
f . authMu . Lock ( )
defer f . authMu . Unlock ( )
opts := rest . Opts {
Method : "GET" ,
2017-07-07 09:18:13 +02:00
Path : "/b2api/v1/b2_authorize_account" ,
2018-05-14 19:06:57 +02:00
RootURL : f . opt . Endpoint ,
UserName : f . opt . Account ,
Password : f . opt . Key ,
2016-02-23 22:19:33 +01:00
ExtraHeaders : map [ string ] string { "Authorization" : "" } , // unset the Authorization for this request
}
2016-02-23 23:15:20 +01:00
err := f . pacer . Call ( func ( ) ( bool , error ) {
resp , err := f . srv . CallJSON ( & opts , nil , & f . info )
return f . shouldRetryNoReauth ( resp , err )
} )
2016-02-23 22:19:33 +01:00
if err != nil {
2016-06-12 16:06:02 +02:00
return errors . Wrap ( err , "failed to authenticate" )
2016-02-23 22:19:33 +01:00
}
f . srv . SetRoot ( f . info . APIURL + "/b2api/v1" ) . SetHeader ( "Authorization" , f . info . AuthorizationToken )
return nil
}
2016-02-27 14:00:35 +01:00
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
//
// This should be returned with returnUploadURL when finished
func ( f * Fs ) getUploadURL ( ) ( upload * api . GetUploadURLResponse , err error ) {
2015-11-27 19:25:52 +01:00
f . uploadMu . Lock ( )
defer f . uploadMu . Unlock ( )
bucketID , err := f . getBucketID ( )
if err != nil {
2016-02-27 14:00:35 +01:00
return nil , err
2015-11-27 19:25:52 +01:00
}
2016-02-27 14:00:35 +01:00
if len ( f . uploads ) == 0 {
2015-11-27 19:25:52 +01:00
opts := rest . Opts {
Method : "POST" ,
Path : "/b2_get_upload_url" ,
}
var request = api . GetUploadURLRequest {
BucketID : bucketID ,
}
2016-02-23 23:15:20 +01:00
err := f . pacer . Call ( func ( ) ( bool , error ) {
2016-02-27 14:00:35 +01:00
resp , err := f . srv . CallJSON ( & opts , & request , & upload )
2016-06-15 19:49:11 +02:00
return f . shouldRetry ( resp , err )
2016-02-23 23:15:20 +01:00
} )
2015-11-27 19:25:52 +01:00
if err != nil {
2016-06-12 16:06:02 +02:00
return nil , errors . Wrap ( err , "failed to get upload URL" )
2015-11-27 19:25:52 +01:00
}
2016-02-27 14:00:35 +01:00
} else {
upload , f . uploads = f . uploads [ 0 ] , f . uploads [ 1 : ]
2015-11-27 19:25:52 +01:00
}
2016-02-27 14:00:35 +01:00
return upload , nil
}
// returnUploadURL returns the UploadURL to the cache
func ( f * Fs ) returnUploadURL ( upload * api . GetUploadURLResponse ) {
2016-06-15 19:49:11 +02:00
if upload == nil {
return
}
2016-02-27 14:00:35 +01:00
f . uploadMu . Lock ( )
f . uploads = append ( f . uploads , upload )
f . uploadMu . Unlock ( )
2015-11-27 19:25:52 +01:00
}
// clearUploadURL clears the current UploadURL and the AuthorizationToken
func ( f * Fs ) clearUploadURL ( ) {
f . uploadMu . Lock ( )
2016-02-27 14:00:35 +01:00
f . uploads = nil
f . uploadMu . Unlock ( )
2015-11-27 19:25:52 +01:00
}
2018-10-13 23:45:17 +02:00
// fillBufferTokens fills up (or resets) the pool of buffer tokens,
// one token per configured transfer.  A nil token means "allocate a
// fresh buffer on first use".
func (f *Fs) fillBufferTokens() {
	n := fs.Config.Transfers
	f.bufferTokens = make(chan []byte, n)
	for i := 0; i < n; i++ {
		f.bufferTokens <- nil
	}
}
2017-01-29 23:21:39 +01:00
// getUploadBlock gets a block from the pool of size chunkSize
func ( f * Fs ) getUploadBlock ( ) [ ] byte {
buf := <- f . bufferTokens
if buf == nil {
2018-05-14 19:06:57 +02:00
buf = make ( [ ] byte , f . opt . ChunkSize )
2016-10-10 16:57:56 +02:00
}
2017-02-09 12:01:20 +01:00
// fs.Debugf(f, "Getting upload block %p", buf)
2017-01-29 23:21:39 +01:00
return buf
2016-10-10 16:57:56 +02:00
}
2017-01-29 23:21:39 +01:00
// putUploadBlock returns a block to the pool of size chunkSize
func ( f * Fs ) putUploadBlock ( buf [ ] byte ) {
buf = buf [ : cap ( buf ) ]
2018-05-14 19:06:57 +02:00
if len ( buf ) != int ( f . opt . ChunkSize ) {
2017-01-29 23:21:39 +01:00
panic ( "bad blocksize returned to pool" )
2016-10-10 16:57:56 +02:00
}
2017-02-09 12:01:20 +01:00
// fs.Debugf(f, "Returning upload block %p", buf)
2017-01-29 23:21:39 +01:00
f . bufferTokens <- buf
2016-10-10 16:57:56 +02:00
}
2016-06-25 22:58:34 +02:00
// Return an Object from a path
2015-11-27 19:25:52 +01:00
//
2016-06-25 22:23:20 +02:00
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func ( f * Fs ) newObjectWithInfo ( remote string , info * api . File ) ( fs . Object , error ) {
2015-11-27 19:25:52 +01:00
o := & Object {
fs : f ,
remote : remote ,
}
if info != nil {
2016-03-22 15:39:56 +01:00
err := o . decodeMetaData ( info )
if err != nil {
2016-06-25 22:23:20 +02:00
return nil , err
2016-03-22 15:39:56 +01:00
}
2015-11-27 19:25:52 +01:00
} else {
err := o . readMetaData ( ) // reads info and headers, returning an error
if err != nil {
2016-06-25 22:23:20 +02:00
return nil , err
2015-11-27 19:25:52 +01:00
}
}
2016-06-25 22:23:20 +02:00
return o , nil
2015-11-27 19:25:52 +01:00
}
2016-06-25 22:23:20 +02:00
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func ( f * Fs ) NewObject ( remote string ) ( fs . Object , error ) {
2016-06-25 22:58:34 +02:00
return f . newObjectWithInfo ( remote , nil )
2015-11-27 19:25:52 +01:00
}
// listFn is called from list to handle an object
2016-04-21 21:06:21 +02:00
type listFn func ( remote string , object * api . File , isDirectory bool ) error
2015-11-27 19:25:52 +01:00
2016-02-19 13:09:11 +01:00
// errEndList is a sentinel used to end the list iteration now.
// listFn should return it to end the iteration with no errors.
var errEndList = errors . New ( "end list" )
2015-11-27 19:25:52 +01:00
// list lists the objects into the function supplied from
// the bucket and root supplied
//
2016-12-14 18:37:26 +01:00
// dir is the starting directory, "" for root
//
2016-04-21 21:06:21 +02:00
// level is the depth to search to
//
2015-11-27 19:25:52 +01:00
// If prefix is set then startFileName is used as a prefix which all
// files must have
//
// If limit is > 0 then it limits to that many files (must be less
// than 1000)
//
// If hidden is set then it will list the hidden (deleted) files too.
2017-06-11 23:43:31 +02:00
func ( f * Fs ) list ( dir string , recurse bool , prefix string , limit int , hidden bool , fn listFn ) error {
2016-04-23 22:46:52 +02:00
root := f . root
if dir != "" {
root += dir + "/"
}
2016-12-14 18:37:26 +01:00
delimiter := ""
2017-06-11 23:43:31 +02:00
if ! recurse {
2016-12-14 18:37:26 +01:00
delimiter = "/"
}
2015-11-27 19:25:52 +01:00
bucketID , err := f . getBucketID ( )
if err != nil {
return err
}
chunkSize := 1000
if limit > 0 {
chunkSize = limit
}
var request = api . ListFileNamesRequest {
BucketID : bucketID ,
MaxFileCount : chunkSize ,
2016-12-14 18:37:26 +01:00
Prefix : root ,
Delimiter : delimiter ,
2015-11-27 19:25:52 +01:00
}
2016-04-23 22:46:52 +02:00
prefix = root + prefix
2015-11-27 19:25:52 +01:00
if prefix != "" {
request . StartFileName = prefix
}
opts := rest . Opts {
Method : "POST" ,
Path : "/b2_list_file_names" ,
}
if hidden {
opts . Path = "/b2_list_file_versions"
}
for {
2017-03-01 08:57:10 +01:00
var response api . ListFileNamesResponse
2016-02-23 23:15:20 +01:00
err := f . pacer . Call ( func ( ) ( bool , error ) {
resp , err := f . srv . CallJSON ( & opts , & request , & response )
return f . shouldRetry ( resp , err )
} )
2015-11-27 19:25:52 +01:00
if err != nil {
return err
}
for i := range response . Files {
file := & response . Files [ i ]
// Finish if file name no longer has prefix
2016-12-14 18:37:26 +01:00
if prefix != "" && ! strings . HasPrefix ( file . Name , prefix ) {
2015-11-27 19:25:52 +01:00
return nil
}
2016-12-14 18:37:26 +01:00
if ! strings . HasPrefix ( file . Name , f . root ) {
2017-02-09 18:08:51 +01:00
fs . Debugf ( f , "Odd name received %q" , file . Name )
2016-12-14 18:37:26 +01:00
continue
}
2016-04-21 21:06:21 +02:00
remote := file . Name [ len ( f . root ) : ]
2016-12-14 18:37:26 +01:00
// Check for directory
2017-06-11 23:43:31 +02:00
isDirectory := strings . HasSuffix ( remote , "/" )
2016-12-14 18:37:26 +01:00
if isDirectory {
remote = remote [ : len ( remote ) - 1 ]
2016-04-21 21:06:21 +02:00
}
2016-12-14 18:37:26 +01:00
// Send object
err = fn ( remote , file , isDirectory )
if err != nil {
if err == errEndList {
return nil
2016-03-22 15:39:56 +01:00
}
2016-12-14 18:37:26 +01:00
return err
2015-11-27 19:25:52 +01:00
}
}
// end if no NextFileName
if response . NextFileName == nil {
break
}
request . StartFileName = * response . NextFileName
if response . NextFileID != nil {
request . StartFileID = * response . NextFileID
}
}
return nil
}
2017-06-30 11:54:14 +02:00
// Convert a list item into a DirEntry
func ( f * Fs ) itemToDirEntry ( remote string , object * api . File , isDirectory bool , last * string ) ( fs . DirEntry , error ) {
2017-06-11 23:43:31 +02:00
if isDirectory {
2017-06-30 14:37:29 +02:00
d := fs . NewDir ( remote , time . Time { } )
2017-06-11 23:43:31 +02:00
return d , nil
}
if remote == * last {
remote = object . UploadTimestamp . AddVersion ( remote )
} else {
* last = remote
}
// hide objects represent deleted files which we don't list
if object . Action == "hide" {
return nil , nil
}
o , err := f . newObjectWithInfo ( remote , object )
if err != nil {
return nil , err
}
return o , nil
}
2018-03-01 13:11:34 +01:00
// mark the bucket as being OK
func (f *Fs) markBucketOK() {
	if f.bucket == "" {
		return
	}
	f.bucketOKMu.Lock()
	f.bucketOK = true
	f.bucketOKMu.Unlock()
}
2017-06-11 23:43:31 +02:00
// listDir lists a single directory
func ( f * Fs ) listDir ( dir string ) ( entries fs . DirEntries , err error ) {
2016-07-05 12:26:02 +02:00
last := ""
2018-05-14 19:06:57 +02:00
err = f . list ( dir , false , "" , 0 , f . opt . Versions , func ( remote string , object * api . File , isDirectory bool ) error {
2017-06-11 23:43:31 +02:00
entry , err := f . itemToDirEntry ( remote , object , isDirectory , & last )
if err != nil {
return err
}
if entry != nil {
entries = append ( entries , entry )
2016-04-21 21:06:21 +02:00
}
return nil
} )
if err != nil {
2017-06-11 23:43:31 +02:00
return nil , err
2016-04-21 21:06:21 +02:00
}
2018-03-01 13:11:34 +01:00
// bucket must be present if listing succeeded
f . markBucketOK ( )
2017-06-11 23:43:31 +02:00
return entries , nil
2016-04-21 21:06:21 +02:00
}
// listBuckets returns all the buckets to out
2017-06-11 23:43:31 +02:00
func ( f * Fs ) listBuckets ( dir string ) ( entries fs . DirEntries , err error ) {
2016-04-23 22:46:52 +02:00
if dir != "" {
2017-06-11 23:43:31 +02:00
return nil , fs . ErrorListBucketRequired
2016-04-23 22:46:52 +02:00
}
2017-06-11 23:43:31 +02:00
err = f . listBucketsToFn ( func ( bucket * api . Bucket ) error {
2017-06-30 14:37:29 +02:00
d := fs . NewDir ( bucket . Name , time . Time { } )
2017-06-11 23:43:31 +02:00
entries = append ( entries , d )
2016-04-21 21:06:21 +02:00
return nil
} )
if err != nil {
2017-06-11 23:43:31 +02:00
return nil , err
2016-04-21 21:06:21 +02:00
}
2017-06-11 23:43:31 +02:00
return entries , nil
2016-04-21 21:06:21 +02:00
}
2017-06-11 23:43:31 +02:00
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func ( f * Fs ) List ( dir string ) ( entries fs . DirEntries , err error ) {
2016-04-21 21:06:21 +02:00
if f . bucket == "" {
2017-06-11 23:43:31 +02:00
return f . listBuckets ( dir )
2015-11-27 19:25:52 +01:00
}
2017-06-11 23:43:31 +02:00
return f . listDir ( dir )
2015-11-27 19:25:52 +01:00
}
2017-06-05 17:14:24 +02:00
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
2017-06-11 23:43:31 +02:00
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func ( f * Fs ) ListR ( dir string , callback fs . ListRCallback ) ( err error ) {
if f . bucket == "" {
return fs . ErrorListBucketRequired
}
2018-01-12 17:30:54 +01:00
list := walk . NewListRHelper ( callback )
2017-06-11 23:43:31 +02:00
last := ""
2018-05-14 19:06:57 +02:00
err = f . list ( dir , true , "" , 0 , f . opt . Versions , func ( remote string , object * api . File , isDirectory bool ) error {
2017-06-11 23:43:31 +02:00
entry , err := f . itemToDirEntry ( remote , object , isDirectory , & last )
if err != nil {
return err
}
return list . Add ( entry )
} )
if err != nil {
return err
}
2018-03-01 13:11:34 +01:00
// bucket must be present if listing succeeded
f . markBucketOK ( )
2017-06-11 23:43:31 +02:00
return list . Flush ( )
2017-06-05 17:14:24 +02:00
}
2016-04-21 21:06:21 +02:00
// listBucketFn is called from listBucketsToFn to handle a bucket
type listBucketFn func ( * api . Bucket ) error
2015-11-27 19:25:52 +01:00
2016-04-21 21:06:21 +02:00
// listBucketsToFn lists the buckets to the function supplied
func ( f * Fs ) listBucketsToFn ( fn listBucketFn ) error {
2018-08-01 15:33:01 +02:00
var account = api . ListBucketsRequest {
AccountID : f . info . AccountID ,
BucketID : f . info . Allowed . BucketID ,
}
2015-11-27 19:25:52 +01:00
var response api . ListBucketsResponse
opts := rest . Opts {
Method : "POST" ,
Path : "/b2_list_buckets" ,
}
2016-02-23 23:15:20 +01:00
err := f . pacer . Call ( func ( ) ( bool , error ) {
resp , err := f . srv . CallJSON ( & opts , & account , & response )
return f . shouldRetry ( resp , err )
} )
2015-11-27 19:25:52 +01:00
if err != nil {
return err
}
for i := range response . Buckets {
2016-04-21 21:06:21 +02:00
err = fn ( & response . Buckets [ i ] )
if err != nil {
return err
}
2015-11-27 19:25:52 +01:00
}
return nil
}
// getBucketID finds the ID for the current bucket name
func ( f * Fs ) getBucketID ( ) ( bucketID string , err error ) {
f . bucketIDMutex . Lock ( )
defer f . bucketIDMutex . Unlock ( )
if f . _bucketID != "" {
return f . _bucketID , nil
}
2016-04-21 21:06:21 +02:00
err = f . listBucketsToFn ( func ( bucket * api . Bucket ) error {
2015-11-27 19:25:52 +01:00
if bucket . Name == f . bucket {
bucketID = bucket . ID
}
2016-04-21 21:06:21 +02:00
return nil
2015-11-27 19:25:52 +01:00
} )
if bucketID == "" {
2016-06-12 16:06:02 +02:00
err = fs . ErrorDirNotFound
2015-11-27 19:25:52 +01:00
}
f . _bucketID = bucketID
return bucketID , err
}
// setBucketID sets the ID for the current bucket name
func (f *Fs) setBucketID(ID string) {
	f.bucketIDMutex.Lock()
	defer f.bucketIDMutex.Unlock()
	f._bucketID = ID
}

// clearBucketID clears the ID for the current bucket name
func (f *Fs) clearBucketID() {
	f.bucketIDMutex.Lock()
	defer f.bucketIDMutex.Unlock()
	f._bucketID = ""
}
// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
2017-05-28 13:44:22 +02:00
func ( f * Fs ) Put ( in io . Reader , src fs . ObjectInfo , options ... fs . OpenOption ) ( fs . Object , error ) {
2015-11-27 19:25:52 +01:00
// Temporary Object under construction
fs := & Object {
fs : f ,
2016-02-18 12:35:25 +01:00
remote : src . Remote ( ) ,
2015-11-27 19:25:52 +01:00
}
2017-07-05 23:16:07 +02:00
return fs , fs . Update ( in , src , options ... )
2015-11-27 19:25:52 +01:00
}
2017-09-16 22:43:48 +02:00
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(in, src, options...)
}
2015-11-27 19:25:52 +01:00
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
	f.bucketOKMu.Lock()
	defer f.bucketOKMu.Unlock()
	// Skip the API call if we already know the bucket exists
	if f.bucketOK {
		return nil
	}
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_create_bucket",
	}
	var request = api.CreateBucketRequest{
		AccountID: f.info.AccountID,
		Name:      f.bucket,
		Type:      "allPrivate",
	}
	var response api.Bucket
	err := f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(&opts, &request, &response)
		return f.shouldRetry(resp, err)
	})
	if err != nil {
		if apiErr, ok := err.(*api.Error); ok {
			if apiErr.Code == "duplicate_bucket_name" {
				// Check this is our bucket - buckets are globally unique and this
				// might be someone else's.
				_, getBucketErr := f.getBucketID()
				if getBucketErr == nil {
					// found so it is our bucket
					f.bucketOK = true
					return nil
				}
				if getBucketErr != fs.ErrorDirNotFound {
					fs.Debugf(f, "Error checking bucket exists: %v", getBucketErr)
				}
			}
		}
		return errors.Wrap(err, "failed to create bucket")
	}
	// Cache the new bucket's ID and mark the bucket as known-good
	f.setBucketID(response.ID)
	f.bucketOK = true
	return nil
}
// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(dir string) error {
	f.bucketOKMu.Lock()
	defer f.bucketOKMu.Unlock()
	// Only delete the bucket itself when operating at the bucket root
	if f.root != "" || dir != "" {
		return nil
	}
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_delete_bucket",
	}
	bucketID, err := f.getBucketID()
	if err != nil {
		return err
	}
	var request = api.DeleteBucketRequest{
		ID:        bucketID,
		AccountID: f.info.AccountID,
	}
	var response api.Bucket
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(&opts, &request, &response)
		return f.shouldRetry(resp, err)
	})
	if err != nil {
		return errors.Wrap(err, "failed to delete bucket")
	}
	// Invalidate all cached bucket state so a later Mkdir/upload
	// re-creates and re-fetches it
	f.bucketOK = false
	f.clearBucketID()
	f.clearUploadURL()
	return nil
}
// Precision of the remote
func ( f * Fs ) Precision ( ) time . Duration {
2016-03-24 16:23:27 +01:00
return time . Millisecond
2015-11-27 19:25:52 +01:00
}
2017-07-23 14:02:42 +02:00
// hide hides a file on the remote
func (f *Fs) hide(name string) error {
	bucketID, err := f.getBucketID()
	if err != nil {
		return err
	}
	request := api.HideFileRequest{
		BucketID: bucketID,
		Name:     name,
	}
	var response api.File
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_hide_file",
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, callErr := f.srv.CallJSON(&opts, &request, &response)
		return f.shouldRetry(resp, callErr)
	})
	if err != nil {
		return errors.Wrapf(err, "failed to hide %q", name)
	}
	return nil
}
2015-11-27 19:25:52 +01:00
// deleteByID deletes a file version given Name and ID
func ( f * Fs ) deleteByID ( ID , Name string ) error {
opts := rest . Opts {
Method : "POST" ,
Path : "/b2_delete_file_version" ,
}
var request = api . DeleteFileRequest {
ID : ID ,
Name : Name ,
}
var response api . File
2016-02-23 23:15:20 +01:00
err := f . pacer . Call ( func ( ) ( bool , error ) {
resp , err := f . srv . CallJSON ( & opts , & request , & response )
return f . shouldRetry ( resp , err )
} )
2015-11-27 19:25:52 +01:00
if err != nil {
2016-06-12 16:06:02 +02:00
return errors . Wrapf ( err , "failed to delete %q" , Name )
2015-11-27 19:25:52 +01:00
}
return nil
}
2016-07-02 18:03:08 +02:00
// purge deletes all the files and directories
//
// if oldOnly is true then it deletes only non current files.
2015-11-27 19:25:52 +01:00
//
// Implemented here so we can make sure we delete old versions.
2016-07-02 18:03:08 +02:00
func ( f * Fs ) purge ( oldOnly bool ) error {
2015-11-27 19:25:52 +01:00
var errReturn error
var checkErrMutex sync . Mutex
var checkErr = func ( err error ) {
if err == nil {
return
}
checkErrMutex . Lock ( )
defer checkErrMutex . Unlock ( )
if errReturn == nil {
errReturn = err
}
}
2018-12-02 19:05:32 +01:00
var isUnfinishedUploadStale = func ( timestamp api . Timestamp ) bool {
if time . Since ( time . Time ( timestamp ) ) . Hours ( ) > 24 {
return true
}
return false
}
2015-11-27 19:25:52 +01:00
// Delete Config.Transfers in parallel
toBeDeleted := make ( chan * api . File , fs . Config . Transfers )
var wg sync . WaitGroup
wg . Add ( fs . Config . Transfers )
for i := 0 ; i < fs . Config . Transfers ; i ++ {
go func ( ) {
defer wg . Done ( )
for object := range toBeDeleted {
2018-01-12 17:30:54 +01:00
accounting . Stats . Checking ( object . Name )
2015-11-27 19:25:52 +01:00
checkErr ( f . deleteByID ( object . ID , object . Name ) )
2018-01-12 17:30:54 +01:00
accounting . Stats . DoneChecking ( object . Name )
2015-11-27 19:25:52 +01:00
}
} ( )
}
2016-07-02 18:03:08 +02:00
last := ""
2017-06-11 23:43:31 +02:00
checkErr ( f . list ( "" , true , "" , 0 , true , func ( remote string , object * api . File , isDirectory bool ) error {
2016-04-21 21:06:21 +02:00
if ! isDirectory {
2018-01-12 17:30:54 +01:00
accounting . Stats . Checking ( remote )
2016-07-02 18:03:08 +02:00
if oldOnly && last != remote {
2016-08-18 19:36:00 +02:00
if object . Action == "hide" {
2017-02-09 12:01:20 +01:00
fs . Debugf ( remote , "Deleting current version (id %q) as it is a hide marker" , object . ID )
2016-08-18 19:36:00 +02:00
toBeDeleted <- object
2018-12-02 19:05:32 +01:00
} else if object . Action == "start" && isUnfinishedUploadStale ( object . UploadTimestamp ) {
fs . Debugf ( remote , "Deleting current version (id %q) as it is a start marker (upload started at %s)" , object . ID , time . Time ( object . UploadTimestamp ) . Local ( ) )
toBeDeleted <- object
2016-08-18 19:36:00 +02:00
} else {
2017-02-09 12:01:20 +01:00
fs . Debugf ( remote , "Not deleting current version (id %q) %q" , object . ID , object . Action )
2016-08-18 19:36:00 +02:00
}
2016-07-02 18:03:08 +02:00
} else {
2017-02-09 12:01:20 +01:00
fs . Debugf ( remote , "Deleting (id %q)" , object . ID )
2016-07-02 18:03:08 +02:00
toBeDeleted <- object
}
last = remote
2018-01-12 17:30:54 +01:00
accounting . Stats . DoneChecking ( remote )
2016-04-21 21:06:21 +02:00
}
2015-11-27 19:25:52 +01:00
return nil
} ) )
close ( toBeDeleted )
wg . Wait ( )
2016-07-02 18:03:08 +02:00
if ! oldOnly {
2016-11-25 22:52:43 +01:00
checkErr ( f . Rmdir ( "" ) )
2016-07-02 18:03:08 +02:00
}
2015-11-27 19:25:52 +01:00
return errReturn
}
2016-07-02 18:03:08 +02:00
// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge() error {
	// oldOnly=false deletes everything including the bucket
	return f.purge(false)
}
// CleanUp deletes all the hidden files.
func (f *Fs) CleanUp() error {
	// oldOnly=true keeps current versions and the bucket
	return f.purge(true)
}
2016-01-11 13:39:33 +01:00
// Hashes returns the supported hash sets.
2018-01-12 17:30:54 +01:00
func ( f * Fs ) Hashes ( ) hash . Set {
2018-01-18 21:27:52 +01:00
return hash . Set ( hash . SHA1 )
2016-01-11 13:39:33 +01:00
}
2015-11-27 19:25:52 +01:00
// ------------------------------------------------------------
// Fs returns the parent Fs
2016-02-18 12:35:25 +01:00
func ( o * Object ) Fs ( ) fs . Info {
2015-11-27 19:25:52 +01:00
return o . fs
}
// String returns a description of the Object, "<nil>" for a nil receiver.
func (o *Object) String() string {
	if o != nil {
		return o.remote
	}
	return "<nil>"
}
// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}
2016-01-11 13:39:33 +01:00
// Hash returns the Sha-1 of an object returning a lowercase hex string
2018-01-12 17:30:54 +01:00
func ( o * Object ) Hash ( t hash . Type ) ( string , error ) {
2018-01-18 21:27:52 +01:00
if t != hash . SHA1 {
return "" , hash . ErrUnsupported
2016-01-11 13:39:33 +01:00
}
2016-01-19 09:20:23 +01:00
if o . sha1 == "" {
2016-03-22 15:39:56 +01:00
// Error is logged in readMetaData
err := o . readMetaData ( )
2016-01-19 09:20:23 +01:00
if err != nil {
return "" , err
}
2016-01-18 18:53:03 +01:00
}
2016-01-11 13:39:33 +01:00
return o . sha1 , nil
2015-11-27 19:25:52 +01:00
}
// Size returns the size of an object in bytes
func ( o * Object ) Size ( ) int64 {
2016-03-22 15:39:56 +01:00
return o . size
}
2016-06-15 19:49:11 +02:00
// decodeMetaDataRaw sets the metadata from the data passed in
2016-03-22 15:39:56 +01:00
//
// Sets
// o.id
// o.modTime
// o.size
// o.sha1
2016-09-21 23:13:24 +02:00
func ( o * Object ) decodeMetaDataRaw ( ID , SHA1 string , Size int64 , UploadTimestamp api . Timestamp , Info map [ string ] string , mimeType string ) ( err error ) {
2016-06-15 19:49:11 +02:00
o . id = ID
o . sha1 = SHA1
2016-09-21 23:13:24 +02:00
o . mimeType = mimeType
2016-06-15 19:49:11 +02:00
// Read SHA1 from metadata if it exists and isn't set
if o . sha1 == "" || o . sha1 == "none" {
o . sha1 = Info [ sha1Key ]
}
o . size = Size
2016-03-22 15:39:56 +01:00
// Use the UploadTimestamp if can't get file info
2016-06-15 19:49:11 +02:00
o . modTime = time . Time ( UploadTimestamp )
return o . parseTimeString ( Info [ timeKey ] )
}
// decodeMetaData sets the metadata in the object from an api.File
//
// Sets
// o.id
// o.modTime
// o.size
// o.sha1
func ( o * Object ) decodeMetaData ( info * api . File ) ( err error ) {
2016-09-21 23:13:24 +02:00
return o . decodeMetaDataRaw ( info . ID , info . SHA1 , info . Size , info . UploadTimestamp , info . Info , info . ContentType )
2016-06-15 19:49:11 +02:00
}
// decodeMetaDataFileInfo sets the metadata in the object from an api.FileInfo
//
// Sets
// o.id
// o.modTime
// o.size
// o.sha1
func ( o * Object ) decodeMetaDataFileInfo ( info * api . FileInfo ) ( err error ) {
2016-09-21 23:13:24 +02:00
return o . decodeMetaDataRaw ( info . ID , info . SHA1 , info . Size , info . UploadTimestamp , info . Info , info . ContentType )
2015-11-27 19:25:52 +01:00
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// Sets
//  o.id
//  o.modTime
//  o.size
//  o.sha1
func (o *Object) readMetaData() (err error) {
	// A non-empty ID means the metadata has already been decoded
	if o.id != "" {
		return nil
	}
	maxSearched := 1
	var timestamp api.Timestamp
	baseRemote := o.remote
	// In --b2-versions mode the remote name carries an encoded version
	// timestamp - strip it off and widen the search to maxVersions
	// entries so the right version can be found
	if o.fs.opt.Versions {
		timestamp, baseRemote = api.RemoveVersion(baseRemote)
		maxSearched = maxVersions
	}
	var info *api.File
	err = o.fs.list("", true, baseRemote, maxSearched, o.fs.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
		if isDirectory {
			return nil
		}
		if remote == baseRemote {
			// When a specific version is wanted, skip entries whose
			// upload timestamp doesn't match it
			if !timestamp.IsZero() && !timestamp.Equal(object.UploadTimestamp) {
				return nil
			}
			info = object
		}
		return errEndList // read only 1 item
	})
	if err != nil {
		// Translate a missing directory into a missing object
		if err == fs.ErrorDirNotFound {
			return fs.ErrorObjectNotFound
		}
		return err
	}
	if info == nil {
		return fs.ErrorObjectNotFound
	}
	return o.decodeMetaData(info)
}
// timeString returns modTime as the number of milliseconds
// elapsed since January 1, 1970 UTC as a decimal string.
func timeString ( modTime time . Time ) string {
return strconv . FormatInt ( modTime . UnixNano ( ) / 1E6 , 10 )
}
// parseTimeString converts a decimal string number of milliseconds
2016-03-22 11:26:37 +01:00
// elapsed since January 1, 1970 UTC into a time.Time and stores it in
// the modTime variable.
func ( o * Object ) parseTimeString ( timeString string ) ( err error ) {
2015-11-27 19:25:52 +01:00
if timeString == "" {
2016-03-22 11:26:37 +01:00
return nil
2015-11-27 19:25:52 +01:00
}
unixMilliseconds , err := strconv . ParseInt ( timeString , 10 , 64 )
if err != nil {
2017-02-09 12:01:20 +01:00
fs . Debugf ( o , "Failed to parse mod time string %q: %v" , timeString , err )
2016-03-22 11:26:37 +01:00
return err
2015-11-27 19:25:52 +01:00
}
2016-03-22 11:26:37 +01:00
o . modTime = time . Unix ( unixMilliseconds / 1E3 , ( unixMilliseconds % 1E3 ) * 1E6 ) . UTC ( )
return nil
2016-01-11 13:39:33 +01:00
}
2016-03-22 11:26:37 +01:00
// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
//
// SHA-1 will also be updated once the request has completed.
func ( o * Object ) ModTime ( ) ( result time . Time ) {
2016-03-22 15:39:56 +01:00
// The error is logged in readMetaData
_ = o . readMetaData ( )
2016-03-22 11:26:37 +01:00
return o . modTime
}
2015-11-27 19:25:52 +01:00
// SetModTime sets the modification time of the local fs object
2016-03-22 16:07:10 +01:00
func ( o * Object ) SetModTime ( modTime time . Time ) error {
2015-11-27 19:25:52 +01:00
// Not possible with B2
2016-03-22 16:07:10 +01:00
return fs . ErrorCantSetModTime
2015-11-27 19:25:52 +01:00
}
// Storable returns if this object is storable
func (o *Object) Storable() bool {
	// Everything in B2 is storable
	return true
}
// openFile represents an Object open for reading.
//
// It wraps the HTTP response body, counting the bytes read and
// accumulating a SHA1 as data flows through, so that Close can verify
// both length and checksum when the whole object has been read.
type openFile struct {
	o     *Object        // Object we are reading for
	resp  *http.Response // response of the GET
	body  io.Reader      // reading from here
	hash  gohash.Hash    // currently accumulating SHA1
	bytes int64          // number of bytes read on this connection
	eof   bool           // whether we have read end of file
}
// newOpenFile wraps an io.ReadCloser and checks the sha1sum
func newOpenFile(o *Object, resp *http.Response) *openFile {
	hasher := sha1.New()
	return &openFile{
		o:    o,
		resp: resp,
		hash: hasher,
		// Tee the body through the hasher so reading accumulates the SHA1
		body: io.TeeReader(resp.Body, hasher),
	}
}
// Read bytes from the object - see io.Reader
func (file *openFile) Read(p []byte) (n int, err error) {
	n, err = file.body.Read(p)
	file.bytes += int64(n)
	// Remember that the end of the stream was reached so Close can
	// verify the download
	file.eof = file.eof || err == io.EOF
	return n, err
}
// Close the object and checks the length and SHA1 if all the object
// was read
func ( file * openFile ) Close ( ) ( err error ) {
// Close the body at the end
defer fs . CheckClose ( file . resp . Body , & err )
// If not end of file then can't check SHA1
if ! file . eof {
return nil
}
// Check to see we read the correct number of bytes
if file . o . Size ( ) != file . bytes {
2016-06-12 16:06:02 +02:00
return errors . Errorf ( "object corrupted on transfer - length mismatch (want %d got %d)" , file . o . Size ( ) , file . bytes )
2015-11-27 19:25:52 +01:00
}
// Check the SHA1
2016-09-05 18:26:04 +02:00
receivedSHA1 := file . o . sha1
2015-11-27 19:25:52 +01:00
calculatedSHA1 := fmt . Sprintf ( "%x" , file . hash . Sum ( nil ) )
2017-08-31 22:19:54 +02:00
if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
2016-06-12 16:06:02 +02:00
return errors . Errorf ( "object corrupted on transfer - SHA1 mismatch (want %q got %q)" , receivedSHA1 , calculatedSHA1 )
2015-11-27 19:25:52 +01:00
}
return nil
}
// Check it satisfies the io.ReadCloser interface at compile time
var _ io.ReadCloser = &openFile{}
// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
	opts := rest.Opts{
		Method:  "GET",
		Options: options,
	}
	// Use downloadUrl from backblaze if downloadUrl is not set
	// otherwise use the custom downloadUrl
	if o.fs.opt.DownloadURL == "" {
		opts.RootURL = o.fs.info.DownloadURL
	} else {
		opts.RootURL = o.fs.opt.DownloadURL
	}
	// Download by id if set otherwise by name
	if o.id != "" {
		opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
	} else {
		opts.Path += "/file/" + urlEncode(o.fs.bucket) + "/" + urlEncode(o.fs.root+o.remote)
	}
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(&opts)
		return o.fs.shouldRetry(resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "failed to open for download")
	}
	// Parse the time out of the headers if possible
	err = o.parseTimeString(resp.Header.Get(timeHeader))
	if err != nil {
		// Don't leak the connection on the error path
		_ = resp.Body.Close()
		return nil, err
	}
	// Read sha1 from header if it isn't set
	if o.sha1 == "" {
		o.sha1 = resp.Header.Get(sha1Header)
		fs.Debugf(o, "Reading sha1 from header - %q", o.sha1)
		// if sha1 header is "none" (in big files), then need
		// to read it from the metadata
		if o.sha1 == "none" {
			o.sha1 = resp.Header.Get(sha1InfoHeader)
			fs.Debugf(o, "Reading sha1 from info - %q", o.sha1)
		}
	}
	// Don't check length or hash on partial content
	if resp.StatusCode == http.StatusPartialContent {
		return resp.Body, nil
	}
	// Wrap the body so length and SHA1 are verified on Close
	return newOpenFile(o, resp), nil
}
// dontEncode is the characters that do not need percent-encoding
//
// The characters that do not need percent-encoding are a subset of
// the printable ASCII characters: upper-case letters, lower-case
// letters, digits, ".", "_", "-", "/", "~", "!", "$", "'", "(", ")",
// "*", ";", "=", ":", and "@". All other byte values in a UTF-8 must
// be replaced with "%" and the two-digit hex value of the byte.
const dontEncode = (`abcdefghijklmnopqrstuvwxyz` +
	`ABCDEFGHIJKLMNOPQRSTUVWXYZ` +
	`0123456789` +
	`._-/~!$'()*;=:@`)

// noNeedToEncode is a bitmap of characters which don't need % encoding
var noNeedToEncode [256]bool

func init() {
	for _, c := range dontEncode {
		noNeedToEncode[c] = true
	}
}

// urlEncode encodes in with % encoding
func urlEncode(in string) string {
	var out bytes.Buffer
	for i := 0; i < len(in); i++ {
		c := in[i]
		if noNeedToEncode[c] {
			_ = out.WriteByte(c)
		} else {
			// FIX: use %02X (zero-padded) - the previous %2X space-pads
			// bytes < 0x10 producing invalid escapes such as "% A"
			_, _ = out.WriteString(fmt.Sprintf("%%%02X", c))
		}
	}
	return out.String()
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	if o.fs.opt.Versions {
		return errNotWithVersions
	}
	// Make sure the bucket exists before uploading
	err = o.fs.Mkdir("")
	if err != nil {
		return err
	}
	size := src.Size()

	if size == -1 {
		// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
		buf := o.fs.getUploadBlock()
		n, err := io.ReadFull(in, buf)
		if err == nil {
			// buf filled completely - peek one more byte to decide
			// between streaming (more data) and a single direct upload
			bufReader := bufio.NewReader(in)
			in = bufReader
			_, err = bufReader.Peek(1)
		}
		if err == nil {
			fs.Debugf(o, "File is big enough for chunked streaming")
			up, err := o.fs.newLargeUpload(o, in, src)
			if err != nil {
				// hand the scratch buffer back on failure
				o.fs.putUploadBlock(buf)
				return err
			}
			// Stream takes ownership of buf as the first chunk
			return up.Stream(buf)
		} else if err == io.EOF || err == io.ErrUnexpectedEOF {
			fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
			defer o.fs.putUploadBlock(buf)
			size = int64(n)
			in = bytes.NewReader(buf[:n])
		} else {
			return err
		}
	} else if size > int64(o.fs.opt.UploadCutoff) {
		// Known size above the cutoff - use the large-file (chunked) API
		up, err := o.fs.newLargeUpload(o, in, src)
		if err != nil {
			return err
		}
		return up.Upload()
	}
	modTime := src.ModTime()

	calculatedSha1, _ := src.Hash(hash.SHA1)
	if calculatedSha1 == "" {
		// No SHA1 known up front - tell B2 it follows the body and
		// hash the stream as it is uploaded
		calculatedSha1 = "hex_digits_at_end"
		har := newHashAppendingReader(in, sha1.New())
		size += int64(har.AdditionalLength())
		in = har
	}

	// Get upload URL
	upload, err := o.fs.getUploadURL()
	if err != nil {
		return err
	}
	defer func() {
		// return it like this because we might nil it out
		o.fs.returnUploadURL(upload)
	}()

	// Headers for upload file
	//
	// Authorization
	// required
	// An upload authorization token, from b2_get_upload_url.
	//
	// X-Bz-File-Name
	// required
	//
	// The name of the file, in percent-encoded UTF-8. See Files for requirements on file names. See String Encoding.
	//
	// Content-Type
	// required
	//
	// The MIME type of the content of the file, which will be returned in
	// the Content-Type header when downloading the file. Use the
	// Content-Type b2/x-auto to automatically set the stored Content-Type
	// post upload. In the case where a file extension is absent or the
	// lookup fails, the Content-Type is set to application/octet-stream. The
	// Content-Type mappings can be pursued here.
	//
	// X-Bz-Content-Sha1
	// required
	//
	// The SHA1 checksum of the content of the file. B2 will check this when
	// the file is uploaded, to make sure that the file arrived correctly. It
	// will be returned in the X-Bz-Content-Sha1 header when the file is
	// downloaded.
	//
	// X-Bz-Info-src_last_modified_millis
	// optional
	//
	// If the original source of the file being uploaded has a last modified
	// time concept, Backblaze recommends using this spelling of one of your
	// ten X-Bz-Info-* headers (see below). Using a standard spelling allows
	// different B2 clients and the B2 web user interface to interoperate
	// correctly. The value should be a base 10 number which represents a UTC
	// time when the original source file was last modified. It is a base 10
	// number of milliseconds since midnight, January 1, 1970 UTC. This fits
	// in a 64 bit integer such as the type "long" in the programming
	// language Java. It is intended to be compatible with Java's time
	// long. For example, it can be passed directly into the Java call
	// Date.setTime(long time).
	//
	// X-Bz-Info-*
	// optional
	//
	// Up to 10 of these headers may be present. The * part of the header
	// name is replace with the name of a custom field in the file
	// information stored with the file, and the value is an arbitrary UTF-8
	// string, percent-encoded. The same info headers sent with the upload
	// will be returned with the download.
	opts := rest.Opts{
		Method:  "POST",
		RootURL: upload.UploadURL,
		Body:    in,
		ExtraHeaders: map[string]string{
			"Authorization":  upload.AuthorizationToken,
			"X-Bz-File-Name": urlEncode(o.fs.root + o.remote),
			"Content-Type":   fs.MimeType(src),
			sha1Header:       calculatedSha1,
			timeHeader:       timeString(modTime),
		},
		ContentLength: &size,
	}
	var response api.FileInfo
	// Don't retry, return a retry error instead
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err := o.fs.srv.CallJSON(&opts, nil, &response)
		retry, err := o.fs.shouldRetry(resp, err)
		// On retryable error clear UploadURL
		if retry {
			fs.Debugf(o, "Clearing upload URL because of error: %v", err)
			upload = nil
		}
		return retry, err
	})
	if err != nil {
		return err
	}
	return o.decodeMetaDataFileInfo(&response)
}
// Remove an object
func ( o * Object ) Remove ( ) error {
2018-05-14 19:06:57 +02:00
if o . fs . opt . Versions {
2016-07-05 12:26:02 +02:00
return errNotWithVersions
}
2018-05-14 19:06:57 +02:00
if o . fs . opt . HardDelete {
2017-12-11 19:54:28 +01:00
return o . fs . deleteByID ( o . id , o . fs . root + o . remote )
2015-11-27 19:25:52 +01:00
}
2017-07-23 14:02:42 +02:00
return o . fs . hide ( o . fs . root + o . remote )
2015-11-27 19:25:52 +01:00
}
2016-09-21 23:13:24 +02:00
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string {
	// Set from the Content-Type when the metadata was decoded
	return o.mimeType
}
2018-05-13 10:16:56 +02:00
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
	return o.id
}
2015-11-27 19:25:52 +01:00
// Check the interfaces are satisfied at compile time
var (
	_ fs.Fs          = &Fs{}
	_ fs.Purger      = &Fs{}
	_ fs.PutStreamer = &Fs{}
	_ fs.CleanUpper  = &Fs{}
	_ fs.ListRer     = &Fs{}
	_ fs.Object      = &Object{}
	_ fs.MimeTyper   = &Object{}
	_ fs.IDer        = &Object{}
)