2017-07-25 16:18:13 +02:00
|
|
|
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
|
2018-07-13 17:21:49 +02:00
|
|
|
|
2019-04-15 21:03:33 +02:00
|
|
|
// +build !plan9,!solaris
|
2018-07-13 17:21:49 +02:00
|
|
|
|
2017-07-25 16:18:13 +02:00
|
|
|
package azureblob
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
2018-07-13 17:21:49 +02:00
|
|
|
"context"
|
2018-09-07 06:43:40 +02:00
|
|
|
"crypto/md5"
|
2017-07-25 16:18:13 +02:00
|
|
|
"encoding/base64"
|
|
|
|
"encoding/hex"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"net/http"
|
2018-03-24 15:01:23 +01:00
|
|
|
"net/url"
|
2017-07-25 16:18:13 +02:00
|
|
|
"path"
|
|
|
|
"strings"
|
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
2018-11-30 13:08:22 +01:00
|
|
|
"github.com/Azure/azure-pipeline-go/pipeline"
|
2018-10-25 17:35:30 +02:00
|
|
|
"github.com/Azure/azure-storage-blob-go/azblob"
|
2017-07-25 16:18:13 +02:00
|
|
|
"github.com/pkg/errors"
|
2019-07-28 19:47:38 +02:00
|
|
|
"github.com/rclone/rclone/fs"
|
|
|
|
"github.com/rclone/rclone/fs/accounting"
|
2020-01-14 18:33:35 +01:00
|
|
|
"github.com/rclone/rclone/fs/config"
|
2019-07-28 19:47:38 +02:00
|
|
|
"github.com/rclone/rclone/fs/config/configmap"
|
|
|
|
"github.com/rclone/rclone/fs/config/configstruct"
|
|
|
|
"github.com/rclone/rclone/fs/fserrors"
|
|
|
|
"github.com/rclone/rclone/fs/fshttp"
|
|
|
|
"github.com/rclone/rclone/fs/hash"
|
|
|
|
"github.com/rclone/rclone/fs/walk"
|
2019-08-16 11:10:56 +02:00
|
|
|
"github.com/rclone/rclone/lib/bucket"
|
2020-01-14 18:33:35 +01:00
|
|
|
"github.com/rclone/rclone/lib/encoder"
|
2019-07-28 19:47:38 +02:00
|
|
|
"github.com/rclone/rclone/lib/pacer"
|
2020-04-25 19:36:18 +02:00
|
|
|
"github.com/rclone/rclone/lib/pool"
|
2020-05-13 22:29:21 +02:00
|
|
|
"github.com/rclone/rclone/lib/readers"
|
|
|
|
"golang.org/x/sync/errgroup"
|
2017-07-25 16:18:13 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// Pacer settings used to retry and back off throttled API calls.
	minSleep      = 10 * time.Millisecond
	maxSleep      = 10 * time.Second
	decayConstant = 1 // bigger for slower decay, exponential

	maxListChunkSize = 5000 // number of items to read at once

	// Metadata key and time formats used to persist the modification
	// time in blob metadata.
	modTimeKey    = "mtime"
	timeFormatIn  = time.RFC3339
	timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"

	maxTotalParts = 50000 // in multipart upload

	// Default DNS suffix for the Azure blob service endpoint.
	storageDefaultBaseURL = "blob.core.windows.net"
	// maxUncommittedSize = 9 << 30 // can't upload bigger than this

	// Chunked upload sizing limits.
	defaultChunkSize    = 4 * fs.MebiByte
	maxChunkSize        = 100 * fs.MebiByte
	defaultUploadCutoff = 256 * fs.MebiByte
	maxUploadCutoff     = 256 * fs.MebiByte

	// By default no explicit access tier is applied to uploads.
	defaultAccessTier = azblob.AccessTierNone

	maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)

	// Default storage account, key and blob endpoint for emulator support,
	// though it is a base64 key checked in here, it is publicly available secret.
	emulatorAccount      = "devstoreaccount1"
	emulatorAccountKey   = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
	emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"

	// Memory pool defaults for multipart upload buffers.
	memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
	memoryPoolUseMmap   = false
)
|
|
|
|
|
|
|
|
// Register with Fs
//
// Runs at package load time and declares the backend name, its
// constructor and every configuration option it understands.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "azureblob",
		Description: "Microsoft Azure Blob Storage",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: "account",
			Help: "Storage Account Name (leave blank to use SAS URL or Emulator)",
		}, {
			Name: "key",
			Help: "Storage Account Key (leave blank to use SAS URL or Emulator)",
		}, {
			Name: "sas_url",
			Help: "SAS URL for container level access only\n(leave blank if using account/key or Emulator)",
		}, {
			Name:    "use_emulator",
			Help:    "Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)",
			Default: false,
		}, {
			Name:     "endpoint",
			Help:     "Endpoint for the service\nLeave blank normally.",
			Advanced: true,
		}, {
			Name:     "upload_cutoff",
			Help:     "Cutoff for switching to chunked upload (<= 256MB).",
			Default:  defaultUploadCutoff,
			Advanced: true,
		}, {
			Name: "chunk_size",
			Help: `Upload chunk size (<= 100MB).

Note that this is stored in memory and there may be up to
"--transfers" chunks stored at once in memory.`,
			Default:  defaultChunkSize,
			Advanced: true,
		}, {
			Name: "list_chunk",
			Help: `Size of blob list.

This sets the number of blobs requested in each listing chunk. Default
is the maximum, 5000. "List blobs" requests are permitted 2 minutes
per megabyte to complete. If an operation is taking longer than 2
minutes per megabyte on average, it will time out (
[source](https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-blob-service-operations#exceptions-to-default-timeout-interval)
). This can be used to limit the number of blobs items to return, to
avoid the time out.`,
			Default:  maxListChunkSize,
			Advanced: true,
		}, {
			Name: "access_tier",
			Help: `Access tier of blob: hot, cool or archive.

Archived blobs can be restored by setting access tier to hot or
cool. Leave blank if you intend to use default access tier, which is
set at account level

If there is no "access tier" specified, rclone doesn't apply any tier.
rclone performs "Set Tier" operation on blobs while uploading, if objects
are not modified, specifying "access tier" to new one will have no effect.
If blobs are in "archive tier" at remote, trying to perform data transfer
operations from remote will not be allowed. User should first restore by
tiering blob to "Hot" or "Cool".`,
			Advanced: true,
		}, {
			Name: "disable_checksum",
			Help: `Don't store MD5 checksum with object metadata.

Normally rclone will calculate the MD5 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.`,
			Default:  false,
			Advanced: true,
		}, {
			Name:     "memory_pool_flush_time",
			Default:  memoryPoolFlushTime,
			Advanced: true,
			Help: `How often internal memory buffer pools will be flushed.
Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
		}, {
			Name:     "memory_pool_use_mmap",
			Default:  memoryPoolUseMmap,
			Advanced: true,
			Help:     `Whether to use mmap buffers in internal memory pool.`,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			Default: (encoder.EncodeInvalidUtf8 |
				encoder.EncodeSlash |
				encoder.EncodeCtl |
				encoder.EncodeDel |
				encoder.EncodeBackSlash |
				encoder.EncodeRightPeriod),
		}},
	})
}
|
|
|
|
|
|
|
|
// Options defines the configuration for this backend
//
// Field tags map each field to the option name registered in init().
type Options struct {
	Account             string               `config:"account"`                // storage account name
	Key                 string               `config:"key"`                    // storage account shared key
	Endpoint            string               `config:"endpoint"`               // service endpoint override
	SASURL              string               `config:"sas_url"`                // SAS URL for container level access
	UploadCutoff        fs.SizeSuffix        `config:"upload_cutoff"`          // cutoff for switching to chunked upload
	ChunkSize           fs.SizeSuffix        `config:"chunk_size"`             // upload chunk size
	ListChunkSize       uint                 `config:"list_chunk"`             // number of blobs per listing page
	AccessTier          string               `config:"access_tier"`            // hot, cool or archive
	UseEmulator         bool                 `config:"use_emulator"`           // use the local storage emulator
	DisableCheckSum     bool                 `config:"disable_checksum"`       // skip MD5 calculation on upload
	MemoryPoolFlushTime fs.Duration          `config:"memory_pool_flush_time"` // how often to flush the buffer pool
	MemoryPoolUseMmap   bool                 `config:"memory_pool_use_mmap"`   // use mmap for pool buffers
	Enc                 encoder.MultiEncoder `config:"encoding"`               // filename encoding rules
}
|
|
|
|
|
|
|
|
// Fs represents a remote azure server
type Fs struct {
	name          string                          // name of this remote
	root          string                          // the path we are working on if any
	opt           Options                         // parsed config options
	features      *fs.Features                    // optional features
	client        *http.Client                    // http client we are using
	svcURL        *azblob.ServiceURL              // reference to serviceURL
	cntURLcacheMu sync.Mutex                      // mutex to protect cntURLcache
	cntURLcache   map[string]*azblob.ContainerURL // reference to containerURL per container (guarded by cntURLcacheMu)
	rootContainer string                          // container part of root (if any)
	rootDirectory string                          // directory part of root (if any)
	isLimited     bool                            // if limited to one container
	cache         *bucket.Cache                   // cache for container creation status
	pacer         *fs.Pacer                       // To pace and retry the API calls
	uploadToken   *pacer.TokenDispenser           // control concurrency
	pool          *pool.Pool                      // memory pool
}
|
|
|
|
|
2020-05-20 12:39:20 +02:00
|
|
|
// Object describes an azure object
//
// Metadata fields are filled in lazily from listings or HEAD requests.
type Object struct {
	fs         *Fs                   // what this object is part of
	remote     string                // The remote path
	modTime    time.Time             // The modified time of the object if known
	md5        string                // MD5 hash if known
	size       int64                 // Size of the object
	mimeType   string                // Content-Type of the object
	accessTier azblob.AccessTierType // Blob Access Tier
	meta       map[string]string     // blob metadata
}
|
|
|
|
|
|
|
|
// ------------------------------------------------------------
|
|
|
|
|
|
|
|
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}
|
|
|
|
|
|
|
|
// Root of the remote (as passed into NewFs)
//
// May have been adjusted by NewFs if the root pointed at a file.
func (f *Fs) Root() string {
	return f.root
}
|
|
|
|
|
|
|
|
// String converts this Fs to a string
|
|
|
|
func (f *Fs) String() string {
|
2019-08-16 11:10:56 +02:00
|
|
|
if f.rootContainer == "" {
|
2020-02-19 21:47:50 +01:00
|
|
|
return "Azure root"
|
2019-08-16 11:10:56 +02:00
|
|
|
}
|
|
|
|
if f.rootDirectory == "" {
|
|
|
|
return fmt.Sprintf("Azure container %s", f.rootContainer)
|
2017-07-25 16:18:13 +02:00
|
|
|
}
|
2019-08-16 11:10:56 +02:00
|
|
|
return fmt.Sprintf("Azure container %s path %s", f.rootContainer, f.rootDirectory)
|
2017-07-25 16:18:13 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}
|
|
|
|
|
2019-08-16 11:10:56 +02:00
|
|
|
// parsePath parses a remote 'url', stripping any leading and trailing
// "/" characters from it.
func parsePath(path string) (root string) {
	return strings.Trim(path, "/")
}
|
|
|
|
|
2019-08-16 11:10:56 +02:00
|
|
|
// split returns container and containerPath from the rootRelativePath
// relative to f.root
//
// The returned names are converted to the encoding used on the wire
// via the configured encoder.
func (f *Fs) split(rootRelativePath string) (containerName, containerPath string) {
	containerName, containerPath = bucket.Split(path.Join(f.root, rootRelativePath))
	return f.opt.Enc.FromStandardName(containerName), f.opt.Enc.FromStandardPath(containerPath)
}
|
|
|
|
|
|
|
|
// split returns container and containerPath from the object
//
// Convenience wrapper around (*Fs).split using the object's remote.
func (o *Object) split() (container, containerPath string) {
	return o.fs.split(o.remote)
}
|
|
|
|
|
2018-09-11 03:55:06 +02:00
|
|
|
// validateAccessTier checks if azureblob supports user supplied tier
|
|
|
|
func validateAccessTier(tier string) bool {
|
|
|
|
switch tier {
|
|
|
|
case string(azblob.AccessTierHot),
|
|
|
|
string(azblob.AccessTierCool),
|
|
|
|
string(azblob.AccessTierArchive):
|
|
|
|
// valid cases
|
|
|
|
return true
|
|
|
|
default:
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-07-25 16:18:13 +02:00
|
|
|
// retryErrorCodes is a slice of error codes that we will retry
//
// Consulted by shouldRetry after extracting the HTTP status from an
// azblob.StorageError.
var retryErrorCodes = []int{
	401, // Unauthorized (eg "Token has expired")
	408, // Request Timeout
	429, // Rate exceeded.
	500, // Get occasional 500 Internal Server Error
	503, // Service Unavailable
	504, // Gateway Time-out
}
|
|
|
|
|
|
|
|
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
//
// Azure storage errors are retried either for a known-transient
// service code or for an HTTP status in retryErrorCodes; anything
// else falls through to rclone's generic retry classification.
func (f *Fs) shouldRetry(err error) (bool, error) {
	// FIXME interpret special errors - more to do here
	if storageErr, ok := err.(azblob.StorageError); ok {
		switch storageErr.ServiceCode() {
		case "InvalidBlobOrBlock":
			// These errors happen sometimes in multipart uploads
			// because of block concurrency issues
			return true, err
		}
		statusCode := storageErr.Response().StatusCode
		for _, e := range retryErrorCodes {
			if statusCode == e {
				return true, err
			}
		}
	}
	return fserrors.ShouldRetry(err), err
}
|
|
|
|
|
2018-09-07 13:02:27 +02:00
|
|
|
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
|
|
|
const minChunkSize = fs.Byte
|
|
|
|
if cs < minChunkSize {
|
|
|
|
return errors.Errorf("%s is less than %s", cs, minChunkSize)
|
|
|
|
}
|
|
|
|
if cs > maxChunkSize {
|
|
|
|
return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
|
|
|
err = checkUploadChunkSize(cs)
|
|
|
|
if err == nil {
|
|
|
|
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-10-13 23:45:17 +02:00
|
|
|
func checkUploadCutoff(cs fs.SizeSuffix) error {
|
|
|
|
if cs > maxUploadCutoff {
|
|
|
|
return errors.Errorf("%v must be less than or equal to %v", cs, maxUploadCutoff)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
|
|
|
err = checkUploadCutoff(cs)
|
|
|
|
if err == nil {
|
|
|
|
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-11-30 13:08:22 +01:00
|
|
|
// httpClientFactory creates a Factory object that sends HTTP requests
// to an rclone's http.Client.
//
// copied from azblob.newDefaultHTTPClientFactory
//
// The returned factory wraps the supplied client so requests inherit
// the pipeline's context and transport failures are surfaced as
// pipeline errors.
func httpClientFactory(client *http.Client) pipeline.Factory {
	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
		return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
			r, err := client.Do(request.WithContext(ctx))
			if err != nil {
				err = pipeline.NewError(err, "HTTP request failed")
			}
			return pipeline.NewHTTPResponse(r), err
		}
	})
}
|
|
|
|
|
|
|
|
// newPipeline creates a Pipeline using the specified credentials and options.
//
// this code was copied from azblob.NewPipeline
//
// The difference to the SDK version is that the HTTP sender is
// rclone's own client (see httpClientFactory) and forced logging to
// syslog/Windows Event log is disabled.
func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline.Pipeline {
	// Don't log stuff to syslog/Windows Event log
	pipeline.SetForceLogEnabled(false)

	// Closest to API goes first; closest to the wire goes last
	factories := []pipeline.Factory{
		azblob.NewTelemetryPolicyFactory(o.Telemetry),
		azblob.NewUniqueRequestIDPolicyFactory(),
		azblob.NewRetryPolicyFactory(o.Retry),
		c,
		pipeline.MethodFactoryMarker(), // indicates at what stage in the pipeline the method factory is invoked
		azblob.NewRequestLogPolicyFactory(o.RequestLog),
	}
	return pipeline.NewPipeline(factories, pipeline.Options{HTTPSender: httpClientFactory(f.client), Log: o.Log})
}
|
|
|
|
|
2019-08-16 11:10:56 +02:00
|
|
|
// setRoot changes the root of the Fs
//
// Keeps f.rootContainer and f.rootDirectory in sync with f.root.
func (f *Fs) setRoot(root string) {
	f.root = parsePath(root)
	f.rootContainer, f.rootDirectory = bucket.Split(f.root)
}
|
|
|
|
|
2019-02-07 18:41:17 +01:00
|
|
|
// NewFs constructs an Fs from the path, container:path
//
// Validates the configured limits, builds the pacer/client/memory
// pool, then picks a credential strategy (emulator, shared key or SAS
// URL). Finally, if the root looks like a file, it adjusts the root
// to the parent directory and returns fs.ErrorIsFile.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	ctx := context.Background()
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	// Validate size options against the backend's hard limits
	err = checkUploadCutoff(opt.UploadCutoff)
	if err != nil {
		return nil, errors.Wrap(err, "azure: upload cutoff")
	}
	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, errors.Wrap(err, "azure: chunk size")
	}
	if opt.ListChunkSize > maxListChunkSize {
		return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
	}
	if opt.Endpoint == "" {
		opt.Endpoint = storageDefaultBaseURL
	}

	if opt.AccessTier == "" {
		opt.AccessTier = string(defaultAccessTier)
	} else if !validateAccessTier(opt.AccessTier) {
		return nil, errors.Errorf("Azure Blob: Supported access tiers are %s, %s and %s",
			string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
	}

	f := &Fs{
		name:        name,
		opt:         *opt,
		pacer:       fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
		client:      fshttp.NewClient(fs.Config),
		cache:       bucket.NewCache(),
		cntURLcache: make(map[string]*azblob.ContainerURL, 1),
		pool: pool.New(
			time.Duration(opt.MemoryPoolFlushTime),
			int(opt.ChunkSize),
			fs.Config.Transfers,
			opt.MemoryPoolUseMmap,
		),
	}
	f.setRoot(root)
	f.features = (&fs.Features{
		ReadMimeType:      true,
		WriteMimeType:     true,
		BucketBased:       true,
		BucketBasedRootOK: true,
		SetTier:           true,
		GetTier:           true,
	}).Fill(f)

	var (
		u          *url.URL
		serviceURL azblob.ServiceURL
	)
	// Select the credential strategy; emulator takes priority, then
	// account+key, then SAS URL.
	switch {
	case opt.UseEmulator:
		credential, err := azblob.NewSharedKeyCredential(emulatorAccount, emulatorAccountKey)
		if err != nil {
			return nil, errors.Wrapf(err, "Failed to parse credentials")
		}
		u, err = url.Parse(emulatorBlobEndpoint)
		if err != nil {
			return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
		}
		pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
		serviceURL = azblob.NewServiceURL(*u, pipeline)
	case opt.Account != "" && opt.Key != "":
		credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
		if err != nil {
			return nil, errors.Wrapf(err, "Failed to parse credentials")
		}

		u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
		if err != nil {
			return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
		}
		pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
		serviceURL = azblob.NewServiceURL(*u, pipeline)
	case opt.SASURL != "":
		u, err = url.Parse(opt.SASURL)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to parse SAS URL")
		}
		// use anonymous credentials in case of sas url
		pipeline := f.newPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
		// Check if we have container level SAS or account level sas
		parts := azblob.NewBlobURLParts(*u)
		if parts.ContainerName != "" {
			if f.rootContainer != "" && parts.ContainerName != f.rootContainer {
				return nil, errors.New("Container name in SAS URL and container provided in command do not match")
			}
			// Container level SAS: pin the cache to this single
			// container and mark the Fs as limited.
			containerURL := azblob.NewContainerURL(*u, pipeline)
			f.cntURLcache[parts.ContainerName] = &containerURL
			f.isLimited = true
		} else {
			serviceURL = azblob.NewServiceURL(*u, pipeline)
		}
	default:
		return nil, errors.New("Need account+key or connectionString or sasURL")
	}
	f.svcURL = &serviceURL

	if f.rootContainer != "" && f.rootDirectory != "" {
		// Check to see if the (container,directory) is actually an existing file
		oldRoot := f.root
		newRoot, leaf := path.Split(oldRoot)
		f.setRoot(newRoot)
		_, err := f.NewObject(ctx, leaf)
		if err != nil {
			if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
				// File doesn't exist or is a directory so return old f
				f.setRoot(oldRoot)
				return f, nil
			}
			return nil, err
		}
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}
|
|
|
|
|
2019-08-16 11:10:56 +02:00
|
|
|
// return the container URL for the container passed in
//
// URLs are created on demand and memoized in f.cntURLcache; the cache
// is protected by f.cntURLcacheMu so this is safe for concurrent use.
func (f *Fs) cntURL(container string) (containerURL *azblob.ContainerURL) {
	f.cntURLcacheMu.Lock()
	defer f.cntURLcacheMu.Unlock()
	var ok bool
	if containerURL, ok = f.cntURLcache[container]; !ok {
		cntURL := f.svcURL.NewContainerURL(container)
		containerURL = &cntURL
		f.cntURLcache[container] = containerURL
	}
	return containerURL
}
|
|
|
|
|
2017-07-25 16:18:13 +02:00
|
|
|
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
//
// When info is supplied (from a listing) the metadata is decoded from
// it; otherwise a round trip is made to read the metadata.
func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItem) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
		err := o.decodeMetaDataFromBlob(info)
		if err != nil {
			return nil, err
		}
	} else {
		err := o.readMetaData() // reads info and headers, returning an error
		if err != nil {
			return nil, err
		}
	}
	return o, nil
}
|
|
|
|
|
|
|
|
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(remote, nil)
}
|
|
|
|
|
|
|
|
// getBlobReference creates an empty blob reference with no metadata
func (f *Fs) getBlobReference(container, containerPath string) azblob.BlobURL {
	return f.cntURL(container).NewBlobURL(containerPath)
}
|
|
|
|
|
2018-07-13 17:21:49 +02:00
|
|
|
// updateMetadataWithModTime adds the modTime passed in to o.meta.
|
|
|
|
func (o *Object) updateMetadataWithModTime(modTime time.Time) {
|
2017-07-25 16:18:13 +02:00
|
|
|
// Make sure o.meta is not nil
|
|
|
|
if o.meta == nil {
|
|
|
|
o.meta = make(map[string]string, 1)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set modTimeKey in it
|
|
|
|
o.meta[modTimeKey] = modTime.Format(timeFormatOut)
|
|
|
|
}
|
|
|
|
|
2019-01-04 13:32:28 +01:00
|
|
|
// Returns whether file is a directory marker or not
|
|
|
|
func isDirectoryMarker(size int64, metadata azblob.Metadata, remote string) bool {
|
|
|
|
// Directory markers are 0 length
|
|
|
|
if size == 0 {
|
|
|
|
// Note that metadata with hdi_isfolder = true seems to be a
|
|
|
|
// defacto standard for marking blobs as directories.
|
|
|
|
endsWithSlash := strings.HasSuffix(remote, "/")
|
|
|
|
if endsWithSlash || remote == "" || metadata["hdi_isfolder"] == "true" {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2017-07-25 16:18:13 +02:00
|
|
|
// listFn is called from list to handle an object
//
// object is nil when isDirectory is true (directory prefixes carry no
// blob item).
type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
|
2017-07-25 16:18:13 +02:00
|
|
|
|
|
|
|
// list lists the objects into the function supplied from
// the container and root supplied
//
// dir is the starting directory, "" for root
//
// The remote has prefix removed from it and if addContainer is set then
// it adds the container to the start.
//
// Pages through the listing using the segment marker, invoking fn for
// each blob and (when not recursing) each directory prefix. Directory
// markers are skipped.
func (f *Fs) list(ctx context.Context, container, directory, prefix string, addContainer bool, recurse bool, maxResults uint, fn listFn) error {
	if f.cache.IsDeleted(container) {
		return fs.ErrorDirNotFound
	}
	if prefix != "" {
		prefix += "/"
	}
	if directory != "" {
		directory += "/"
	}
	// An empty delimiter makes the service return a flat (recursive) listing
	delimiter := ""
	if !recurse {
		delimiter = "/"
	}

	options := azblob.ListBlobsSegmentOptions{
		Details: azblob.BlobListingDetails{
			Copy:             false,
			Metadata:         true,
			Snapshots:        false,
			UncommittedBlobs: false,
			Deleted:          false,
		},
		Prefix:     directory,
		MaxResults: int32(maxResults),
	}
	for marker := (azblob.Marker{}); marker.NotDone(); {
		var response *azblob.ListBlobsHierarchySegmentResponse
		err := f.pacer.Call(func() (bool, error) {
			var err error
			response, err = f.cntURL(container).ListBlobsHierarchySegment(ctx, marker, delimiter, options)
			return f.shouldRetry(err)
		})

		if err != nil {
			// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
			if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
				return fs.ErrorDirNotFound
			}
			return err
		}
		// Advance marker to next
		marker = response.NextMarker
		for i := range response.Segment.BlobItems {
			file := &response.Segment.BlobItems[i]
			// Finish if file name no longer has prefix
			// if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
			// 	return nil
			// }
			remote := f.opt.Enc.ToStandardPath(file.Name)
			if !strings.HasPrefix(remote, prefix) {
				fs.Debugf(f, "Odd name received %q", remote)
				continue
			}
			remote = remote[len(prefix):]
			if isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote) {
				continue // skip directory marker
			}
			if addContainer {
				remote = path.Join(container, remote)
			}
			// Send object
			err = fn(remote, file, false)
			if err != nil {
				return err
			}
		}
		// Send the subdirectories
		for _, remote := range response.Segment.BlobPrefixes {
			remote := strings.TrimRight(remote.Name, "/")
			remote = f.opt.Enc.ToStandardPath(remote)
			if !strings.HasPrefix(remote, prefix) {
				fs.Debugf(f, "Odd directory name received %q", remote)
				continue
			}
			remote = remote[len(prefix):]
			if addContainer {
				remote = path.Join(container, remote)
			}
			// Send object
			err = fn(remote, nil, true)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
|
|
|
|
|
|
|
|
// Convert a list item into a DirEntry
|
2018-07-13 17:21:49 +02:00
|
|
|
func (f *Fs) itemToDirEntry(remote string, object *azblob.BlobItem, isDirectory bool) (fs.DirEntry, error) {
|
2017-07-25 16:18:13 +02:00
|
|
|
if isDirectory {
|
|
|
|
d := fs.NewDir(remote, time.Time{})
|
|
|
|
return d, nil
|
|
|
|
}
|
|
|
|
o, err := f.newObjectWithInfo(remote, object)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return o, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// listDir lists a single directory (container, directory) returning
// the entries found.  prefix is stripped from each remote and, when
// addContainer is true, the container name is prepended instead.
func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
	// The callback appends to the captured entries slice.
	err = f.list(ctx, container, directory, prefix, addContainer, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
		entry, err := f.itemToDirEntry(remote, object, isDirectory)
		if err != nil {
			return err
		}
		if entry != nil {
			entries = append(entries, entry)
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	// container must be present if listing succeeded
	f.cache.MarkOK(container)
	return entries, nil
}
|
|
|
|
|
|
|
|
// listContainers returns all the containers to out
func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err error) {
	if f.isLimited {
		// A container-scoped SAS URL cannot enumerate containers, so
		// return only the containers already known in the URL cache.
		f.cntURLcacheMu.Lock()
		for container := range f.cntURLcache {
			d := fs.NewDir(container, time.Time{})
			entries = append(entries, d)
		}
		f.cntURLcacheMu.Unlock()
		return entries, nil
	}
	err = f.listContainersToFn(func(container *azblob.ContainerItem) error {
		// Decode the name back to the standard rclone representation.
		d := fs.NewDir(f.opt.Enc.ToStandardName(container.Name), container.Properties.LastModified)
		// Seeing the container in a listing proves it exists.
		f.cache.MarkOK(container.Name)
		entries = append(entries, d)
		return nil
	})
	if err != nil {
		return nil, err
	}
	return entries, nil
}
|
|
|
|
|
|
|
|
// List the objects and directories in dir into entries. The
|
|
|
|
// entries can be returned in any order but should be for a
|
|
|
|
// complete directory.
|
|
|
|
//
|
|
|
|
// dir should be "" to list the root, and should not have
|
|
|
|
// trailing slashes.
|
|
|
|
//
|
|
|
|
// This should return ErrDirNotFound if the directory isn't
|
|
|
|
// found.
|
2019-06-17 10:34:30 +02:00
|
|
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
2019-08-16 11:10:56 +02:00
|
|
|
container, directory := f.split(dir)
|
|
|
|
if container == "" {
|
2019-08-22 22:30:55 +02:00
|
|
|
if directory != "" {
|
|
|
|
return nil, fs.ErrorListBucketRequired
|
|
|
|
}
|
|
|
|
return f.listContainers(ctx)
|
2017-07-25 16:18:13 +02:00
|
|
|
}
|
2019-08-16 11:10:56 +02:00
|
|
|
return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
|
2017-07-25 16:18:13 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order.  If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	container, directory := f.split(dir)
	list := walk.NewListRHelper(callback)
	// listR recursively lists one container into the helper.
	listR := func(container, directory, prefix string, addContainer bool) error {
		return f.list(ctx, container, directory, prefix, addContainer, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
			entry, err := f.itemToDirEntry(remote, object, isDirectory)
			if err != nil {
				return err
			}
			return list.Add(entry)
		})
	}
	if container == "" {
		// Listing from the root: list every container, then recurse into each.
		entries, err := f.listContainers(ctx)
		if err != nil {
			return err
		}
		for _, entry := range entries {
			err = list.Add(entry)
			if err != nil {
				return err
			}
			container := entry.Remote()
			err = listR(container, "", f.rootDirectory, true)
			if err != nil {
				return err
			}
			// container must be present if listing succeeded
			f.cache.MarkOK(container)
		}
	} else {
		err = listR(container, directory, f.rootDirectory, f.rootContainer == "")
		if err != nil {
			return err
		}
		// container must be present if listing succeeded
		f.cache.MarkOK(container)
	}
	return list.Flush()
}
|
|
|
|
|
|
|
|
// listContainerFn is called from listContainersToFn to handle a container.
// Returning a non-nil error aborts the enumeration.
type listContainerFn func(*azblob.ContainerItem) error
|
2017-07-25 16:18:13 +02:00
|
|
|
|
|
|
|
// listContainersToFn lists the containers to the function supplied,
// paging through the service listing with the continuation marker.
func (f *Fs) listContainersToFn(fn listContainerFn) error {
	params := azblob.ListContainersSegmentOptions{
		MaxResults: int32(f.opt.ListChunkSize),
	}
	// NOTE(review): uses context.Background() rather than a caller
	// supplied context — confirm callers do not need cancellation here.
	ctx := context.Background()
	for marker := (azblob.Marker{}); marker.NotDone(); {
		var response *azblob.ListContainersSegmentResponse
		err := f.pacer.Call(func() (bool, error) {
			var err error
			response, err = f.svcURL.ListContainersSegment(ctx, marker, params)
			return f.shouldRetry(err)
		})
		if err != nil {
			return err
		}

		// Index to take the address of the slice element, avoiding a copy.
		for i := range response.ContainerItems {
			err = fn(&response.ContainerItems[i])
			if err != nil {
				return err
			}
		}
		marker = response.NextMarker
	}

	return nil
}
|
|
|
|
|
|
|
|
// Put the object into the container
|
|
|
|
//
|
|
|
|
// Copy the reader in to the new object which is returned
|
|
|
|
//
|
|
|
|
// The new object may have been created if an error is returned
|
2019-06-17 10:34:30 +02:00
|
|
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
2017-07-25 16:18:13 +02:00
|
|
|
// Temporary Object under construction
|
|
|
|
fs := &Object{
|
|
|
|
fs: f,
|
|
|
|
remote: src.Remote(),
|
|
|
|
}
|
2019-06-17 10:34:30 +02:00
|
|
|
return fs, fs.Update(ctx, in, src, options...)
|
2017-07-25 16:18:13 +02:00
|
|
|
}
|
|
|
|
|
2020-05-13 22:29:21 +02:00
|
|
|
// PutStream uploads to the remote path with the modTime given of indeterminate size.
// It simply delegates to Put, which handles size == -1 via multipart upload.
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}
|
|
|
|
|
2017-07-25 16:18:13 +02:00
|
|
|
// Mkdir creates the container if it doesn't exist.
// Only the container part of dir is used; subdirectories need no creation.
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	container, _ := f.split(dir)
	return f.makeContainer(ctx, container)
}
|
|
|
|
|
|
|
|
// makeContainer creates the container if it doesn't exist.
// Creation is memoised through f.cache so it only runs once per container.
func (f *Fs) makeContainer(ctx context.Context, container string) error {
	return f.cache.Create(container, func() error {
		// If this is a SAS URL limited to a container then assume it is already created
		if f.isLimited {
			return nil
		}
		// now try to create the container
		return f.pacer.Call(func() (bool, error) {
			_, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
			if err != nil {
				if storageErr, ok := err.(azblob.StorageError); ok {
					switch storageErr.ServiceCode() {
					case azblob.ServiceCodeContainerAlreadyExists:
						// Already there - treat as success.
						return false, nil
					case azblob.ServiceCodeContainerBeingDeleted:
						// From https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container
						// When a container is deleted, a container with the same name cannot be created
						// for at least 30 seconds; the container may not be available for more than 30
						// seconds if the service is still processing the request.
						time.Sleep(6 * time.Second) // default 10 retries will be 60 seconds
						f.cache.MarkDeleted(container)
						// Ask the pacer to retry the create.
						return true, err
					}
				}
			}
			return f.shouldRetry(err)
		})
	}, nil)
}
|
|
|
|
|
2019-08-16 11:10:56 +02:00
|
|
|
// isEmpty checks to see if a given (container, directory) is empty and returns an error if not
|
|
|
|
func (f *Fs) isEmpty(ctx context.Context, container, directory string) (err error) {
|
2017-07-25 16:18:13 +02:00
|
|
|
empty := true
|
2019-08-16 11:10:56 +02:00
|
|
|
err = f.list(ctx, container, directory, f.rootDirectory, f.rootContainer == "", true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
2017-07-25 16:18:13 +02:00
|
|
|
empty = false
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if !empty {
|
|
|
|
return fs.ErrorDirectoryNotEmpty
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// deleteContainer deletes the container. It can delete a full
|
|
|
|
// container so use isEmpty if you don't want that.
|
2019-08-16 11:10:56 +02:00
|
|
|
func (f *Fs) deleteContainer(ctx context.Context, container string) error {
|
|
|
|
return f.cache.Remove(container, func() error {
|
|
|
|
options := azblob.ContainerAccessConditions{}
|
|
|
|
return f.pacer.Call(func() (bool, error) {
|
|
|
|
_, err := f.cntURL(container).GetProperties(ctx, azblob.LeaseAccessConditions{})
|
|
|
|
if err == nil {
|
|
|
|
_, err = f.cntURL(container).Delete(ctx, options)
|
|
|
|
}
|
2018-07-13 17:21:49 +02:00
|
|
|
|
2019-08-16 11:10:56 +02:00
|
|
|
if err != nil {
|
|
|
|
// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
|
|
|
|
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
|
|
|
|
return false, fs.ErrorDirNotFound
|
|
|
|
}
|
|
|
|
|
|
|
|
return f.shouldRetry(err)
|
2018-07-13 17:21:49 +02:00
|
|
|
}
|
|
|
|
|
2017-07-25 16:18:13 +02:00
|
|
|
return f.shouldRetry(err)
|
2019-08-16 11:10:56 +02:00
|
|
|
})
|
2017-07-25 16:18:13 +02:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// Rmdir deletes the container if the fs is at the root
|
|
|
|
//
|
|
|
|
// Returns an error if it isn't empty
|
2019-06-17 10:34:30 +02:00
|
|
|
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
2019-08-16 11:10:56 +02:00
|
|
|
container, directory := f.split(dir)
|
|
|
|
if container == "" || directory != "" {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
err := f.isEmpty(ctx, container, directory)
|
2017-07-25 16:18:13 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-08-16 11:10:56 +02:00
|
|
|
return f.deleteContainer(ctx, container)
|
2017-07-25 16:18:13 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Precision of the remote - modification times are stored to nanosecond
// precision in the custom mtime metadata key.
func (f *Fs) Precision() time.Duration {
	return time.Nanosecond
}
|
|
|
|
|
|
|
|
// Hashes returns the supported hash sets - Azure blob storage exposes MD5 only.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}
|
|
|
|
|
|
|
|
// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context) error {
	dir := "" // forward compat!
	container, directory := f.split(dir)
	if container == "" || directory != "" {
		// Delegate to caller if not root of a container
		return fs.ErrorCantPurge
	}
	// Deleting the container removes all blobs inside it.
	return f.deleteContainer(ctx, container)
}
|
|
|
|
|
|
|
|
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	dstContainer, dstPath := f.split(remote)
	err := f.makeContainer(ctx, dstContainer)
	if err != nil {
		return nil, err
	}
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	dstBlobURL := f.getBlobReference(dstContainer, dstPath)
	srcBlobURL := srcObj.getBlobReference()

	source, err := url.Parse(srcBlobURL.String())
	if err != nil {
		return nil, err
	}

	options := azblob.BlobAccessConditions{}
	var startCopy *azblob.BlobStartCopyFromURLResponse

	// Kick off the server side copy.
	err = f.pacer.Call(func() (bool, error) {
		startCopy, err = dstBlobURL.StartCopyFromURL(ctx, *source, nil, azblob.ModifiedAccessConditions{}, options)
		return f.shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}

	// Poll the destination until the asynchronous copy completes.
	// NOTE(review): this poll loop has no retry/pacing and no upper
	// bound — confirm long-running copies behave as intended.
	copyStatus := startCopy.CopyStatus()
	for copyStatus == azblob.CopyStatusPending {
		time.Sleep(1 * time.Second)
		getMetadata, err := dstBlobURL.GetProperties(ctx, options)
		if err != nil {
			return nil, err
		}
		copyStatus = getMetadata.CopyStatus()
	}

	return f.NewObject(ctx, remote)
}
|
|
|
|
|
2020-04-25 19:36:18 +02:00
|
|
|
// getMemoryPool returns a buffer pool supplying buffers of the given size.
// The default chunk size reuses the shared f.pool.
func (f *Fs) getMemoryPool(size int64) *pool.Pool {
	if size == int64(f.opt.ChunkSize) {
		return f.pool
	}

	// NOTE(review): a fresh pool is created on every call for
	// non-default sizes and is never cached — confirm this is
	// acceptable for memory use on repeated large uploads.
	return pool.New(
		time.Duration(f.opt.MemoryPoolFlushTime),
		int(size),
		fs.Config.Transfers,
		f.opt.MemoryPoolUseMmap,
	)
}
|
|
|
|
|
2017-07-25 16:18:13 +02:00
|
|
|
// ------------------------------------------------------------
|
|
|
|
|
|
|
|
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}
|
|
|
|
|
|
|
|
// String returns a description of the Object for logging.
// Safe to call on a nil receiver.
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}
|
|
|
|
|
|
|
|
// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}
|
|
|
|
|
|
|
|
// Hash returns the MD5 of an object returning a lowercase hex string
|
2019-06-17 10:34:30 +02:00
|
|
|
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
2018-01-18 21:27:52 +01:00
|
|
|
if t != hash.MD5 {
|
|
|
|
return "", hash.ErrUnsupported
|
2017-07-25 16:18:13 +02:00
|
|
|
}
|
|
|
|
// Convert base64 encoded md5 into lower case hex
|
|
|
|
if o.md5 == "" {
|
|
|
|
return "", nil
|
|
|
|
}
|
|
|
|
data, err := base64.StdEncoding.DecodeString(o.md5)
|
|
|
|
if err != nil {
|
|
|
|
return "", errors.Wrapf(err, "Failed to decode Content-MD5: %q", o.md5)
|
|
|
|
}
|
|
|
|
return hex.EncodeToString(data), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.size
}
|
|
|
|
|
2018-07-13 17:21:49 +02:00
|
|
|
func (o *Object) setMetadata(metadata azblob.Metadata) {
|
|
|
|
if len(metadata) > 0 {
|
|
|
|
o.meta = metadata
|
|
|
|
if modTime, ok := metadata[modTimeKey]; ok {
|
2017-07-25 16:18:13 +02:00
|
|
|
when, err := time.Parse(timeFormatIn, modTime)
|
|
|
|
if err != nil {
|
|
|
|
fs.Debugf(o, "Couldn't parse %v = %q: %v", modTimeKey, modTime, err)
|
|
|
|
}
|
|
|
|
o.modTime = when
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
o.meta = nil
|
|
|
|
}
|
2018-07-13 17:21:49 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// decodeMetaDataFromPropertiesResponse sets the metadata from the data passed in
//
// Sets
//  o.id
//  o.modTime
//  o.size
//  o.md5
//  o.meta
//
// Returns fs.ErrorNotAFile if the blob is a directory marker.
func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetPropertiesResponse) (err error) {
	metadata := info.NewMetadata()
	size := info.ContentLength()
	if isDirectoryMarker(size, metadata, o.remote) {
		return fs.ErrorNotAFile
	}
	// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
	// this as base64 encoded string.
	o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
	o.mimeType = info.ContentType()
	o.size = size
	o.modTime = info.LastModified()
	o.accessTier = azblob.AccessTierType(info.AccessTier())
	o.setMetadata(metadata)

	return nil
}
|
|
|
|
|
|
|
|
// decodeMetaDataFromBlob sets the Object's metadata from a listing item.
// Mirrors decodeMetaDataFromPropertiesResponse for the BlobItem shape.
// Returns fs.ErrorNotAFile if the blob is a directory marker.
func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {
	metadata := info.Metadata
	size := *info.Properties.ContentLength
	if isDirectoryMarker(size, metadata, o.remote) {
		return fs.ErrorNotAFile
	}
	// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
	// this as base64 encoded string.
	o.md5 = base64.StdEncoding.EncodeToString(info.Properties.ContentMD5)
	o.mimeType = *info.Properties.ContentType
	o.size = size
	o.modTime = info.Properties.LastModified
	o.accessTier = info.Properties.AccessTier
	o.setMetadata(metadata)
	return nil
}
|
|
|
|
|
|
|
|
// getBlobReference creates an empty blob reference with no metadata
|
2018-07-13 17:21:49 +02:00
|
|
|
func (o *Object) getBlobReference() azblob.BlobURL {
|
2019-08-16 11:10:56 +02:00
|
|
|
container, directory := o.split()
|
|
|
|
return o.fs.getBlobReference(container, directory)
|
2017-07-25 16:18:13 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// clearMetaData clears enough metadata so readMetaData will re-read it.
// readMetaData treats a zero modTime as "not fetched yet".
func (o *Object) clearMetaData() {
	o.modTime = time.Time{}
}
|
|
|
|
|
|
|
|
// readMetaData gets the metadata if it hasn't already been fetched
//
// Sets
//  o.id
//  o.modTime
//  o.size
//  o.md5
//
// Returns fs.ErrorObjectNotFound if the blob does not exist.
func (o *Object) readMetaData() (err error) {
	// A non-zero modTime means the metadata has already been read.
	if !o.modTime.IsZero() {
		return nil
	}
	blob := o.getBlobReference()

	// Read the blob properties (this includes the user metadata)
	options := azblob.BlobAccessConditions{}
	ctx := context.Background()
	var blobProperties *azblob.BlobGetPropertiesResponse
	err = o.fs.pacer.Call(func() (bool, error) {
		blobProperties, err = blob.GetProperties(ctx, options)
		return o.fs.shouldRetry(err)
	})
	if err != nil {
		// On directories - GetProperties does not work and current SDK does not populate service code correctly hence check regular http response as well
		if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeBlobNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
			return fs.ErrorObjectNotFound
		}
		return err
	}

	return o.decodeMetaDataFromPropertiesResponse(blobProperties)
}
|
|
|
|
|
|
|
|
// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) (result time.Time) {
	// The error is logged in readMetaData
	_ = o.readMetaData()
	return o.modTime
}
|
|
|
|
|
|
|
|
// SetModTime sets the modification time of the local fs object by
// writing the custom mtime key into the blob's user metadata.
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	// Make sure o.meta is not nil
	if o.meta == nil {
		o.meta = make(map[string]string, 1)
	}
	// Set modTimeKey in it
	o.meta[modTimeKey] = modTime.Format(timeFormatOut)

	blob := o.getBlobReference()
	err := o.fs.pacer.Call(func() (bool, error) {
		_, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{})
		return o.fs.shouldRetry(err)
	})
	if err != nil {
		return err
	}
	// Only record the new time locally once the remote update succeeded.
	o.modTime = modTime
	return nil
}
|
|
|
|
|
|
|
|
// Storable returns if this object is storable
func (o *Object) Storable() bool {
	return true
}
|
|
|
|
|
|
|
|
// Open an object for read
|
2019-06-17 10:34:30 +02:00
|
|
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
2018-07-13 17:21:49 +02:00
|
|
|
// Offset and Count for range download
|
|
|
|
var offset int64
|
|
|
|
var count int64
|
2018-08-19 18:53:59 +02:00
|
|
|
if o.AccessTier() == azblob.AccessTierArchive {
|
|
|
|
return nil, errors.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
|
|
|
|
}
|
2019-08-06 16:18:08 +02:00
|
|
|
fs.FixRangeOption(options, o.size)
|
2017-07-25 16:18:13 +02:00
|
|
|
for _, option := range options {
|
|
|
|
switch x := option.(type) {
|
|
|
|
case *fs.RangeOption:
|
2018-07-13 17:21:49 +02:00
|
|
|
offset, count = x.Decode(o.size)
|
|
|
|
if count < 0 {
|
|
|
|
count = o.size - offset
|
2017-07-25 16:18:13 +02:00
|
|
|
}
|
|
|
|
case *fs.SeekOption:
|
2018-07-13 17:21:49 +02:00
|
|
|
offset = x.Offset
|
2017-07-25 16:18:13 +02:00
|
|
|
default:
|
|
|
|
if option.Mandatory() {
|
|
|
|
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
blob := o.getBlobReference()
|
2018-07-13 17:21:49 +02:00
|
|
|
ac := azblob.BlobAccessConditions{}
|
|
|
|
var dowloadResponse *azblob.DownloadResponse
|
2017-07-25 16:18:13 +02:00
|
|
|
err = o.fs.pacer.Call(func() (bool, error) {
|
2018-07-13 17:21:49 +02:00
|
|
|
dowloadResponse, err = blob.Download(ctx, offset, count, ac, false)
|
2017-07-25 16:18:13 +02:00
|
|
|
return o.fs.shouldRetry(err)
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "failed to open for download")
|
|
|
|
}
|
2018-07-13 17:21:49 +02:00
|
|
|
in = dowloadResponse.Body(azblob.RetryReaderOptions{})
|
2017-07-25 16:18:13 +02:00
|
|
|
return in, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// dontEncode is the characters that do not need percent-encoding
//
// The characters that do not need percent-encoding are a subset of
// the printable ASCII characters: upper-case letters, lower-case
// letters, digits, ".", "_", "-", "/", "~", "!", "$", "'", "(", ")",
// "*", ";", "=", ":", and "@". All other byte values in a UTF-8 must
// be replaced with "%" and the two-digit hex value of the byte.
const dontEncode = (`abcdefghijklmnopqrstuvwxyz` +
	`ABCDEFGHIJKLMNOPQRSTUVWXYZ` +
	`0123456789` +
	`._-/~!$'()*;=:@`)
|
|
|
|
|
|
|
|
// noNeedToEncode is a bitmap of characters which don't need % encoding.
// It is populated from dontEncode in init below.
var noNeedToEncode [256]bool
|
|
|
|
|
|
|
|
// init builds the noNeedToEncode lookup table from dontEncode.
func init() {
	for _, c := range dontEncode {
		noNeedToEncode[c] = true
	}
}
|
|
|
|
|
2018-07-13 17:21:49 +02:00
|
|
|
// readSeeker joins an io.Reader and an io.Seeker so a wrapped
// (accounted) reader can still be seeked by the uploader.
type readSeeker struct {
	io.Reader
	io.Seeker
}
|
|
|
|
|
2020-05-13 22:29:21 +02:00
|
|
|
// increment treats xs as a little-endian binary counter and adds one
// to it in place, propagating the carry through overflowing bytes.
func increment(xs []byte) {
	for i := range xs {
		xs[i]++
		if xs[i] != 0 {
			// no overflow, so no carry to propagate
			return
		}
	}
}
|
|
|
|
|
|
|
|
// warnStreamUpload ensures the streaming-upload size-limit warning is logged only once.
var warnStreamUpload sync.Once
|
|
|
|
|
2017-07-25 16:18:13 +02:00
|
|
|
// uploadMultipart uploads a file using multipart upload
|
|
|
|
//
|
|
|
|
// Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
|
2020-05-13 22:29:21 +02:00
|
|
|
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, blob *azblob.BlobURL, httpHeaders *azblob.BlobHTTPHeaders) (err error) {
|
2017-07-25 16:18:13 +02:00
|
|
|
// Calculate correct chunkSize
|
2018-05-14 19:06:57 +02:00
|
|
|
chunkSize := int64(o.fs.opt.ChunkSize)
|
2020-05-13 22:29:21 +02:00
|
|
|
totalParts := -1
|
|
|
|
|
|
|
|
// Note that the max size of file is 4.75 TB (100 MB X 50,000
|
|
|
|
// blocks) and this is bigger than the max uncommitted block
|
|
|
|
// size (9.52 TB) so we do not need to part commit block lists
|
|
|
|
// or garbage collect uncommitted blocks.
|
|
|
|
//
|
|
|
|
// See: https://docs.microsoft.com/en-gb/rest/api/storageservices/put-block
|
|
|
|
|
|
|
|
// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
|
|
|
|
// buffers here (default 4MB). With a maximum number of parts (50,000) this will be a file of
|
|
|
|
// 195GB which seems like a not too unreasonable limit.
|
|
|
|
if size == -1 {
|
|
|
|
warnStreamUpload.Do(func() {
|
|
|
|
fs.Logf(o, "Streaming uploads using chunk size %v will have maximum file size of %v",
|
|
|
|
o.fs.opt.ChunkSize, fs.SizeSuffix(chunkSize*maxTotalParts))
|
|
|
|
})
|
|
|
|
} else {
|
|
|
|
// Adjust partSize until the number of parts is small enough.
|
|
|
|
if size/chunkSize >= maxTotalParts {
|
|
|
|
// Calculate partition size rounded up to the nearest MB
|
|
|
|
chunkSize = (((size / maxTotalParts) >> 20) + 1) << 20
|
2017-07-25 16:18:13 +02:00
|
|
|
}
|
|
|
|
if chunkSize > int64(maxChunkSize) {
|
|
|
|
return errors.Errorf("can't upload as it is too big %v - takes more than %d chunks of %v", fs.SizeSuffix(size), totalParts, fs.SizeSuffix(chunkSize/2))
|
|
|
|
}
|
2020-05-13 22:29:21 +02:00
|
|
|
totalParts = int(size / chunkSize)
|
|
|
|
if size%chunkSize != 0 {
|
|
|
|
totalParts++
|
|
|
|
}
|
2017-07-25 16:18:13 +02:00
|
|
|
}
|
|
|
|
|
2020-05-13 22:29:21 +02:00
|
|
|
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", totalParts, fs.SizeSuffix(chunkSize))
|
2018-07-13 17:21:49 +02:00
|
|
|
|
2018-02-01 17:35:08 +01:00
|
|
|
// unwrap the accounting from the input, we use wrap to put it
|
|
|
|
// back on after the buffering
|
|
|
|
in, wrap := accounting.UnWrap(in)
|
|
|
|
|
2017-07-25 16:18:13 +02:00
|
|
|
// Upload the chunks
|
2020-05-13 22:29:21 +02:00
|
|
|
var (
|
|
|
|
g, gCtx = errgroup.WithContext(ctx)
|
|
|
|
remaining = size // remaining size in file for logging only, -1 if size < 0
|
|
|
|
position = int64(0) // position in file
|
|
|
|
memPool = o.fs.getMemoryPool(chunkSize) // pool to get memory from
|
|
|
|
finished = false // set when we have read EOF
|
|
|
|
blocks []string // list of blocks for finalize
|
|
|
|
blockBlobURL = blob.ToBlockBlobURL() // Get BlockBlobURL, we will use default pipeline here
|
|
|
|
ac = azblob.LeaseAccessConditions{} // Use default lease access conditions
|
|
|
|
binaryBlockID = make([]byte, 8) // block counter as LSB first 8 bytes
|
|
|
|
)
|
|
|
|
for part := 0; !finished; part++ {
|
2020-04-25 19:36:18 +02:00
|
|
|
// Get a block of memory from the pool and a token which limits concurrency
|
|
|
|
o.fs.uploadToken.Get()
|
|
|
|
buf := memPool.Get()
|
2017-07-25 16:18:13 +02:00
|
|
|
|
2020-05-13 22:29:21 +02:00
|
|
|
free := func() {
|
2020-04-25 19:36:18 +02:00
|
|
|
memPool.Put(buf) // return the buf
|
|
|
|
o.fs.uploadToken.Put() // return the token
|
2017-07-25 16:18:13 +02:00
|
|
|
}
|
|
|
|
|
2020-05-13 22:29:21 +02:00
|
|
|
// Fail fast, in case an errgroup managed function returns an error
|
|
|
|
// gCtx is cancelled. There is no point in uploading all the other parts.
|
|
|
|
if gCtx.Err() != nil {
|
|
|
|
free()
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read the chunk
|
|
|
|
n, err := readers.ReadFill(in, buf) // this can never return 0, nil
|
|
|
|
if err == io.EOF {
|
|
|
|
if n == 0 { // end if no data
|
|
|
|
free()
|
|
|
|
break
|
|
|
|
}
|
|
|
|
finished = true
|
|
|
|
} else if err != nil {
|
|
|
|
free()
|
|
|
|
return errors.Wrap(err, "multipart upload failed to read source")
|
|
|
|
}
|
|
|
|
buf = buf[:n]
|
|
|
|
|
|
|
|
// increment the blockID and save the blocks for finalize
|
|
|
|
increment(binaryBlockID)
|
|
|
|
blockID := base64.StdEncoding.EncodeToString(binaryBlockID)
|
|
|
|
blocks = append(blocks, blockID)
|
|
|
|
|
2017-07-25 16:18:13 +02:00
|
|
|
// Transfer the chunk
|
2020-05-13 22:29:21 +02:00
|
|
|
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, totalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
|
|
|
|
g.Go(func() (err error) {
|
|
|
|
defer free()
|
2017-07-25 16:18:13 +02:00
|
|
|
|
2018-09-07 06:43:40 +02:00
|
|
|
// Upload the block, with MD5 for check
|
|
|
|
md5sum := md5.Sum(buf)
|
|
|
|
transactionalMD5 := md5sum[:]
|
2020-05-13 22:29:21 +02:00
|
|
|
err = o.fs.pacer.Call(func() (bool, error) {
|
2018-07-13 17:21:49 +02:00
|
|
|
bufferReader := bytes.NewReader(buf)
|
|
|
|
wrappedReader := wrap(bufferReader)
|
|
|
|
rs := readSeeker{wrappedReader, bufferReader}
|
2018-09-07 06:43:40 +02:00
|
|
|
_, err = blockBlobURL.StageBlock(ctx, blockID, &rs, ac, transactionalMD5)
|
2017-07-25 16:18:13 +02:00
|
|
|
return o.fs.shouldRetry(err)
|
|
|
|
})
|
|
|
|
if err != nil {
|
2020-05-13 22:29:21 +02:00
|
|
|
return errors.Wrap(err, "multipart upload failed to upload part")
|
2017-07-25 16:18:13 +02:00
|
|
|
}
|
2020-05-13 22:29:21 +02:00
|
|
|
return nil
|
|
|
|
})
|
2017-07-25 16:18:13 +02:00
|
|
|
|
|
|
|
// ready for next block
|
2020-05-13 22:29:21 +02:00
|
|
|
if size >= 0 {
|
|
|
|
remaining -= chunkSize
|
2017-07-25 16:18:13 +02:00
|
|
|
}
|
2020-05-13 22:29:21 +02:00
|
|
|
position += chunkSize
|
2017-07-25 16:18:13 +02:00
|
|
|
}
|
2020-05-13 22:29:21 +02:00
|
|
|
err = g.Wait()
|
2017-07-25 16:18:13 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Finalise the upload session
|
|
|
|
err = o.fs.pacer.Call(func() (bool, error) {
|
2018-07-13 17:21:49 +02:00
|
|
|
_, err := blockBlobURL.CommitBlockList(ctx, blocks, *httpHeaders, o.meta, azblob.BlobAccessConditions{})
|
2017-07-25 16:18:13 +02:00
|
|
|
return o.fs.shouldRetry(err)
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "multipart upload failed to finalize")
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update the object with the contents of the io.Reader, modTime and size
|
|
|
|
//
|
|
|
|
// The new object may have been created if an error is returned
|
2019-06-17 10:34:30 +02:00
|
|
|
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
2019-08-22 22:30:55 +02:00
|
|
|
container, _ := o.split()
|
|
|
|
err = o.fs.makeContainer(ctx, container)
|
2017-07-25 16:18:13 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
size := src.Size()
|
2018-07-13 17:21:49 +02:00
|
|
|
// Update Mod time
|
2019-06-17 10:34:30 +02:00
|
|
|
o.updateMetadataWithModTime(src.ModTime(ctx))
|
2018-07-13 17:21:49 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
blob := o.getBlobReference()
|
|
|
|
httpHeaders := azblob.BlobHTTPHeaders{}
|
2019-06-17 10:34:30 +02:00
|
|
|
httpHeaders.ContentType = fs.MimeType(ctx, o)
|
2019-02-28 22:31:07 +01:00
|
|
|
// Compute the Content-MD5 of the file, for multiparts uploads it
|
|
|
|
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
|
2020-05-25 08:05:53 +02:00
|
|
|
// Note: If multipart, an MD5 checksum will also be computed for each uploaded block
|
2019-02-28 22:31:07 +01:00
|
|
|
// in order to validate its integrity during transport
|
2020-04-23 20:47:14 +02:00
|
|
|
if !o.fs.opt.DisableCheckSum {
|
|
|
|
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
|
|
|
|
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
|
|
|
|
if err == nil {
|
|
|
|
httpHeaders.ContentMD5 = sourceMD5bytes
|
|
|
|
} else {
|
|
|
|
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
|
|
|
|
}
|
2017-07-25 16:18:13 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-07-13 17:21:49 +02:00
|
|
|
putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
|
2018-10-15 10:01:48 +02:00
|
|
|
BufferSize: int(o.fs.opt.ChunkSize),
|
2018-07-13 17:21:49 +02:00
|
|
|
MaxBuffers: 4,
|
|
|
|
Metadata: o.meta,
|
|
|
|
BlobHTTPHeaders: httpHeaders,
|
|
|
|
}
|
2018-10-15 10:01:48 +02:00
|
|
|
// FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75
|
|
|
|
// is merged the SDK can't upload a single blob of exactly the chunk
|
|
|
|
// size, so upload with a multpart upload to work around.
|
2019-07-28 19:47:38 +02:00
|
|
|
// See: https://github.com/rclone/rclone/issues/2653
|
2020-05-13 22:29:21 +02:00
|
|
|
multipartUpload := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
|
2018-10-15 10:01:48 +02:00
|
|
|
if size == int64(o.fs.opt.ChunkSize) {
|
|
|
|
multipartUpload = true
|
|
|
|
fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size)
|
|
|
|
}
|
2018-07-13 17:21:49 +02:00
|
|
|
|
2017-07-25 16:18:13 +02:00
|
|
|
// Don't retry, return a retry error instead
|
|
|
|
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
2018-10-15 10:01:48 +02:00
|
|
|
if multipartUpload {
|
2017-07-25 16:18:13 +02:00
|
|
|
// If a large file upload in chunks
|
2020-05-13 22:29:21 +02:00
|
|
|
err = o.uploadMultipart(ctx, in, size, &blob, &httpHeaders)
|
2017-07-25 16:18:13 +02:00
|
|
|
} else {
|
|
|
|
// Write a small blob in one transaction
|
2018-07-13 17:21:49 +02:00
|
|
|
blockBlobURL := blob.ToBlockBlobURL()
|
|
|
|
_, err = azblob.UploadStreamToBlockBlob(ctx, in, blockBlobURL, putBlobOptions)
|
2017-07-25 16:18:13 +02:00
|
|
|
}
|
|
|
|
return o.fs.shouldRetry(err)
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-08-19 18:53:59 +02:00
|
|
|
// Refresh metadata on object
|
2017-07-25 16:18:13 +02:00
|
|
|
o.clearMetaData()
|
2018-08-19 18:53:59 +02:00
|
|
|
err = o.readMetaData()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// If tier is not changed or not specified, do not attempt to invoke `SetBlobTier` operation
|
|
|
|
if o.fs.opt.AccessTier == string(defaultAccessTier) || o.fs.opt.AccessTier == string(o.AccessTier()) {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now, set blob tier based on configured access tier
|
2018-09-11 03:55:06 +02:00
|
|
|
return o.SetTier(o.fs.opt.AccessTier)
|
2017-07-25 16:18:13 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Remove an object
|
2019-06-17 10:34:30 +02:00
|
|
|
func (o *Object) Remove(ctx context.Context) error {
|
2017-07-25 16:18:13 +02:00
|
|
|
blob := o.getBlobReference()
|
2018-07-13 17:21:49 +02:00
|
|
|
snapShotOptions := azblob.DeleteSnapshotsOptionNone
|
|
|
|
ac := azblob.BlobAccessConditions{}
|
2017-07-25 16:18:13 +02:00
|
|
|
return o.fs.pacer.Call(func() (bool, error) {
|
2018-07-13 17:21:49 +02:00
|
|
|
_, err := blob.Delete(ctx, snapShotOptions, ac)
|
2017-07-25 16:18:13 +02:00
|
|
|
return o.fs.shouldRetry(err)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// MimeType of an Object if known, "" otherwise
|
2019-06-17 10:34:30 +02:00
|
|
|
func (o *Object) MimeType(ctx context.Context) string {
|
2017-07-25 16:18:13 +02:00
|
|
|
return o.mimeType
|
|
|
|
}
|
|
|
|
|
2018-08-19 18:53:59 +02:00
|
|
|
// AccessTier of an object, default is of type none
|
|
|
|
func (o *Object) AccessTier() azblob.AccessTierType {
|
|
|
|
return o.accessTier
|
|
|
|
}
|
|
|
|
|
2018-09-11 03:55:06 +02:00
|
|
|
// SetTier performs changing object tier
|
|
|
|
func (o *Object) SetTier(tier string) error {
|
|
|
|
if !validateAccessTier(tier) {
|
|
|
|
return errors.Errorf("Tier %s not supported by Azure Blob Storage", tier)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check if current tier already matches with desired tier
|
|
|
|
if o.GetTier() == tier {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
desiredAccessTier := azblob.AccessTierType(tier)
|
|
|
|
blob := o.getBlobReference()
|
|
|
|
ctx := context.Background()
|
|
|
|
err := o.fs.pacer.Call(func() (bool, error) {
|
2018-10-25 17:35:30 +02:00
|
|
|
_, err := blob.SetTier(ctx, desiredAccessTier, azblob.LeaseAccessConditions{})
|
2018-09-11 03:55:06 +02:00
|
|
|
return o.fs.shouldRetry(err)
|
|
|
|
})
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "Failed to set Blob Tier")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set access tier on local object also, this typically
|
|
|
|
// gets updated on get blob properties
|
|
|
|
o.accessTier = desiredAccessTier
|
|
|
|
fs.Debugf(o, "Successfully changed object tier to %s", tier)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetTier returns object tier in azure as string
|
|
|
|
func (o *Object) GetTier() string {
|
|
|
|
return string(o.accessTier)
|
|
|
|
}
|
|
|
|
|
2017-07-25 16:18:13 +02:00
|
|
|
// Check the interfaces are satisfied
|
|
|
|
var (
|
2020-05-13 22:29:21 +02:00
|
|
|
_ fs.Fs = &Fs{}
|
|
|
|
_ fs.Copier = &Fs{}
|
|
|
|
_ fs.PutStreamer = &Fs{}
|
|
|
|
_ fs.Purger = &Fs{}
|
|
|
|
_ fs.ListRer = &Fs{}
|
|
|
|
_ fs.Object = &Object{}
|
|
|
|
_ fs.MimeTyper = &Object{}
|
|
|
|
_ fs.GetTierer = &Object{}
|
|
|
|
_ fs.SetTierer = &Object{}
|
2017-07-25 16:18:13 +02:00
|
|
|
)
|