azureblob: port old authentication methods to new SDK

Co-authored-by: Brad Ackerman <brad@facefault.org>
This commit is contained in:
Nick Craig-Wood 2022-11-29 15:43:22 +00:00
parent a131da2c35
commit f746b2fe85

View File

@ -36,11 +36,14 @@ import (
"crypto/md5" "crypto/md5"
"encoding/base64" "encoding/base64"
"encoding/hex" "encoding/hex"
"encoding/json"
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"os"
"path" "path"
"strconv" "strconv"
"strings" "strings"
@ -48,11 +51,13 @@ import (
"time" "time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/accounting"
@ -66,6 +71,7 @@ import (
"github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket" "github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool" "github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/readers"
@ -104,7 +110,32 @@ func init() {
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "account", Name: "account",
Help: "Storage Account Name.\n\nLeave blank to use SAS URL or Emulator.", Help: `Storage Account Name.
Leave blank to use SAS URL or Emulator.
If this is blank then if env_auth is set it will be read from the
environment variable AZURE_CLIENT_ID.
`,
}, {
Name: "env_auth",
Help: `Read credentials from runtime (environment variables).
Pull credentials from AZURE_TENANT_ID and AZURE_CLIENT_{ID,SECRET} environment vars.
See EnvironmentCredential in the Azure docs for more info.
Other authentication methods will, if specified, override this flag.`,
Default: false,
}, {
Name: "key",
Help: `Storage Account Shared Key.
Leave blank to use SAS URL or Emulator.`,
}, {
Name: "sas_url",
Help: `SAS URL for container level access only.
Leave blank if using account/key or Emulator.`,
}, { }, {
Name: "service_principal_file", Name: "service_principal_file",
Help: `Path to file containing credentials for use with a service principal. Help: `Path to file containing credentials for use with a service principal.
@ -118,21 +149,7 @@ Leave blank normally. Needed only if you want to use a service principal instead
See ["Create an Azure service principal"](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli) and ["Assign an Azure role for access to blob data"](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli) pages for more details. See ["Create an Azure service principal"](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli) and ["Assign an Azure role for access to blob data"](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli) pages for more details.
`, `,
}, { Advanced: true,
Name: "env_auth",
Help: `Read credentials from runtime (environment variables).
Pull credentials from AZURE_TENANT_ID and AZURE_CLIENT_{ID,SECRET} environment vars.
See EnvironmentCredential in the Azure docs for more info.
Other authentication methods will, if specified, override this flag.`,
Default: false,
}, {
Name: "key",
Help: "Storage Account Key.\n\nLeave blank to use SAS URL or Emulator.",
}, {
Name: "sas_url",
Help: "SAS URL for container level access only.\n\nLeave blank if using account/key or Emulator.",
}, { }, {
Name: "use_msi", Name: "use_msi",
Help: `Use a managed service identity to authenticate (only works in Azure). Help: `Use a managed service identity to authenticate (only works in Azure).
@ -146,6 +163,7 @@ the user-assigned identity will be used by default. If the resource has multiple
identities, the identity to use must be explicitly specified using exactly one of the msi_object_id, identities, the identity to use must be explicitly specified using exactly one of the msi_object_id,
msi_client_id, or msi_mi_res_id parameters.`, msi_client_id, or msi_mi_res_id parameters.`,
Default: false, Default: false,
Advanced: true,
}, { }, {
Name: "msi_object_id", Name: "msi_object_id",
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.", Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.",
@ -162,6 +180,7 @@ msi_client_id, or msi_mi_res_id parameters.`,
Name: "use_emulator", Name: "use_emulator",
Help: "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.", Help: "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.",
Default: false, Default: false,
Advanced: true,
}, { }, {
Name: "endpoint", Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.", Help: "Endpoint for the service.\n\nLeave blank normally.",
@ -307,9 +326,9 @@ This option controls how often unused buffers will be removed from the pool.`,
// Options defines the configuration for this backend // Options defines the configuration for this backend
type Options struct { type Options struct {
Account string `config:"account"` Account string `config:"account"`
ServicePrincipalFile string `config:"service_principal_file"`
EnvAuth bool `config:"env_auth"` EnvAuth bool `config:"env_auth"`
Key string `config:"key"` Key string `config:"key"`
ServicePrincipalFile string `config:"service_principal_file"`
UseMSI bool `config:"use_msi"` UseMSI bool `config:"use_msi"`
MSIObjectID string `config:"msi_object_id"` MSIObjectID string `config:"msi_object_id"`
MSIClientID string `config:"msi_client_id"` MSIClientID string `config:"msi_client_id"`
@ -485,6 +504,20 @@ type servicePrincipalCredentials struct {
Tenant string `json:"tenant"` Tenant string `json:"tenant"`
} }
// parseServicePrincipalCredentials unmarshals a service principal
// credentials JSON file as generated by az cli.
//
// It returns an error if the JSON cannot be parsed or if any of the
// required fields (appId, password, tenant) is empty.
//
// NOTE(review): ctx is currently unused here — kept so the signature can
// grow context-aware validation later without touching callers.
func parseServicePrincipalCredentials(ctx context.Context, credentialsData []byte) (*servicePrincipalCredentials, error) {
	var spCredentials servicePrincipalCredentials
	if err := json.Unmarshal(credentialsData, &spCredentials); err != nil {
		return nil, fmt.Errorf("error parsing credentials from JSON file: %w", err)
	}
	// TODO: support certificate credentials
	// Validate all fields present
	if spCredentials.AppID == "" || spCredentials.Password == "" || spCredentials.Tenant == "" {
		// errors.New rather than fmt.Errorf: no format verbs needed (staticcheck S1039)
		return nil, errors.New("missing fields in credentials file")
	}
	return &spCredentials, nil
}
// setRoot changes the root of the Fs // setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) { func (f *Fs) setRoot(root string) {
f.root = parsePath(root) f.root = parsePath(root)
@ -524,9 +557,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.ListChunkSize > maxListChunkSize { if opt.ListChunkSize > maxListChunkSize {
return nil, fmt.Errorf("blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize) return nil, fmt.Errorf("blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
} }
if opt.Endpoint == "" {
opt.Endpoint = storageDefaultBaseURL
}
if opt.AccessTier == "" { if opt.AccessTier == "" {
opt.AccessTier = string(defaultAccessTier) opt.AccessTier = string(defaultAccessTier)
@ -568,236 +598,150 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
GetTier: true, GetTier: true,
}).Fill(ctx, f) }).Fill(ctx, f)
// var ( // Client options specifying our own transport
// u *url.URL policyClientOptions := policy.ClientOptions{
// serviceURL azblob.ServiceURL Transport: newTransporter(ctx),
// ) }
// switch { clientOpt := service.ClientOptions{
// case opt.UseEmulator: ClientOptions: policyClientOptions,
// var actualEmulatorAccount = emulatorAccount
// if opt.Account != "" {
// actualEmulatorAccount = opt.Account
// }
// var actualEmulatorKey = emulatorAccountKey
// if opt.Key != "" {
// actualEmulatorKey = opt.Key
// }
// credential, err := azblob.NewSharedKeyCredential(actualEmulatorAccount, actualEmulatorKey)
// if err != nil {
// return nil, fmt.Errorf("failed to parse credentials: %w", err)
// }
// var actualEmulatorEndpoint = emulatorBlobEndpoint
// if opt.Endpoint != "" {
// actualEmulatorEndpoint = opt.Endpoint
// }
// u, err = url.Parse(actualEmulatorEndpoint)
// if err != nil {
// return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
// }
// pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
// serviceURL = azblob.NewServiceURL(*u, pipeline)
// case opt.UseMSI:
// var token adal.Token
// var userMSI = &userMSI{}
// if len(opt.MSIClientID) > 0 || len(opt.MSIObjectID) > 0 || len(opt.MSIResourceID) > 0 {
// // Specifying a user-assigned identity. Exactly one of the above IDs must be specified.
// // Validate and ensure exactly one is set. (To do: better validation.)
// if len(opt.MSIClientID) > 0 {
// if len(opt.MSIObjectID) > 0 || len(opt.MSIResourceID) > 0 {
// return nil, errors.New("more than one user-assigned identity ID is set")
// }
// userMSI.Type = msiClientID
// userMSI.Value = opt.MSIClientID
// }
// if len(opt.MSIObjectID) > 0 {
// if len(opt.MSIClientID) > 0 || len(opt.MSIResourceID) > 0 {
// return nil, errors.New("more than one user-assigned identity ID is set")
// }
// userMSI.Type = msiObjectID
// userMSI.Value = opt.MSIObjectID
// }
// if len(opt.MSIResourceID) > 0 {
// if len(opt.MSIClientID) > 0 || len(opt.MSIObjectID) > 0 {
// return nil, errors.New("more than one user-assigned identity ID is set")
// }
// userMSI.Type = msiResourceID
// userMSI.Value = opt.MSIResourceID
// }
// } else {
// userMSI = nil
// }
// err = f.imdsPacer.Call(func() (bool, error) {
// // Retry as specified by the documentation:
// // https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token#retry-guidance
// token, err = GetMSIToken(ctx, userMSI)
// return f.shouldRetry(ctx, err)
// })
// if err != nil {
// return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
// }
// u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
// if err != nil {
// return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
// }
// credential := azblob.NewTokenCredential(token.AccessToken, func(credential azblob.TokenCredential) time.Duration {
// fs.Debugf(f, "Token refresher called.")
// var refreshedToken adal.Token
// err := f.imdsPacer.Call(func() (bool, error) {
// refreshedToken, err = GetMSIToken(ctx, userMSI)
// return f.shouldRetry(ctx, err)
// })
// if err != nil {
// // Failed to refresh.
// return 0
// }
// credential.SetToken(refreshedToken.AccessToken)
// now := time.Now().UTC()
// // Refresh one minute before expiry.
// refreshAt := refreshedToken.Expires().UTC().Add(-1 * time.Minute)
// fs.Debugf(f, "Acquired new token that expires at %v; refreshing in %d s", refreshedToken.Expires(),
// int(refreshAt.Sub(now).Seconds()))
// if now.After(refreshAt) {
// // Acquired a causality violation.
// return 0
// }
// return refreshAt.Sub(now)
// })
// pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
// serviceURL = azblob.NewServiceURL(*u, pipeline)
// case opt.Account != "" && opt.Key != "":
// credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
// if err != nil {
// return nil, fmt.Errorf("failed to parse credentials: %w", err)
// }
// u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
// if err != nil {
// return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
// }
// pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
// serviceURL = azblob.NewServiceURL(*u, pipeline)
// case opt.SASURL != "":
// u, err = url.Parse(opt.SASURL)
// if err != nil {
// return nil, fmt.Errorf("failed to parse SAS URL: %w", err)
// }
// // use anonymous credentials in case of sas url
// pipeline := f.newPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
// // Check if we have container level SAS or account level sas
// parts := azblob.NewBlobURLParts(*u)
// if parts.ContainerName != "" {
// if f.rootContainer != "" && parts.ContainerName != f.rootContainer {
// return nil, errors.New("container name in SAS URL and container provided in command do not match")
// }
// containerURL := azblob.NewContainerURL(*u, pipeline)
// f.cntSVCcache[parts.ContainerName] = &containerURL
// f.isLimited = true
// } else {
// serviceURL = azblob.NewServiceURL(*u, pipeline)
// }
// case opt.ServicePrincipalFile != "":
// // Create a standard URL.
// u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
// if err != nil {
// return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
// }
// // Try loading service principal credentials from file.
// loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServicePrincipalFile))
// if err != nil {
// return nil, fmt.Errorf("error opening service principal credentials file: %w", err)
// }
// // Create a token refresher from service principal credentials.
// tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, loadedCreds)
// if err != nil {
// return nil, fmt.Errorf("failed to create a service principal token: %w", err)
// }
// options := azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}}
// pipe := f.newPipeline(azblob.NewTokenCredential("", tokenRefresher), options)
// serviceURL = azblob.NewServiceURL(*u, pipe)
// default:
// return nil, errors.New("no authentication method configured")
// }
//f.svcURL = &serviceURL
u, err := url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
if err != nil {
return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
} }
serviceURL := u.String()
fs.Debugf(f, "Service URL = %q", serviceURL)
// FIXME Very quick and dirty auth
// Here we auth by setting one of cred, sharedKeyCred or f.svc
var ( var (
cred azcore.TokenCredential cred azcore.TokenCredential
sharedKeyCred *service.SharedKeyCredential sharedKeyCred *service.SharedKeyCredential
) )
switch {
if opt.EnvAuth { case opt.EnvAuth:
// Read credentials from the environment // Read account from environment if needed
cred, err = azidentity.NewDefaultAzureCredential(nil) if opt.Account == "" {
if err != nil { opt.Account, _ = os.LookupEnv("AZURE_CLIENT_ID")
return nil, fmt.Errorf("create default azure credential failed: %w", err)
} }
} else { // Read credentials from the environment
options := azidentity.DefaultAzureCredentialOptions{
// Use the config file to configure ClientOptions: policyClientOptions,
}
cred, err = azidentity.NewDefaultAzureCredential(&options)
if err != nil {
return nil, fmt.Errorf("create azure enviroment credential failed: %w", err)
}
case opt.UseEmulator:
if opt.Account != "" {
opt.Account = emulatorAccount
}
if opt.Key == "" {
opt.Key = emulatorAccountKey
}
if opt.Endpoint != "" {
opt.Endpoint = emulatorBlobEndpoint
}
sharedKeyCred, err = service.NewSharedKeyCredential(opt.Account, opt.Key)
if err != nil {
return nil, fmt.Errorf("create new shared key credential for emulator failed: %w", err)
}
case opt.UseMSI:
// Specifying a user-assigned identity. Exactly one of the above IDs must be specified.
// Validate and ensure exactly one is set. (To do: better validation.)
var b2i = map[bool]int{false: 0, true: 1}
set := b2i[opt.MSIClientID != ""] + b2i[opt.MSIObjectID != ""] + b2i[opt.MSIResourceID != ""]
if set > 1 {
return nil, errors.New("more than one user-assigned identity ID is set")
}
var options azidentity.ManagedIdentityCredentialOptions
switch {
case opt.MSIClientID != "":
options.ID = azidentity.ClientID(opt.MSIClientID)
case opt.MSIObjectID != "":
// FIXME this doesn't appear to be in the new SDK?
return nil, fmt.Errorf("MSI object ID is currently unsupported")
case opt.MSIResourceID != "":
options.ID = azidentity.ResourceID(opt.MSIResourceID)
}
cred, err = azidentity.NewManagedIdentityCredential(&options)
if err != nil {
return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
}
case opt.Account != "" && opt.Key != "":
sharedKeyCred, err = service.NewSharedKeyCredential(opt.Account, opt.Key) sharedKeyCred, err = service.NewSharedKeyCredential(opt.Account, opt.Key)
if err != nil { if err != nil {
return nil, fmt.Errorf("create new shared key credential failed: %w", err) return nil, fmt.Errorf("create new shared key credential failed: %w", err)
} }
case opt.SASURL != "":
parts, err := sas.ParseURL(opt.SASURL)
if err != nil {
return nil, fmt.Errorf("failed to parse SAS URL: %w", err)
}
endpoint := opt.SASURL
containerName := parts.ContainerName
// Check if we have container level SAS or account level SAS
if containerName != "" {
// Container level SAS
if f.rootContainer != "" && containerName != f.rootContainer {
return nil, fmt.Errorf("container name in SAS URL (%q) and container provided in command (%q) do not match", containerName, f.rootContainer)
}
// Rewrite the endpoint string to be without the container
parts.ContainerName = ""
endpoint = parts.String()
}
f.svc, err = service.NewClientWithNoCredential(endpoint, &clientOpt)
if err != nil {
return nil, fmt.Errorf("unable to create SAS URL client: %w", err)
}
// if using Container level SAS put the container client into the cache
if containerName != "" {
_ = f.cntSVC(containerName)
f.isLimited = true
}
case opt.ServicePrincipalFile != "":
// Try loading service principal credentials from file.
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServicePrincipalFile))
if err != nil {
return nil, fmt.Errorf("error opening service principal credentials file: %w", err)
}
parsedCreds, err := parseServicePrincipalCredentials(ctx, loadedCreds)
if err != nil {
return nil, fmt.Errorf("error parsing service principal credentials file: %w", err)
}
options := azidentity.ClientSecretCredentialOptions{
ClientOptions: policyClientOptions,
}
cred, err = azidentity.NewClientSecretCredential(parsedCreds.Tenant, parsedCreds.AppID, parsedCreds.Password, &options)
if err != nil {
return nil, fmt.Errorf("error creating a client secret credential: %w", err)
}
default:
return nil, errors.New("no authentication method configured")
} }
// Specify our own transport // Make the client if not already created
clientOpt := service.ClientOptions{ if f.svc == nil {
ClientOptions: azcore.ClientOptions{ // Work out what the endpoint is if it is still unset
Transport: newTransporter(ctx), if opt.Endpoint == "" {
}, if opt.Account == "" {
return nil, fmt.Errorf("account must be set: can't make service URL")
}
u, err := url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, storageDefaultBaseURL))
if err != nil {
return nil, fmt.Errorf("failed to make azure storage URL from account and endpoint: %w", err)
}
opt.Endpoint = u.String()
} }
// azClientOpt := azblob.ClientOptions{
// ClientOptions: azcore.ClientOptions{
// Transport: clientOpt.ClientOptions.Transport,
// },
// }
if sharedKeyCred != nil { if sharedKeyCred != nil {
// create a client for the specified storage account // Shared key cred
client, err := service.NewClientWithSharedKeyCredential(serviceURL, sharedKeyCred, &clientOpt) f.svc, err = service.NewClientWithSharedKeyCredential(opt.Endpoint, sharedKeyCred, &clientOpt)
if err != nil { if err != nil {
return nil, fmt.Errorf("create client with shared key failed: %w", err) return nil, fmt.Errorf("create client with shared key failed: %w", err)
} }
f.svc = client } else if cred != nil {
// Azidentity cred
// create a client for the specified storage account f.svc, err = service.NewClient(opt.Endpoint, cred, &clientOpt)
//
// Annoyingly this is the same type as f.svc just wrapped in a
// struct, but there is no way to create one from the other.
// azsvc, err := azblob.NewClientWithSharedKeyCredential(serviceURL, sharedKeyCred, &azClientOpt)
// if err != nil {
// return nil, fmt.Errorf("create client failed: %w", err)
// }
// f.azsvc = azsvc
} else {
// create a client for the specified storage account
// azblob.ClientOptions{}
client, err := service.NewClient(serviceURL, cred, &clientOpt)
if err != nil { if err != nil {
return nil, fmt.Errorf("create client failed: %w", err) return nil, fmt.Errorf("create client failed: %w", err)
} }
f.svc = client }
}
// create a client for the specified storage account if f.svc == nil {
// azblob.ClientOptions{} return nil, fmt.Errorf("internal error: auth failed to make credentials or client")
//
// Annoyingly this is the same type as f.svc just wrapped in a
// struct, but there is no way to create one from the other.
// azsvc, err := azblob.NewClient(serviceURL, cred, &azClientOpt)
// if err != nil {
// return nil, fmt.Errorf("create client failed: %w", err)
// }
// f.azsvc = azsvc
} }
if f.rootContainer != "" && f.rootDirectory != "" { if f.rootContainer != "" && f.rootDirectory != "" {