mirror of
https://github.com/rclone/rclone.git
synced 2025-01-03 12:59:32 +01:00
imagekit: Added ImageKit backend
This commit is contained in:
parent
fd2322cb41
commit
36eb3cd660
@ -25,6 +25,7 @@ import (
|
||||
_ "github.com/rclone/rclone/backend/hdfs"
|
||||
_ "github.com/rclone/rclone/backend/hidrive"
|
||||
_ "github.com/rclone/rclone/backend/http"
|
||||
_ "github.com/rclone/rclone/backend/imagekit"
|
||||
_ "github.com/rclone/rclone/backend/internetarchive"
|
||||
_ "github.com/rclone/rclone/backend/jottacloud"
|
||||
_ "github.com/rclone/rclone/backend/koofr"
|
||||
|
66
backend/imagekit/client/client.go
Normal file
66
backend/imagekit/client/client.go
Normal file
@ -0,0 +1,66 @@
|
||||
// Package client provides a client for interacting with the ImageKit API.
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
// ImageKit main struct
//
// It holds the API credentials and endpoints plus the shared REST client
// used by all API methods in this package.
type ImageKit struct {
	Prefix        string       // base URL of the management API (set by New)
	UploadPrefix  string       // base URL of the upload API (set by New)
	Timeout       int64        // request timeout in seconds (set by New)
	UploadTimeout int64        // upload timeout in seconds (set by New)
	PrivateKey    string       // API private key; used for basic auth and URL signing
	PublicKey     string       // API public key
	URLEndpoint   string       // media delivery endpoint, e.g. https://ik.imagekit.io/<id>
	HTTPClient    *rest.Client // REST client used for every API call
}
|
||||
|
||||
// NewParams is a struct to define parameters to imagekit
//
// All three fields are required by New.
type NewParams struct {
	PrivateKey  string // API private key from the ImageKit dashboard
	PublicKey   string // API public key from the ImageKit dashboard
	URLEndpoint string // media delivery URL endpoint
}
|
||||
|
||||
// New returns ImageKit object from environment variables
|
||||
func New(ctx context.Context, params NewParams) (*ImageKit, error) {
|
||||
|
||||
privateKey := params.PrivateKey
|
||||
publicKey := params.PublicKey
|
||||
endpointURL := params.URLEndpoint
|
||||
|
||||
switch {
|
||||
case privateKey == "":
|
||||
return nil, fmt.Errorf("ImageKit.io URL endpoint is required")
|
||||
case publicKey == "":
|
||||
return nil, fmt.Errorf("ImageKit.io public key is required")
|
||||
case endpointURL == "":
|
||||
return nil, fmt.Errorf("ImageKit.io private key is required")
|
||||
}
|
||||
|
||||
cliCtx, cliCfg := fs.AddConfig(ctx)
|
||||
|
||||
cliCfg.UserAgent = "rclone/imagekit"
|
||||
client := rest.NewClient(fshttp.NewClient(cliCtx))
|
||||
|
||||
client.SetUserPass(privateKey, "")
|
||||
client.SetHeader("Accept", "application/json")
|
||||
|
||||
return &ImageKit{
|
||||
Prefix: "https://api.imagekit.io/v2",
|
||||
UploadPrefix: "https://upload.imagekit.io/api/v2",
|
||||
Timeout: 60,
|
||||
UploadTimeout: 3600,
|
||||
PrivateKey: params.PrivateKey,
|
||||
PublicKey: params.PublicKey,
|
||||
URLEndpoint: params.URLEndpoint,
|
||||
HTTPClient: client,
|
||||
}, nil
|
||||
}
|
252
backend/imagekit/client/media.go
Normal file
252
backend/imagekit/client/media.go
Normal file
@ -0,0 +1,252 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"gopkg.in/validator.v2"
|
||||
)
|
||||
|
||||
// FilesOrFolderParam struct is a parameter type to ListFiles() function to search / list media library files.
type FilesOrFolderParam struct {
	Path        string `json:"path,omitempty"`        // folder path to list
	Limit       int    `json:"limit,omitempty"`       // maximum number of results per page
	Skip        int    `json:"skip,omitempty"`        // number of results to skip (paging offset)
	SearchQuery string `json:"searchQuery,omitempty"` // overrides the default type filter when non-empty
}
|
||||
|
||||
// AITag represents an AI tag for a media library file.
type AITag struct {
	Name       string  `json:"name"`       // tag label
	Confidence float32 `json:"confidence"` // detection confidence reported by the tagging service
	Source     string  `json:"source"`     // tagging service that produced the tag
}
|
||||
|
||||
// File represents media library File details.
|
||||
type File struct {
|
||||
FileID string `json:"fileId"`
|
||||
Name string `json:"name"`
|
||||
FilePath string `json:"filePath"`
|
||||
Type string `json:"type"`
|
||||
VersionInfo map[string]string `json:"versionInfo"`
|
||||
IsPrivateFile *bool `json:"isPrivateFile"`
|
||||
CustomCoordinates *string `json:"customCoordinates"`
|
||||
URL string `json:"url"`
|
||||
Thumbnail string `json:"thumbnail"`
|
||||
FileType string `json:"fileType"`
|
||||
Mime string `json:"mime"`
|
||||
Height int `json:"height"`
|
||||
Width int `json:"Width"`
|
||||
Size uint64 `json:"size"`
|
||||
HasAlpha bool `json:"hasAlpha"`
|
||||
CustomMetadata map[string]any `json:"customMetadata,omitempty"`
|
||||
EmbeddedMetadata map[string]any `json:"embeddedMetadata"`
|
||||
CreatedAt time.Time `json:"createdAt"`
|
||||
UpdatedAt time.Time `json:"updatedAt"`
|
||||
Tags []string `json:"tags"`
|
||||
AITags []AITag `json:"AITags"`
|
||||
}
|
||||
|
||||
// Folder represents media library Folder details.
//
// It embeds *File because the API returns folders from the same listing
// endpoint as files, with an extra folderPath field.
type Folder struct {
	*File
	FolderPath string `json:"folderPath"`
}
|
||||
|
||||
// CreateFolderParam represents parameter to create folder api
//
// Both fields are required (enforced via validator "nonzero").
type CreateFolderParam struct {
	FolderName       string `validate:"nonzero" json:"folderName"`
	ParentFolderPath string `validate:"nonzero" json:"parentFolderPath"`
}
|
||||
|
||||
// DeleteFolderParam represents parameter to delete folder api
//
// FolderPath is required (enforced via validator "nonzero").
type DeleteFolderParam struct {
	FolderPath string `validate:"nonzero" json:"folderPath"`
}
|
||||
|
||||
// MoveFolderParam represents parameter to move folder api
//
// Both fields are required (enforced via validator "nonzero").
type MoveFolderParam struct {
	SourceFolderPath string `validate:"nonzero" json:"sourceFolderPath"`
	DestinationPath  string `validate:"nonzero" json:"destinationPath"`
}
|
||||
|
||||
// JobIDResponse represents response struct with JobID for folder operations
type JobIDResponse struct {
	JobID string `json:"jobId"` // identifier to poll via BulkJobStatus
}
|
||||
|
||||
// JobStatus represents response Data to job status api
type JobStatus struct {
	JobID  string `json:"jobId"`
	Type   string `json:"type"`
	Status string `json:"status"`
}
|
||||
|
||||
// File retrieves the details of a single media library file by file ID.
//
// IgnoreStatus is set so the raw *http.Response is returned to the
// caller even for non-2xx answers, letting the caller inspect the
// status code itself.
func (ik *ImageKit) File(ctx context.Context, fileID string) (*http.Response, *File, error) {
	data := &File{}
	response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
		Method:       "GET",
		Path:         fmt.Sprintf("/files/%s/details", fileID),
		RootURL:      ik.Prefix,
		IgnoreStatus: true,
	}, nil, data)

	return response, data, err
}
|
||||
|
||||
// Files retrieves media library files. Filter options can be supplied as FilesOrFolderParam.
|
||||
func (ik *ImageKit) Files(ctx context.Context, params FilesOrFolderParam, includeVersion bool) (*http.Response, *[]File, error) {
|
||||
var SearchQuery = `type = "file"`
|
||||
|
||||
if includeVersion {
|
||||
SearchQuery = `type IN ["file", "file-version"]`
|
||||
}
|
||||
if params.SearchQuery != "" {
|
||||
SearchQuery = params.SearchQuery
|
||||
}
|
||||
|
||||
parameters := url.Values{}
|
||||
|
||||
parameters.Set("skip", fmt.Sprintf("%d", params.Skip))
|
||||
parameters.Set("limit", fmt.Sprintf("%d", params.Limit))
|
||||
parameters.Set("path", params.Path)
|
||||
parameters.Set("searchQuery", SearchQuery)
|
||||
|
||||
data := &[]File{}
|
||||
|
||||
response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/files",
|
||||
RootURL: ik.Prefix,
|
||||
Parameters: parameters,
|
||||
}, nil, data)
|
||||
|
||||
return response, data, err
|
||||
}
|
||||
|
||||
// DeleteFile removes file by FileID from media library
|
||||
func (ik *ImageKit) DeleteFile(ctx context.Context, fileID string) (*http.Response, error) {
|
||||
var err error
|
||||
|
||||
if fileID == "" {
|
||||
return nil, errors.New("fileID can not be empty")
|
||||
}
|
||||
|
||||
response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
|
||||
Method: "DELETE",
|
||||
Path: fmt.Sprintf("/files/%s", fileID),
|
||||
RootURL: ik.Prefix,
|
||||
NoResponse: true,
|
||||
}, nil, nil)
|
||||
|
||||
return response, err
|
||||
}
|
||||
|
||||
// Folders retrieves media library files. Filter options can be supplied as FilesOrFolderParam.
|
||||
func (ik *ImageKit) Folders(ctx context.Context, params FilesOrFolderParam) (*http.Response, *[]Folder, error) {
|
||||
var SearchQuery = `type = "folder"`
|
||||
|
||||
if params.SearchQuery != "" {
|
||||
SearchQuery = params.SearchQuery
|
||||
}
|
||||
|
||||
parameters := url.Values{}
|
||||
|
||||
parameters.Set("skip", fmt.Sprintf("%d", params.Skip))
|
||||
parameters.Set("limit", fmt.Sprintf("%d", params.Limit))
|
||||
parameters.Set("path", params.Path)
|
||||
parameters.Set("searchQuery", SearchQuery)
|
||||
|
||||
data := &[]Folder{}
|
||||
|
||||
resp, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/files",
|
||||
RootURL: ik.Prefix,
|
||||
Parameters: parameters,
|
||||
}, nil, data)
|
||||
|
||||
if err != nil {
|
||||
return resp, data, err
|
||||
}
|
||||
|
||||
return resp, data, err
|
||||
}
|
||||
|
||||
// CreateFolder creates a new folder in media library
|
||||
func (ik *ImageKit) CreateFolder(ctx context.Context, param CreateFolderParam) (*http.Response, error) {
|
||||
var err error
|
||||
|
||||
if err = validator.Validate(¶m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/folder",
|
||||
RootURL: ik.Prefix,
|
||||
NoResponse: true,
|
||||
}, param, nil)
|
||||
|
||||
return response, err
|
||||
}
|
||||
|
||||
// DeleteFolder removes the folder from media library
|
||||
func (ik *ImageKit) DeleteFolder(ctx context.Context, param DeleteFolderParam) (*http.Response, error) {
|
||||
var err error
|
||||
|
||||
if err = validator.Validate(¶m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
|
||||
Method: "DELETE",
|
||||
Path: "/folder",
|
||||
RootURL: ik.Prefix,
|
||||
NoResponse: true,
|
||||
}, param, nil)
|
||||
|
||||
return response, err
|
||||
}
|
||||
|
||||
// MoveFolder moves given folder to new path in media library
|
||||
func (ik *ImageKit) MoveFolder(ctx context.Context, param MoveFolderParam) (*http.Response, *JobIDResponse, error) {
|
||||
var err error
|
||||
var response = &JobIDResponse{}
|
||||
|
||||
if err = validator.Validate(¶m); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
resp, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: "bulkJobs/moveFolder",
|
||||
RootURL: ik.Prefix,
|
||||
}, param, response)
|
||||
|
||||
return resp, response, err
|
||||
}
|
||||
|
||||
// BulkJobStatus retrieves the status of a bulk job by job ID.
|
||||
func (ik *ImageKit) BulkJobStatus(ctx context.Context, jobID string) (*http.Response, *JobStatus, error) {
|
||||
var err error
|
||||
var response = &JobStatus{}
|
||||
|
||||
if jobID == "" {
|
||||
return nil, nil, errors.New("jobId can not be blank")
|
||||
}
|
||||
|
||||
resp, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "bulkJobs/" + jobID,
|
||||
RootURL: ik.Prefix,
|
||||
}, nil, response)
|
||||
|
||||
return resp, response, err
|
||||
}
|
96
backend/imagekit/client/upload.go
Normal file
96
backend/imagekit/client/upload.go
Normal file
@ -0,0 +1,96 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
// UploadParam defines upload parameters
type UploadParam struct {
	FileName      string `json:"fileName"`                // name to store the file under (required)
	Folder        string `json:"folder,omitempty"`        // default value: /
	Tags          string `json:"tags,omitempty"`          // comma-separated tags
	IsPrivateFile *bool  `json:"isPrivateFile,omitempty"` // default: false
}
|
||||
|
||||
// UploadResult defines the response structure for the upload API
|
||||
type UploadResult struct {
|
||||
FileID string `json:"fileId"`
|
||||
Name string `json:"name"`
|
||||
URL string `json:"url"`
|
||||
ThumbnailURL string `json:"thumbnailUrl"`
|
||||
Height int `json:"height"`
|
||||
Width int `json:"Width"`
|
||||
Size uint64 `json:"size"`
|
||||
FilePath string `json:"filePath"`
|
||||
AITags []map[string]any `json:"AITags"`
|
||||
VersionInfo map[string]string `json:"versionInfo"`
|
||||
}
|
||||
|
||||
// Upload uploads an asset to a imagekit account.
|
||||
//
|
||||
// The asset can be:
|
||||
// - the actual data (io.Reader)
|
||||
// - the Data URI (Base64 encoded), max ~60 MB (62,910,000 chars)
|
||||
// - the remote FTP, HTTP or HTTPS URL address of an existing file
|
||||
//
|
||||
// https://docs.imagekit.io/api-reference/upload-file-api/server-side-file-upload
|
||||
func (ik *ImageKit) Upload(ctx context.Context, file io.Reader, param UploadParam) (*http.Response, *UploadResult, error) {
|
||||
var err error
|
||||
|
||||
if param.FileName == "" {
|
||||
return nil, nil, errors.New("Upload: Filename is required")
|
||||
}
|
||||
|
||||
// Initialize URL values
|
||||
formParams := url.Values{}
|
||||
|
||||
formParams.Add("useUniqueFileName", fmt.Sprint(false))
|
||||
|
||||
// Add individual fields to URL values
|
||||
if param.FileName != "" {
|
||||
formParams.Add("fileName", param.FileName)
|
||||
}
|
||||
|
||||
if param.Tags != "" {
|
||||
formParams.Add("tags", param.Tags)
|
||||
}
|
||||
|
||||
if param.Folder != "" {
|
||||
formParams.Add("folder", param.Folder)
|
||||
}
|
||||
|
||||
if param.IsPrivateFile != nil {
|
||||
formParams.Add("isPrivateFile", fmt.Sprintf("%v", *param.IsPrivateFile))
|
||||
}
|
||||
|
||||
response := &UploadResult{}
|
||||
|
||||
formReader, contentType, _, err := rest.MultipartUpload(ctx, file, formParams, "file", param.FileName)
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to make multipart upload: %w", err)
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/files/upload",
|
||||
RootURL: ik.UploadPrefix,
|
||||
Body: formReader,
|
||||
ContentType: contentType,
|
||||
}
|
||||
|
||||
resp, err := ik.HTTPClient.CallJSON(ctx, &opts, nil, response)
|
||||
|
||||
if err != nil {
|
||||
return resp, response, err
|
||||
}
|
||||
|
||||
return resp, response, err
|
||||
}
|
72
backend/imagekit/client/url.go
Normal file
72
backend/imagekit/client/url.go
Normal file
@ -0,0 +1,72 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
neturl "net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// URLParam represents parameters for generating url
type URLParam struct {
	Path            string            // NOTE(review): currently unused by URL() — only Src is consulted; confirm intended
	Src             string            // full source URL to transform/sign
	URLEndpoint     string            // overrides the client's URLEndpoint when non-empty
	Signed          bool              // if true, append ik-t / ik-s signature parameters
	ExpireSeconds   int64             // signature lifetime, relative to now
	QueryParameters map[string]string // extra query parameters merged into the URL
}
|
||||
|
||||
// URL generates url from URLParam
|
||||
func (ik *ImageKit) URL(params URLParam) (string, error) {
|
||||
var resultURL string
|
||||
var url *neturl.URL
|
||||
var err error
|
||||
var endpoint = params.URLEndpoint
|
||||
|
||||
if endpoint == "" {
|
||||
endpoint = ik.URLEndpoint
|
||||
}
|
||||
|
||||
endpoint = strings.TrimRight(endpoint, "/") + "/"
|
||||
|
||||
if params.QueryParameters == nil {
|
||||
params.QueryParameters = make(map[string]string)
|
||||
}
|
||||
|
||||
if url, err = neturl.Parse(params.Src); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
query := url.Query()
|
||||
|
||||
for k, v := range params.QueryParameters {
|
||||
query.Set(k, v)
|
||||
}
|
||||
url.RawQuery = query.Encode()
|
||||
resultURL = url.String()
|
||||
|
||||
if params.Signed {
|
||||
now := time.Now().Unix()
|
||||
|
||||
var expires = strconv.FormatInt(now+params.ExpireSeconds, 10)
|
||||
var path = strings.Replace(resultURL, endpoint, "", 1)
|
||||
|
||||
path = path + expires
|
||||
mac := hmac.New(sha1.New, []byte(ik.PrivateKey))
|
||||
mac.Write([]byte(path))
|
||||
signature := hex.EncodeToString(mac.Sum(nil))
|
||||
|
||||
if strings.Contains(resultURL, "?") {
|
||||
resultURL = resultURL + "&" + fmt.Sprintf("ik-t=%s&ik-s=%s", expires, signature)
|
||||
} else {
|
||||
resultURL = resultURL + "?" + fmt.Sprintf("ik-t=%s&ik-s=%s", expires, signature)
|
||||
}
|
||||
}
|
||||
|
||||
return resultURL, nil
|
||||
}
|
828
backend/imagekit/imagekit.go
Normal file
828
backend/imagekit/imagekit.go
Normal file
@ -0,0 +1,828 @@
|
||||
// Package imagekit provides an interface to the ImageKit.io media library.
|
||||
package imagekit
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"net/http"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/backend/imagekit/client"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/rclone/rclone/lib/version"
|
||||
)
|
||||
|
||||
const (
|
||||
minSleep = 1 * time.Millisecond
|
||||
maxSleep = 100 * time.Millisecond
|
||||
decayConstant = 2
|
||||
)
|
||||
|
||||
// systemMetadataInfo describes the read-only system metadata this
// backend exposes for each object. All entries are derived from fields
// the ImageKit API returns on file listings.
var systemMetadataInfo = map[string]fs.MetadataHelp{
	"btime": {
		Help:     "Time of file birth (creation) read from Last-Modified header",
		Type:     "RFC 3339",
		Example:  "2006-01-02T15:04:05.999999999Z07:00",
		ReadOnly: true,
	},
	"size": {
		Help:     "Size of the object in bytes",
		Type:     "int64",
		ReadOnly: true,
	},
	"file-type": {
		Help:     "Type of the file",
		Type:     "string",
		Example:  "image",
		ReadOnly: true,
	},
	"height": {
		Help:     "Height of the image or video in pixels",
		Type:     "int",
		ReadOnly: true,
	},
	"width": {
		Help:     "Width of the image or video in pixels",
		Type:     "int",
		ReadOnly: true,
	},
	"has-alpha": {
		Help:     "Whether the image has alpha channel or not",
		Type:     "bool",
		ReadOnly: true,
	},
	"tags": {
		Help:     "Tags associated with the file",
		Type:     "string",
		Example:  "tag1,tag2",
		ReadOnly: true,
	},
	"google-tags": {
		Help:     "AI generated tags by Google Cloud Vision associated with the image",
		Type:     "string",
		Example:  "tag1,tag2",
		ReadOnly: true,
	},
	"aws-tags": {
		Help:     "AI generated tags by AWS Rekognition associated with the image",
		Type:     "string",
		Example:  "tag1,tag2",
		ReadOnly: true,
	},
	"is-private-file": {
		Help:     "Whether the file is private or not",
		Type:     "bool",
		ReadOnly: true,
	},
	"custom-coordinates": {
		Help:     "Custom coordinates of the file",
		Type:     "string",
		Example:  "0,0,100,100",
		ReadOnly: true,
	},
}
|
||||
|
||||
// Register with Fs
//
// Declares the "imagekit" backend, its configuration options, and the
// character encoding required by the ImageKit API.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "imagekit",
		Description: "ImageKit.io",
		NewFs:       NewFs,
		MetadataInfo: &fs.MetadataInfo{
			System: systemMetadataInfo,
			Help:   `Any metadata supported by the underlying remote is read and written.`,
		},
		Options: []fs.Option{
			{
				Name:     "endpoint",
				Help:     "You can find your ImageKit.io URL endpoint in your [dashboard](https://imagekit.io/dashboard/developer/api-keys)",
				Required: true,
			},
			{
				Name:      "public_key",
				Help:      "You can find your ImageKit.io public key in your [dashboard](https://imagekit.io/dashboard/developer/api-keys)",
				Required:  true,
				Sensitive: true,
			},
			{
				Name:      "private_key",
				Help:      "You can find your ImageKit.io private key in your [dashboard](https://imagekit.io/dashboard/developer/api-keys)",
				Required:  true,
				Sensitive: true,
			},
			{
				Name:     "only_signed",
				Help:     "If you have configured `Restrict unsigned image URLs` in your dashboard settings, set this to true.",
				Default:  false,
				Advanced: true,
			},
			{
				Name:     "versions",
				Help:     "Include old versions in directory listings.",
				Default:  false,
				Advanced: true,
			},
			{
				Name:     "upload_tags",
				Help:     "Tags to add to the uploaded files, e.g. \"tag1,tag2\".",
				Default:  "",
				Advanced: true,
			},
			{
				Name:     config.ConfigEncoding,
				Help:     config.ConfigEncodingHelp,
				Advanced: true,
				// Characters the ImageKit API cannot accept in names are
				// percent-encoded on the way out and decoded on the way in.
				Default: (encoder.EncodeZero |
					encoder.EncodeSlash |
					encoder.EncodeQuestion |
					encoder.EncodeHashPercent |
					encoder.EncodeCtl |
					encoder.EncodeDel |
					encoder.EncodeDot |
					encoder.EncodeDoubleQuote |
					encoder.EncodePercent |
					encoder.EncodeBackSlash |
					encoder.EncodeDollar |
					encoder.EncodeLtGt |
					encoder.EncodeSquareBracket |
					encoder.EncodeInvalidUtf8),
			},
		},
	})
}
|
||||
|
||||
// Options defines the configuration for this backend
type Options struct {
	Endpoint   string               `config:"endpoint"`    // ImageKit URL endpoint
	PublicKey  string               `config:"public_key"`  // API public key
	PrivateKey string               `config:"private_key"` // API private key
	OnlySigned bool                 `config:"only_signed"` // always generate signed URLs
	Versions   bool                 `config:"versions"`    // include old versions in listings
	Enc        encoder.MultiEncoder `config:"encoding"`    // filename encoding
}
|
||||
|
||||
// Fs represents a remote to ImageKit
type Fs struct {
	name     string           // name of remote
	root     string           // root path
	opt      Options          // parsed options
	features *fs.Features     // optional features
	ik       *client.ImageKit // ImageKit client
	pacer    *fs.Pacer        // pacer for API calls
}
|
||||
|
||||
// Object describes a ImageKit file
type Object struct {
	fs          *Fs         // The Fs this object is part of
	remote      string      // The remote path
	filePath    string      // The path to the file
	contentType string      // The content type of the object if known - may be ""
	timestamp   time.Time   // The timestamp of the object if known - may be zero
	file        client.File // The media file details (value type - zero value when unknown)
	versionID   string      // If present this points to an object version
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	ik, err := client.New(ctx, client.NewParams{
		URLEndpoint: opt.Endpoint,
		PublicKey:   opt.PublicKey,
		PrivateKey:  opt.PrivateKey,
	})
	if err != nil {
		return nil, err
	}

	f := &Fs{
		name:  name,
		opt:   *opt,
		ik:    ik,
		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}

	// Paths are absolute within the media library.
	f.root = path.Join("/", root)

	f.features = (&fs.Features{
		CaseInsensitive:         false,
		DuplicateFiles:          false,
		ReadMimeType:            true,
		WriteMimeType:           false,
		CanHaveEmptyDirectories: true,
		BucketBased:             false,
		ServerSideAcrossConfigs: false,
		IsLocal:                 false,
		SlowHash:                true,
		ReadMetadata:            true,
		WriteMetadata:           false,
		UserMetadata:            false,
		FilterAware:             true,
		PartialUploads:          false,
		NoMultiThreading:        false,
	}).Fill(ctx, f)

	// If the root points at a file rather than a directory, rclone
	// convention is to return the parent directory plus fs.ErrorIsFile.
	if f.root != "/" {

		r := f.root

		// Split root into its parent folder and leaf name, encoded for the API.
		folderPath := f.EncodePath(r[:strings.LastIndex(r, "/")+1])
		fileName := f.EncodeFileName(r[strings.LastIndex(r, "/")+1:])

		file := f.getFileByName(ctx, folderPath, fileName)

		if file != nil {
			newRoot := path.Dir(f.root)
			f.root = newRoot
			return f, fs.ErrorIsFile
		}

	}
	return f, nil
}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
//
// The leading "/" added internally is stripped for display.
func (f *Fs) Root() string {
	return strings.TrimLeft(f.root, "/")
}
|
||||
|
||||
// String returns a description of the FS
func (f *Fs) String() string {
	return fmt.Sprintf("FS imagekit: %s", f.root)
}
|
||||
|
||||
// Precision of the ModTimes in this Fs
//
// ImageKit does not allow setting modification times.
func (f *Fs) Precision() time.Duration {
	return fs.ModTimeNotSupported
}
|
||||
|
||||
// Hashes returns the supported hash types of the filesystem.
//
// No hashes are supported.
func (f *Fs) Hashes() hash.Set {
	return hash.NewHashSet()
}
|
||||
|
||||
// Features returns the optional features of this Fs.
func (f *Fs) Features() *fs.Features {
	return f.features
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {

	remote := path.Join(f.root, dir)

	remote = f.EncodePath(remote)

	// Unless listing the root, verify the directory itself exists so an
	// empty listing can be distinguished from a missing directory.
	if remote != "/" {
		parentFolderPath, folderName := path.Split(remote)
		folderExists, err := f.getFolderByName(ctx, parentFolderPath, folderName)

		if err != nil {
			return make(fs.DirEntries, 0), err
		}

		if folderExists == nil {
			return make(fs.DirEntries, 0), fs.ErrorDirNotFound
		}
	}

	folders, folderError := f.getFolders(ctx, remote)

	if folderError != nil {
		return make(fs.DirEntries, 0), folderError
	}

	files, fileError := f.getFiles(ctx, remote, f.opt.Versions)

	if fileError != nil {
		return make(fs.DirEntries, 0), fileError
	}

	res := make([]fs.DirEntry, 0, len(folders)+len(files))

	for _, folder := range folders {
		// Strip the encoded root prefix and decode back to the
		// user-visible name.
		folderPath := f.DecodePath(strings.TrimLeft(strings.Replace(folder.FolderPath, f.EncodePath(f.root), "", 1), "/"))
		res = append(res, fs.NewDir(folderPath, folder.UpdatedAt))
	}

	for _, file := range files {
		res = append(res, f.newObject(ctx, remote, file))
	}

	return res, nil
}
|
||||
|
||||
func (f *Fs) newObject(ctx context.Context, remote string, file client.File) *Object {
|
||||
remoteFile := strings.TrimLeft(strings.Replace(file.FilePath, f.EncodePath(f.root), "", 1), "/")
|
||||
|
||||
folderPath, fileName := path.Split(remoteFile)
|
||||
|
||||
folderPath = f.DecodePath(folderPath)
|
||||
fileName = f.DecodeFileName(fileName)
|
||||
|
||||
remoteFile = path.Join(folderPath, fileName)
|
||||
|
||||
if file.Type == "file-version" {
|
||||
remoteFile = version.Add(remoteFile, file.UpdatedAt)
|
||||
|
||||
return &Object{
|
||||
fs: f,
|
||||
remote: remoteFile,
|
||||
filePath: file.FilePath,
|
||||
contentType: file.Mime,
|
||||
timestamp: file.UpdatedAt,
|
||||
file: file,
|
||||
versionID: file.VersionInfo["id"],
|
||||
}
|
||||
}
|
||||
|
||||
return &Object{
|
||||
fs: f,
|
||||
remote: remoteFile,
|
||||
filePath: file.FilePath,
|
||||
contentType: file.Mime,
|
||||
timestamp: file.UpdatedAt,
|
||||
file: file,
|
||||
}
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
//
// If remote points to a directory then it should return
// ErrorIsDir if possible without doing any extra work,
// otherwise ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	r := path.Join(f.root, remote)

	folderPath, fileName := path.Split(r)

	// The API works on encoded names.
	folderPath = f.EncodePath(folderPath)
	fileName = f.EncodeFileName(fileName)

	// Check for a folder of this name first so directories get ErrorIsDir.
	isFolder, err := f.getFolderByName(ctx, folderPath, fileName)

	if err != nil {
		return nil, err
	}

	if isFolder != nil {
		return nil, fs.ErrorIsDir
	}

	file := f.getFileByName(ctx, folderPath, fileName)

	if file == nil {
		return nil, fs.ErrorObjectNotFound
	}

	return f.newObject(ctx, r, *file), nil
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Delegates to the shared upload helper.
	return uploadFile(ctx, f, in, src.Remote(), options...)
}
|
||||
|
||||
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
	remote := path.Join(f.root, dir)
	parentFolderPath, folderName := path.Split(remote)

	// The API takes the parent path and new folder name separately,
	// both in encoded form.
	parentFolderPath = f.EncodePath(parentFolderPath)
	folderName = f.EncodeFileName(folderName)

	// Retry transient failures via the pacer.
	err = f.pacer.Call(func() (bool, error) {
		var res *http.Response
		res, err = f.ik.CreateFolder(ctx, client.CreateFolderParam{
			ParentFolderPath: parentFolderPath,
			FolderName:       folderName,
		})

		return f.shouldRetry(ctx, res, err)
	})

	return err
}
|
||||
|
||||
// Rmdir removes the directory (container, bucket) if empty
|
||||
//
|
||||
// Return an error if it doesn't exist or isn't empty
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
|
||||
|
||||
entries, err := f.List(ctx, dir)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(entries) > 0 {
|
||||
return errors.New("directory is not empty")
|
||||
}
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
var res *http.Response
|
||||
res, err = f.ik.DeleteFolder(ctx, client.DeleteFolderParam{
|
||||
FolderPath: f.EncodePath(path.Join(f.root, dir)),
|
||||
})
|
||||
|
||||
if res.StatusCode == http.StatusNotFound {
|
||||
return false, fs.ErrorDirNotFound
|
||||
}
|
||||
|
||||
return f.shouldRetry(ctx, res, err)
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Purge deletes all the files and the container
|
||||
//
|
||||
// Optional interface: Only implement this if you have a way of
|
||||
// deleting all the files quicker than just running Remove() on the
|
||||
// result of List()
|
||||
func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
|
||||
|
||||
remote := path.Join(f.root, dir)
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
var res *http.Response
|
||||
res, err = f.ik.DeleteFolder(ctx, client.DeleteFolderParam{
|
||||
FolderPath: f.EncodePath(remote),
|
||||
})
|
||||
|
||||
if res.StatusCode == http.StatusNotFound {
|
||||
return false, fs.ErrorDirNotFound
|
||||
}
|
||||
|
||||
return f.shouldRetry(ctx, res, err)
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
	// NOTE(review): the unlink parameter is ignored — confirm whether
	// "remove existing link" should return an error for this backend.

	// A negative expiry is treated the same as a positive one.
	duration := time.Duration(math.Abs(float64(expire)))

	expireSeconds := duration.Seconds()

	fileRemote := path.Join(f.root, remote)

	folderPath, fileName := path.Split(fileRemote)
	folderPath = f.EncodePath(folderPath)
	fileName = f.EncodeFileName(fileName)

	// getFileByName returns nil both when the file does not exist and
	// when the lookup itself fails, so either case maps to "not found".
	file := f.getFileByName(ctx, folderPath, fileName)

	if file == nil {
		return "", fs.ErrorObjectNotFound
	}

	// Pacer not needed as this doesn't use the API
	url, err := f.ik.URL(client.URLParam{
		Src:           file.URL,
		Signed:        *file.IsPrivateFile || f.opt.OnlySigned,
		ExpireSeconds: int64(expireSeconds),
		QueryParameters: map[string]string{
			// Cache-bust on file updates so the link always serves the
			// latest version.
			"updatedAt": file.UpdatedAt.String(),
		},
	})

	if err != nil {
		return "", err
	}

	return url, nil
}
|
||||
|
||||
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
	return o.fs
}
|
||||
|
||||
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
//
// ImageKit exposes no hashes, so this always reports hash.ErrUnsupported
// regardless of the requested type.
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}
|
||||
|
||||
// Storable says whether this object can be stored
func (o *Object) Storable() bool {
	// All ImageKit objects are regular files and can be stored.
	return true
}
|
||||
|
||||
// String returns a description of the Object
func (o *Object) String() string {
	// A nil receiver is tolerated so the object can be printed safely
	// before it is fully constructed.
	if o == nil {
		return "<nil>"
	}
	return o.file.Name
}
|
||||
|
||||
// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}
|
||||
|
||||
// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
//
// ImageKit does not store a separate modification time, so the
// server-side UpdatedAt timestamp is used as the best guess.
func (o *Object) ModTime(context.Context) time.Time {
	return o.file.UpdatedAt
}
|
||||
|
||||
// Size returns the size of the file
func (o *Object) Size() int64 {
	// o.file.Size is unsigned on the API side; convert to the signed
	// int64 that the fs.Object interface requires.
	return int64(o.file.Size)
}
|
||||
|
||||
// MimeType returns the MIME type of the file
func (o *Object) MimeType(context.Context) string {
	return o.contentType
}
|
||||
|
||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
|
||||
// Offset and Count for range download
|
||||
var offset int64
|
||||
var count int64
|
||||
|
||||
fs.FixRangeOption(options, -1)
|
||||
partialContent := false
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.RangeOption:
|
||||
offset, count = x.Decode(-1)
|
||||
partialContent = true
|
||||
case *fs.SeekOption:
|
||||
offset = x.Offset
|
||||
partialContent = true
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Pacer not needed as this doesn't use the API
|
||||
url, err := o.fs.ik.URL(client.URLParam{
|
||||
Src: o.file.URL,
|
||||
Signed: *o.file.IsPrivateFile || o.fs.opt.OnlySigned,
|
||||
QueryParameters: map[string]string{
|
||||
"tr": "orig-true",
|
||||
"updatedAt": o.file.UpdatedAt.String(),
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client := &http.Client{}
|
||||
req, _ := http.NewRequest("GET", url, nil)
|
||||
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+count-1))
|
||||
resp, err := client.Do(req)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
end := resp.ContentLength
|
||||
|
||||
if partialContent && resp.StatusCode == http.StatusOK {
|
||||
skip := offset
|
||||
|
||||
if offset < 0 {
|
||||
skip = end + offset + 1
|
||||
}
|
||||
|
||||
_, err = io.CopyN(io.Discard, resp.Body, skip)
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
_ = resp.Body.Close()
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return readers.NewLimitedReadCloser(resp.Body, end-skip), nil
|
||||
}
|
||||
|
||||
return resp.Body, nil
|
||||
}
|
||||
|
||||
// Update in to the object with the modTime given of the given size
|
||||
//
|
||||
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
|
||||
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
|
||||
// return an error or update the object properly (rather than e.g. calling panic).
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
|
||||
srcRemote := o.Remote()
|
||||
|
||||
remote := path.Join(o.fs.root, srcRemote)
|
||||
folderPath, fileName := path.Split(remote)
|
||||
|
||||
UseUniqueFileName := new(bool)
|
||||
*UseUniqueFileName = false
|
||||
|
||||
var resp *client.UploadResult
|
||||
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
var res *http.Response
|
||||
res, resp, err = o.fs.ik.Upload(ctx, in, client.UploadParam{
|
||||
FileName: fileName,
|
||||
Folder: folderPath,
|
||||
IsPrivateFile: o.file.IsPrivateFile,
|
||||
})
|
||||
|
||||
return o.fs.shouldRetry(ctx, res, err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fileID := resp.FileID
|
||||
|
||||
_, file, err := o.fs.ik.File(ctx, fileID)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
o.file = *file
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove this object
|
||||
func (o *Object) Remove(ctx context.Context) (err error) {
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
var res *http.Response
|
||||
res, err = o.fs.ik.DeleteFile(ctx, o.file.FileID)
|
||||
|
||||
return o.fs.shouldRetry(ctx, res, err)
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// SetModTime sets the metadata on the object to set the modification date
//
// ImageKit has no way to store a modification time, so this always
// returns fs.ErrorCantSetModTime.
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
	return fs.ErrorCantSetModTime
}
|
||||
|
||||
func uploadFile(ctx context.Context, f *Fs, in io.Reader, srcRemote string, options ...fs.OpenOption) (fs.Object, error) {
|
||||
remote := path.Join(f.root, srcRemote)
|
||||
folderPath, fileName := path.Split(remote)
|
||||
|
||||
folderPath = f.EncodePath(folderPath)
|
||||
fileName = f.EncodeFileName(fileName)
|
||||
|
||||
UseUniqueFileName := new(bool)
|
||||
*UseUniqueFileName = false
|
||||
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
var res *http.Response
|
||||
var err error
|
||||
res, _, err = f.ik.Upload(ctx, in, client.UploadParam{
|
||||
FileName: fileName,
|
||||
Folder: folderPath,
|
||||
IsPrivateFile: &f.opt.OnlySigned,
|
||||
})
|
||||
|
||||
return f.shouldRetry(ctx, res, err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return f.NewObject(ctx, srcRemote)
|
||||
}
|
||||
|
||||
// Metadata returns the metadata for the object
|
||||
func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
|
||||
|
||||
metadata.Set("btime", o.file.CreatedAt.Format(time.RFC3339))
|
||||
metadata.Set("size", strconv.FormatUint(o.file.Size, 10))
|
||||
metadata.Set("file-type", o.file.FileType)
|
||||
metadata.Set("height", strconv.Itoa(o.file.Height))
|
||||
metadata.Set("width", strconv.Itoa(o.file.Width))
|
||||
metadata.Set("has-alpha", strconv.FormatBool(o.file.HasAlpha))
|
||||
|
||||
for k, v := range o.file.EmbeddedMetadata {
|
||||
metadata.Set(k, fmt.Sprint(v))
|
||||
}
|
||||
|
||||
if o.file.Tags != nil {
|
||||
metadata.Set("tags", strings.Join(o.file.Tags, ","))
|
||||
}
|
||||
|
||||
if o.file.CustomCoordinates != nil {
|
||||
metadata.Set("custom-coordinates", *o.file.CustomCoordinates)
|
||||
}
|
||||
|
||||
if o.file.IsPrivateFile != nil {
|
||||
metadata.Set("is-private-file", strconv.FormatBool(*o.file.IsPrivateFile))
|
||||
}
|
||||
|
||||
if o.file.AITags != nil {
|
||||
googleTags := []string{}
|
||||
awsTags := []string{}
|
||||
|
||||
for _, tag := range o.file.AITags {
|
||||
if tag.Source == "google-auto-tagging" {
|
||||
googleTags = append(googleTags, tag.Name)
|
||||
} else if tag.Source == "aws-auto-tagging" {
|
||||
awsTags = append(awsTags, tag.Name)
|
||||
}
|
||||
}
|
||||
|
||||
if len(googleTags) > 0 {
|
||||
metadata.Set("google-tags", strings.Join(googleTags, ","))
|
||||
}
|
||||
|
||||
if len(awsTags) > 0 {
|
||||
metadata.Set("aws-tags", strings.Join(awsTags, ","))
|
||||
}
|
||||
}
|
||||
|
||||
return metadata, nil
|
||||
}
|
||||
|
||||
// Copy src to this remote using server-side move operations.
|
||||
//
|
||||
// This is stored with the remote path given.
|
||||
//
|
||||
// It returns the destination Object and a possible error.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantMove
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
|
||||
file, err := srcObj.Open(ctx)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return uploadFile(ctx, f, file, remote)
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied.
// These compile-time assertions fail the build if Fs/Object stop
// implementing the optional rclone interfaces they advertise.
var (
	_ fs.Fs           = &Fs{}
	_ fs.Purger       = &Fs{}
	_ fs.PublicLinker = &Fs{}
	_ fs.Object       = &Object{}
	_ fs.Copier       = &Fs{}
)
|
18
backend/imagekit/imagekit_test.go
Normal file
18
backend/imagekit/imagekit_test.go
Normal file
@ -0,0 +1,18 @@
|
||||
package imagekit
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func TestIntegration(t *testing.T) {
|
||||
debug := true
|
||||
fstest.Verbose = &debug
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestImageKit:",
|
||||
NilObject: (*Object)(nil),
|
||||
SkipFsCheckWrap: true,
|
||||
})
|
||||
}
|
193
backend/imagekit/util.go
Normal file
193
backend/imagekit/util.go
Normal file
@ -0,0 +1,193 @@
|
||||
package imagekit
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/backend/imagekit/client"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
)
|
||||
|
||||
func (f *Fs) getFiles(ctx context.Context, path string, includeVersions bool) (files []client.File, err error) {
|
||||
|
||||
files = make([]client.File, 0)
|
||||
|
||||
var hasMore = true
|
||||
|
||||
for hasMore {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
var data *[]client.File
|
||||
var res *http.Response
|
||||
res, data, err = f.ik.Files(ctx, client.FilesOrFolderParam{
|
||||
Skip: len(files),
|
||||
Limit: 100,
|
||||
Path: path,
|
||||
}, includeVersions)
|
||||
|
||||
hasMore = !(len(*data) == 0 || len(*data) < 100)
|
||||
|
||||
if len(*data) > 0 {
|
||||
files = append(files, *data...)
|
||||
}
|
||||
|
||||
return f.shouldRetry(ctx, res, err)
|
||||
})
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return make([]client.File, 0), err
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
func (f *Fs) getFolders(ctx context.Context, path string) (folders []client.Folder, err error) {
|
||||
|
||||
folders = make([]client.Folder, 0)
|
||||
|
||||
var hasMore = true
|
||||
|
||||
for hasMore {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
var data *[]client.Folder
|
||||
var res *http.Response
|
||||
res, data, err = f.ik.Folders(ctx, client.FilesOrFolderParam{
|
||||
Skip: len(folders),
|
||||
Limit: 100,
|
||||
Path: path,
|
||||
})
|
||||
|
||||
hasMore = !(len(*data) == 0 || len(*data) < 100)
|
||||
|
||||
if len(*data) > 0 {
|
||||
folders = append(folders, *data...)
|
||||
}
|
||||
|
||||
return f.shouldRetry(ctx, res, err)
|
||||
})
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return make([]client.Folder, 0), err
|
||||
}
|
||||
|
||||
return folders, nil
|
||||
}
|
||||
|
||||
func (f *Fs) getFileByName(ctx context.Context, path string, name string) (file *client.File) {
|
||||
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
res, data, err := f.ik.Files(ctx, client.FilesOrFolderParam{
|
||||
Limit: 1,
|
||||
Path: path,
|
||||
SearchQuery: fmt.Sprintf(`type = "file" AND name = %s`, strconv.Quote(name)),
|
||||
}, false)
|
||||
|
||||
if len(*data) == 0 {
|
||||
file = nil
|
||||
} else {
|
||||
file = &(*data)[0]
|
||||
}
|
||||
|
||||
return f.shouldRetry(ctx, res, err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return file
|
||||
}
|
||||
|
||||
func (f *Fs) getFolderByName(ctx context.Context, path string, name string) (folder *client.Folder, err error) {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
res, data, err := f.ik.Folders(ctx, client.FilesOrFolderParam{
|
||||
Limit: 1,
|
||||
Path: path,
|
||||
SearchQuery: fmt.Sprintf(`type = "folder" AND name = %s`, strconv.Quote(name)),
|
||||
})
|
||||
|
||||
if len(*data) == 0 {
|
||||
folder = nil
|
||||
} else {
|
||||
folder = &(*data)[0]
|
||||
}
|
||||
|
||||
return f.shouldRetry(ctx, res, err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return folder, nil
|
||||
}
|
||||
|
||||
// retryErrorCodes is a slice of HTTP status codes that we will retry
var retryErrorCodes = []int{
	401, // Unauthorized (e.g. "Token has expired")
	408, // Request Timeout
	429, // Rate exceeded.
	500, // Get occasional 500 Internal Server Error
	503, // Service Unavailable
	504, // Gateway Time-out
}
|
||||
|
||||
func shouldRetryHTTP(resp *http.Response, retryErrorCodes []int) bool {
|
||||
if resp == nil {
|
||||
return false
|
||||
}
|
||||
for _, e := range retryErrorCodes {
|
||||
if resp.StatusCode == e {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// shouldRetry decides whether a failed API call should be retried by
// the pacer, returning (retry, err).
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	// Never retry once the context has been cancelled or timed out.
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}

	// Rate limited (429) or service unavailable (503): honour the
	// server-provided backoff hint.
	if resp != nil && (resp.StatusCode == 429 || resp.StatusCode == 503) {
		var retryAfter = 1
		retryAfterString := resp.Header.Get("X-RateLimit-Reset")
		if retryAfterString != "" {
			var err error
			retryAfter, err = strconv.Atoi(retryAfterString)
			if err != nil {
				// Fall back to the 1-unit default on a malformed header.
				fs.Errorf(f, "Malformed %s header %q: %v", "X-RateLimit-Reset", retryAfterString, err)
			}
		}

		// NOTE(review): the header value is interpreted as milliseconds —
		// confirm against the ImageKit rate-limit documentation.
		return true, pacer.RetryAfterError(err, time.Duration(retryAfter)*time.Millisecond)
	}

	// Otherwise retry on generically retryable errors or status codes.
	return fserrors.ShouldRetry(err) || shouldRetryHTTP(resp, retryErrorCodes), err
}
|
||||
|
||||
// EncodePath encapsulates the logic for encoding a path
// from rclone's standard form to the backend's configured encoding.
func (f *Fs) EncodePath(str string) string {
	return f.opt.Enc.FromStandardPath(str)
}
|
||||
|
||||
// DecodePath encapsulates the logic for decoding a path
// from the backend's configured encoding back to rclone's standard form.
func (f *Fs) DecodePath(str string) string {
	return f.opt.Enc.ToStandardPath(str)
}
|
||||
|
||||
// EncodeFileName encapsulates the logic for encoding a file name
// from rclone's standard form to the backend's configured encoding.
func (f *Fs) EncodeFileName(str string) string {
	return f.opt.Enc.FromStandardName(str)
}
|
||||
|
||||
// DecodeFileName encapsulates the logic for decoding a file name
// from the backend's configured encoding back to rclone's standard form.
func (f *Fs) DecodeFileName(str string) string {
	return f.opt.Enc.ToStandardName(str)
}
|
@ -50,6 +50,7 @@ docs = [
|
||||
"hdfs.md",
|
||||
"hidrive.md",
|
||||
"http.md",
|
||||
"imagekit.md",
|
||||
"internetarchive.md",
|
||||
"jottacloud.md",
|
||||
"koofr.md",
|
||||
|
205
docs/content/imagekit.md
Normal file
205
docs/content/imagekit.md
Normal file
@ -0,0 +1,205 @@
|
||||
---
|
||||
title: "ImageKit"
|
||||
description: "Rclone docs for ImageKit backend."
|
||||
versionIntroduced: "v1.63"
|
||||
|
||||
---
|
||||
# {{< icon "fa fa-cloud" >}} ImageKit
|
||||
This is a backend for the [ImageKit.io](https://imagekit.io/) storage service.
|
||||
|
||||
#### About ImageKit
|
||||
[ImageKit.io](https://imagekit.io/) provides real-time image and video optimizations, transformations, and CDN delivery. Over 1,000 businesses and 70,000 developers trust ImageKit with their images and videos on the web.
|
||||
|
||||
|
||||
#### Accounts & Pricing
|
||||
|
||||
To use this backend, you need to [create an account](https://imagekit.io/registration/) on ImageKit. Start with a free plan with generous usage limits. Then, as your requirements grow, upgrade to a plan that best fits your needs. See [the pricing details](https://imagekit.io/plans).
|
||||
|
||||
## Configuration
|
||||
|
||||
Here is an example of making an ImageKit configuration.
|
||||
|
||||
First, create an [ImageKit.io](https://imagekit.io/) account and choose a plan.
|
||||
|
||||
You will need to log in and get the `publicKey` and `privateKey` for your account from the developer section.
|
||||
|
||||
Now run
|
||||
```
|
||||
rclone config
|
||||
```
|
||||
|
||||
This will guide you through an interactive setup process:
|
||||
|
||||
```
|
||||
No remotes found, make a new one?
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
q) Quit config
|
||||
n/s/q> n
|
||||
|
||||
Enter the name for the new remote.
|
||||
name> imagekit-media-library
|
||||
|
||||
Option Storage.
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value.
|
||||
[snip]
|
||||
XX / ImageKit.io
|
||||
\ (imagekit)
|
||||
[snip]
|
||||
Storage> imagekit
|
||||
|
||||
Option endpoint.
|
||||
You can find your ImageKit.io URL endpoint in your [dashboard](https://imagekit.io/dashboard/developer/api-keys)
|
||||
Enter a value.
|
||||
endpoint> https://ik.imagekit.io/imagekit_id
|
||||
|
||||
Option public_key.
|
||||
You can find your ImageKit.io public key in your [dashboard](https://imagekit.io/dashboard/developer/api-keys)
|
||||
Enter a value.
|
||||
public_key> public_****************************
|
||||
|
||||
Option private_key.
|
||||
You can find your ImageKit.io private key in your [dashboard](https://imagekit.io/dashboard/developer/api-keys)
|
||||
Enter a value.
|
||||
private_key> private_****************************
|
||||
|
||||
Edit advanced config?
|
||||
y) Yes
|
||||
n) No (default)
|
||||
y/n> n
|
||||
|
||||
Configuration complete.
|
||||
Options:
|
||||
- type: imagekit
|
||||
- endpoint: https://ik.imagekit.io/imagekit_id
|
||||
- public_key: public_****************************
|
||||
- private_key: private_****************************
|
||||
|
||||
Keep this "imagekit-media-library" remote?
|
||||
y) Yes this is OK (default)
|
||||
e) Edit this remote
|
||||
d) Delete this remote
|
||||
y/e/d> y
|
||||
```
|
||||
List directories in the top level of your Media Library
|
||||
```
|
||||
rclone lsd imagekit-media-library:
|
||||
```
|
||||
Make a new directory.
|
||||
```
|
||||
rclone mkdir imagekit-media-library:directory
|
||||
```
|
||||
List the contents of a directory.
|
||||
```
|
||||
rclone ls imagekit-media-library:directory
|
||||
```
|
||||
|
||||
### Modified time and hashes
|
||||
|
||||
ImageKit does not support modification times or hashes yet.
|
||||
|
||||
### Checksums
|
||||
|
||||
No checksums are supported.
|
||||
|
||||
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/imagekit/imagekit.go then run make backenddocs" >}}
|
||||
### Standard options
|
||||
|
||||
Here are the Standard options specific to imagekit (ImageKit.io).
|
||||
|
||||
#### --imagekit-endpoint
|
||||
|
||||
You can find your ImageKit.io URL endpoint in your [dashboard](https://imagekit.io/dashboard/developer/api-keys)
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: endpoint
|
||||
- Env Var: RCLONE_IMAGEKIT_ENDPOINT
|
||||
- Type: string
|
||||
- Required: true
|
||||
|
||||
#### --imagekit-public-key
|
||||
|
||||
You can find your ImageKit.io public key in your [dashboard](https://imagekit.io/dashboard/developer/api-keys)
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: public_key
|
||||
- Env Var: RCLONE_IMAGEKIT_PUBLIC_KEY
|
||||
- Type: string
|
||||
- Required: true
|
||||
|
||||
#### --imagekit-private-key
|
||||
|
||||
You can find your ImageKit.io private key in your [dashboard](https://imagekit.io/dashboard/developer/api-keys)
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: private_key
|
||||
- Env Var: RCLONE_IMAGEKIT_PRIVATE_KEY
|
||||
- Type: string
|
||||
- Required: true
|
||||
|
||||
### Advanced options
|
||||
|
||||
Here are the Advanced options specific to imagekit (ImageKit.io).
|
||||
|
||||
#### --imagekit-only-signed
|
||||
|
||||
If you have configured `Restrict unsigned image URLs` in your dashboard settings, set this to true.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: only_signed
|
||||
- Env Var: RCLONE_IMAGEKIT_ONLY_SIGNED
|
||||
- Type: bool
|
||||
- Default: false
|
||||
|
||||
#### --imagekit-versions
|
||||
|
||||
Include old versions in directory listings.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: versions
|
||||
- Env Var: RCLONE_IMAGEKIT_VERSIONS
|
||||
- Type: bool
|
||||
- Default: false
|
||||
|
||||
#### --imagekit-encoding
|
||||
|
||||
The encoding for the backend.
|
||||
|
||||
See the [encoding section in the overview](/overview/#encoding) for more info.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: encoding
|
||||
- Env Var: RCLONE_IMAGEKIT_ENCODING
|
||||
- Type: Encoding
|
||||
- Default: Slash,LtGt,DoubleQuote,Dollar,Question,Hash,Percent,BackSlash,Del,Ctl,InvalidUtf8,Dot,SquareBracket
|
||||
|
||||
### Metadata
|
||||
|
||||
Any metadata supported by the underlying remote is read and written.
|
||||
|
||||
Here are the possible system metadata items for the imagekit backend.
|
||||
|
||||
| Name | Help | Type | Example | Read Only |
|
||||
|------|------|------|---------|-----------|
|
||||
| aws-tags | AI generated tags by AWS Rekognition associated with the file | string | tag1,tag2 | **Y** |
|
||||
| btime | Time of file birth (creation) read from Last-Modified header | RFC 3339 | 2006-01-02T15:04:05.999999999Z07:00 | **Y** |
|
||||
| custom-coordinates | Custom coordinates of the file | string | 0,0,100,100 | **Y** |
|
||||
| file-type | Type of the file | string | image | **Y** |
|
||||
| google-tags | AI generated tags by Google Cloud Vision associated with the file | string | tag1,tag2 | **Y** |
|
||||
| has-alpha | Whether the image has alpha channel or not | bool | | **Y** |
|
||||
| height | Height of the image or video in pixels | int | | **Y** |
|
||||
| is-private-file | Whether the file is private or not | bool | | **Y** |
|
||||
| size | Size of the object in bytes | int64 | | **Y** |
|
||||
| tags | Tags associated with the file | string | tag1,tag2 | **Y** |
|
||||
| width | Width of the image or video in pixels | int | | **Y** |
|
||||
|
||||
See the [metadata](/docs/#metadata) docs for more info.
|
||||
|
||||
{{< rem autogenerated options stop >}}
|
@ -148,6 +148,9 @@ backends:
|
||||
- backend: "hidrive"
|
||||
remote: "TestHiDrive:"
|
||||
fastlist: false
|
||||
- backend: "imagekit"
|
||||
remote: "TestImageKit:"
|
||||
fastlist: false
|
||||
- backend: "internetarchive"
|
||||
remote: "TestIA:rclone-integration-test"
|
||||
fastlist: true
|
||||
|
1
go.mod
1
go.mod
@ -79,6 +79,7 @@ require (
|
||||
golang.org/x/text v0.13.0
|
||||
golang.org/x/time v0.3.0
|
||||
google.golang.org/api v0.148.0
|
||||
gopkg.in/validator.v2 v2.0.1
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
storj.io/uplink v1.12.1
|
||||
)
|
||||
|
2
go.sum
2
go.sum
@ -935,6 +935,8 @@ gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/validator.v2 v2.0.1 h1:xF0KWyGWXm/LM2G1TrEjqOu4pa6coO9AlWSf3msVfDY=
|
||||
gopkg.in/validator.v2 v2.0.1/go.mod h1:lIUZBlB3Im4s/eYp39Ry/wkR02yOPhZ9IwIRBjuPuG8=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
|
Loading…
Reference in New Issue
Block a user