zoho: switch to large file upload API for larger files, fix missing URL encoding of filenames for the upload API

buengese 2024-08-30 01:05:40 +02:00 committed by Nick Craig-Wood
parent 7daed30754
commit c172742cef
3 changed files with 147 additions and 32 deletions
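
The encoding half of the change is simply that the file name is now run through url.QueryEscape before it is sent, both in the "filename" query parameter of the normal upload API and in the "x-filename" header of the new large file upload API (see the hunks below). A minimal standalone sketch of what that escaping produces; the sample names are made up:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// url.QueryEscape is the same call the diff applies to the upload filename.
	for _, name := range []string{"report.txt", "my file#1.txt", "päckchen.zip"} {
		fmt.Printf("%-18q -> %s\n", name, url.QueryEscape(name))
	}
}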


@@ -180,11 +180,38 @@ func (ui *UploadInfo) GetUploadFileInfo() (*UploadFileInfo, error) {
	return &ufi, nil
}

// LargeUploadInfo is once again a slightly different version of UploadInfo
// returned as part of a LargeUploadResponse by the large file upload API.
type LargeUploadInfo struct {
	Attributes struct {
		ParentID    string `json:"parent_id"`
		FileName    string `json:"file_name"`
		RessourceID string `json:"resource_id"`
		FileInfo    string `json:"file_info"`
	} `json:"attributes"`
}

// GetUploadFileInfo decodes the embedded FileInfo
func (ui *LargeUploadInfo) GetUploadFileInfo() (*UploadFileInfo, error) {
	var ufi UploadFileInfo
	err := json.Unmarshal([]byte(ui.Attributes.FileInfo), &ufi)
	if err != nil {
		return nil, fmt.Errorf("failed to decode FileInfo: %w", err)
	}
	return &ufi, nil
}

// UploadResponse is the response to a file Upload
type UploadResponse struct {
	Uploads []UploadInfo `json:"data"`
}

// LargeUploadResponse is the response returned by the large file upload API.
type LargeUploadResponse struct {
	Uploads []LargeUploadInfo `json:"data"`
	Status  string            `json:"status"`
}

// WriteMetadataRequest is used to write metadata for a
// single item
type WriteMetadataRequest struct {
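
The file_info attribute in the LargeUploadInfo hunk above is JSON that arrives string-encoded inside the response document, which is why GetUploadFileInfo runs a second json.Unmarshal over Attributes.FileInfo. A minimal sketch of that two-step decode, using a made-up payload and a stand-in struct instead of the real api.UploadFileInfo:

package main

import (
	"encoding/json"
	"fmt"
)

// fileInfo stands in for api.UploadFileInfo; the real field set lives elsewhere in types.go.
type fileInfo struct {
	Size int64 `json:"size"`
}

// largeUploadInfo mirrors the shape of the LargeUploadInfo type added above.
type largeUploadInfo struct {
	Attributes struct {
		FileInfo string `json:"file_info"` // inner JSON, encoded as a string
	} `json:"attributes"`
}

func main() {
	// Hypothetical response body: note the escaped quotes around the inner object.
	body := `{"attributes":{"file_info":"{\"size\":42}"}}`

	var ui largeUploadInfo
	if err := json.Unmarshal([]byte(body), &ui); err != nil { // first pass: outer document
		panic(err)
	}
	var fi fileInfo
	if err := json.Unmarshal([]byte(ui.Attributes.FileInfo), &fi); err != nil { // second pass: embedded string
		panic(err)
	}
	fmt.Println("decoded size:", fi.Size) // prints: decoded size: 42
}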


@@ -14,6 +14,7 @@ import (
	"strings"
	"time"

	"github.com/google/uuid"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/random"
@@ -63,6 +64,7 @@ var (
	}
	rootURL     = "https://workdrive.zoho.eu/api/v1"
	downloadURL = "https://download.zoho.eu/v1/workdrive"
	uploadURL   = "http://upload.zoho.eu/workdrive-api/v1/"
	accountsURL = "https://accounts.zoho.eu"
)
@@ -207,7 +209,8 @@ type Fs struct {
	opt         Options            // parsed options
	features    *fs.Features       // optional features
	srv         *rest.Client       // the connection to the server
	downloadsrv *rest.Client       // the connection to the download server
	uploadsrv   *rest.Client       // the connection to the upload server
	dirCache    *dircache.DirCache // Map of directory path to directory id
	pacer       *fs.Pacer          // pacer for API calls
}
@@ -232,7 +235,8 @@ func setupRegion(m configmap.Mapper) error {
		return errors.New("no region set")
	}
	rootURL = fmt.Sprintf("https://workdrive.zoho.%s/api/v1", region)
	downloadURL = fmt.Sprintf("https://download.zoho.%s/v1/workdrive", region)
	uploadURL = fmt.Sprintf("https://upload.zoho.%s/workdrive-api/v1", region)
	accountsURL = fmt.Sprintf("https://accounts.zoho.%s", region)
	oauthConfig.Endpoint.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region)
	oauthConfig.Endpoint.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region)
@@ -410,6 +414,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		opt:         *opt,
		srv:         rest.NewClient(oAuthClient).SetRoot(rootURL),
		downloadsrv: rest.NewClient(oAuthClient).SetRoot(downloadURL),
		uploadsrv:   rest.NewClient(oAuthClient).SetRoot(uploadURL),
		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}
	f.features = (&fs.Features{
@@ -648,9 +653,61 @@ func (f *Fs) createObject(ctx context.Context, remote string, size int64, modTim
	return
}

func (f *Fs) uploadLargeFile(ctx context.Context, name string, parent string, size int64, in io.Reader, options ...fs.OpenOption) (*api.Item, error) {
	opts := rest.Opts{
		Method:        "POST",
		Path:          "/stream/upload",
		Body:          in,
		ContentLength: &size,
		ContentType:   "application/octet-stream",
		Options:       options,
		ExtraHeaders: map[string]string{
			"x-filename":          url.QueryEscape(name),
			"x-parent_id":         parent,
			"override-name-exist": "true",
			"upload-id":           uuid.New().String(),
			"x-streammode":        "1",
		},
	}

	var err error
	var resp *http.Response
	var uploadResponse *api.LargeUploadResponse
	err = f.pacer.CallNoRetry(func() (bool, error) {
		resp, err = f.uploadsrv.CallJSON(ctx, &opts, nil, &uploadResponse)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("upload large error: %v", err)
	}
	if len(uploadResponse.Uploads) != 1 {
		return nil, errors.New("upload: invalid response")
	}
	upload := uploadResponse.Uploads[0]
	uploadInfo, err := upload.GetUploadFileInfo()
	if err != nil {
		return nil, fmt.Errorf("upload error: %w", err)
	}

	// Fill in the api.Item from the api.UploadFileInfo
	var info api.Item
	info.ID = upload.Attributes.RessourceID
	info.Attributes.Name = upload.Attributes.FileName
	// info.Attributes.Type = not used
	info.Attributes.IsFolder = false
	// info.Attributes.CreatedTime = not used
	info.Attributes.ModifiedTime = uploadInfo.GetModTime()
	// info.Attributes.UploadedTime = 0 not used
	info.Attributes.StorageInfo.Size = uploadInfo.Size
	info.Attributes.StorageInfo.FileCount = 0
	info.Attributes.StorageInfo.FolderCount = 0
	return &info, nil
}

func (f *Fs) upload(ctx context.Context, name string, parent string, size int64, in io.Reader, options ...fs.OpenOption) (*api.Item, error) {
	params := url.Values{}
	params.Set("filename", url.QueryEscape(name))
	params.Set("parent_id", parent)
	params.Set("override-name-exist", strconv.FormatBool(true))
	formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name)
@@ -710,6 +767,11 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	existingObj, err := f.NewObject(ctx, src.Remote())
	switch err {
	case nil:
		return existingObj, existingObj.Update(ctx, in, src, options...)
	case fs.ErrorObjectNotFound:
		size := src.Size()
		remote := src.Remote()
@@ -719,12 +781,26 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
			return nil, err
		}

		// use normal upload API for small sizes (<10MiB)
		if size < 10*1024*1024 {
			info, err := f.upload(ctx, f.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
			if err != nil {
				return nil, err
			}
			return f.newObjectWithInfo(ctx, remote, info)
		}
		// large file API otherwise
		info, err := f.uploadLargeFile(ctx, f.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
		if err != nil {
			return nil, err
		}
		return f.newObjectWithInfo(ctx, remote, info)
	default:
		return nil, err
	}
}

// Mkdir creates the container if it doesn't exist
@@ -1188,11 +1264,22 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		return err
	}

	// use normal upload API for small sizes (<10MiB)
	if size < 10*1024*1024 {
		info, err := o.fs.upload(ctx, o.fs.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
		if err != nil {
			return err
		}
		return o.setMetaData(info)
	}
	// large file API otherwise
	info, err := o.fs.uploadLargeFile(ctx, o.fs.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
	if err != nil {
		return err
	}
	return o.setMetaData(info)
}
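
Put and Update above choose between the two upload paths purely by the declared source size, with 10 MiB as the cutoff. A small standalone sketch of that dispatch rule; the helper name and the returned labels are illustrative, not part of the commit:

package main

import "fmt"

// smallUploadCutoff mirrors the 10*1024*1024 comparison used in Put and Update.
const smallUploadCutoff = 10 * 1024 * 1024

// uploadAPIFor reports which path a file of the given size would take.
func uploadAPIFor(size int64) string {
	if size < smallUploadCutoff {
		return "normal upload API"
	}
	return "large file upload API (/stream/upload)"
}

func main() {
	for _, size := range []int64{5 << 20, 10 << 20, 1 << 30} {
		fmt.Printf("%11d bytes -> %s\n", size, uploadAPIFor(size))
	}
}

Note that a source whose size is unknown (reported as -1) also satisfies the < 10 MiB check and therefore takes the normal upload path.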


@@ -12,6 +12,7 @@ import (
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName:      "TestZoho:",
		SkipInvalidUTF8: true,
		NilObject:       (*zoho.Object)(nil),
	})
}