pikpak: new backend

Fixes #6429
wiserain 2023-04-05 00:33:48 +09:00 committed by GitHub
parent 64cf9ac911
commit 243bcc9d07
13 changed files with 2849 additions and 0 deletions


@@ -67,6 +67,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)


@@ -36,6 +36,7 @@ import (
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/oracleobjectstorage"
_ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/pikpak"
_ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor"

backend/pikpak/api/types.go Normal file (535 lines)

@@ -0,0 +1,535 @@
// Package api has type definitions for pikpak
//
// Manually obtained from the API responses using the browser's developer tools and https://mholt.github.io/json-to-go/
package api
import (
"fmt"
"reflect"
"strconv"
"time"
)
const (
// "2022-09-17T14:31:06.056+08:00"
timeFormat = `"` + time.RFC3339 + `"`
)
// Time represents date and time information for the pikpak API, using RFC3339
type Time time.Time
// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
timeString := (*time.Time)(t).Format(timeFormat)
return []byte(timeString), nil
}
// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
if string(data) == "null" || string(data) == `""` {
return nil
}
newT, err := time.Parse(timeFormat, string(data))
if err != nil {
return err
}
*t = Time(newT)
return nil
}
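As an aside, here is a minimal standalone sketch (not part of the backend itself) of how this Time type round-trips through JSON, using the sample timestamp from the comment above:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/rclone/rclone/backend/pikpak/api"
)

func main() {
	// Parse a timestamp as returned by the pikpak API.
	var t api.Time
	if err := json.Unmarshal([]byte(`"2022-09-17T14:31:06.056+08:00"`), &t); err != nil {
		fmt.Println("unmarshal:", err)
		return
	}
	fmt.Println(time.Time(t).UTC()) // 2022-09-17 06:31:06.056 +0000 UTC

	// Marshal it back; MarshalJSON emits the quoted RFC3339 representation.
	out, _ := json.Marshal(&t)
	fmt.Println(string(out))
}
```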
// Types of things in Item
const (
KindOfFolder = "drive#folder"
KindOfFile = "drive#file"
KindOfFileList = "drive#fileList"
KindOfResumable = "drive#resumable"
KindOfForm = "drive#form"
ThumbnailSizeS = "SIZE_SMALL"
ThumbnailSizeM = "SIZE_MEDIUM"
ThumbnailSizeL = "SIZE_LARGE"
PhaseTypeComplete = "PHASE_TYPE_COMPLETE"
PhaseTypeRunning = "PHASE_TYPE_RUNNING"
PhaseTypeError = "PHASE_TYPE_ERROR"
PhaseTypePending = "PHASE_TYPE_PENDING"
UploadTypeForm = "UPLOAD_TYPE_FORM"
UploadTypeResumable = "UPLOAD_TYPE_RESUMABLE"
ListLimit = 100
)
// ------------------------------------------------------------
// Error details api error from pikpak
type Error struct {
Reason string `json:"error"` // short description of the reason, e.g. "file_name_empty" "invalid_request"
Code int `json:"error_code"`
URL string `json:"error_url,omitempty"`
Message string `json:"error_description,omitempty"`
// can have either `error_details` or `details`
ErrorDetails []*ErrorDetails `json:"error_details,omitempty"`
Details []*ErrorDetails `json:"details,omitempty"`
}
// ErrorDetails contains further details of api error
type ErrorDetails struct {
Type string `json:"@type,omitempty"`
Reason string `json:"reason,omitempty"`
Domain string `json:"domain,omitempty"`
Metadata struct {
} `json:"metadata,omitempty"` // TODO: undiscovered yet
Locale string `json:"locale,omitempty"` // e.g. "en"
Message string `json:"message,omitempty"`
StackEntries []interface{} `json:"stack_entries,omitempty"` // TODO: undiscovered yet
Detail string `json:"detail,omitempty"`
}
// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
out := fmt.Sprintf("Error %q (%d)", e.Reason, e.Code)
if e.Message != "" {
out += ": " + e.Message
}
return out
}
// Check Error satisfies the error interface
var _ error = (*Error)(nil)
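A caller-side sketch (with a hypothetical helper, not part of this commit) showing how the error type can be unwrapped and inspected with errors.As:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/rclone/rclone/backend/pikpak/api"
)

// describe is a hypothetical helper showing how an api.Error returned
// by the backend could be unwrapped and inspected by a caller.
func describe(err error) string {
	var apiErr *api.Error
	if errors.As(err, &apiErr) {
		return fmt.Sprintf("pikpak API error %d (%s): %s", apiErr.Code, apiErr.Reason, apiErr.Message)
	}
	return err.Error()
}

func main() {
	err := fmt.Errorf("upload failed: %w", &api.Error{
		Reason:  "file_name_empty",
		Code:    400,
		Message: "file name is empty",
	})
	fmt.Println(describe(err))
}
```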
// ------------------------------------------------------------
// Filters contains parameters for filters when listing.
//
// possible operators
// * in: a comma-separated list of strings
// * eq: "true" or "false"
// * gt or lt: time format string, e.g. "2023-01-28T10:56:49.757+08:00"
type Filters struct {
Phase map[string]string `json:"phase,omitempty"` // "in" or "eq"
Trashed map[string]bool `json:"trashed,omitempty"` // "eq"
Kind map[string]string `json:"kind,omitempty"` // "eq"
Starred map[string]bool `json:"starred,omitempty"` // "eq"
ModifiedTime map[string]string `json:"modified_time,omitempty"` // "gt" or "lt"
}
// Set sets filter values using field name, operator and corresponding value
func (f *Filters) Set(field, operator, value string) {
if value == "" {
// UNSET for empty values
return
}
r := reflect.ValueOf(f)
fd := reflect.Indirect(r).FieldByName(field)
if v, err := strconv.ParseBool(value); err == nil {
fd.Set(reflect.ValueOf(map[string]bool{operator: v}))
} else {
fd.Set(reflect.ValueOf(map[string]string{operator: value}))
}
}
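A short, self-contained sketch of how Set builds the filter maps: values that parse as booleans go into a map[string]bool, everything else stays a string, and empty values leave the field unset.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/rclone/rclone/backend/pikpak/api"
)

func main() {
	var filters api.Filters
	filters.Set("Trashed", "eq", "true")              // parses as bool -> {"eq": true}
	filters.Set("Kind", "eq", api.KindOfFolder)       // stays a string -> {"eq": "drive#folder"}
	filters.Set("Phase", "in", api.PhaseTypeComplete) // "in" takes a comma-separated list
	filters.Set("Starred", "eq", "")                  // empty value -> field left unset

	out, _ := json.Marshal(&filters)
	fmt.Println(string(out))
}
```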
// ------------------------------------------------------------
// Common Elements
// Link contains a download URL for opening files
type Link struct {
URL string `json:"url"`
Token string `json:"token"`
Expire Time `json:"expire"`
Type string `json:"type,omitempty"`
}
// Valid reports whether l is non-nil, has a URL, and is not expired.
func (l *Link) Valid() bool {
return l != nil && l.URL != "" && time.Now().Add(10*time.Second).Before(time.Time(l.Expire))
}
// URL is a basic form of URL
type URL struct {
Kind string `json:"kind,omitempty"` // e.g. "upload#url"
URL string `json:"url,omitempty"`
}
// ------------------------------------------------------------
// Base Elements
// FileList contains a list of File elements
type FileList struct {
Kind string `json:"kind,omitempty"` // drive#fileList
Files []*File `json:"files,omitempty"`
NextPageToken string `json:"next_page_token"`
Version string `json:"version,omitempty"`
VersionOutdated bool `json:"version_outdated,omitempty"`
}
// File is a basic element representing a single file object
//
// There are two types of download links,
// 1) one from File.WebContentLink or File.Links.ApplicationOctetStream.URL and
// 2) the other from File.Medias[].Link.URL.
// Empirically, 2) is less restrictive to multiple concurrent range requests
// for a single file, i.e. it supports a higher `--multi-thread-streams=N`.
// However, it is not generally applicable as it is only available for media.
type File struct {
Apps []*FileApp `json:"apps,omitempty"`
Audit *FileAudit `json:"audit,omitempty"`
Collection string `json:"collection,omitempty"` // TODO
CreatedTime Time `json:"created_time,omitempty"`
DeleteTime Time `json:"delete_time,omitempty"`
FileCategory string `json:"file_category,omitempty"`
FileExtension string `json:"file_extension,omitempty"`
FolderType string `json:"folder_type,omitempty"`
Hash string `json:"hash,omitempty"` // sha1 but NOT a valid file hash. looks like a torrent hash
IconLink string `json:"icon_link,omitempty"`
ID string `json:"id,omitempty"`
Kind string `json:"kind,omitempty"` // "drive#file"
Links *FileLinks `json:"links,omitempty"`
Md5Checksum string `json:"md5_checksum,omitempty"`
Medias []*Media `json:"medias,omitempty"`
MimeType string `json:"mime_type,omitempty"`
ModifiedTime Time `json:"modified_time,omitempty"` // updated when renamed or moved
Name string `json:"name,omitempty"`
OriginalFileIndex int `json:"original_file_index,omitempty"` // TODO
OriginalURL string `json:"original_url,omitempty"`
Params *FileParams `json:"params,omitempty"`
ParentID string `json:"parent_id,omitempty"`
Phase string `json:"phase,omitempty"`
Revision int `json:"revision,omitempty,string"`
Size int64 `json:"size,omitempty,string"`
SortName string `json:"sort_name,omitempty"`
Space string `json:"space,omitempty"`
SpellName []interface{} `json:"spell_name,omitempty"` // TODO maybe list of something?
Starred bool `json:"starred,omitempty"`
ThumbnailLink string `json:"thumbnail_link,omitempty"`
Trashed bool `json:"trashed,omitempty"`
UserID string `json:"user_id,omitempty"`
UserModifiedTime Time `json:"user_modified_time,omitempty"`
WebContentLink string `json:"web_content_link,omitempty"`
Writable bool `json:"writable,omitempty"`
}
// FileLinks includes links to file at backend
type FileLinks struct {
ApplicationOctetStream *Link `json:"application/octet-stream,omitempty"`
}
// FileAudit contains audit information for the file
type FileAudit struct {
Status string `json:"status,omitempty"` // "STATUS_OK"
Message string `json:"message,omitempty"`
Title string `json:"title,omitempty"`
}
// Media contains info about a supported version of the media, e.g. original, transcoded, etc.
type Media struct {
MediaID string `json:"media_id,omitempty"`
MediaName string `json:"media_name,omitempty"`
Video struct {
Height int `json:"height,omitempty"`
Width int `json:"width,omitempty"`
Duration int64 `json:"duration,omitempty"`
BitRate int `json:"bit_rate,omitempty"`
FrameRate int `json:"frame_rate,omitempty"`
VideoCodec string `json:"video_codec,omitempty"`
AudioCodec string `json:"audio_codec,omitempty"`
VideoType string `json:"video_type,omitempty"`
} `json:"video,omitempty"`
Link *Link `json:"link,omitempty"`
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
VipTypes []interface{} `json:"vip_types,omitempty"` // TODO maybe list of something?
RedirectLink string `json:"redirect_link,omitempty"`
IconLink string `json:"icon_link,omitempty"`
IsDefault bool `json:"is_default,omitempty"`
Priority int `json:"priority,omitempty"`
IsOrigin bool `json:"is_origin,omitempty"`
ResolutionName string `json:"resolution_name,omitempty"`
IsVisible bool `json:"is_visible,omitempty"`
Category string `json:"category,omitempty"`
}
// FileParams includes parameters for instant open
type FileParams struct {
Duration int64 `json:"duration,omitempty,string"` // in seconds
Height int `json:"height,omitempty,string"`
Platform string `json:"platform,omitempty"` // "Upload"
PlatformIcon string `json:"platform_icon,omitempty"`
URL string `json:"url,omitempty"`
Width int `json:"width,omitempty,string"`
}
// FileApp includes parameters for instant open
type FileApp struct {
ID string `json:"id,omitempty"` // "decompress" for rar files
Name string `json:"name,omitempty"` // "decompress" for rar files
Access []interface{} `json:"access,omitempty"`
Link string `json:"link,omitempty"` // "https://mypikpak.com/drive/decompression/{File.Id}?gcid={File.Hash}\u0026wv-style=topbar%3Ahide"
RedirectLink string `json:"redirect_link,omitempty"`
VipTypes []interface{} `json:"vip_types,omitempty"`
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
IconLink string `json:"icon_link,omitempty"`
IsDefault bool `json:"is_default,omitempty"`
Params struct {
} `json:"params,omitempty"` // TODO
CategoryIds []interface{} `json:"category_ids,omitempty"`
AdSceneType int `json:"ad_scene_type,omitempty"`
Space string `json:"space,omitempty"`
Links struct {
} `json:"links,omitempty"` // TODO
}
// ------------------------------------------------------------
// TaskList contains a list of Task elements
type TaskList struct {
Tasks []*Task `json:"tasks,omitempty"` // "drive#task"
NextPageToken string `json:"next_page_token"`
ExpiresIn int `json:"expires_in,omitempty"`
}
// Task is a basic element representing a single task such as offline download and upload
type Task struct {
Kind string `json:"kind,omitempty"` // "drive#task"
ID string `json:"id,omitempty"` // task id?
Name string `json:"name,omitempty"` // torrent name?
Type string `json:"type,omitempty"` // "offline"
UserID string `json:"user_id,omitempty"`
Statuses []interface{} `json:"statuses,omitempty"` // TODO
StatusSize int `json:"status_size,omitempty"` // TODO
Params *TaskParams `json:"params,omitempty"` // TODO
FileID string `json:"file_id,omitempty"`
FileName string `json:"file_name,omitempty"`
FileSize string `json:"file_size,omitempty"`
Message string `json:"message,omitempty"` // e.g. "Saving"
CreatedTime Time `json:"created_time,omitempty"`
UpdatedTime Time `json:"updated_time,omitempty"`
ThirdTaskID string `json:"third_task_id,omitempty"` // TODO
Phase string `json:"phase,omitempty"` // e.g. "PHASE_TYPE_RUNNING"
Progress int `json:"progress,omitempty"`
IconLink string `json:"icon_link,omitempty"`
Callback string `json:"callback,omitempty"`
ReferenceResource interface{} `json:"reference_resource,omitempty"` // TODO
Space string `json:"space,omitempty"`
}
// TaskParams includes parameters describing the status of a Task
type TaskParams struct {
Age string `json:"age,omitempty"`
PredictSpeed string `json:"predict_speed,omitempty"`
PredictType string `json:"predict_type,omitempty"`
URL string `json:"url,omitempty"`
}
// Form contains parameters for upload by multipart/form-data
type Form struct {
Headers struct{} `json:"headers"`
Kind string `json:"kind"` // "drive#form"
Method string `json:"method"` // "POST"
MultiParts struct {
OSSAccessKeyID string `json:"OSSAccessKeyId"`
Signature string `json:"Signature"`
Callback string `json:"callback"`
Key string `json:"key"`
Policy string `json:"policy"`
XUserData string `json:"x:user_data"`
} `json:"multi_parts"`
URL string `json:"url"`
}
// Resumable contains parameters for a resumable upload
type Resumable struct {
Kind string `json:"kind,omitempty"` // "drive#resumable"
Provider string `json:"provider,omitempty"` // e.g. "PROVIDER_ALIYUN"
Params *ResumableParams `json:"params,omitempty"`
}
// ResumableParams specifies parameters for a resumable upload
type ResumableParams struct {
AccessKeyID string `json:"access_key_id,omitempty"`
AccessKeySecret string `json:"access_key_secret,omitempty"`
Bucket string `json:"bucket,omitempty"`
Endpoint string `json:"endpoint,omitempty"`
Expiration Time `json:"expiration,omitempty"`
Key string `json:"key,omitempty"`
SecurityToken string `json:"security_token,omitempty"`
}
// FileInArchive is a basic element in archive
type FileInArchive struct {
Index int `json:"index,omitempty"`
Filename string `json:"filename,omitempty"`
Filesize string `json:"filesize,omitempty"`
MimeType string `json:"mime_type,omitempty"`
Gcid string `json:"gcid,omitempty"`
Kind string `json:"kind,omitempty"`
IconLink string `json:"icon_link,omitempty"`
Path string `json:"path,omitempty"`
}
// ------------------------------------------------------------
// NewFile is a response to RequestNewFile
type NewFile struct {
File *File `json:"file,omitempty"`
Form *Form `json:"form,omitempty"`
Resumable *Resumable `json:"resumable,omitempty"`
Task *Task `json:"task,omitempty"` // null in this case
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_FORM" or "UPLOAD_TYPE_RESUMABLE"
}
// NewTask is a response to RequestNewTask
type NewTask struct {
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_URL"
File *File `json:"file,omitempty"` // null in this case
Task *Task `json:"task,omitempty"`
URL *URL `json:"url,omitempty"` // {"kind": "upload#url"}
}
// About reports the drive status
type About struct {
Kind string `json:"kind,omitempty"` // "drive#about"
Quota *Quota `json:"quota,omitempty"`
ExpiresAt string `json:"expires_at,omitempty"`
Quotas struct {
} `json:"quotas,omitempty"` // maybe []*Quota?
}
// Quota reports the drive quota
type Quota struct {
Kind string `json:"kind,omitempty"` // "drive#quota"
Limit int64 `json:"limit,omitempty,string"` // limit in bytes
Usage int64 `json:"usage,omitempty,string"` // bytes in use
UsageInTrash int64 `json:"usage_in_trash,omitempty,string"` // bytes in trash but this seems not working
PlayTimesLimit string `json:"play_times_limit,omitempty"` // maybe in seconds
PlayTimesUsage string `json:"play_times_usage,omitempty"` // maybe in seconds
}
// Share is a response to RequestShare
//
// used in PublicLink()
type Share struct {
ShareID string `json:"share_id,omitempty"`
ShareURL string `json:"share_url,omitempty"`
PassCode string `json:"pass_code,omitempty"`
ShareText string `json:"share_text,omitempty"`
}
// User contains user account information
//
// GET https://user.mypikpak.com/v1/user/me
type User struct {
Sub string `json:"sub,omitempty"` // userid for internal use
Name string `json:"name,omitempty"` // Username
Picture string `json:"picture,omitempty"` // URL to Avatar image
Email string `json:"email,omitempty"` // redacted email address
Providers *[]UserProvider `json:"providers,omitempty"` // OAuth provider
PhoneNumber string `json:"phone_number,omitempty"`
Password string `json:"password,omitempty"` // "SET" if configured
Status string `json:"status,omitempty"` // "ACTIVE"
CreatedAt Time `json:"created_at,omitempty"`
PasswordUpdatedAt Time `json:"password_updated_at,omitempty"`
}
// UserProvider details third-party authentication
type UserProvider struct {
ID string `json:"id,omitempty"` // e.g. "google.com"
ProviderUserID string `json:"provider_user_id,omitempty"`
Name string `json:"name,omitempty"` // username
}
// VIP includes subscription details about premium account
//
// GET https://api-drive.mypikpak.com/drive/v1/privilege/vip
type VIP struct {
Result string `json:"result,omitempty"` // "ACCEPTED"
Message string `json:"message,omitempty"`
RedirectURI string `json:"redirect_uri,omitempty"`
Data struct {
Expire Time `json:"expire,omitempty"`
Status string `json:"status,omitempty"` // "invalid" or "ok"
Type string `json:"type,omitempty"` // "novip" or "platinum"
UserID string `json:"user_id,omitempty"` // same as User.Sub
} `json:"data,omitempty"`
}
// DecompressResult is a response to RequestDecompress
type DecompressResult struct {
Status string `json:"status,omitempty"` // "OK"
StatusText string `json:"status_text,omitempty"`
TaskID string `json:"task_id,omitempty"` // same as File.Id
FilesNum int `json:"files_num,omitempty"` // number of files in archive
RedirectLink string `json:"redirect_link,omitempty"`
}
// ------------------------------------------------------------
// RequestShare is a request for sharing files
type RequestShare struct {
FileIds []string `json:"file_ids,omitempty"`
ShareTo string `json:"share_to,omitempty"` // "publiclink",
ExpirationDays int `json:"expiration_days,omitempty"` // -1 = 'forever'
PassCodeOption string `json:"pass_code_option,omitempty"` // "NOT_REQUIRED"
}
// RequestBatch is a request for batch actions
type RequestBatch struct {
Ids []string `json:"ids,omitempty"`
To map[string]string `json:"to,omitempty"`
}
// RequestNewFile is a request for creating a new `drive#folder` or `drive#file`
type RequestNewFile struct {
// always required
Kind string `json:"kind"` // "drive#folder" or "drive#file"
Name string `json:"name"`
ParentID string `json:"parent_id"`
FolderType string `json:"folder_type"`
// only when uploading a new file
Hash string `json:"hash,omitempty"` // sha1sum
Resumable map[string]string `json:"resumable,omitempty"` // {"provider": "PROVIDER_ALIYUN"}
Size int64 `json:"size,omitempty"`
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_FORM" or "UPLOAD_TYPE_RESUMABLE"
}
// RequestNewTask is a request for creating a new task such as an offline download
//
// Name and ParentID can be left empty.
type RequestNewTask struct {
Kind string `json:"kind,omitempty"` // "drive#file"
Name string `json:"name,omitempty"`
ParentID string `json:"parent_id,omitempty"`
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_URL"
URL *URL `json:"url,omitempty"` // {"url": downloadUrl}
FolderType string `json:"folder_type,omitempty"` // "" if parent_id else "DOWNLOAD"
}
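As an illustration, a small standalone sketch of how an offline-download request body could be assembled from these fields (the URL and values are placeholders, following the field comments above):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/rclone/rclone/backend/pikpak/api"
)

func main() {
	// Placeholder offline-download request; FolderType is "DOWNLOAD" because
	// no parent_id is set (see the field comments above).
	req := api.RequestNewTask{
		Kind:       api.KindOfFile,
		UploadType: "UPLOAD_TYPE_URL",
		URL:        &api.URL{URL: "https://example.com/file.iso"},
		FolderType: "DOWNLOAD",
	}
	body, err := json.MarshalIndent(&req, "", "  ")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(string(body))
}
```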
// RequestDecompress is a request for decompressing archive files
type RequestDecompress struct {
Gcid string `json:"gcid,omitempty"` // same as File.Hash
Password string `json:"password,omitempty"` // ""
FileID string `json:"file_id,omitempty"`
Files []*FileInArchive `json:"files,omitempty"` // can request selected files to be decompressed
DefaultParent bool `json:"default_parent,omitempty"`
}
// ------------------------------------------------------------
// NOT implemented YET
// RequestArchiveFileList is a request for a list of files in an archive
//
// POST https://api-drive.mypikpak.com/decompress/v1/list
type RequestArchiveFileList struct {
Gcid string `json:"gcid,omitempty"` // same as api.File.Hash
Path string `json:"path,omitempty"` // "" by default
Password string `json:"password,omitempty"` // "" by default
FileID string `json:"file_id,omitempty"`
}
// ArchiveFileList is a response to RequestArchiveFileList
type ArchiveFileList struct {
Status string `json:"status,omitempty"` // "OK"
StatusText string `json:"status_text,omitempty"` // ""
TaskID string `json:"task_id,omitempty"` // ""
CurrentPath string `json:"current_path,omitempty"` // ""
Title string `json:"title,omitempty"`
FileSize int64 `json:"file_size,omitempty"`
Gcid string `json:"gcid,omitempty"` // same as File.Hash
Files []*FileInArchive `json:"files,omitempty"`
}

backend/pikpak/helper.go Normal file (253 lines)

@@ -0,0 +1,253 @@
package pikpak
import (
"bytes"
"context"
"crypto/sha1"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"os"
"github.com/rclone/rclone/backend/pikpak/api"
"github.com/rclone/rclone/lib/rest"
)
// Globals
const (
cachePrefix = "rclone-pikpak-sha1sum-"
)
// requestDecompress requests decompression of compressed files
func (f *Fs) requestDecompress(ctx context.Context, file *api.File, password string) (info *api.DecompressResult, err error) {
req := &api.RequestDecompress{
Gcid: file.Hash,
Password: password,
FileID: file.ID,
Files: []*api.FileInArchive{},
DefaultParent: true,
}
opts := rest.Opts{
Method: "POST",
Path: "/decompress/v1/decompress",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
// getUserInfo gets UserInfo from API
func (f *Fs) getUserInfo(ctx context.Context) (info *api.User, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://user.mypikpak.com/v1/user/me",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get userinfo: %w", err)
}
return
}
// getVIPInfo gets VIPInfo from API
func (f *Fs) getVIPInfo(ctx context.Context) (info *api.VIP, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://api-drive.mypikpak.com/drive/v1/privilege/vip",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get vip info: %w", err)
}
return
}
// requestBatchAction sends a batch action request to the API
//
// action can be one of batch{Copy,Delete,Trash,Untrash}
func (f *Fs) requestBatchAction(ctx context.Context, action string, req *api.RequestBatch) (err error) {
opts := rest.Opts{
Method: "POST",
Path: "/drive/v1/files:" + action,
NoResponse: true, // Only returns {"task_id":""}
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, nil)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return fmt.Errorf("batch action %q failed: %w", action, err)
}
return nil
}
// requestNewTask requests a new api.NewTask and returns api.Task
func (f *Fs) requestNewTask(ctx context.Context, req *api.RequestNewTask) (info *api.Task, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/drive/v1/files",
}
var newTask api.NewTask
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &newTask)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
return newTask.Task, nil
}
// requestNewFile requests a new file and returns api.NewFile
func (f *Fs) requestNewFile(ctx context.Context, req *api.RequestNewFile) (info *api.NewFile, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/drive/v1/files",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
// getFile gets api.File from API for the ID passed
// and returns rich information containing additional fields below
// * web_content_link
// * thumbnail_link
// * links
// * medias
func (f *Fs) getFile(ctx context.Context, ID string) (info *api.File, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/drive/v1/files/" + ID,
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
if err == nil && info.Phase != api.PhaseTypeComplete {
// could be pending right after file is created/uploaded.
return true, errors.New("not PHASE_TYPE_COMPLETE")
}
return f.shouldRetry(ctx, resp, err)
})
return
}
// patchFile updates attributes of the file by ID
//
// currently known patchable fields are
// * name
func (f *Fs) patchFile(ctx context.Context, ID string, req *api.File) (info *api.File, err error) {
opts := rest.Opts{
Method: "PATCH",
Path: "/drive/v1/files/" + ID,
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
// getAbout gets drive#quota information from server
func (f *Fs) getAbout(ctx context.Context) (info *api.About, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/drive/v1/about",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
// requestShare returns information about shareable links
func (f *Fs) requestShare(ctx context.Context, req *api.RequestShare) (info *api.Share, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/drive/v1/share",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
// readSHA1 reads the SHA1 of in, returning a reader which will read the same contents
//
// The cleanup function should be called when out is finished with
// regardless of whether this function returned an error or not.
func readSHA1(in io.Reader, size, threshold int64) (sha1sum string, out io.Reader, cleanup func(), err error) {
// we need an SHA1
hash := sha1.New()
// use a TeeReader so the SHA1 is calculated while the input is read
teeReader := io.TeeReader(in, hash)
// nothing to clean up by default
cleanup = func() {}
// don't cache small files on disk to reduce wear of the disk
if size > threshold {
var tempFile *os.File
// create the cache file
tempFile, err = os.CreateTemp("", cachePrefix)
if err != nil {
return
}
_ = os.Remove(tempFile.Name()) // Delete the file - may not work on Windows
// clean up the file after we are done with it
cleanup = func() {
// the file should normally already be closed, but just to make sure
_ = tempFile.Close()
_ = os.Remove(tempFile.Name()) // delete the cache file after we are done - may be deleted already
}
// copy the ENTIRE file to disk and calculate the SHA1 in the process
if _, err = io.Copy(tempFile, teeReader); err != nil {
return
}
// jump to the start of the local file so we can pass it along
if _, err = tempFile.Seek(0, 0); err != nil {
return
}
// replace the already read source with a reader of our cached file
out = tempFile
} else {
// that's a small file, just read it into memory
var inData []byte
inData, err = io.ReadAll(teeReader)
if err != nil {
return
}
// set the reader to our read memory block
out = bytes.NewReader(inData)
}
return hex.EncodeToString(hash.Sum(nil)), out, cleanup, nil
}
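For illustration, a standalone sketch of the same TeeReader idea the in-memory branch uses: hash the stream while buffering it so the same bytes can be read again for the upload.

```go
package main

import (
	"bytes"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("hello pikpak")

	// Hash the stream while buffering it so the same bytes can be read again.
	hash := sha1.New()
	data, err := io.ReadAll(io.TeeReader(src, hash))
	if err != nil {
		fmt.Println(err)
		return
	}
	sha1sum := hex.EncodeToString(hash.Sum(nil))
	out := bytes.NewReader(data) // re-readable copy of the source

	fmt.Println("sha1:", sha1sum)
	n, _ := io.Copy(io.Discard, out)
	fmt.Println("bytes available for upload:", n)
}
```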

backend/pikpak/pikpak.go Normal file (1707 lines)

File diff suppressed because it is too large.


@@ -0,0 +1,17 @@
// Test PikPak filesystem interface
package pikpak_test
import (
"testing"
"github.com/rclone/rclone/backend/pikpak"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestPikPak:",
NilObject: (*pikpak.Object)(nil),
})
}
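Assuming a remote named `TestPikPak:` has been configured, this can be run against the real backend from the `backend/pikpak` directory with:

    go test -v -remote TestPikPak: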


@@ -64,6 +64,7 @@ docs = [
"sia.md",
"swift.md",
"pcloud.md",
"pikpak.md",
"premiumizeme.md",
"putio.md",
"seafile.md",


@@ -152,6 +152,7 @@ WebDAV or S3, that work out of the box.)
{{< provider name="Oracle Object Storage" home="https://www.oracle.com/cloud/storage/object-storage" config="/oracleobjectstorage/" >}}
{{< provider name="ownCloud" home="https://owncloud.org/" config="/webdav/#owncloud" >}}
{{< provider name="pCloud" home="https://www.pcloud.com/" config="/pcloud/" >}}
{{< provider name="PikPak" home="https://mypikpak.com/" config="/pikpak/" >}}
{{< provider name="premiumize.me" home="https://premiumize.me/" config="/premiumizeme/" >}}
{{< provider name="put.io" home="https://put.io/" config="/putio/" >}}
{{< provider name="QingStor" home="https://www.qingcloud.com/products/storage" config="/qingstor/" >}}


@@ -63,6 +63,7 @@ See the following for detailed instructions for
* [OpenDrive](/opendrive/)
* [Oracle Object Storage](/oracleobjectstorage/)
* [Pcloud](/pcloud/)
* [PikPak](/pikpak/)
* [premiumize.me](/premiumizeme/)
* [put.io](/putio/)
* [QingStor](/qingstor/)


@@ -44,6 +44,7 @@ Here is an overview of the major features of each cloud storage system.
| OpenStack Swift | MD5 | R/W | No | No | R/W | - |
| Oracle Object Storage | MD5 | R/W | No | No | R/W | - |
| pCloud | MD5, SHA1 ⁷ | R | No | No | W | - |
| PikPak | MD5 | R | No | No | R | - |
| premiumize.me | - | - | Yes | No | R | - |
| put.io | CRC-32 | R/W | No | Yes | R | - |
| QingStor | MD5 | - ⁹ | No | No | R/W | - |
@@ -494,6 +495,7 @@ upon backend-specific capabilities.
| OpenStack Swift | Yes † | Yes | No | No | No | Yes | Yes | No | Yes | No |
| Oracle Object Storage | No | Yes | No | No | Yes | Yes | Yes | No | No | No |
| pCloud | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes |
| PikPak | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes |
| premiumize.me | Yes | No | Yes | Yes | No | No | No | Yes | Yes | Yes |
| put.io | Yes | No | Yes | Yes | Yes | No | Yes | No | Yes | Yes |
| QingStor | No | Yes | No | No | Yes | Yes | No | No | No | No |

docs/content/pikpak.md Normal file (318 lines)

@@ -0,0 +1,318 @@
---
title: "PikPak"
description: "Rclone docs for PikPak"
versionIntroduced: "v1.62"
---
# {{< icon "fa fa-cloud" >}} PikPak
PikPak is [a private cloud drive](https://mypikpak.com/).
Paths are specified as `remote:path`, and may be as deep as required, e.g. `remote:directory/subdirectory`.
## Configuration
Here is an example of making a remote for PikPak.
First run:
rclone config
This will guide you through an interactive setup process:
```
No remotes found, make a new one?
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
Enter name for new remote.
name> remote
Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
XX / PikPak
\ (pikpak)
Storage> XX
Option user.
Pikpak username.
Enter a value.
user> USERNAME
Option pass.
Pikpak password.
Choose an alternative below.
y) Yes, type in my own password
g) Generate random password
y/g> y
Enter the password:
password:
Confirm the password:
password:
Edit advanced config?
y) Yes
n) No (default)
y/n>
Configuration complete.
Options:
- type: pikpak
- user: USERNAME
- pass: *** ENCRYPTED ***
- token: {"access_token":"eyJ...","token_type":"Bearer","refresh_token":"os...","expiry":"2023-01-26T18:54:32.170582647+09:00"}
Keep this "remote" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```
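Once configured you can then use rclone like this,

List directories in top level of your PikPak

    rclone lsd remote:

List all the files in your PikPak

    rclone ls remote:

To copy a local directory to a PikPak directory called backup

    rclone copy /home/source remote:backup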
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/pikpak/pikpak.go then run make backenddocs" >}}
### Standard options
Here are the Standard options specific to pikpak (PikPak).
#### --pikpak-user
Pikpak username.
Properties:
- Config: user
- Env Var: RCLONE_PIKPAK_USER
- Type: string
- Required: true
#### --pikpak-pass
Pikpak password.
**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).
Properties:
- Config: pass
- Env Var: RCLONE_PIKPAK_PASS
- Type: string
- Required: true
### Advanced options
Here are the Advanced options specific to pikpak (PikPak).
#### --pikpak-client-id
OAuth Client Id.
Leave blank normally.
Properties:
- Config: client_id
- Env Var: RCLONE_PIKPAK_CLIENT_ID
- Type: string
- Required: false
#### --pikpak-client-secret
OAuth Client Secret.
Leave blank normally.
Properties:
- Config: client_secret
- Env Var: RCLONE_PIKPAK_CLIENT_SECRET
- Type: string
- Required: false
#### --pikpak-token
OAuth Access Token as a JSON blob.
Properties:
- Config: token
- Env Var: RCLONE_PIKPAK_TOKEN
- Type: string
- Required: false
#### --pikpak-auth-url
Auth server URL.
Leave blank to use the provider defaults.
Properties:
- Config: auth_url
- Env Var: RCLONE_PIKPAK_AUTH_URL
- Type: string
- Required: false
#### --pikpak-token-url
Token server url.
Leave blank to use the provider defaults.
Properties:
- Config: token_url
- Env Var: RCLONE_PIKPAK_TOKEN_URL
- Type: string
- Required: false
#### --pikpak-root-folder-id
ID of the root folder.
Leave blank normally.
Fill in for rclone to use a non root folder as its starting point.
Properties:
- Config: root_folder_id
- Env Var: RCLONE_PIKPAK_ROOT_FOLDER_ID
- Type: string
- Required: false
#### --pikpak-use-trash
Send files to the trash instead of deleting permanently.
Defaults to true, namely sending files to the trash.
Use `--pikpak-use-trash=false` to delete files permanently instead.
Properties:
- Config: use_trash
- Env Var: RCLONE_PIKPAK_USE_TRASH
- Type: bool
- Default: true
#### --pikpak-trashed-only
Only show files that are in the trash.
This will show trashed files in their original directory structure.
Properties:
- Config: trashed_only
- Env Var: RCLONE_PIKPAK_TRASHED_ONLY
- Type: bool
- Default: false
#### --pikpak-hash-memory-limit
Files bigger than this will be cached on disk to calculate hash if required.
Properties:
- Config: hash_memory_limit
- Env Var: RCLONE_PIKPAK_HASH_MEMORY_LIMIT
- Type: SizeSuffix
- Default: 10Mi
#### --pikpak-multi-thread-streams
Max number of streams to use for multi-thread downloads.
This will override the global flag `--multi-thread-streams`. It defaults to 1 to avoid rate limiting.
Properties:
- Config: multi_thread_streams
- Env Var: RCLONE_PIKPAK_MULTI_THREAD_STREAMS
- Type: int
- Default: 1
#### --pikpak-encoding
The encoding for the backend.
See the [encoding section in the overview](/overview/#encoding) for more info.
Properties:
- Config: encoding
- Env Var: RCLONE_PIKPAK_ENCODING
- Type: MultiEncoder
- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot
## Backend commands
Here are the commands specific to the pikpak backend.
Run them with
rclone backend COMMAND remote:
The help below will explain what arguments each command takes.
See the [backend](/commands/rclone_backend/) command for more
info on how to pass options and arguments.
These can be run on a running backend using the rc command
[backend/command](/rc/#backend-command).
### addurl
Add an offline download task for a URL
rclone backend addurl remote: [options] [<arguments>+]
This command adds an offline download task for a URL.
Usage:
rclone backend addurl pikpak:dirpath url
Downloads will be stored in 'dirpath'. If 'dirpath' is invalid,
the download will fall back to the default 'My Pack' folder.
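For example, to queue an offline download into 'pikpak:iso' (the URL is only a placeholder):

    rclone backend addurl pikpak:iso https://example.com/image.iso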
### decompress
Request decompression of a file or files in a folder
rclone backend decompress remote: [options] [<arguments>+]
This command requests decompression of a file or files in a folder.
Usage:
rclone backend decompress pikpak:dirpath {filename} -o password=password
rclone backend decompress pikpak:dirpath {filename} -o delete-src-file
An optional argument 'filename' can be specified for a file located in
'pikpak:dirpath'. You may want to pass '-o password=password' for
password-protected files. Also, pass '-o delete-src-file' to delete
source files after decompression has finished.
Result:
{
"Decompressed": 17,
"SourceDeleted": 0,
"Errors": 0
}
{{< rem autogenerated options stop >}}
## Limitations ##
### Hashes ###
PikPak supports MD5 hashes, but they are sometimes empty, especially for user-uploaded files.
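You can see the MD5 hashes rclone receives from PikPak with, for example:

    rclone md5sum remote:path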
### Deleted files ###
Deleted files will still be visible with `--pikpak-trashed-only` even after the trash is emptied. This goes away after a few days.


@@ -87,6 +87,7 @@
<a class="dropdown-item" href="/swift/"><i class="fa fa-space-shuttle fa-fw"></i> Openstack Swift</a>
<a class="dropdown-item" href="/oracleobjectstorage/"><i class="fa fa-cloud fa-fw"></i> Oracle Object Storage</a>
<a class="dropdown-item" href="/pcloud/"><i class="fa fa-cloud fa-fw"></i> pCloud</a>
<a class="dropdown-item" href="/pikpak/"><i class="fa fa-cloud fa-fw"></i> PikPak</a>
<a class="dropdown-item" href="/premiumizeme/"><i class="fa fa-user fa-fw"></i> premiumize.me</a>
<a class="dropdown-item" href="/putio/"><i class="fas fa-parking fa-fw"></i> put.io</a>
<a class="dropdown-item" href="/seafile/"><i class="fa fa-server fa-fw"></i> Seafile</a>


@@ -302,6 +302,17 @@ backends:
- backend: "pcloud"
remote: "TestPcloud:"
fastlist: true
- backend: "pikpak"
remote: "TestPikPak:"
fastlist: false
ignore:
# fs/operations
- TestCheckSum
- TestHashSums/Md5
# fs/sync
- TestSyncWithTrackRenames
# integration
- TestIntegration/FsMkdir/FsPutFiles/ObjectMimeType
- backend: "webdav"
remote: "TestWebdavNextcloud:"
ignore: