Mirror of https://github.com/rclone/rclone.git (synced 2024-12-23 07:29:35 +01:00)

build: remove unused code spotted by the deadcode linter

Parent: cb5bd47e61
Commit: 1320e84bc2
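The deadcode linter reports package-level declarations with no remaining references, which is exactly what each hunk below deletes. A minimal, self-contained illustration of what such a linter flags (the names here are invented for the example, not taken from rclone):

package example

// keep is referenced from Exported, so a dead-code linter leaves it alone.
func keep() int { return 1 }

// Exported calls keep.
func Exported() int { return keep() }

// orphan has no references anywhere in the package; this is the kind of
// declaration the deadcode linter flags for deletion. The compiler itself
// does not reject unused functions, only unused locals and imports.
func orphan() int { return 2 }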
@@ -15,8 +15,6 @@ import (
 var (
     remoteName = "TestAlias"
-    testPath   = "test"
-    filesPath  = filepath.Join(testPath, "files")
 )

 func prepare(t *testing.T, root string) {
@@ -18,7 +18,6 @@ import (
     "log"
     "net/http"
     "path"
-    "regexp"
     "strings"
     "time"

@@ -40,7 +39,6 @@ import (
 const (
     folderKind      = "FOLDER"
     fileKind        = "FILE"
-    assetKind       = "ASSET"
     statusAvailable = "AVAILABLE"
     timeFormat      = time.RFC3339 // 2014-03-07T22:31:12.173Z
     minSleep        = 20 * time.Millisecond
@@ -138,9 +136,6 @@ func (f *Fs) Features() *fs.Features {
     return f.features
 }

-// Pattern to match a acd path
-var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
-
 // parsePath parses an acd 'url'
 func parsePath(path string) (root string) {
     root = strings.Trim(path, "/")
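The matcher removals in this commit all follow the same shape: the regexp was declared but never used, while the surviving parsePath helper only trims slashes. A self-contained sketch of the kept helper, as it appears in the hunks:

package main

import (
    "fmt"
    "strings"
)

// parsePath matches the helper kept by the diff: it strips leading and
// trailing slashes from the configured remote root.
func parsePath(path string) (root string) {
    root = strings.Trim(path, "/")
    return
}

func main() {
    fmt.Println(parsePath("/bucket/dir/")) // bucket/dir
}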
@@ -8,7 +8,6 @@ import (
    "encoding/binary"
    "encoding/hex"
    "fmt"
-    gohash "hash"
    "io"
    "net/http"
    "path"
@@ -41,7 +40,7 @@ const (
     timeFormatIn       = time.RFC3339
     timeFormatOut      = "2006-01-02T15:04:05.000000000Z07:00"
     maxTotalParts      = 50000 // in multipart upload
-    maxUncommittedSize = 9 << 30 // can't upload bigger than this
+    // maxUncommittedSize = 9 << 30 // can't upload bigger than this
 )

 // Globals
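In case the shifted constant is opaque: 9 << 30 is 9 GiB (9 × 2³⁰ bytes). A quick check:

package main

import "fmt"

func main() {
    const maxUncommittedSize = 9 << 30 // 9 * 2^30 bytes = 9 GiB
    fmt.Println(maxUncommittedSize)    // 9663676416
}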
@@ -847,16 +846,6 @@ func (o *Object) Storable() bool {
     return true
 }

-// openFile represents an Object open for reading
-type openFile struct {
-    o     *Object        // Object we are reading for
-    resp  *http.Response // response of the GET
-    body  io.Reader      // reading from here
-    hash  gohash.Hash    // currently accumulating MD5
-    bytes int64          // number of bytes read on this connection
-    eof   bool           // whether we have read end of file
-}
-
 // Open an object for read
 func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
     getBlobOptions := storage.GetBlobOptions{}
@@ -924,20 +913,6 @@ func init() {
     }
 }

-// urlEncode encodes in with % encoding
-func urlEncode(in string) string {
-    var out bytes.Buffer
-    for i := 0; i < len(in); i++ {
-        c := in[i]
-        if noNeedToEncode[c] {
-            _ = out.WriteByte(c)
-        } else {
-            _, _ = out.WriteString(fmt.Sprintf("%%%2X", c))
-        }
-    }
-    return out.String()
-}
-
 // uploadMultipart uploads a file using multipart upload
 //
 // Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
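The removed urlEncode duplicated percent-encoding that the standard library already provides. A hedged sketch of the stdlib equivalents (whether the surrounding code actually switched to these is not shown in this diff):

package main

import (
    "fmt"
    "net/url"
)

func main() {
    // Percent-encoding from the standard library, covering what a
    // hand-rolled encoder like the removed urlEncode would produce.
    fmt.Println(url.PathEscape("a b/c"))  // a%20b%2Fc
    fmt.Println(url.QueryEscape("a b&c")) // a+b%26c
}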
@@ -16,7 +16,6 @@ import (
    "net/http"
    "net/url"
    "path"
-    "regexp"
    "strconv"
    "strings"
    "time"
@@ -135,9 +134,6 @@ func (f *Fs) Features() *fs.Features {
     return f.features
 }

-// Pattern to match a box path
-var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
-
 // parsePath parses an box 'url'
 func parsePath(path string) (root string) {
     root = strings.Trim(path, "/")
backend/cache/storage_persistent.go (vendored, 7 changed lines)
@@ -1070,10 +1070,3 @@ func itob(v int64) []byte {
 func btoi(d []byte) int64 {
     return int64(binary.BigEndian.Uint64(d))
 }
-
-// cloneBytes returns a copy of a given slice.
-func cloneBytes(v []byte) []byte {
-    var clone = make([]byte, len(v))
-    copy(clone, v)
-    return clone
-}
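For reference, the itob named in the hunk header is the inverse of the btoi shown, and a helper like the removed cloneBytes is usually replaced by a one-line append copy. A sketch assuming only the standard library:

package main

import (
    "encoding/binary"
    "fmt"
)

// itob converts an int64 into a fixed 8-byte big-endian key, the inverse
// of the btoi kept in the diff above.
func itob(v int64) []byte {
    b := make([]byte, 8)
    binary.BigEndian.PutUint64(b, uint64(v))
    return b
}

func main() {
    key := itob(42)
    fmt.Println(key, int64(binary.BigEndian.Uint64(key))) // [0 0 0 0 0 0 0 42] 42

    // The idiomatic stand-in for a helper like the removed cloneBytes:
    src := []byte("hello")
    clone := append([]byte(nil), src...)
    fmt.Println(string(clone)) // hello
}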
@@ -30,9 +30,6 @@ import (
 const (
     // statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
     statusResumeIncomplete = 308
-
-    // Number of times to try each chunk
-    maxTries = 10
 )

 // resumableUpload is used by the generated APIs to provide resumable uploads.
@@ -192,7 +189,7 @@ func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunk
 }

 // Upload uploads the chunks from the input
-// It retries each chunk maxTries times (with a pause of uploadPause between attempts).
+// It retries each chunk using the pacer and --low-level-retries
 func (rx *resumableUpload) Upload() (*drive.File, error) {
     start := int64(0)
     var StatusCode int
@@ -419,21 +419,6 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
     return entries, nil
 }

-// A read closer which doesn't close the input
-type readCloser struct {
-    in io.Reader
-}
-
-// Read bytes from the object - see io.Reader
-func (rc *readCloser) Read(p []byte) (n int, err error) {
-    return rc.in.Read(p)
-}
-
-// Dummy close function
-func (rc *readCloser) Close() error {
-    return nil
-}
-
 // Put the object
 //
 // Copy the reader in to the new object which is returned
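The removed readCloser is what ioutil.NopCloser already provides: a ReadCloser whose Read delegates and whose Close is a no-op. A sketch (that callers use the stdlib wrapper instead is an assumption; the diff does not show it):

package main

import (
    "fmt"
    "io/ioutil"
    "strings"
)

func main() {
    // Equivalent to the removed readCloser: Read delegates, Close does nothing.
    rc := ioutil.NopCloser(strings.NewReader("data"))
    b, _ := ioutil.ReadAll(rc)
    fmt.Println(string(b)) // data
    _ = rc.Close()         // no-op
}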
@@ -19,7 +19,6 @@ import (
    "fmt"
    "io"
    "path"
-    "regexp"
    "strings"
    "sync"
    "time"
@@ -114,9 +113,6 @@ func (f *Fs) Features() *fs.Features {
     return f.features
 }

-// Pattern to match a mega path
-var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
-
 // parsePath parses an mega 'url'
 func parsePath(path string) (root string) {
     root = strings.Trim(path, "/")
@@ -12,7 +12,6 @@ import (
    "net/http"
    "net/url"
    "path"
-    "regexp"
    "strings"
    "time"

@@ -280,9 +279,6 @@ func (f *Fs) Features() *fs.Features {
     return f.features
 }

-// Pattern to match a one drive path
-var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
-
 // parsePath parses an one drive 'url'
 func parsePath(path string) (root string) {
     root = strings.Trim(path, "/")
@@ -44,7 +44,6 @@ const (
     Size           = 20
     bitsInLastCell = 32
     shift          = 11
-    threshold      = 600
     widthInBits    = 8 * Size
     dataSize       = (widthInBits-1)/64 + 1
 )
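A quick check of how the surviving constants relate (the removed threshold had no remaining users): with Size = 20 the hash is 160 bits wide and needs three 64-bit cells:

package main

import "fmt"

func main() {
    const Size = 20
    const widthInBits = 8 * Size            // 160
    const dataSize = (widthInBits-1)/64 + 1 // ceil(160/64) = 3
    fmt.Println(widthInBits, dataSize)      // 160 3
}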
@@ -17,7 +17,6 @@ import (
    "net/http"
    "net/url"
    "path"
-    "regexp"
    "strings"
    "time"

@@ -130,9 +129,6 @@ func (f *Fs) Features() *fs.Features {
     return f.features
 }

-// Pattern to match a pcloud path
-var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
-
 // parsePath parses an pcloud 'url'
 func parsePath(path string) (root string) {
     root = strings.Trim(path, "/")
@@ -19,8 +19,8 @@ import (
 )

 const (
-    maxSinglePartSize = 1024 * 1024 * 1024 * 5 // The maximum allowed size when uploading a single object to QingStor
-    maxMultiPartSize  = 1024 * 1024 * 1024 * 1 // The maximum allowed part size when uploading a part to QingStor
+    // maxSinglePartSize = 1024 * 1024 * 1024 * 5 // The maximum allowed size when uploading a single object to QingStor
+    // maxMultiPartSize  = 1024 * 1024 * 1024 * 1 // The maximum allowed part size when uploading a part to QingStor
     minMultiPartSize = 1024 * 1024 * 4 // The minimum allowed part size when uploading a part to QingStor
     maxMultiParts    = 10000           // The maximum allowed number of parts in an multi-part upload
 )
@@ -24,7 +24,6 @@ import (
    "net/http"
    "net/url"
    "path"
-    "regexp"
    "strings"
    "time"

@@ -143,15 +142,6 @@ func (f *Fs) Features() *fs.Features {
     return f.features
 }

-// Pattern to match a webdav path
-var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
-
-// parsePath parses an webdav 'url'
-func parsePath(path string) (root string) {
-    root = strings.Trim(path, "/")
-    return
-}
-
 // retryErrorCodes is a slice of error codes that we will retry
 var retryErrorCodes = []int{
     429, // Too Many Requests.
@@ -1,5 +0,0 @@
-package src
-
-type apiRequest interface {
-    Request() *HTTPRequest
-}
@@ -12,10 +12,6 @@ func createGetRequest(client *Client, path string, params map[string]interface{}
     return createRequest(client, "GET", path, params)
 }

-func createPostRequest(client *Client, path string, params map[string]interface{}) *HTTPRequest {
-    return createRequest(client, "POST", path, params)
-}
-
 func createRequest(client *Client, method string, path string, parameters map[string]interface{}) *HTTPRequest {
     var headers = make(map[string][]string)
     headers["Authorization"] = []string{"OAuth " + client.token}
@@ -50,7 +50,6 @@ var (
     errorUncategorized      = errors.New("uncategorized error")
     errorNotEnoughArguments = errors.New("not enough arguments")
     errorTooManyArguents    = errors.New("too many arguments")
-    errorUsageError         = errors.New("usage error")
 )

 const (
@@ -1,9 +0,0 @@
-// +build plan9
-
-package fserrors
-
-// isClosedConnErrorPlatform reports whether err is an error from use
-// of a closed network connection using platform specific error codes.
-func isClosedConnErrorPlatform(err error) bool {
-    return false
-}
@@ -500,22 +500,6 @@ func DeleteFiles(toBeDeleted fs.ObjectsChan) error {
     return DeleteFilesWithBackupDir(toBeDeleted, nil)
 }

-// Read a Objects into add() for the given Fs.
-// dir is the start directory, "" for root
-// If includeAll is specified all files will be added,
-// otherwise only files passing the filter will be added.
-//
-// Each object is passed ito the function provided. If that returns
-// an error then the listing will be aborted and that error returned.
-func readFilesFn(f fs.Fs, includeAll bool, dir string, add func(fs.Object) error) (err error) {
-    return walk.Walk(f, "", includeAll, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
-        if err != nil {
-            return err
-        }
-        return entries.ForObjectError(add)
-    })
-}
-
 // SameConfig returns true if fdst and fsrc are using the same config
 // file entry
 func SameConfig(fdst, fsrc fs.Info) bool {
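The removed readFilesFn followed the same callback-with-error-abort shape as filepath.Walk in the standard library, which may help readers unfamiliar with the walk package. For comparison (stdlib only, not rclone code):

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

func main() {
    // Like the removed readFilesFn, the walk stops as soon as the
    // callback returns a non-nil error.
    err := filepath.Walk(".", func(path string, info os.FileInfo, err error) error {
        if err != nil {
            return err
        }
        fmt.Println(path)
        return nil
    })
    if err != nil {
        fmt.Println("walk aborted:", err)
    }
}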