Allow all options to be set from environment variables
The option names are munged by changing "-" to "_", converting to upper case, and prepending RCLONE_. The values are parsed by pflag.
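For example, --low-level-retries maps to RCLONE_LOW_LEVEL_RETRIES and --acd-templink-threshold maps to RCLONE_ACD_TEMPLINK_THRESHOLD. A minimal standalone sketch of the name munging, mirroring the optionToEnv helper this commit adds in fs/flags.go (the sketch itself is illustrative, not part of the commit):

package main

import (
    "fmt"
    "strings"
)

// optionToEnv mirrors the helper added in fs/flags.go: "-" becomes "_",
// the name is upper-cased, and "RCLONE_" is prepended.
func optionToEnv(name string) string {
    return "RCLONE_" + strings.ToUpper(strings.Replace(name, "-", "_", -1))
}

func main() {
    fmt.Println(optionToEnv("low-level-retries"))      // RCLONE_LOW_LEVEL_RETRIES
    fmt.Println(optionToEnv("acd-templink-threshold")) // RCLONE_ACD_TEMPLINK_THRESHOLD
}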
This commit is contained in:
parent 3b0f944e23
commit 0d75d2585f

@@ -31,7 +31,6 @@ import (
    "github.com/ncw/rclone/pacer"
    "github.com/ncw/rclone/rest"
    "github.com/pkg/errors"
-   "github.com/spf13/pflag"
    "golang.org/x/oauth2"
)

@@ -51,7 +50,7 @@ const (
var (
    // Flags
    tempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
-   uploadWaitPerGB = pflag.DurationP("acd-upload-wait-per-gb", "", 180*time.Second, "Additional time per GB to wait after a failed complete upload to see if it appears.")
+   uploadWaitPerGB = fs.DurationP("acd-upload-wait-per-gb", "", 180*time.Second, "Additional time per GB to wait after a failed complete upload to see if it appears.")
    // Description of how to auth for this app
    acdConfig = &oauth2.Config{
        Scopes: []string{"clouddrive:read_all", "clouddrive:write"},

@@ -85,7 +84,7 @@ func init() {
            Help: "Amazon Application Client Secret - leave blank normally.",
        }},
    })
-   pflag.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.")
+   fs.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.")
}

// Fs represents a remote acd server

b2/b2.go (9 lines changed)

@@ -25,7 +25,6 @@ import (
    "github.com/ncw/rclone/pacer"
    "github.com/ncw/rclone/rest"
    "github.com/pkg/errors"
-   "github.com/spf13/pflag"
)

const (

@@ -50,8 +49,8 @@ var (
    minChunkSize = fs.SizeSuffix(100E6)
    chunkSize = fs.SizeSuffix(96 * 1024 * 1024)
    uploadCutoff = fs.SizeSuffix(200E6)
-   b2TestMode = pflag.StringP("b2-test-mode", "", "", "A flag string for X-Bz-Test-Mode header.")
+   b2TestMode = fs.StringP("b2-test-mode", "", "", "A flag string for X-Bz-Test-Mode header.")
-   b2Versions = pflag.BoolP("b2-versions", "", false, "Include old versions in directory listings.")
+   b2Versions = fs.BoolP("b2-versions", "", false, "Include old versions in directory listings.")
    errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
)

@@ -73,8 +72,8 @@ func init() {
        },
    },
    })
-   pflag.VarP(&uploadCutoff, "b2-upload-cutoff", "", "Cutoff for switching to chunked upload")
+   fs.VarP(&uploadCutoff, "b2-upload-cutoff", "", "Cutoff for switching to chunked upload")
-   pflag.VarP(&chunkSize, "b2-chunk-size", "", "Upload chunk size. Must fit in memory.")
+   fs.VarP(&chunkSize, "b2-chunk-size", "", "Upload chunk size. Must fit in memory.")
}

// Fs represents a remote b2 server

cmd/cmd.go (12 lines changed)

@@ -26,13 +26,13 @@ import (
// Globals
var (
    // Flags
-   cpuProfile = pflag.StringP("cpuprofile", "", "", "Write cpu profile to file")
+   cpuProfile = fs.StringP("cpuprofile", "", "", "Write cpu profile to file")
-   memProfile = pflag.String("memprofile", "", "Write memory profile to file")
+   memProfile = fs.StringP("memprofile", "", "", "Write memory profile to file")
-   statsInterval = pflag.DurationP("stats", "", time.Minute*1, "Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable)")
+   statsInterval = fs.DurationP("stats", "", time.Minute*1, "Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable)")
-   dataRateUnit = pflag.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes'/s")
+   dataRateUnit = fs.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes'/s")
    version bool
-   logFile = pflag.StringP("log-file", "", "", "Log everything to this file")
+   logFile = fs.StringP("log-file", "", "", "Log everything to this file")
-   retries = pflag.IntP("retries", "", 3, "Retry operations this many times if they fail")
+   retries = fs.IntP("retries", "", 3, "Retry operations this many times if they fail")
)

// Root is the main rclone command

@@ -21,7 +21,6 @@ import (
    "github.com/ncw/rclone/oauthutil"
    "github.com/ncw/rclone/pacer"
    "github.com/pkg/errors"
-   "github.com/spf13/pflag"
    "golang.org/x/oauth2"
    "golang.org/x/oauth2/google"
    "google.golang.org/api/drive/v2"

@@ -42,10 +41,10 @@ const (
// Globals
var (
    // Flags
-   driveFullList = pflag.BoolP("drive-full-list", "", false, "Use a full listing for directory list. More data but usually quicker. (obsolete)")
+   driveFullList = fs.BoolP("drive-full-list", "", false, "Use a full listing for directory list. More data but usually quicker. (obsolete)")
-   driveAuthOwnerOnly = pflag.BoolP("drive-auth-owner-only", "", false, "Only consider files owned by the authenticated user. Requires drive-full-list.")
+   driveAuthOwnerOnly = fs.BoolP("drive-auth-owner-only", "", false, "Only consider files owned by the authenticated user. Requires drive-full-list.")
-   driveUseTrash = pflag.BoolP("drive-use-trash", "", false, "Send files to the trash instead of deleting permanently.")
+   driveUseTrash = fs.BoolP("drive-use-trash", "", false, "Send files to the trash instead of deleting permanently.")
-   driveExtensions = pflag.StringP("drive-formats", "", defaultExtensions, "Comma separated list of preferred formats for downloading Google docs.")
+   driveExtensions = fs.StringP("drive-formats", "", defaultExtensions, "Comma separated list of preferred formats for downloading Google docs.")
    // chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
    // 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
    chunkSize = fs.SizeSuffix(8 * 1024 * 1024)

@@ -103,8 +102,8 @@ func init() {
            Help: "Google Application Client Secret - leave blank normally.",
        }},
    })
-   pflag.VarP(&driveUploadCutoff, "drive-upload-cutoff", "", "Cutoff for switching to chunked upload")
+   fs.VarP(&driveUploadCutoff, "drive-upload-cutoff", "", "Cutoff for switching to chunked upload")
-   pflag.VarP(&chunkSize, "drive-chunk-size", "", "Upload chunk size. Must a power of 2 >= 256k.")
+   fs.VarP(&chunkSize, "drive-chunk-size", "", "Upload chunk size. Must a power of 2 >= 256k.")

    // Invert mimeTypeToExtension
    extensionToMimeType = make(map[string]string, len(mimeTypeToExtension))

@@ -22,7 +22,6 @@ import (
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/oauthutil"
    "github.com/pkg/errors"
-   "github.com/spf13/pflag"
    "github.com/stacktic/dropbox"
)

@@ -58,7 +57,7 @@ func init() {
            Help: "Dropbox App Secret - leave blank normally.",
        }},
    })
-   pflag.VarP(&uploadChunkSize, "dropbox-chunk-size", "", fmt.Sprintf("Upload chunk size. Max %v.", maxUploadChunkSize))
+   fs.VarP(&uploadChunkSize, "dropbox-chunk-size", "", fmt.Sprintf("Upload chunk size. Max %v.", maxUploadChunkSize))
}

// Configuration helper - called after the user has put in the defaults

fs/config.go (281 lines changed)

@@ -14,7 +14,6 @@ import (
    "io"
    "io/ioutil"
    "log"
-   "math"
    "os"
    "os/user"
    "path"

@@ -26,7 +25,6 @@ import (
    "github.com/Unknwon/goconfig"
    "github.com/pkg/errors"
-   "github.com/spf13/pflag"
    "golang.org/x/crypto/nacl/secretbox"
    "golang.org/x/text/unicode/norm"
)

@@ -47,18 +45,6 @@ const (
    ConfigAutomatic = "config_automatic"
)

-   // SizeSuffix is parsed by flag with k/M/G suffixes
-   type SizeSuffix int64
-
-   // BwTimeSlot represents a bandwidth configuration at a point in time.
-   type BwTimeSlot struct {
-       hhmm      int
-       bandwidth SizeSuffix
-   }
-
-   // BwTimetable contains all configured time slots.
-   type BwTimetable []BwTimeSlot
-
// Global
var (
    // ConfigFile is the config file data structure

@@ -70,35 +56,35 @@ var (
    // Config is the global config
    Config = &ConfigInfo{}
    // Flags
[Every flag definition below changes only its constructor: the pflag. prefix is dropped in favour of the wrappers defined in the new fs/flags.go. Only the resulting lines are shown.]
+   verbose = BoolP("verbose", "v", false, "Print lots more stuff")
+   quiet = BoolP("quiet", "q", false, "Print as little stuff as possible")
+   modifyWindow = DurationP("modify-window", "", time.Nanosecond, "Max time diff to be considered the same")
+   checkers = IntP("checkers", "", 8, "Number of checkers to run in parallel.")
+   transfers = IntP("transfers", "", 4, "Number of file transfers to run in parallel.")
+   configFile = StringP("config", "", ConfigPath, "Config file.")
+   checkSum = BoolP("checksum", "c", false, "Skip based on checksum & size, not mod-time & size")
+   sizeOnly = BoolP("size-only", "", false, "Skip based on size only, not mod-time or checksum")
+   ignoreTimes = BoolP("ignore-times", "I", false, "Don't skip files that match size and time - transfer all files")
+   ignoreExisting = BoolP("ignore-existing", "", false, "Skip all files that exist on destination")
+   dryRun = BoolP("dry-run", "n", false, "Do a trial run with no permanent changes")
+   connectTimeout = DurationP("contimeout", "", 60*time.Second, "Connect timeout")
+   timeout = DurationP("timeout", "", 5*60*time.Second, "IO idle timeout")
+   dumpHeaders = BoolP("dump-headers", "", false, "Dump HTTP headers - may contain sensitive info")
+   dumpBodies = BoolP("dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info")
+   dumpAuth = BoolP("dump-auth", "", false, "Dump HTTP headers with auth info")
+   skipVerify = BoolP("no-check-certificate", "", false, "Do not verify the server SSL certificate. Insecure.")
+   AskPassword = BoolP("ask-password", "", true, "Allow prompt for password for encrypted configuration.")
+   deleteBefore = BoolP("delete-before", "", false, "When synchronizing, delete files on destination before transfering")
+   deleteDuring = BoolP("delete-during", "", false, "When synchronizing, delete files during transfer (default)")
+   deleteAfter = BoolP("delete-after", "", false, "When synchronizing, delete files on destination after transfering")
+   trackRenames = BoolP("track-renames", "", false, "When synchronizing, track file renames and do a server side move if possible")
+   lowLevelRetries = IntP("low-level-retries", "", 10, "Number of low level retries to do.")
+   updateOlder = BoolP("update", "u", false, "Skip files that are newer on the destination.")
+   noGzip = BoolP("no-gzip-encoding", "", false, "Don't set Accept-Encoding: gzip.")
+   maxDepth = IntP("max-depth", "", -1, "If set limits the recursion depth to this.")
+   ignoreSize = BoolP("ignore-size", "", false, "Ignore size when skipping use mod-time or checksum.")
+   noTraverse = BoolP("no-traverse", "", false, "Don't traverse destination file system on copy.")
+   noUpdateModTime = BoolP("no-update-modtime", "", false, "Don't update destination mod-time if files identical.")
    bwLimit BwTimetable

    // Key to use for password en/decryption.

@@ -107,216 +93,9 @@ var (
)

func init() {
-   pflag.VarP(&bwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.")
+   VarP(&bwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.")
}

[Removed below this point: the SizeSuffix methods string, String, Unit, Set and Type, the BwTimetable methods String, Set, LimitAt and Type, and their pflag.Value interface checks. The same code is added unchanged in the new fs/flags.go further down.]

// crypt internals
var (
    cryptKey = []byte{

@@ -4,190 +4,11 @@ import (
    "bytes"
    "crypto/rand"
    "testing"
-   "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

[Removed here: TestSizeSuffixString, TestSizeSuffixUnit, TestSizeSuffixSet, TestBwTimetableSet and TestBwTimetableLimitAt. The same tests are added unchanged in the new fs/flags_test.go further down.]

func TestObscure(t *testing.T) {
    for _, test := range []struct {
        in string

fs/filter.go (29 lines changed)

@@ -13,31 +13,30 @@ import (
    "time"

    "github.com/pkg/errors"
-   "github.com/spf13/pflag"
)

// Global
var (
    // Flags
-   deleteExcluded = pflag.BoolP("delete-excluded", "", false, "Delete files on dest excluded from sync")
+   deleteExcluded = BoolP("delete-excluded", "", false, "Delete files on dest excluded from sync")
-   filterRule = pflag.StringArrayP("filter", "f", nil, "Add a file-filtering rule")
+   filterRule = StringArrayP("filter", "f", nil, "Add a file-filtering rule")
-   filterFrom = pflag.StringArrayP("filter-from", "", nil, "Read filtering patterns from a file")
+   filterFrom = StringArrayP("filter-from", "", nil, "Read filtering patterns from a file")
-   excludeRule = pflag.StringArrayP("exclude", "", nil, "Exclude files matching pattern")
+   excludeRule = StringArrayP("exclude", "", nil, "Exclude files matching pattern")
-   excludeFrom = pflag.StringArrayP("exclude-from", "", nil, "Read exclude patterns from file")
+   excludeFrom = StringArrayP("exclude-from", "", nil, "Read exclude patterns from file")
-   includeRule = pflag.StringArrayP("include", "", nil, "Include files matching pattern")
+   includeRule = StringArrayP("include", "", nil, "Include files matching pattern")
-   includeFrom = pflag.StringArrayP("include-from", "", nil, "Read include patterns from file")
+   includeFrom = StringArrayP("include-from", "", nil, "Read include patterns from file")
-   filesFrom = pflag.StringArrayP("files-from", "", nil, "Read list of source-file names from file")
+   filesFrom = StringArrayP("files-from", "", nil, "Read list of source-file names from file")
-   minAge = pflag.StringP("min-age", "", "", "Don't transfer any file younger than this in s or suffix ms|s|m|h|d|w|M|y")
+   minAge = StringP("min-age", "", "", "Don't transfer any file younger than this in s or suffix ms|s|m|h|d|w|M|y")
-   maxAge = pflag.StringP("max-age", "", "", "Don't transfer any file older than this in s or suffix ms|s|m|h|d|w|M|y")
+   maxAge = StringP("max-age", "", "", "Don't transfer any file older than this in s or suffix ms|s|m|h|d|w|M|y")
    minSize = SizeSuffix(-1)
    maxSize = SizeSuffix(-1)
-   dumpFilters = pflag.BoolP("dump-filters", "", false, "Dump the filters to the output")
+   dumpFilters = BoolP("dump-filters", "", false, "Dump the filters to the output")
-   //cvsExclude = pflag.BoolP("cvs-exclude", "C", false, "Exclude files in the same way CVS does")
+   //cvsExclude = BoolP("cvs-exclude", "C", false, "Exclude files in the same way CVS does")
)

func init() {
-   pflag.VarP(&minSize, "min-size", "", "Don't transfer any file smaller than this in k or suffix b|k|M|G")
+   VarP(&minSize, "min-size", "", "Don't transfer any file smaller than this in k or suffix b|k|M|G")
-   pflag.VarP(&maxSize, "max-size", "", "Don't transfer any file larger than this in k or suffix b|k|M|G")
+   VarP(&maxSize, "max-size", "", "Don't transfer any file larger than this in k or suffix b|k|M|G")
}

// rule is one filter rule

fs/flags.go (new file, 315 lines)

@@ -0,0 +1,315 @@
// This contains helper functions for managing flags

package fs

import (
    "fmt"
    "log"
    "math"
    "os"
    "strconv"
    "strings"
    "time"

    "github.com/pkg/errors"
    "github.com/spf13/pflag"
)

// SizeSuffix is parsed by flag with k/M/G suffixes
type SizeSuffix int64

// Turn SizeSuffix into a string and a suffix
func (x SizeSuffix) string() (string, string) {
    scaled := float64(0)
    suffix := ""
    switch {
    case x < 0:
        return "off", ""
    case x == 0:
        return "0", ""
    case x < 1024:
        scaled = float64(x)
        suffix = ""
    case x < 1024*1024:
        scaled = float64(x) / 1024
        suffix = "k"
    case x < 1024*1024*1024:
        scaled = float64(x) / 1024 / 1024
        suffix = "M"
    default:
        scaled = float64(x) / 1024 / 1024 / 1024
        suffix = "G"
    }
    if math.Floor(scaled) == scaled {
        return fmt.Sprintf("%.0f", scaled), suffix
    }
    return fmt.Sprintf("%.3f", scaled), suffix
}

// String turns SizeSuffix into a string
func (x SizeSuffix) String() string {
    val, suffix := x.string()
    return val + suffix
}

// Unit turns SizeSuffix into a string with a unit
func (x SizeSuffix) Unit(unit string) string {
    val, suffix := x.string()
    if val == "off" {
        return val
    }
    return val + " " + suffix + unit
}

// Set a SizeSuffix
func (x *SizeSuffix) Set(s string) error {
    if len(s) == 0 {
        return errors.New("empty string")
    }
    if strings.ToLower(s) == "off" {
        *x = -1
        return nil
    }
    suffix := s[len(s)-1]
    suffixLen := 1
    var multiplier float64
    switch suffix {
    case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.':
        suffixLen = 0
        multiplier = 1 << 10
    case 'b', 'B':
        multiplier = 1
    case 'k', 'K':
        multiplier = 1 << 10
    case 'm', 'M':
        multiplier = 1 << 20
    case 'g', 'G':
        multiplier = 1 << 30
    default:
        return errors.Errorf("bad suffix %q", suffix)
    }
    s = s[:len(s)-suffixLen]
    value, err := strconv.ParseFloat(s, 64)
    if err != nil {
        return err
    }
    if value < 0 {
        return errors.Errorf("size can't be negative %q", s)
    }
    value *= multiplier
    *x = SizeSuffix(value)
    return nil
}

// Type of the value
func (x *SizeSuffix) Type() string {
    return "int64"
}

// Check it satisfies the interface
var _ pflag.Value = (*SizeSuffix)(nil)

// BwTimeSlot represents a bandwidth configuration at a point in time.
type BwTimeSlot struct {
    hhmm      int
    bandwidth SizeSuffix
}

// BwTimetable contains all configured time slots.
type BwTimetable []BwTimeSlot

// String returns a printable representation of BwTimetable.
func (x BwTimetable) String() string {
    ret := []string{}
    for _, ts := range x {
        ret = append(ret, fmt.Sprintf("%04.4d,%s", ts.hhmm, ts.bandwidth.String()))
    }
    return strings.Join(ret, " ")
}

// Set the bandwidth timetable.
func (x *BwTimetable) Set(s string) error {
    // The timetable is formatted as:
    // "hh:mm,bandwidth hh:mm,banwidth..." ex: "10:00,10G 11:30,1G 18:00,off"
    // If only a single bandwidth identifier is provided, we assume constant bandwidth.

    if len(s) == 0 {
        return errors.New("empty string")
    }
    // Single value without time specification.
    if !strings.Contains(s, " ") && !strings.Contains(s, ",") {
        ts := BwTimeSlot{}
        if err := ts.bandwidth.Set(s); err != nil {
            return err
        }
        ts.hhmm = 0
        *x = BwTimetable{ts}
        return nil
    }

    for _, tok := range strings.Split(s, " ") {
        tv := strings.Split(tok, ",")

        // Format must be HH:MM,BW
        if len(tv) != 2 {
            return errors.Errorf("invalid time/bandwidth specification: %q", tok)
        }

        // Basic timespec sanity checking
        hhmm := tv[0]
        if len(hhmm) != 5 {
            return errors.Errorf("invalid time specification (hh:mm): %q", hhmm)
        }
        hh, err := strconv.Atoi(hhmm[0:2])
        if err != nil {
            return errors.Errorf("invalid hour in time specification %q: %v", hhmm, err)
        }
        if hh < 0 || hh > 23 {
            return errors.Errorf("invalid hour (must be between 00 and 23): %q", hh)
        }
        mm, err := strconv.Atoi(hhmm[3:])
        if err != nil {
            return errors.Errorf("invalid minute in time specification: %q: %v", hhmm, err)
        }
        if mm < 0 || mm > 59 {
            return errors.Errorf("invalid minute (must be between 00 and 59): %q", hh)
        }

        ts := BwTimeSlot{
            hhmm: (hh * 100) + mm,
        }
        // Bandwidth limit for this time slot.
        if err := ts.bandwidth.Set(tv[1]); err != nil {
            return err
        }
        *x = append(*x, ts)
    }
    return nil
}

// LimitAt returns a BwTimeSlot for the time requested.
func (x BwTimetable) LimitAt(tt time.Time) BwTimeSlot {
    // If the timetable is empty, we return an unlimited BwTimeSlot starting at midnight.
    if len(x) == 0 {
        return BwTimeSlot{hhmm: 0, bandwidth: -1}
    }

    hhmm := tt.Hour()*100 + tt.Minute()

    // By default, we return the last element in the timetable. This
    // satisfies two conditions: 1) If there's only one element it
    // will always be selected, and 2) The last element of the table
    // will "wrap around" until overriden by an earlier time slot.
    // there's only one time slot in the timetable.
    ret := x[len(x)-1]

    mindif := 0
    first := true

    // Look for most recent time slot.
    for _, ts := range x {
        // Ignore the past
        if hhmm < ts.hhmm {
            continue
        }
        dif := ((hhmm / 100 * 60) + (hhmm % 100)) - ((ts.hhmm / 100 * 60) + (ts.hhmm % 100))
        if first {
            mindif = dif
            first = false
        }
        if dif <= mindif {
            mindif = dif
            ret = ts
        }
    }

    return ret
}

// Type of the value
func (x BwTimetable) Type() string {
    return "BwTimetable"
}

// Check it satisfies the interface
var _ pflag.Value = (*BwTimetable)(nil)

// optionToEnv converts an option name, eg "ignore-size" into an
// environment name "RCLONE_IGNORE_SIZE"
func optionToEnv(name string) string {
    return "RCLONE_" + strings.ToUpper(strings.Replace(name, "-", "_", -1))
}

// setDefaultFromEnv constructs a name from the flag passed in and
// sets the default from the environment if possible.
func setDefaultFromEnv(name string) {
    key := optionToEnv(name)
    newValue, found := os.LookupEnv(key)
    if found {
        flag := pflag.Lookup(name)
        if flag == nil {
            log.Fatalf("Couldn't find flag %q", name)
        }
        err := flag.Value.Set(newValue)
        if err != nil {
            log.Fatalf("Invalid value for environment variable %q: %v", key, err)
        }
        // log.Printf("Set default for %q from %q to %q (%v)", name, key, newValue, flag.Value)
        flag.DefValue = newValue
    }
}

// StringP defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.StringP
func StringP(name, shorthand string, value string, usage string) (out *string) {
    out = pflag.StringP(name, shorthand, value, usage)
    setDefaultFromEnv(name)
    return out
}

// BoolP defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.BoolP
func BoolP(name, shorthand string, value bool, usage string) (out *bool) {
    out = pflag.BoolP(name, shorthand, value, usage)
    setDefaultFromEnv(name)
    return out
}

// IntP defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.IntP
func IntP(name, shorthand string, value int, usage string) (out *int) {
    out = pflag.IntP(name, shorthand, value, usage)
    setDefaultFromEnv(name)
    return out
}

// DurationP defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.DurationP
func DurationP(name, shorthand string, value time.Duration, usage string) (out *time.Duration) {
    out = pflag.DurationP(name, shorthand, value, usage)
    setDefaultFromEnv(name)
    return out
}

// VarP defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.VarP
func VarP(value pflag.Value, name, shorthand, usage string) {
    pflag.VarP(value, name, shorthand, usage)
    setDefaultFromEnv(name)
}

// StringArrayP defines a flag which can be overridden by an environment variable
//
// It sets one value only - command line flags can be used to set more.
//
// It is a thin wrapper around pflag.StringArrayP
func StringArrayP(name, shorthand string, value []string, usage string) (out *[]string) {
    out = pflag.StringArrayP(name, shorthand, value, usage)
    setDefaultFromEnv(name)
    return out
}
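
A self-contained sketch of how these wrappers behave; the flag name example-checkers, the environment variable RCLONE_EXAMPLE_CHECKERS and the standalone copies of the helpers below are illustrative only, not part of the commit. The environment variable replaces the compiled-in default before pflag.Parse runs, so a flag given explicitly on the command line still wins:

package main

import (
    "fmt"
    "log"
    "os"
    "strings"

    "github.com/spf13/pflag"
)

// setDefaultFromEnv follows the pattern in fs/flags.go: if the matching
// RCLONE_* variable is set, install its value as the flag's new default.
func setDefaultFromEnv(name string) {
    key := "RCLONE_" + strings.ToUpper(strings.Replace(name, "-", "_", -1))
    if newValue, found := os.LookupEnv(key); found {
        flag := pflag.Lookup(name)
        if flag == nil {
            log.Fatalf("Couldn't find flag %q", name)
        }
        if err := flag.Value.Set(newValue); err != nil {
            log.Fatalf("Invalid value for environment variable %q: %v", key, err)
        }
        flag.DefValue = newValue
    }
}

// IntP wraps pflag.IntP in the same way fs.IntP does.
func IntP(name, shorthand string, value int, usage string) *int {
    out := pflag.IntP(name, shorthand, value, usage)
    setDefaultFromEnv(name)
    return out
}

func main() {
    // Simulate the user exporting RCLONE_EXAMPLE_CHECKERS=16 before starting.
    os.Setenv("RCLONE_EXAMPLE_CHECKERS", "16")
    checkers := IntP("example-checkers", "", 8, "Number of checkers to run in parallel.")
    pflag.Parse()
    fmt.Println(*checkers) // 16, unless --example-checkers is passed explicitly
}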

fs/flags_test.go (new file, 187 lines)

@@ -0,0 +1,187 @@
package fs

import (
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestSizeSuffixString(t *testing.T) {
    for _, test := range []struct {
        in   float64
        want string
    }{
        {0, "0"},
        {102, "102"},
        {1024, "1k"},
        {1024 * 1024, "1M"},
        {1024 * 1024 * 1024, "1G"},
        {10 * 1024 * 1024 * 1024, "10G"},
        {10.1 * 1024 * 1024 * 1024, "10.100G"},
        {-1, "off"},
        {-100, "off"},
    } {
        ss := SizeSuffix(test.in)
        got := ss.String()
        assert.Equal(t, test.want, got)
    }
}

func TestSizeSuffixUnit(t *testing.T) {
    for _, test := range []struct {
        in   float64
        want string
    }{
        {0, "0 Bytes"},
        {102, "102 Bytes"},
        {1024, "1 kBytes"},
        {1024 * 1024, "1 MBytes"},
        {1024 * 1024 * 1024, "1 GBytes"},
        {10 * 1024 * 1024 * 1024, "10 GBytes"},
        {10.1 * 1024 * 1024 * 1024, "10.100 GBytes"},
        {-1, "off"},
        {-100, "off"},
    } {
        ss := SizeSuffix(test.in)
        got := ss.Unit("Bytes")
        assert.Equal(t, test.want, got)
    }
}

func TestSizeSuffixSet(t *testing.T) {
    for _, test := range []struct {
        in   string
        want int64
        err  bool
    }{
        {"0", 0, false},
        {"1b", 1, false},
        {"102B", 102, false},
        {"0.1k", 102, false},
        {"0.1", 102, false},
        {"1K", 1024, false},
        {"1", 1024, false},
        {"2.5", 1024 * 2.5, false},
        {"1M", 1024 * 1024, false},
        {"1.g", 1024 * 1024 * 1024, false},
        {"10G", 10 * 1024 * 1024 * 1024, false},
        {"off", -1, false},
        {"OFF", -1, false},
        {"", 0, true},
        {"1p", 0, true},
        {"1.p", 0, true},
        {"1p", 0, true},
        {"-1K", 0, true},
    } {
        ss := SizeSuffix(0)
        err := ss.Set(test.in)
        if test.err {
            require.Error(t, err)
        } else {
            require.NoError(t, err)
        }
        assert.Equal(t, test.want, int64(ss))
    }
}

func TestBwTimetableSet(t *testing.T) {
    for _, test := range []struct {
        in   string
        want BwTimetable
        err  bool
    }{
        {"", BwTimetable{}, true},
        {"0", BwTimetable{BwTimeSlot{hhmm: 0, bandwidth: 0}}, false},
        {"666", BwTimetable{BwTimeSlot{hhmm: 0, bandwidth: 666 * 1024}}, false},
        {"10:20,666", BwTimetable{BwTimeSlot{hhmm: 1020, bandwidth: 666 * 1024}}, false},
        {
            "11:00,333 13:40,666 23:50,10M 23:59,off",
            BwTimetable{
                BwTimeSlot{hhmm: 1100, bandwidth: 333 * 1024},
                BwTimeSlot{hhmm: 1340, bandwidth: 666 * 1024},
                BwTimeSlot{hhmm: 2350, bandwidth: 10 * 1024 * 1024},
                BwTimeSlot{hhmm: 2359, bandwidth: -1},
            },
            false,
        },
        {"bad,bad", BwTimetable{}, true},
        {"bad bad", BwTimetable{}, true},
        {"bad", BwTimetable{}, true},
        {"1000X", BwTimetable{}, true},
        {"2401,666", BwTimetable{}, true},
        {"1061,666", BwTimetable{}, true},
    } {
        tt := BwTimetable{}
        err := tt.Set(test.in)
        if test.err {
            require.Error(t, err)
        } else {
            require.NoError(t, err)
        }
        assert.Equal(t, test.want, tt)
    }
}

func TestBwTimetableLimitAt(t *testing.T) {
    for _, test := range []struct {
        tt   BwTimetable
        now  time.Time
        want BwTimeSlot
    }{
        {
            BwTimetable{},
            time.Date(2017, time.April, 20, 15, 0, 0, 0, time.UTC),
            BwTimeSlot{hhmm: 0, bandwidth: -1},
        },
        {
            BwTimetable{BwTimeSlot{hhmm: 1100, bandwidth: 333 * 1024}},
            time.Date(2017, time.April, 20, 15, 0, 0, 0, time.UTC),
            BwTimeSlot{hhmm: 1100, bandwidth: 333 * 1024},
        },
        {
            BwTimetable{
                BwTimeSlot{hhmm: 1100, bandwidth: 333 * 1024},
                BwTimeSlot{hhmm: 1300, bandwidth: 666 * 1024},
                BwTimeSlot{hhmm: 2301, bandwidth: 1024 * 1024},
                BwTimeSlot{hhmm: 2350, bandwidth: -1},
            },
            time.Date(2017, time.April, 20, 10, 15, 0, 0, time.UTC),
            BwTimeSlot{hhmm: 2350, bandwidth: -1},
        },
        {
            BwTimetable{
                BwTimeSlot{hhmm: 1100, bandwidth: 333 * 1024},
                BwTimeSlot{hhmm: 1300, bandwidth: 666 * 1024},
                BwTimeSlot{hhmm: 2301, bandwidth: 1024 * 1024},
                BwTimeSlot{hhmm: 2350, bandwidth: -1},
            },
            time.Date(2017, time.April, 20, 11, 0, 0, 0, time.UTC),
            BwTimeSlot{hhmm: 1100, bandwidth: 333 * 1024},
        },
        {
            BwTimetable{
                BwTimeSlot{hhmm: 1100, bandwidth: 333 * 1024},
                BwTimeSlot{hhmm: 1300, bandwidth: 666 * 1024},
                BwTimeSlot{hhmm: 2301, bandwidth: 1024 * 1024},
                BwTimeSlot{hhmm: 2350, bandwidth: -1},
            },
            time.Date(2017, time.April, 20, 13, 1, 0, 0, time.UTC),
            BwTimeSlot{hhmm: 1300, bandwidth: 666 * 1024},
        },
        {
            BwTimetable{
                BwTimeSlot{hhmm: 1100, bandwidth: 333 * 1024},
                BwTimeSlot{hhmm: 1300, bandwidth: 666 * 1024},
                BwTimeSlot{hhmm: 2301, bandwidth: 1024 * 1024},
                BwTimeSlot{hhmm: 2350, bandwidth: -1},
            },
            time.Date(2017, time.April, 20, 23, 59, 0, 0, time.UTC),
            BwTimeSlot{hhmm: 2350, bandwidth: -1},
        },
    } {
        slot := test.tt.LimitAt(test.now)
        assert.Equal(t, test.want, slot)
    }
}

@@ -9,11 +9,10 @@ import (
    "syscall"

    "github.com/ncw/rclone/fs"
-   "github.com/spf13/pflag"
)

var (
-   oneFileSystem = pflag.BoolP("one-file-system", "x", false, "Don't cross filesystem boundaries.")
+   oneFileSystem = fs.BoolP("one-file-system", "x", false, "Don't cross filesystem boundaries.")
)

// readDevice turns a valid os.FileInfo into a device number,

@@ -21,7 +21,6 @@ import (
    "github.com/ncw/rclone/pacer"
    "github.com/ncw/rclone/rest"
    "github.com/pkg/errors"
-   "github.com/spf13/pflag"
    "golang.org/x/oauth2"
)

@@ -75,8 +74,8 @@ func init() {
            Help: "Microsoft App Client Secret - leave blank normally.",
        }},
    })
-   pflag.VarP(&chunkSize, "onedrive-chunk-size", "", "Above this size files will be chunked - must be multiple of 320k.")
+   fs.VarP(&chunkSize, "onedrive-chunk-size", "", "Above this size files will be chunked - must be multiple of 320k.")
-   pflag.VarP(&uploadCutoff, "onedrive-upload-cutoff", "", "Cutoff for switching to chunked upload - must be <= 100MB")
+   fs.VarP(&uploadCutoff, "onedrive-upload-cutoff", "", "Cutoff for switching to chunked upload - must be <= 100MB")
}

// Fs represents a remote one drive

s3/s3.go (5 lines changed)

@@ -36,7 +36,6 @@ import (
    "github.com/ncw/rclone/fs"
    "github.com/ncw/swift"
    "github.com/pkg/errors"
-   "github.com/spf13/pflag"
)

// Register with Fs

@@ -210,8 +209,8 @@ const (
// Globals
var (
    // Flags
-   s3ACL = pflag.StringP("s3-acl", "", "", "Canned ACL used when creating buckets and/or storing objects in S3")
+   s3ACL = fs.StringP("s3-acl", "", "", "Canned ACL used when creating buckets and/or storing objects in S3")
-   s3StorageClass = pflag.StringP("s3-storage-class", "", "", "Storage class to use when uploading S3 objects (STANDARD|REDUCED_REDUNDANCY|STANDARD_IA)")
+   s3StorageClass = fs.StringP("s3-storage-class", "", "", "Storage class to use when uploading S3 objects (STANDARD|REDUCED_REDUNDANCY|STANDARD_IA)")
)

// Fs represents a remote s3 server

@@ -14,7 +14,6 @@ import (
    "github.com/ncw/rclone/fs"
    "github.com/ncw/swift"
    "github.com/pkg/errors"
-   "github.com/spf13/pflag"
)

// Constants

@@ -83,7 +82,7 @@ func init() {
        },
    })
    // snet = flag.Bool("swift-snet", false, "Use internal service network") // FIXME not implemented
-   pflag.VarP(&chunkSize, "swift-chunk-size", "", "Above this size files will be chunked into a _segments container.")
+   fs.VarP(&chunkSize, "swift-chunk-size", "", "Above this size files will be chunked into a _segments container.")
}

// Fs represents a remote swift server