// Sync files and directories to and from swift
//
// Nick Craig-Wood <nick@craig-wood.com>
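//
// Example subcommand usage (illustrative only; the binary name
// "swiftsync" is an assumption, and the <source>/<destination>
// placeholders follow the help text of each command below):
//
//	swiftsync copy <source> <destination>
//	swiftsync sync <source> <destination>
//	swiftsync ls [<path>]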

package main

import (
	"flag"
	"fmt"
	"log"
	"os"
	"runtime"
	"runtime/pprof"
	"strings"
	"sync"
	"time"
)

// Globals
var (
	// Flags
	cpuprofile    = flag.String("cpuprofile", "", "Write cpu profile to file")
	snet          = flag.Bool("snet", false, "Use internal service network") // FIXME not implemented
	verbose       = flag.Bool("verbose", false, "Print lots more stuff")
	quiet         = flag.Bool("quiet", false, "Print as little stuff as possible")
	dry_run       = flag.Bool("dry-run", false, "Do a trial run with no permanent changes")
	checkers      = flag.Int("checkers", 8, "Number of checkers to run in parallel.")
	transfers     = flag.Int("transfers", 4, "Number of file transfers to run in parallel.")
	statsInterval = flag.Duration("stats", time.Minute*1, "Interval to print stats")
)

// Read FsObjects on in and send them to out if they need uploading
//
// FIXME potentially doing lots of MD5SUMS at once
func Checker(in, out FsObjectsChan, fdst Fs, wg *sync.WaitGroup) {
	defer wg.Done()
	for src := range in {
		stats.Checking(src)
		dst := fdst.NewFsObject(src.Remote())
		stats.DoneChecking(src)
		if dst == nil {
			FsDebug(src, "Couldn't find local file - download")
			out <- src
			continue
		}

		// Check to see if we can store this
		if !src.Storable() {
			continue
		}
		// Check to see if changed or not
		if Equal(src, dst) {
			FsDebug(src, "Unchanged skipping")
			continue
		}
		out <- src
	}
}

// Read FsObjects on in and copy them
func Copier(in FsObjectsChan, fdst Fs, wg *sync.WaitGroup) {
	defer wg.Done()
	for src := range in {
		stats.Transferring(src)
		Copy(fdst, src)
		stats.DoneTransferring(src)
	}
}

// Copies fsrc into fdst
func CopyFs(fdst, fsrc Fs) {
	err := fdst.Mkdir()
	if err != nil {
		stats.Error()
		log.Fatalf("Failed to make destination: %s", err)
	}
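
	// Pipeline: objects listed from fsrc are checked against fdst by
	// the Checker goroutines, and only those that need uploading are
	// passed on to the Copier goroutines.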
	to_be_checked := fsrc.List()
	to_be_uploaded := make(FsObjectsChan, *transfers)

	var checkerWg sync.WaitGroup
	checkerWg.Add(*checkers)
	for i := 0; i < *checkers; i++ {
		go Checker(to_be_checked, to_be_uploaded, fdst, &checkerWg)
	}

	var copierWg sync.WaitGroup
	copierWg.Add(*transfers)
	for i := 0; i < *transfers; i++ {
		go Copier(to_be_uploaded, fdst, &copierWg)
	}

	log.Printf("Waiting for checks to finish")
	checkerWg.Wait()
	close(to_be_uploaded)
	log.Printf("Waiting for transfers to finish")
	copierWg.Wait()
}

// Delete all the files passed in the channel
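//
// Runs *transfers deleters in parallel and honours the -dry-run flag.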
func DeleteFiles(to_be_deleted FsObjectsChan) {
	var wg sync.WaitGroup
	wg.Add(*transfers)
	for i := 0; i < *transfers; i++ {
		go func() {
			defer wg.Done()
			for dst := range to_be_deleted {
				if *dry_run {
					FsDebug(dst, "Not deleting as -dry-run")
				} else {
					stats.Checking(dst)
					err := dst.Remove()
					stats.DoneChecking(dst)
					if err != nil {
						stats.Error()
						FsLog(dst, "Couldn't delete: %s", err)
					} else {
						FsDebug(dst, "Deleted")
					}
				}
			}
		}()
	}

	log.Printf("Waiting for deletions to finish")
	wg.Wait()
}

// Syncs fsrc into fdst
func Sync(fdst, fsrc Fs) {
	err := fdst.Mkdir()
	if err != nil {
		stats.Error()
		log.Fatalf("Failed to make destination: %s", err)
	}

	log.Printf("Building file list")

	// Read the destination files first
	// FIXME could do this in parallel and make it use less memory
	delFiles := make(map[string]FsObject)
	for dst := range fdst.List() {
		delFiles[dst.Remote()] = dst
	}
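
	// delFiles now holds every object in the destination; names seen in
	// the source are removed below, and whatever is left over gets
	// deleted once the transfers have finished.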

	// Read source files checking them off against dest files
	to_be_checked := make(FsObjectsChan, *transfers)
	go func() {
		for src := range fsrc.List() {
			delete(delFiles, src.Remote())
			to_be_checked <- src
		}
		close(to_be_checked)
	}()

	to_be_uploaded := make(FsObjectsChan, *transfers)

	var checkerWg sync.WaitGroup
	checkerWg.Add(*checkers)
	for i := 0; i < *checkers; i++ {
		go Checker(to_be_checked, to_be_uploaded, fdst, &checkerWg)
	}

	var copierWg sync.WaitGroup
	copierWg.Add(*transfers)
	for i := 0; i < *transfers; i++ {
		go Copier(to_be_uploaded, fdst, &copierWg)
	}

	log.Printf("Waiting for checks to finish")
	checkerWg.Wait()
	close(to_be_uploaded)
	log.Printf("Waiting for transfers to finish")
	copierWg.Wait()

	if stats.errors != 0 {
		log.Printf("Not deleting files as there were IO errors")
		return
	}

	// Delete the spare files
	toDelete := make(FsObjectsChan, *transfers)
	go func() {
		for _, fs := range delFiles {
			toDelete <- fs
		}
		close(toDelete)
	}()
	DeleteFiles(toDelete)
}

// Checks the files in fsrc and fdst according to Size and MD5SUM
func Check(fdst, fsrc Fs) {
	log.Printf("Building file list")

	// Read the destination files first
	// FIXME could do this in parallel and make it use less memory
	dstFiles := make(map[string]FsObject)
	for dst := range fdst.List() {
		dstFiles[dst.Remote()] = dst
	}

	// Read the source files checking them against dstFiles
	// FIXME could do this in parallel and make it use less memory
	srcFiles := make(map[string]FsObject)
	commonFiles := make(map[string][]FsObject)
	for src := range fsrc.List() {
		remote := src.Remote()
		if dst, ok := dstFiles[remote]; ok {
			commonFiles[remote] = []FsObject{dst, src}
			delete(dstFiles, remote)
		} else {
			srcFiles[remote] = src
		}
	}
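
	// At this point dstFiles holds objects present only in fdst,
	// srcFiles those present only in fsrc, and commonFiles the pairs
	// which need their sizes and MD5SUMs compared.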

	log.Printf("Files in %s but not in %s", fdst, fsrc)
	for remote := range dstFiles {
		stats.Error()
		log.Printf("%s", remote)
	}

	log.Printf("Files in %s but not in %s", fsrc, fdst)
	for remote := range srcFiles {
		stats.Error()
		log.Printf("%s", remote)
	}

	checks := make(chan []FsObject, *transfers)
	go func() {
		for _, check := range commonFiles {
			checks <- check
		}
		close(checks)
	}()

	var checkerWg sync.WaitGroup
	checkerWg.Add(*checkers)
	for i := 0; i < *checkers; i++ {
		go func() {
			defer checkerWg.Done()
			for check := range checks {
				dst, src := check[0], check[1]
				stats.Checking(src)
				if src.Size() != dst.Size() {
					stats.DoneChecking(src)
					stats.Error()
					FsLog(src, "Sizes differ")
					continue
				}
				same, err := CheckMd5sums(src, dst)
				stats.DoneChecking(src)
				if err != nil {
					continue
				}
				if !same {
					stats.Error()
					FsLog(src, "Md5sums differ")
				}
				FsDebug(src, "OK")
			}
		}()
	}

	log.Printf("Waiting for checks to finish")
	checkerWg.Wait()
	log.Printf("%d differences found", stats.errors)
}

// List the Fs to stdout
//
// Lists in parallel which may get them out of order
func List(f Fs) {
	in := f.List()
	var wg sync.WaitGroup
	wg.Add(*checkers)
	for i := 0; i < *checkers; i++ {
		go func() {
			defer wg.Done()
			for fs := range in {
				stats.Checking(fs)
				modTime := fs.ModTime()
				stats.DoneChecking(fs)
				fmt.Printf("%9d %19s %s\n", fs.Size(), modTime.Format("2006-01-02 15:04:05"), fs.Remote())
			}
		}()
	}
	wg.Wait()
}

// Lists files in a container
func list(fdst, fsrc Fs) {
	if fdst == nil {
		// FIXME SwiftContainers()
		S3Buckets()
		return
	}
	List(fdst)
}

// Makes a destination directory or container
func mkdir(fdst, fsrc Fs) {
	err := fdst.Mkdir()
	if err != nil {
		stats.Error()
		log.Fatalf("Mkdir failed: %s", err)
	}
}

// Removes a container, but only if it is empty
func rmdir(fdst, fsrc Fs) {
	if *dry_run {
		log.Printf("Not deleting %s as -dry-run", fdst)
	} else {
		err := fdst.Rmdir()
		if err != nil {
			stats.Error()
			log.Fatalf("Rmdir failed: %s", err)
		}
	}
}

// Removes a container and all of its contents
//
// FIXME doesn't delete local directories
func purge(fdst, fsrc Fs) {
	DeleteFiles(fdst.List())
	log.Printf("Deleting path")
	rmdir(fdst, fsrc)
}

type Command struct {
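	// name is the subcommand, help the usage text printed by
	// syntaxError, run the function to execute, and minArgs/maxArgs
	// the allowed number of positional arguments.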
	name             string
	help             string
	run              func(fdst, fsrc Fs)
	minArgs, maxArgs int
}

// checkArgs checks there are enough arguments and prints a message if not
func (cmd *Command) checkArgs(args []string) {
	if len(args) < cmd.minArgs {
		syntaxError()
		fmt.Fprintf(os.Stderr, "Command %s needs %d arguments minimum\n", cmd.name, cmd.minArgs)
		os.Exit(1)
	} else if len(args) > cmd.maxArgs {
		syntaxError()
		fmt.Fprintf(os.Stderr, "Command %s needs %d arguments maximum\n", cmd.name, cmd.maxArgs)
		os.Exit(1)
	}
}

var Commands = []Command{
	{
		"copy",
		`<source> <destination>

        Copy the source to the destination. Doesn't transfer
        unchanged files, testing first by modification time then by
        MD5SUM. Doesn't delete files from the destination.

`,
		CopyFs,
		2, 2,
	},
	{
		"sync",
		`<source> <destination>

        Sync the source to the destination. Doesn't transfer
        unchanged files, testing first by modification time then by
        MD5SUM. Deletes any files that exist in source that don't
        exist in destination. Since this can cause data loss, test
        first with the -dry-run flag.`,
		Sync,
		2, 2,
	},
	{
		"ls",
		`[<path>]

        List the path. If no parameter is supplied then it lists the
        available swift containers.`,
		list,
		0, 1,
	},
	{
		"mkdir",
		`<path>

        Make the path if it doesn't already exist`,
		mkdir,
		1, 1,
	},
	{
		"rmdir",
		`<path>

        Remove the path. Note that you can't remove a path with
        objects in it, use purge for that.`,
		rmdir,
		1, 1,
	},
	{
		"purge",
		`<path>

        Remove the path and all of its contents.`,
		purge,
		1, 1,
	},
	{
		"check",
		`<source> <destination>

        Checks the files in the source and destination match. It
        compares sizes and MD5SUMs and prints a report of files which
        don't match. It doesn't alter the source or destination.`,
		Check,
		2, 2,
	},
	{
		"help",
		`

        This help.`,
		nil,
		0, 0,
	},
}

// syntaxError prints the syntax
func syntaxError() {
	fmt.Fprintf(os.Stderr, `Sync files and directories to and from swift

Syntax: [options] subcommand <parameters> <parameters...>

Subcommands:

`)
	for i := range Commands {
		cmd := &Commands[i]
		fmt.Fprintf(os.Stderr, " %s: %s\n\n", cmd.name, cmd.help)
	}

	fmt.Fprintf(os.Stderr, "Options:\n")
	flag.PrintDefaults()
	fmt.Fprintf(os.Stderr, `
It is only necessary to use a unique prefix of the subcommand, eg 'ch' for 'check'.
`)
}

// Exit with the message
func fatal(message string, args ...interface{}) {
	syntaxError()
	fmt.Fprintf(os.Stderr, message, args...)
	os.Exit(1)
}

func main() {
	flag.Usage = syntaxError
	flag.Parse()
	args := flag.Args()
	runtime.GOMAXPROCS(runtime.NumCPU())

	// Setup profiling if desired
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			stats.Error()
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	if len(args) < 1 {
		fatal("No command supplied\n")
	}

	cmd := strings.ToLower(args[0])
	args = args[1:]

	// Find the command doing a prefix match
	var found *Command
	for i := range Commands {
		command := &Commands[i]
		// exact command name found - use that
		if command.name == cmd {
			found = command
			break
		} else if strings.HasPrefix(command.name, cmd) {
			if found != nil {
				stats.Error()
				log.Fatalf("Not unique - matches multiple commands %q", cmd)
			}
			found = command
		}
	}
	if found == nil {
		stats.Error()
		log.Fatalf("Unknown command %q", cmd)
	}
	found.checkArgs(args)

	// Make source and destination fs
	var fdst, fsrc Fs
	var err error
	if len(args) >= 1 {
		fdst, err = NewFs(args[0])
		if err != nil {
			stats.Error()
			log.Fatal("Failed to create file system: ", err)
		}
	}
	if len(args) >= 2 {
		fsrc, err = NewFs(args[1])
		if err != nil {
			stats.Error()
			log.Fatal("Failed to create destination file system: ", err)
		}
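		// args[0] is the source and args[1] the destination, so swap
		// the two Fs values created above into that order.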
		fsrc, fdst = fdst, fsrc
	}

	// Print the stats every statsInterval
	go func() {
		ch := time.Tick(*statsInterval)
		for {
			<-ch
			stats.Log()
		}
	}()

	// Run the actual command
	if found.run != nil {
		found.run(fdst, fsrc)
		fmt.Println(stats)
		if stats.errors > 0 {
			os.Exit(1)
		}
		os.Exit(0)
	} else {
		syntaxError()
	}
}