zrepl/cmd/main.go

package main

import (
	"fmt"

	"github.com/urfave/cli"
	"github.com/zrepl/zrepl/jobrun"
	"github.com/zrepl/zrepl/rpc"
	"github.com/zrepl/zrepl/sshbytestream"
	"github.com/zrepl/zrepl/zfs"
	"golang.org/x/sys/unix"

	"io"
	"log"
	"net/http"
	_ "net/http/pprof"
	"os"
	"runtime/debug"
	"sync"
	"time"
)
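
// Logger is the minimal logging interface used by the commands in this file.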
type Logger interface {
	Printf(format string, v ...interface{})
}
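
// Globals shared by the CLI commands below.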
var conf Config
var runner *jobrun.JobRunner

var logFlags int = log.LUTC | log.Ldate | log.Ltime
var logOut io.Writer
var defaultLog Logger

func main() {
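
	// Recover from panics anywhere below, log the stack trace and the
	// recovered value, and exit with a non-zero status.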
	defer func() {
		e := recover()
		if e != nil {
			defaultLog.Printf("panic:\n%s\n\n", debug.Stack())
			defaultLog.Printf("error: %T %s", e, e)
			os.Exit(1)
		}
	}()

	app := cli.NewApp()

	app.Name = "zrepl"
	app.Usage = "replicate zfs datasets"
	app.EnableBashCompletion = true
	app.Flags = []cli.Flag{
		cli.StringFlag{Name: "config"},
		cli.StringFlag{Name: "logfile"},
		cli.StringFlag{Name: "debug.pprof.http"},
	}

	app.Before = func(c *cli.Context) (err error) {

		// Logging: write to the logfile if one is given, and redirect
		// stderr to it so that output sent to stderr ends up there as well.
		if c.GlobalIsSet("logfile") {
			var logFile *os.File
			logFile, err = os.OpenFile(c.String("logfile"), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
			if err != nil {
				return
			}
			if err = unix.Dup2(int(logFile.Fd()), int(os.Stderr.Fd())); err != nil {
				logFile.WriteString(fmt.Sprintf("error duping logfile to stderr: %s\n", err))
				return
			}
			logOut = logFile
		} else {
			logOut = os.Stderr
		}
		defaultLog = log.New(logOut, "", logFlags)

		// Profiling: expose the net/http/pprof handlers if requested
		if c.GlobalIsSet("debug.pprof.http") {
			go func() {
				http.ListenAndServe(c.GlobalString("debug.pprof.http"), nil)
			}()
		}

		// Config
		if !c.GlobalIsSet("config") {
			return cli.NewExitError("config flag not set", 2)
		}
		if conf, err = ParseConfig(c.GlobalString("config")); err != nil {
			return cli.NewExitError(err, 2)
		}

		jobrunLogger := log.New(os.Stderr, "jobrun ", logFlags)
		runner = jobrun.NewJobRunner(jobrunLogger)
		return
	}

	app.Commands = []cli.Command{
		{
			Name:    "stdinserver",
			Aliases: []string{"s"},
			Usage:   "start in stdin server mode (from authorized keys)",
			Flags: []cli.Flag{
				cli.StringFlag{Name: "identity"},
			},
			Action: cmdStdinServer,
		},
		{
			Name:    "run",
			Aliases: []string{"r"},
			Usage:   "do replication",
			Action:  cmdRun,
			Flags: []cli.Flag{
				cli.StringFlag{Name: "job"},
			},
		},
	}

	app.Run(os.Args)
}
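
// cmdStdinServer serves byte-stream RPC on the stdin/stdout pair set up by
// sshbytestream.Incoming (stdin server mode, intended to be invoked from an
// SSH authorized_keys command). The identity flag selects the connecting
// client's sink mapping and pull ACL from the config.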
func cmdStdinServer(c *cli.Context) (err error) {

	if !c.IsSet("identity") {
		return cli.NewExitError("identity flag not set", 2)
	}
	identity := c.String("identity")

	var sshByteStream io.ReadWriteCloser
	if sshByteStream, err = sshbytestream.Incoming(); err != nil {
		return
	}

	// findMapping returns the dataset mapping configured for the given
	// client identity, or nil if there is none.
	findMapping := func(cm []ClientMapping, identity string) zfs.DatasetMapping {
		for i := range cm {
			if cm[i].From == identity {
				return cm[i].Mapping
			}
		}
		return nil
	}
	sinkMapping := func(identity string) (sink zfs.DatasetMapping, err error) {
		if sink = findMapping(conf.Sinks, identity); sink == nil {
			return nil, fmt.Errorf("could not find sink for dataset")
		}
		return
	}

	sinkLogger := log.New(logOut, fmt.Sprintf("sink[%s] ", identity), logFlags)
	handler := Handler{
		Logger:          sinkLogger,
		SinkMappingFunc: sinkMapping,
		PullACL:         findMapping(conf.PullACLs, identity),
	}

	if err = rpc.ListenByteStreamRPC(sshByteStream, identity, handler, sinkLogger); err != nil {
		err = cli.NewExitError(err, 1)
		defaultLog.Printf("ListenByteStreamRPC error: %#v\n", err)
	}
	return
}
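
// cmdRun starts the job runner and registers one replication job per
// configured pull and push. If the job flag is given, only the job with that
// name is registered. The process then runs until killed, logging errors
// reported by jobs.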
func cmdRun(c *cli.Context) error {

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		runner.Start()
	}()

	jobs := make([]jobrun.Job, len(conf.Pulls)+len(conf.Pushs))
	i := 0
	for _, pull := range conf.Pulls {
		pull := pull // capture the loop variable for the closure below
		jobs[i] = jobrun.Job{
			Name:           fmt.Sprintf("pull.%d", i),
			RepeatStrategy: pull.RepeatStrategy,
			RunFunc: func(log jobrun.Logger) error {
				log.Printf("doing pull: %v", pull)
				return jobPull(pull, c, log)
			},
		}
		i++
	}
	for _, push := range conf.Pushs {
		push := push // capture the loop variable for the closure below
		jobs[i] = jobrun.Job{
			Name:           fmt.Sprintf("push.%d", i),
			RepeatStrategy: push.RepeatStrategy,
			RunFunc: func(log jobrun.Logger) error {
				log.Printf("doing push: %v", push)
				return jobPush(push, c, log)
			},
		}
		i++
	}

	for _, j := range jobs {
		if c.IsSet("job") {
			if c.String("job") == j.Name {
				runner.AddJob(j)
			}
			continue
		}
		runner.AddJob(j)
	}

	// Log errors reported by jobs; this loop never returns.
	for {
		select {
		case job := <-runner.NotificationChan():
			log.Printf("job %s reported error: %v\n", job.Name, job.LastError)
		}
	}

	wg.Wait()

	return nil
}
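
// jobPull connects to the pull source via the configured transport and
// replicates the datasets selected by pull.Mapping using doPull.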
func jobPull(pull Pull, c *cli.Context, log jobrun.Logger) (err error) {

	if lt, ok := pull.From.Transport.(LocalTransport); ok {
		lt.SetHandler(Handler{
			Logger:  log,
			PullACL: pull.Mapping,
		})
		pull.From.Transport = lt
		log.Printf("fixing up local transport: %#v", pull.From.Transport)
	}

	var remote rpc.RPCRequester
	if remote, err = pull.From.Transport.Connect(log); err != nil {
		return
	}
	defer closeRPCWithTimeout(log, remote, time.Second*10, "")

	return doPull(PullContext{remote, log, pull.Mapping, pull.InitialReplPolicy})
}
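
// jobPush connects to the push target and issues a PullMeRequest, i.e. it
// asks the remote side to pull from us; the local Handler, with push.Filter
// as its pull ACL, serves the remote's requests.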
func jobPush(push Push, c *cli.Context, log jobrun.Logger) (err error) {
	if _, ok := push.To.Transport.(LocalTransport); ok {
		panic("no support for local pushes")
	}

	var remote rpc.RPCRequester
	if remote, err = push.To.Transport.Connect(log); err != nil {
		return err
	}
	defer closeRPCWithTimeout(log, remote, time.Second*10, "")

	log.Printf("building handler for PullMeRequest")
	handler := Handler{
		Logger:          log,
		PullACL:         push.Filter,
		SinkMappingFunc: nil, // no need for that in the handler for PullMe
	}
	log.Printf("handler: %#v", handler)

	r := rpc.PullMeRequest{
		InitialReplPolicy: push.InitialReplPolicy,
	}
	log.Printf("doing PullMeRequest: %#v", r)

	if err = remote.PullMeRequest(r, handler); err != nil {
		log.Printf("PullMeRequest failed: %s", err)
		return
	}

	log.Printf("push job finished")
	return
}