package zfs

import (
	"bufio"
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os/exec"
	"strings"

	"github.com/zrepl/zrepl/util"
)
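
// DatasetPath is a ZFS dataset path (pool/filesystem/...), stored as its
// individual '/'-separated components.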
type DatasetPath struct {
	comps []string
}

func (p *DatasetPath) ToString() string {
	return strings.Join(p.comps, "/")
}

func (p *DatasetPath) Empty() bool {
	return len(p.comps) == 0
}

func (p *DatasetPath) Extend(extend *DatasetPath) {
	p.comps = append(p.comps, extend.comps...)
}

func (p *DatasetPath) HasPrefix(prefix *DatasetPath) bool {
	if len(prefix.comps) > len(p.comps) {
		return false
	}
	for i := range prefix.comps {
		if prefix.comps[i] != p.comps[i] {
			return false
		}
	}
	return true
}
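
// TrimPrefix removes prefix from the front of p; p is left unchanged
// if prefix is not actually a prefix of p.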
func (p *DatasetPath) TrimPrefix(prefix *DatasetPath) {
	if !p.HasPrefix(prefix) {
		return
	}
	prelen := len(prefix.comps)
	newlen := len(p.comps) - prelen
	oldcomps := p.comps
	p.comps = make([]string, newlen)
	for i := 0; i < newlen; i++ {
		p.comps[i] = oldcomps[prelen+i]
	}
}
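
// TrimNPrefixComps removes the first n path components from p,
// or all of them if p has fewer than n components.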
func (p *DatasetPath) TrimNPrefixComps(n int) {
	if len(p.comps) < n {
		n = len(p.comps)
	}
	if n == 0 {
		return
	}
	p.comps = p.comps[n:]
}

func (p *DatasetPath) Equal(q *DatasetPath) bool {
	if len(p.comps) != len(q.comps) {
		return false
	}
	for i := range p.comps {
		if p.comps[i] != q.comps[i] {
			return false
		}
	}
	return true
}

func (p *DatasetPath) Length() int {
	return len(p.comps)
}

func (p *DatasetPath) Copy() (c *DatasetPath) {
	c = &DatasetPath{}
	c.comps = make([]string, len(p.comps))
	copy(c.comps, p.comps)
	return
}

func (p *DatasetPath) MarshalJSON() ([]byte, error) {
	return json.Marshal(p.comps)
}

func (p *DatasetPath) UnmarshalJSON(b []byte) error {
	p.comps = make([]string, 0)
	return json.Unmarshal(b, &p.comps)
}
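
// NewDatasetPath parses s into a DatasetPath. The empty string yields the
// empty dataset path; otherwise s must not contain any of the forbidden
// characters and must not end with a '/'.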
func NewDatasetPath(s string) (p *DatasetPath, err error) {
	p = &DatasetPath{}
	if s == "" {
		p.comps = make([]string, 0)
		return p, nil // the empty dataset path
	}
	const FORBIDDEN = "@#|\t <>*"
	if strings.ContainsAny(s, FORBIDDEN) { // TODO space may be a bit too restrictive...
		err = fmt.Errorf("contains forbidden characters (any of '%s')", FORBIDDEN)
		return
	}
	p.comps = strings.Split(s, "/")
	if p.comps[len(p.comps)-1] == "" {
		err = fmt.Errorf("must not end with a '/'")
		return
	}
	return
}
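
// toDatasetPath is a convenience wrapper around NewDatasetPath that panics
// if s is not a valid dataset path.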
func toDatasetPath(s string) *DatasetPath {
	p, err := NewDatasetPath(s)
	if err != nil {
		panic(err)
	}
	return p
}
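
// ZFSError is returned when the zfs binary exits with a non-zero status;
// it carries the captured stderr output alongside the error from Wait.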
type ZFSError struct {
	Stderr  []byte
	WaitErr error
}

func (e ZFSError) Error() string {
	return fmt.Sprintf("zfs exited with error: %s", e.WaitErr.Error())
}

var ZFS_BINARY = "zfs"
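
// ZFSList runs `zfs list -H -p -o <properties>` with the given additional
// arguments and returns one slice of property values per output line.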
func ZFSList(properties []string, zfsArgs ...string) (res [][]string, err error) {
	args := make([]string, 0, 4+len(zfsArgs))
	args = append(args,
		"list", "-H", "-p",
		"-o", strings.Join(properties, ","))
	args = append(args, zfsArgs...)

	cmd := exec.Command(ZFS_BINARY, args...)

	var stdout io.Reader
	stderr := bytes.NewBuffer(make([]byte, 0, 1024))
	cmd.Stderr = stderr

	if stdout, err = cmd.StdoutPipe(); err != nil {
		return
	}

	if err = cmd.Start(); err != nil {
		return
	}

	s := bufio.NewScanner(stdout)
	buf := make([]byte, 1024)
	s.Buffer(buf, 0)

	res = make([][]string, 0)
	for s.Scan() {
		fields := strings.SplitN(s.Text(), "\t", len(properties))
		if len(fields) != len(properties) {
			err = errors.New("unexpected output")
			return
		}
		res = append(res, fields)
	}

	if waitErr := cmd.Wait(); waitErr != nil {
		err := ZFSError{
			Stderr:  stderr.Bytes(),
			WaitErr: waitErr,
		}
		return nil, err
	}
	return
}
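
// ZFSSend invokes `zfs send` for the given filesystem and returns the send
// stream as an io.Reader. If to is nil, a full send of from is performed;
// otherwise an incremental send (-i) from from to to.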
func ZFSSend(fs *DatasetPath, from, to *FilesystemVersion) (stream io.Reader, err error) {
	args := make([]string, 0)
	args = append(args, "send")

	if to == nil { // Initial
		args = append(args, from.ToAbsPath(fs))
	} else {
		args = append(args, "-i", from.ToAbsPath(fs), to.ToAbsPath(fs))
	}

	stream, err = util.RunIOCommand(ZFS_BINARY, args...)
	return
}
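
// ZFSRecv pipes stream into `zfs recv` for the given filesystem, passing
// additionalArgs before the filesystem name.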
func ZFSRecv(fs *DatasetPath, stream io.Reader, additionalArgs ...string) (err error) {
	args := make([]string, 0)
	args = append(args, "recv")
	if len(additionalArgs) > 0 {
		args = append(args, additionalArgs...)
	}
	args = append(args, fs.ToString())

	cmd := exec.Command(ZFS_BINARY, args...)

	stderr := bytes.NewBuffer(make([]byte, 0, 1024))
	cmd.Stderr = stderr

	// TODO report bug upstream
	// Setup an unused stdout buffer.
	// Otherwise, ZoL v0.6.5.9-1 3.16.0-4-amd64 writes the following error to stderr and exits with code 1
	//   cannot receive new filesystem stream: invalid backup stream
	stdout := bytes.NewBuffer(make([]byte, 0, 1024))
	cmd.Stdout = stdout

	cmd.Stdin = stream

	if err = cmd.Start(); err != nil {
		return
	}

	if err = cmd.Wait(); err != nil {
		err = ZFSError{
			Stderr:  stderr.Bytes(),
			WaitErr: err,
		}
		return
	}

	return nil
}
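
// ZFSSet sets the property prop to val on the given filesystem. prop must
// not contain '=', which is the delimiter between property name and value.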
func ZFSSet(fs *DatasetPath, prop, val string) (err error) {
	if strings.ContainsRune(prop, '=') {
		panic("prop contains rune '=' which is the delimiter between property name and value")
	}

	cmd := exec.Command(ZFS_BINARY, "set", fmt.Sprintf("%s=%s", prop, val), fs.ToString())

	stderr := bytes.NewBuffer(make([]byte, 0, 1024))
	cmd.Stderr = stderr

	if err = cmd.Start(); err != nil {
		return err
	}

	if err = cmd.Wait(); err != nil {
		err = ZFSError{
			Stderr:  stderr.Bytes(),
			WaitErr: err,
		}
	}

	return
}
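
// ZFSDestroy runs `zfs destroy` on the given dataset name
// (filesystem or snapshot).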
func ZFSDestroy(dataset string) (err error) {
	cmd := exec.Command(ZFS_BINARY, "destroy", dataset)

	stderr := bytes.NewBuffer(make([]byte, 0, 1024))
	cmd.Stderr = stderr

	if err = cmd.Start(); err != nil {
		return err
	}

	if err = cmd.Wait(); err != nil {
		err = ZFSError{
			Stderr:  stderr.Bytes(),
			WaitErr: err,
		}
	}

	return
}
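
// ZFSSnapshot creates the snapshot fs@name; if recursive is set, snapshots
// of all descendant datasets are created as well (zfs snapshot -r).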
func ZFSSnapshot(fs *DatasetPath, name string, recursive bool) (err error) {
	snapname := fmt.Sprintf("%s@%s", fs.ToString(), name)

	args := []string{"snapshot"}
	if recursive {
		// also snapshot all descendant datasets
		args = append(args, "-r")
	}
	args = append(args, snapname)

	cmd := exec.Command(ZFS_BINARY, args...)

	stderr := bytes.NewBuffer(make([]byte, 0, 1024))
	cmd.Stderr = stderr

	if err = cmd.Start(); err != nil {
		return err
	}

	if err = cmd.Wait(); err != nil {
		err = ZFSError{
			Stderr:  stderr.Bytes(),
			WaitErr: err,
		}
	}

	return
}