package zfs

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os/exec"
	"regexp"
	"strconv"
	"strings"

	"github.com/problame/go-rwccmd"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/zrepl/zrepl/util"
)

type DatasetPath struct {
	comps []string
}

func (p *DatasetPath) ToString() string {
	return strings.Join(p.comps, "/")
}

func (p *DatasetPath) Empty() bool {
	return len(p.comps) == 0
}

func (p *DatasetPath) Extend(extend *DatasetPath) {
	p.comps = append(p.comps, extend.comps...)
}

func (p *DatasetPath) HasPrefix(prefix *DatasetPath) bool {
	if len(prefix.comps) > len(p.comps) {
		return false
	}
	for i := range prefix.comps {
		if prefix.comps[i] != p.comps[i] {
			return false
		}
	}
	return true
}

func (p *DatasetPath) TrimPrefix(prefix *DatasetPath) {
	if !p.HasPrefix(prefix) {
		return
	}
	prelen := len(prefix.comps)
	newlen := len(p.comps) - prelen
	oldcomps := p.comps
	p.comps = make([]string, newlen)
	for i := 0; i < newlen; i++ {
		p.comps[i] = oldcomps[prelen+i]
	}
}

func (p *DatasetPath) TrimNPrefixComps(n int) {
	if len(p.comps) < n {
		n = len(p.comps)
	}
	if n == 0 {
		return
	}
	p.comps = p.comps[n:]
}

func (p DatasetPath) Equal(q *DatasetPath) bool {
	if len(p.comps) != len(q.comps) {
		return false
	}
	for i := range p.comps {
		if p.comps[i] != q.comps[i] {
			return false
		}
	}
	return true
}

func (p *DatasetPath) Length() int {
	return len(p.comps)
}

func (p *DatasetPath) Copy() (c *DatasetPath) {
	c = &DatasetPath{}
	c.comps = make([]string, len(p.comps))
	copy(c.comps, p.comps)
	return
}

func (p *DatasetPath) MarshalJSON() ([]byte, error) {
	return json.Marshal(p.comps)
}

func (p *DatasetPath) UnmarshalJSON(b []byte) error {
	p.comps = make([]string, 0)
	return json.Unmarshal(b, &p.comps)
}

func NewDatasetPath(s string) (p *DatasetPath, err error) {
	p = &DatasetPath{}
	if s == "" {
		p.comps = make([]string, 0)
		return p, nil // the empty dataset path
	}
	const FORBIDDEN = "@#|\t<>*"
	/* Documentation of allowed characters in zfs names:
	https://docs.oracle.com/cd/E19253-01/819-5461/gbcpt/index.html
	Space is missing in the Oracle list, but according to
	https://github.com/zfsonlinux/zfs/issues/439
	there is evidence that it was intentionally allowed
	*/
	if strings.ContainsAny(s, FORBIDDEN) {
		err = fmt.Errorf("contains forbidden characters (any of '%s')", FORBIDDEN)
		return
	}
	p.comps = strings.Split(s, "/")
	if p.comps[len(p.comps)-1] == "" {
		err = fmt.Errorf("must not end with a '/'")
		return
	}
	return
}
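
// Example (sketch; the dataset names are illustrative):
//
//	p, _ := NewDatasetPath("tank/backups/www")
//	prefix, _ := NewDatasetPath("tank/backups")
//	p.HasPrefix(prefix) // true
//	p.TrimPrefix(prefix)
//	p.ToString() // "www"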

func toDatasetPath(s string) *DatasetPath {
	p, err := NewDatasetPath(s)
	if err != nil {
		panic(err)
	}
	return p
}

type ZFSError struct {
	Stderr  []byte
	WaitErr error
}

func (e ZFSError) Error() string {
	return fmt.Sprintf("zfs exited with error: %s\nstderr:\n%s", e.WaitErr.Error(), e.Stderr)
}

var ZFS_BINARY string = "zfs"

func ZFSList(properties []string, zfsArgs ...string) (res [][]string, err error) {

	args := make([]string, 0, 4+len(zfsArgs))
	args = append(args,
		"list", "-H", "-p",
		"-o", strings.Join(properties, ","))
	args = append(args, zfsArgs...)

	cmd := exec.Command(ZFS_BINARY, args...)

	var stdout io.Reader
	stderr := bytes.NewBuffer(make([]byte, 0, 1024))
	cmd.Stderr = stderr

	if stdout, err = cmd.StdoutPipe(); err != nil {
		return
	}

	if err = cmd.Start(); err != nil {
		return
	}

	s := bufio.NewScanner(stdout)
	buf := make([]byte, 1024)
	s.Buffer(buf, 0)

	res = make([][]string, 0)

	for s.Scan() {
		fields := strings.SplitN(s.Text(), "\t", len(properties))

		if len(fields) != len(properties) {
			err = errors.New("unexpected output")
			return
		}

		res = append(res, fields)
	}

	if waitErr := cmd.Wait(); waitErr != nil {
		err := ZFSError{
			Stderr:  stderr.Bytes(),
			WaitErr: waitErr,
		}
		return nil, err
	}
	return
}

type ZFSListResult struct {
	Fields []string
	Err    error
}

// ZFSListChan executes `zfs list` and sends the results to the `out` channel.
// The `out` channel is always closed by ZFSListChan:
// If an error occurs, it is closed after sending a result with the Err field set.
// If no error occurs, it is just closed.
// If the operation is cancelled via context, the channel is just closed.
//
// However, if callers neither drain `out` nor cancel via `ctx`, the zfs child
// process leaks: it either keeps running because its IO is pending, or remains
// as a zombie.
func ZFSListChan(ctx context.Context, out chan ZFSListResult, properties []string, zfsArgs ...string) {
	defer close(out)

	args := make([]string, 0, 4+len(zfsArgs))
	args = append(args,
		"list", "-H", "-p",
		"-o", strings.Join(properties, ","))
	args = append(args, zfsArgs...)

	sendResult := func(fields []string, err error) (done bool) {
		select {
		case <-ctx.Done():
			return true
		case out <- ZFSListResult{fields, err}:
			return false
		}
	}

	cmd, err := rwccmd.CommandContext(ctx, ZFS_BINARY, args, []string{})
	if err != nil {
		sendResult(nil, err)
		return
	}
	if err = cmd.Start(); err != nil {
		sendResult(nil, err)
		return
	}
	defer cmd.Close()

	s := bufio.NewScanner(cmd)
	buf := make([]byte, 1024) // max line length
	s.Buffer(buf, 0)

	for s.Scan() {
		fields := strings.SplitN(s.Text(), "\t", len(properties))
		if len(fields) != len(properties) {
			sendResult(nil, errors.New("unexpected output"))
			return
		}
		if sendResult(fields, nil) {
			return
		}
	}
	if s.Err() != nil {
		sendResult(nil, s.Err())
	}
}
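
// Example usage of ZFSListChan (sketch; the property list and the zfs list
// arguments are illustrative):
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	out := make(chan ZFSListResult)
//	go ZFSListChan(ctx, out, []string{"name", "creation"}, "-r", "-t", "filesystem")
//	for r := range out {
//		if r.Err != nil {
//			// handle the error; ZFSListChan closes out afterwards
//			break
//		}
//		// r.Fields[0] is the dataset name, r.Fields[1] its creation time
//	}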

func validateRelativeZFSVersion(s string) error {
	if len(s) <= 1 {
		return errors.New("version must start with a delimiter char followed by at least one character")
	}
	if !(s[0] == '#' || s[0] == '@') {
		return errors.New("version name starts with invalid delimiter char")
	}
	// FIXME whitespace check...
	return nil
}

func validateZFSFilesystem(fs string) error {
	if len(fs) < 1 {
		return errors.New("filesystem path must have length > 0")
	}
	return nil
}

func absVersion(fs, v string) (full string, err error) {
	if err := validateZFSFilesystem(fs); err != nil {
		return "", err
	}
	if err := validateRelativeZFSVersion(v); err != nil {
		return "", err
	}
	return fmt.Sprintf("%s%s", fs, v), nil
}
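
// For example (with illustrative names), absVersion("tank/ds", "@snap1") returns
// "tank/ds@snap1", and absVersion("tank/ds", "#book1") returns "tank/ds#book1".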

func ZFSSend(fs string, from, to string) (stream io.ReadCloser, err error) {

	fromV, err := absVersion(fs, from)
	if err != nil {
		return nil, err
	}

	toV := ""
	if to != "" {
		toV, err = absVersion(fs, to)
		if err != nil {
			return nil, err
		}
	}

	args := make([]string, 0)
	args = append(args, "send")

	if toV == "" { // Initial
		args = append(args, fromV)
	} else {
		args = append(args, "-i", fromV, toV)
	}

	stream, err = util.RunIOCommand(ZFS_BINARY, args...)

	return
}

var BookmarkSizeEstimationNotSupported error = fmt.Errorf("size estimation is not supported for bookmarks")

// May return BookmarkSizeEstimationNotSupported as err if from is a bookmark.
func ZFSSendDry(fs string, from, to string) (size int64, err error) {

	fromV, err := absVersion(fs, from)
	if err != nil {
		return 0, err
	}

	toV := ""
	if to != "" {
		toV, err = absVersion(fs, to)
		if err != nil {
			return 0, err
		}
	}

	if strings.Contains(fromV, "#") {
		/* TODO:
		 * ZFS at the time of writing does not support dry-run send because size-estimation
		 * uses fromSnap's deadlist. However, for a bookmark, that deadlist no longer exists.
		 * Redacted send & recv will bring this functionality, see
		 * https://github.com/openzfs/openzfs/pull/484
		 */
		return 0, BookmarkSizeEstimationNotSupported
	}

	args := make([]string, 0)
	args = append(args, "send", "-n", "-v", "-P")

	if toV == "" { // Initial
		args = append(args, fromV)
	} else {
		args = append(args, "-i", fromV, toV)
	}

	cmd := exec.Command(ZFS_BINARY, args...)
	output, err := cmd.CombinedOutput()
	if err != nil {
		return 0, err
	}
	o := string(output)
	lines := strings.Split(o, "\n")
	if len(lines) < 2 {
		return 0, errors.New("zfs send -n did not return the expected number of lines")
	}
	fields := strings.Fields(lines[1])
	if len(fields) != 2 {
		return 0, errors.New("zfs send -n returned unexpected output")
	}

	size, err = strconv.ParseInt(fields[1], 10, 64)
	return size, err
}
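
// Note: ZFSSendDry parses the output of `zfs send -n -v -P` and expects the
// second line to consist of exactly two fields, with the size estimate in bytes
// as the second field, roughly like this (illustrative):
//
//	full	pool/fs@snap	10518528
//	size	10518528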

func ZFSRecv(fs string, stream io.Reader, additionalArgs ...string) (err error) {

	if err := validateZFSFilesystem(fs); err != nil {
		return err
	}

	args := make([]string, 0)
	args = append(args, "recv")
	if len(additionalArgs) > 0 {
		args = append(args, additionalArgs...)
	}
	args = append(args, fs)

	cmd := exec.Command(ZFS_BINARY, args...)

	stderr := bytes.NewBuffer(make([]byte, 0, 1024))
	cmd.Stderr = stderr

	// TODO report bug upstream
	// Setup an unused stdout buffer.
	// Otherwise, ZoL v0.6.5.9-1 3.16.0-4-amd64 writes the following error to stderr and exits with code 1
	//   cannot receive new filesystem stream: invalid backup stream
	stdout := bytes.NewBuffer(make([]byte, 0, 1024))
	cmd.Stdout = stdout

	cmd.Stdin = stream

	if err = cmd.Start(); err != nil {
		return
	}

	if err = cmd.Wait(); err != nil {
		err = ZFSError{
			Stderr:  stderr.Bytes(),
			WaitErr: err,
		}
		return
	}

	return nil
}

func ZFSRecvWriter(fs *DatasetPath, additionalArgs ...string) (io.WriteCloser, error) {

	args := make([]string, 0)
	args = append(args, "recv")
	if len(additionalArgs) > 0 {
		args = append(args, additionalArgs...)
	}
	args = append(args, fs.ToString())

	cmd, err := util.NewIOCommand(ZFS_BINARY, args, 1024)
	if err != nil {
		return nil, err
	}

	if err = cmd.Start(); err != nil {
		return nil, err
	}

	return cmd.Stdin, nil
}

type ZFSProperties struct {
	m map[string]string
}

func NewZFSProperties() *ZFSProperties {
	return &ZFSProperties{make(map[string]string, 4)}
}

func (p *ZFSProperties) Set(key, val string) {
	p.m[key] = val
}

func (p *ZFSProperties) Get(key string) string {
	return p.m[key]
}

func (p *ZFSProperties) appendArgs(args *[]string) (err error) {
	for prop, val := range p.m {
		if strings.Contains(prop, "=") {
			return errors.New("prop contains rune '=' which is the delimiter between property name and value")
		}
		*args = append(*args, fmt.Sprintf("%s=%s", prop, val))
	}
	return nil
}

func ZFSSet(fs *DatasetPath, props *ZFSProperties) (err error) {
	return zfsSet(fs.ToString(), props)
}

func zfsSet(path string, props *ZFSProperties) (err error) {
	args := make([]string, 0)
	args = append(args, "set")
	err = props.appendArgs(&args)
	if err != nil {
		return err
	}
	args = append(args, path)

	cmd := exec.Command(ZFS_BINARY, args...)

	stderr := bytes.NewBuffer(make([]byte, 0, 1024))
	cmd.Stderr = stderr

	if err = cmd.Start(); err != nil {
		return err
	}

	if err = cmd.Wait(); err != nil {
		err = ZFSError{
			Stderr:  stderr.Bytes(),
			WaitErr: err,
		}
	}

	return
}
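
// Example usage of ZFSSet (sketch; the property name/value and the fs variable
// of type *DatasetPath are illustrative):
//
//	props := NewZFSProperties()
//	props.Set("readonly", "on")
//	if err := ZFSSet(fs, props); err != nil {
//		// handle error
//	}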

func ZFSGet(fs *DatasetPath, props []string) (*ZFSProperties, error) {
	return zfsGet(fs.ToString(), props, sourceAny)
}

var zfsGetDatasetDoesNotExistRegexp = regexp.MustCompile(`^cannot open '(\S+)': (dataset does not exist|no such pool or dataset)`)

type DatasetDoesNotExist struct {
	Path string
}

func (d *DatasetDoesNotExist) Error() string { return fmt.Sprintf("dataset %q does not exist", d.Path) }

type zfsPropertySource uint

const (
	sourceLocal zfsPropertySource = 1 << iota
	sourceDefault
	sourceInherited
	sourceNone
	sourceTemporary

	sourceAny zfsPropertySource = ^zfsPropertySource(0)
)

func (s zfsPropertySource) zfsGetSourceFieldPrefixes() []string {
	prefixes := make([]string, 0, 5)
	if s&sourceLocal != 0 {
		prefixes = append(prefixes, "local")
	}
	if s&sourceDefault != 0 {
		prefixes = append(prefixes, "default")
	}
	if s&sourceInherited != 0 {
		prefixes = append(prefixes, "inherited")
	}
	if s&sourceNone != 0 {
		prefixes = append(prefixes, "-")
	}
	if s&sourceTemporary != 0 {
		prefixes = append(prefixes, "temporary")
	}
	return prefixes
}
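
// For example, (sourceLocal | sourceInherited).zfsGetSourceFieldPrefixes()
// returns []string{"local", "inherited"}. zfsGet below matches these prefixes
// against the source column of `zfs get -Hp -o property,value,source`, where
// inherited properties are reported as e.g. "inherited from <dataset>".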

func zfsGet(path string, props []string, allowedSources zfsPropertySource) (*ZFSProperties, error) {
	args := []string{"get", "-Hp", "-o", "property,value,source", strings.Join(props, ","), path}
	cmd := exec.Command(ZFS_BINARY, args...)
	stdout, err := cmd.Output()
	if err != nil {
		if exitErr, ok := err.(*exec.ExitError); ok {
			if exitErr.Exited() {
				// screen-scrape output
				if sm := zfsGetDatasetDoesNotExistRegexp.FindSubmatch(exitErr.Stderr); sm != nil {
					if string(sm[1]) == path {
						return nil, &DatasetDoesNotExist{path}
					}
				}
			}
		}
		return nil, err
	}
	o := string(stdout)
	lines := strings.Split(o, "\n")
	if len(lines) < 1 || // account for newlines
		len(lines)-1 != len(props) {
		return nil, fmt.Errorf("zfs get did not return the expected number of property values")
	}
	res := &ZFSProperties{
		make(map[string]string, len(lines)),
	}
	allowedPrefixes := allowedSources.zfsGetSourceFieldPrefixes()
	for _, line := range lines[:len(lines)-1] {
		fields := strings.FieldsFunc(line, func(r rune) bool {
			return r == '\t'
		})
		if len(fields) != 3 {
			return nil, fmt.Errorf("zfs get did not return property,value,source tuples")
		}
		for _, p := range allowedPrefixes {
			if strings.HasPrefix(fields[2], p) {
				res.m[fields[0]] = fields[1]
				break
			}
		}
	}
	return res, nil
}

func ZFSDestroy(dataset string) (err error) {

	var dstype, filesystem string
	idx := strings.IndexAny(dataset, "@#")
	if idx == -1 {
		dstype = "filesystem"
		filesystem = dataset
	} else {
		switch dataset[idx] {
		case '@':
			dstype = "snapshot"
		case '#':
			dstype = "bookmark"
		}
		filesystem = dataset[:idx]
	}

	// Start the timer now and observe the duration when ZFSDestroy returns.
	defer prometheus.NewTimer(prom.ZFSDestroyDuration.WithLabelValues(dstype, filesystem)).ObserveDuration()

	cmd := exec.Command(ZFS_BINARY, "destroy", dataset)

	stderr := bytes.NewBuffer(make([]byte, 0, 1024))
	cmd.Stderr = stderr

	if err = cmd.Start(); err != nil {
		return err
	}

	if err = cmd.Wait(); err != nil {
		err = ZFSError{
			Stderr:  stderr.Bytes(),
			WaitErr: err,
		}
	}

	return
}

func zfsBuildSnapName(fs *DatasetPath, name string) string { // TODO defensive
	return fmt.Sprintf("%s@%s", fs.ToString(), name)
}

func zfsBuildBookmarkName(fs *DatasetPath, name string) string { // TODO defensive
	return fmt.Sprintf("%s#%s", fs.ToString(), name)
}

func ZFSSnapshot(fs *DatasetPath, name string, recursive bool) (err error) {

	promTimer := prometheus.NewTimer(prom.ZFSSnapshotDuration.WithLabelValues(fs.ToString()))
	defer promTimer.ObserveDuration()

	snapname := zfsBuildSnapName(fs, name)
	cmd := exec.Command(ZFS_BINARY, "snapshot", snapname)

	stderr := bytes.NewBuffer(make([]byte, 0, 1024))
	cmd.Stderr = stderr

	if err = cmd.Start(); err != nil {
		return err
	}

	if err = cmd.Wait(); err != nil {
		err = ZFSError{
			Stderr:  stderr.Bytes(),
			WaitErr: err,
		}
	}

	return
}

func ZFSBookmark(fs *DatasetPath, snapshot, bookmark string) (err error) {

	promTimer := prometheus.NewTimer(prom.ZFSBookmarkDuration.WithLabelValues(fs.ToString()))
	defer promTimer.ObserveDuration()

	snapname := zfsBuildSnapName(fs, snapshot)
	bookmarkname := zfsBuildBookmarkName(fs, bookmark)

	cmd := exec.Command(ZFS_BINARY, "bookmark", snapname, bookmarkname)

	stderr := bytes.NewBuffer(make([]byte, 0, 1024))
	cmd.Stderr = stderr

	if err = cmd.Start(); err != nil {
		return err
	}

	if err = cmd.Wait(); err != nil {
		err = ZFSError{
			Stderr:  stderr.Bytes(),
			WaitErr: err,
		}
	}

	return
}