2017-04-14 19:26:32 +02:00
package zfs
2017-04-26 17:39:16 +02:00
import (
"bufio"
2017-05-01 20:35:04 +02:00
"bytes"
2017-08-06 13:04:29 +02:00
"encoding/json"
2017-04-26 17:39:16 +02:00
"fmt"
2017-04-26 20:25:53 +02:00
"io"
2018-12-11 22:01:50 +01:00
"os"
2017-04-26 20:25:53 +02:00
"os/exec"
2019-03-13 18:33:20 +01:00
"sort"
2017-04-26 20:25:53 +02:00
"strings"
2018-12-11 22:01:50 +01:00
"sync"
"time"
2017-08-06 13:04:29 +02:00
2018-02-18 13:28:46 +01:00
"context"
2018-08-30 11:51:47 +02:00
"regexp"
2018-08-29 23:29:45 +02:00
"strconv"
2019-03-22 19:41:12 +01:00
"github.com/prometheus/client_golang/prometheus"
2019-10-14 17:48:47 +02:00
"github.com/pkg/errors"
2018-12-11 22:01:50 +01:00
"github.com/zrepl/zrepl/util/envconst"
)
var (
	// Kernel pipe-buffer size hints (bytes) for the pipes wired to
	// `zfs send` stdout and `zfs recv` stdin; overridable via the named
	// environment variables, default 1<<25 = 32 MiB.
	ZFSSendPipeCapacityHint = int(envconst.Int64("ZFS_SEND_PIPE_CAPACITY_HINT", 1<<25))
	ZFSRecvPipeCapacityHint = int(envconst.Int64("ZFS_RECV_PIPE_CAPACITY_HINT", 1<<25))
)
2017-04-14 19:26:32 +02:00
2017-08-05 21:15:37 +02:00
// A DatasetPath is the name of a ZFS dataset, held as its '/'-separated
// components. The zero value is the empty (root) path.
type DatasetPath struct {
	comps []string
}

// ToString renders the path in the usual "pool/fs/subfs" notation.
func (p *DatasetPath) ToString() string {
	var b strings.Builder
	for i, comp := range p.comps {
		if i > 0 {
			b.WriteByte('/')
		}
		b.WriteString(comp)
	}
	return b.String()
}

// Empty reports whether the path has no components.
func (p *DatasetPath) Empty() bool {
	return len(p.comps) < 1
}

// Extend appends all components of extend to p, mutating p in place.
func (p *DatasetPath) Extend(extend *DatasetPath) {
	for _, comp := range extend.comps {
		p.comps = append(p.comps, comp)
	}
}
2017-08-06 13:04:29 +02:00
func ( p * DatasetPath ) HasPrefix ( prefix * DatasetPath ) bool {
2017-08-05 21:15:37 +02:00
if len ( prefix . comps ) > len ( p . comps ) {
return false
}
for i := range prefix . comps {
if prefix . comps [ i ] != p . comps [ i ] {
return false
}
}
return true
2017-04-30 16:13:05 +02:00
}
2017-08-06 13:04:29 +02:00
func ( p * DatasetPath ) TrimPrefix ( prefix * DatasetPath ) {
2017-08-05 21:15:37 +02:00
if ! p . HasPrefix ( prefix ) {
return
}
prelen := len ( prefix . comps )
newlen := len ( p . comps ) - prelen
oldcomps := p . comps
p . comps = make ( [ ] string , newlen )
for i := 0 ; i < newlen ; i ++ {
p . comps [ i ] = oldcomps [ prelen + i ]
}
}
func ( p * DatasetPath ) TrimNPrefixComps ( n int ) {
if len ( p . comps ) < n {
n = len ( p . comps )
}
if n == 0 {
return
}
p . comps = p . comps [ n : ]
}
2017-08-06 13:04:29 +02:00
func ( p DatasetPath ) Equal ( q * DatasetPath ) bool {
2017-08-05 21:15:37 +02:00
if len ( p . comps ) != len ( q . comps ) {
return false
}
for i := range p . comps {
if p . comps [ i ] != q . comps [ i ] {
return false
}
}
return true
}
2017-08-06 13:04:29 +02:00
func ( p * DatasetPath ) Length ( ) int {
2017-08-05 21:15:37 +02:00
return len ( p . comps )
}
2017-08-06 13:04:29 +02:00
func ( p * DatasetPath ) Copy ( ) ( c * DatasetPath ) {
c = & DatasetPath { }
2017-08-05 21:15:37 +02:00
c . comps = make ( [ ] string , len ( p . comps ) )
copy ( c . comps , p . comps )
return
}
2017-04-30 16:13:05 +02:00
2017-08-06 13:04:29 +02:00
func ( p * DatasetPath ) MarshalJSON ( ) ( [ ] byte , error ) {
return json . Marshal ( p . comps )
}
func ( p * DatasetPath ) UnmarshalJSON ( b [ ] byte ) error {
p . comps = make ( [ ] string , 0 )
return json . Unmarshal ( b , & p . comps )
}
func NewDatasetPath ( s string ) ( p * DatasetPath , err error ) {
p = & DatasetPath { }
2017-04-30 16:13:05 +02:00
if s == "" {
2017-08-05 21:15:37 +02:00
p . comps = make ( [ ] string , 0 )
return p , nil // the empty dataset path
2017-04-30 16:13:05 +02:00
}
2018-06-19 19:51:40 +02:00
const FORBIDDEN = "@#|\t<>*"
/ * Documenation of allowed characters in zfs names :
https : //docs.oracle.com/cd/E19253-01/819-5461/gbcpt/index.html
Space is missing in the oracle list , but according to
https : //github.com/zfsonlinux/zfs/issues/439
there is evidence that it was intentionally allowed
* /
if strings . ContainsAny ( s , FORBIDDEN ) {
2017-08-05 21:15:37 +02:00
err = fmt . Errorf ( "contains forbidden characters (any of '%s')" , FORBIDDEN )
return
}
p . comps = strings . Split ( s , "/" )
if p . comps [ len ( p . comps ) - 1 ] == "" {
err = fmt . Errorf ( "must not end with a '/'" )
return
2017-05-01 20:09:17 +02:00
}
2017-08-05 21:15:37 +02:00
return
2017-04-26 18:36:01 +02:00
}
2017-08-06 13:04:29 +02:00
func toDatasetPath ( s string ) * DatasetPath {
2017-05-01 20:35:04 +02:00
p , err := NewDatasetPath ( s )
if err != nil {
panic ( err )
}
return p
2017-04-26 17:39:16 +02:00
}
// ZFSError wraps a non-zero exit of a zfs(8) subcommand, capturing the
// process's stderr so callers can surface the underlying zfs error message.
type ZFSError struct {
	Stderr  []byte // raw stderr captured from the zfs process
	WaitErr error  // error returned by (*exec.Cmd).Wait, usually *exec.ExitError
}

// Error implements the error interface, including the captured stderr.
func (e *ZFSError) Error() string {
	return fmt.Sprintf("zfs exited with error: %s\nstderr:\n%s", e.WaitErr.Error(), e.Stderr)
}

// ZFS_BINARY is the zfs executable invoked by all functions in this package;
// resolved via $PATH unless it contains a path separator.
var ZFS_BINARY string = "zfs"
2017-05-01 20:35:04 +02:00
func ZFSList ( properties [ ] string , zfsArgs ... string ) ( res [ ] [ ] string , err error ) {
2017-04-14 19:26:32 +02:00
2017-05-01 20:35:04 +02:00
args := make ( [ ] string , 0 , 4 + len ( zfsArgs ) )
2017-04-30 16:13:05 +02:00
args = append ( args ,
2017-05-06 10:57:43 +02:00
"list" , "-H" , "-p" ,
2017-05-01 20:35:04 +02:00
"-o" , strings . Join ( properties , "," ) )
args = append ( args , zfsArgs ... )
2017-04-30 16:13:05 +02:00
cmd := exec . Command ( ZFS_BINARY , args ... )
2017-04-26 17:39:16 +02:00
var stdout io . Reader
2017-05-01 20:35:04 +02:00
stderr := bytes . NewBuffer ( make ( [ ] byte , 0 , 1024 ) )
cmd . Stderr = stderr
2017-04-26 17:39:16 +02:00
if stdout , err = cmd . StdoutPipe ( ) ; err != nil {
return
}
if err = cmd . Start ( ) ; err != nil {
return
}
s := bufio . NewScanner ( stdout )
buf := make ( [ ] byte , 1024 )
s . Buffer ( buf , 0 )
2017-05-01 20:35:04 +02:00
res = make ( [ ] [ ] string , 0 )
2017-04-26 17:39:16 +02:00
for s . Scan ( ) {
2017-05-01 20:35:04 +02:00
fields := strings . SplitN ( s . Text ( ) , "\t" , len ( properties ) )
if len ( fields ) != len ( properties ) {
2017-04-26 17:39:16 +02:00
err = errors . New ( "unexpected output" )
return
}
2017-05-01 20:35:04 +02:00
res = append ( res , fields )
2017-04-26 17:39:16 +02:00
}
2017-04-26 20:25:53 +02:00
if waitErr := cmd . Wait ( ) ; waitErr != nil {
2018-12-11 22:01:50 +01:00
err := & ZFSError {
2017-05-01 20:35:04 +02:00
Stderr : stderr . Bytes ( ) ,
2017-04-26 17:39:16 +02:00
WaitErr : waitErr ,
}
return nil , err
}
return
}
2017-05-07 12:18:54 +02:00
2018-02-18 13:28:46 +01:00
// ZFSListResult is one streamed result of ZFSListChan: either a row of
// property values (Fields) or a terminal error (Err). Exactly one is set.
type ZFSListResult struct {
	Fields []string
	Err    error
}
// ZFSListChan executes `zfs list` and sends the results to the `out` channel.
// The `out` channel is always closed by ZFSListChan:
// If an error occurs, it is closed after sending a result with the Err field set.
// If no error occurs, it is just closed.
// If the operation is cancelled via context, the channel is just closed.
//
// However, if callers do not drain `out` or cancel via `ctx`, the process will leak either running because
// IO is pending or as a zombie.
func ZFSListChan(ctx context.Context, out chan ZFSListResult, properties []string, zfsArgs ...string) {
	defer close(out)

	args := make([]string, 0, 4+len(zfsArgs))
	args = append(args,
		"list", "-H", "-p",
		"-o", strings.Join(properties, ","))
	args = append(args, zfsArgs...)

	// sendResult delivers one result to out, or reports done=true if the
	// context was cancelled before the consumer received it.
	sendResult := func(fields []string, err error) (done bool) {
		select {
		case <-ctx.Done():
			return true
		case out <- ZFSListResult{fields, err}:
			return false
		}
	}

	cmd := exec.CommandContext(ctx, ZFS_BINARY, args...)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		sendResult(nil, err)
		return
	}
	// TODO bounded buffer
	stderr := bytes.NewBuffer(make([]byte, 0, 1024))
	cmd.Stderr = stderr
	if err = cmd.Start(); err != nil {
		sendResult(nil, err)
		return
	}
	defer func() {
		// discard the error, this defer is only relevant if we return while parsing the output
		// in which case we'll return an 'unexpected output' error and not the exit status
		_ = cmd.Wait()
	}()

	s := bufio.NewScanner(stdout)
	buf := make([]byte, 1024) // max line length
	s.Buffer(buf, 0)

	for s.Scan() {
		fields := strings.SplitN(s.Text(), "\t", len(properties))
		if len(fields) != len(properties) {
			sendResult(nil, errors.New("unexpected output"))
			return
		}
		if sendResult(fields, nil) {
			// consumer is gone (ctx cancelled)
			return
		}
	}

	if err := cmd.Wait(); err != nil {
		if err, ok := err.(*exec.ExitError); ok {
			sendResult(nil, &ZFSError{
				Stderr:  stderr.Bytes(),
				WaitErr: err,
			})
		} else {
			sendResult(nil, &ZFSError{WaitErr: err})
		}
		return
	}
	if s.Err() != nil {
		sendResult(nil, s.Err())
		return
	}
}
2018-06-20 20:20:37 +02:00
// validateRelativeZFSVersion checks that s names a snapshot or bookmark
// relative to some filesystem, i.e. "@snap" or "#bookmark".
func validateRelativeZFSVersion(s string) error {
	if len(s) < 2 {
		return errors.New("version must start with a delimiter char followed by at least one character")
	}
	switch s[0] {
	case '@', '#':
		// valid delimiter
	default:
		return errors.New("version name starts with invalid delimiter char")
	}
	// FIXME whitespace check...
	return nil
}

// validateZFSFilesystem performs a minimal sanity check on a filesystem path.
func validateZFSFilesystem(fs string) error {
	if fs == "" {
		return errors.New("filesystem path must have length > 0")
	}
	return nil
}

// absVersion joins filesystem fs and relative version v (e.g. "@snap")
// into an absolute version name such as "pool/fs@snap".
func absVersion(fs, v string) (full string, err error) {
	if err := validateZFSFilesystem(fs); err != nil {
		return "", err
	}
	if err := validateRelativeZFSVersion(v); err != nil {
		return "", err
	}
	return fs + v, nil
}

// buildCommonSendArgs assembles the argument list shared by `zfs send`
// and its dry-run variant:
//
//	resume:      -t <token>
//	full:        <fs><to>
//	incremental: -i <fs><from> <fs><to>
func buildCommonSendArgs(fs string, from, to string, token string) ([]string, error) {
	args := make([]string, 0, 3)
	// a resume token fully describes the send; fs/from/to are ignored then
	if token != "" {
		return append(args, "-t", token), nil
	}

	toV, err := absVersion(fs, to)
	if err != nil {
		return nil, err
	}

	var fromV string
	if from != "" {
		if fromV, err = absVersion(fs, from); err != nil {
			return nil, err
		}
	}

	if fromV == "" { // initial (full) send
		args = append(args, toV)
	} else {
		args = append(args, "-i", fromV, toV)
	}

	return args, nil
}
2018-12-11 22:01:50 +01:00
type sendStreamCopier struct {
recorder readErrRecorder
}
type readErrRecorder struct {
io . ReadCloser
readErr error
}
type sendStreamCopierError struct {
isReadErr bool // if false, it's a write error
2019-03-22 19:41:12 +01:00
err error
2018-12-11 22:01:50 +01:00
}
func ( e sendStreamCopierError ) Error ( ) string {
if e . isReadErr {
return fmt . Sprintf ( "stream: read error: %s" , e . err )
} else {
return fmt . Sprintf ( "stream: writer error: %s" , e . err )
}
}
2019-03-22 19:41:12 +01:00
func ( e sendStreamCopierError ) IsReadError ( ) bool { return e . isReadErr }
2018-12-11 22:01:50 +01:00
func ( e sendStreamCopierError ) IsWriteError ( ) bool { return ! e . isReadErr }
func ( r * readErrRecorder ) Read ( p [ ] byte ) ( n int , err error ) {
n , err = r . ReadCloser . Read ( p )
r . readErr = err
return n , err
}
func newSendStreamCopier ( stream io . ReadCloser ) * sendStreamCopier {
return & sendStreamCopier { recorder : readErrRecorder { stream , nil } }
}
func ( c * sendStreamCopier ) WriteStreamTo ( w io . Writer ) StreamCopierError {
debug ( "sendStreamCopier.WriteStreamTo: begin" )
_ , err := io . Copy ( w , & c . recorder )
debug ( "sendStreamCopier.WriteStreamTo: copy done" )
if err != nil {
if c . recorder . readErr != nil {
return sendStreamCopierError { isReadErr : true , err : c . recorder . readErr }
} else {
return sendStreamCopierError { isReadErr : false , err : err }
}
}
return nil
}
func ( c * sendStreamCopier ) Read ( p [ ] byte ) ( n int , err error ) {
return c . recorder . Read ( p )
}
func ( c * sendStreamCopier ) Close ( ) error {
return c . recorder . ReadCloser . Close ( )
}
// pipeWithCapacityHint creates an os.Pipe and best-effort resizes its kernel
// buffer to capacity bytes (see trySetPipeCapacity).
// Panics if capacity is not positive (programmer error).
func pipeWithCapacityHint(capacity int) (r, w *os.File, err error) {
	if capacity <= 0 {
		panic(fmt.Sprintf("capacity must be positive %v", capacity))
	}
	readEnd, writeEnd, err := os.Pipe()
	if err != nil {
		return nil, nil, err
	}
	trySetPipeCapacity(writeEnd, capacity)
	return readEnd, writeEnd, nil
}
// sendStream wraps a running `zfs send` process, exposing its stdout as an
// io.ReadCloser. Closing the stream (or a read error) kills and reaps the
// process; see killAndWait.
type sendStream struct {
	cmd  *exec.Cmd
	kill context.CancelFunc // cancels the command's context, killing the process

	closeMtx     sync.Mutex // guards stdoutReader and opErr against concurrent Read/Close
	stdoutReader *os.File
	opErr        error // sticky teardown verdict set by killAndWait; returned by later calls
}
// Read serves reads from the zfs send process's stdout.
// After any read failure (including EOF) the process is torn down via
// killAndWait; the resulting sticky opErr is returned by further calls.
func (s *sendStream) Read(p []byte) (n int, err error) {
	s.closeMtx.Lock()
	opErr := s.opErr
	s.closeMtx.Unlock()
	if opErr != nil {
		return 0, opErr
	}

	n, err = s.stdoutReader.Read(p)
	if err != nil {
		debug("sendStream: read err: %T %s", err, err)
		// TODO we assume here that any read error is permanent
		// which is most likely the case for a local zfs send
		kwerr := s.killAndWait(err)
		debug("sendStream: killAndWait n=%v err= %T %s", n, kwerr, kwerr)
		// TODO we assume here that any read error is permanent
		return n, kwerr
	}
	return n, err
}

// Close kills the zfs send process (if still running) and reaps it.
// Safe to call multiple times; repeated calls return the sticky opErr.
func (s *sendStream) Close() error {
	debug("sendStream: close called")
	return s.killAndWait(nil)
}
// killAndWait tears down the zfs send process and reaps it, converting its
// exit status into the sticky opErr that subsequent Read/Close calls return.
// precedingReadErr is the read error (possibly io.EOF) that triggered the
// teardown, or nil when called from Close.
func (s *sendStream) killAndWait(precedingReadErr error) error {

	debug("sendStream: killAndWait enter")
	defer debug("sendStream: killAndWait leave")

	if precedingReadErr == io.EOF {
		// give the zfs process a little bit of time to terminate itself
		// if it holds this deadline, exitErr will be nil
		time.AfterFunc(200*time.Millisecond, s.kill)
	} else {
		s.kill()
	}

	// allow async kills from Close(), that's why we only take the mutex here
	s.closeMtx.Lock()
	defer s.closeMtx.Unlock()

	if s.opErr != nil {
		// a previous killAndWait already completed; keep its verdict
		return s.opErr
	}

	waitErr := s.cmd.Wait()
	// distinguish between ExitError (which is actually a non-problem for us)
	// vs failed wait syscall (for which we give upper layers the chance to retry)
	var exitErr *exec.ExitError
	if waitErr != nil {
		if ee, ok := waitErr.(*exec.ExitError); ok {
			exitErr = ee
		} else {
			return waitErr
		}
	}

	// now, after we know the program exited, do we close the pipe
	var closePipeErr error
	if s.stdoutReader != nil {
		closePipeErr = s.stdoutReader.Close()
		if closePipeErr == nil {
			// avoid double-closes in case anything below doesn't work
			// and someone calls Close again
			s.stdoutReader = nil
		} else {
			return closePipeErr
		}
	}

	// we managed to tear things down, now let's give the user some pretty *ZFSError
	if exitErr != nil {
		s.opErr = &ZFSError{
			Stderr:  exitErr.Stderr,
			WaitErr: exitErr,
		}
	} else {
		s.opErr = fmt.Errorf("zfs send exited with status code 0")
	}

	// detect the edge where we're called from s.Read
	// after the pipe EOFed and zfs send exited without errors
	// this is actually the "hot" / nice path
	if exitErr == nil && precedingReadErr == io.EOF {
		return precedingReadErr
	}

	return s.opErr
}
2018-10-18 15:45:58 +02:00
// if token != "", then send -t token is used
// otherwise send [-i from] to is used
// (if from is "" a full ZFS send is done)
2018-12-11 22:01:50 +01:00
func ZFSSend ( ctx context . Context , fs string , from , to string , token string ) ( streamCopier StreamCopier , err error ) {
2018-10-18 15:45:58 +02:00
args := make ( [ ] string , 0 )
args = append ( args , "send" )
sargs , err := buildCommonSendArgs ( fs , from , to , token )
if err != nil {
return nil , err
}
args = append ( args , sargs ... )
2017-05-07 12:18:54 +02:00
2018-12-11 22:01:50 +01:00
ctx , cancel := context . WithCancel ( ctx )
cmd := exec . CommandContext ( ctx , ZFS_BINARY , args ... )
2017-05-07 12:18:54 +02:00
2018-12-11 22:01:50 +01:00
// setup stdout with an os.Pipe to control pipe buffer size
stdoutReader , stdoutWriter , err := pipeWithCapacityHint ( ZFSSendPipeCapacityHint )
if err != nil {
cancel ( )
return nil , err
}
cmd . Stdout = stdoutWriter
if err := cmd . Start ( ) ; err != nil {
cancel ( )
stdoutWriter . Close ( )
stdoutReader . Close ( )
return nil , err
}
stdoutWriter . Close ( )
stream := & sendStream {
2019-03-22 19:41:12 +01:00
cmd : cmd ,
kill : cancel ,
2018-12-11 22:01:50 +01:00
stdoutReader : stdoutReader ,
}
return newSendStreamCopier ( stream ) , err
2017-05-07 12:18:54 +02:00
}
2017-05-07 12:22:57 +02:00
2018-10-18 15:45:58 +02:00
// DrySendType is the stream type reported by `zfs send -n -P`: a full or an
// incremental send.
type DrySendType string

const (
	DrySendTypeFull        DrySendType = "full"
	DrySendTypeIncremental DrySendType = "incremental"
)

// DrySendTypeFromString parses the first column of a `zfs send -n -P` info
// line into a DrySendType.
func DrySendTypeFromString(s string) (DrySendType, error) {
	t := DrySendType(s)
	switch t {
	case DrySendTypeFull, DrySendTypeIncremental:
		return t, nil
	}
	return "", fmt.Errorf("unknown dry send type %q", s)
}
2018-08-29 23:29:45 +02:00
2018-10-18 15:45:58 +02:00
// DrySendInfo is the parsed result of a dry-run send (`zfs send -n -v -P`):
// the stream type, the target filesystem, the version endpoints and the
// size estimate.
type DrySendInfo struct {
	Type         DrySendType
	Filesystem   string // parsed from To field
	From, To     string // direct copy from ZFS output
	SizeEstimate int64  // -1 if size estimate is not possible
}

var (
	// keep same number of capture groups for unmarshalInfoLine homogeneity
	// (the empty second group stands in for the missing "from" column)
	sendDryRunInfoLineRegexFull = regexp.MustCompile(`^(full)\t()([^\t]+@[^\t]+)\t([0-9]+)$`)

	// cannot enforce '[#@]' in incremental source, see test cases
	sendDryRunInfoLineRegexIncremental = regexp.MustCompile(`^(incremental)\t([^\t]+)\t([^\t]+@[^\t]+)\t([0-9]+)$`)
)
2018-10-18 15:45:58 +02:00
// unmarshalZFSOutput scans the output of `zfs send -n -v -P` line by line and
// populates s from the first line matching one of the dry-run info regexes.
// Returns an error if no line matches.
// see test cases for example output
func (s *DrySendInfo) unmarshalZFSOutput(output []byte) (err error) {
	debug("DrySendInfo.unmarshalZFSOutput: output=%q", output)
	lines := strings.Split(string(output), "\n")
	for _, l := range lines {
		regexMatched, err := s.unmarshalInfoLine(l)
		if err != nil {
			return fmt.Errorf("line %q: %s", l, err)
		}
		if !regexMatched {
			// not an info line (e.g. per-snapshot detail lines); keep scanning
			continue
		}
		return nil
	}
	return fmt.Errorf("no match for info line (regex1 %s) (regex2 %s)", sendDryRunInfoLineRegexFull, sendDryRunInfoLineRegexIncremental)
}
// unmarshal info line, looks like this:
// full zroot/test/a@1 5389768
// incremental zroot/test/a@1 zroot/test/a@2 5383936
// => see test cases
func ( s * DrySendInfo ) unmarshalInfoLine ( l string ) ( regexMatched bool , err error ) {
2019-03-21 16:08:05 +01:00
mFull := sendDryRunInfoLineRegexFull . FindStringSubmatch ( l )
mInc := sendDryRunInfoLineRegexIncremental . FindStringSubmatch ( l )
var m [ ] string
if mFull == nil && mInc == nil {
2018-10-18 15:45:58 +02:00
return false , nil
2019-03-21 16:08:05 +01:00
} else if mFull != nil && mInc != nil {
panic ( fmt . Sprintf ( "ambiguous ZFS dry send output: %q" , l ) )
} else if mFull != nil {
m = mFull
} else if mInc != nil {
m = mInc
2018-10-18 15:45:58 +02:00
}
s . Type , err = DrySendTypeFromString ( m [ 1 ] )
if err != nil {
return true , err
2018-08-29 23:29:45 +02:00
}
2019-03-21 16:08:05 +01:00
s . From = m [ 2 ]
s . To = m [ 3 ]
toFS , _ , _ , err := DecomposeVersionString ( s . To )
2018-10-18 15:45:58 +02:00
if err != nil {
return true , fmt . Errorf ( "'to' is not a valid filesystem version: %s" , err )
}
s . Filesystem = toFS
2019-03-21 16:08:05 +01:00
s . SizeEstimate , err = strconv . ParseInt ( m [ 4 ] , 10 , 64 )
2018-10-18 15:45:58 +02:00
if err != nil {
return true , fmt . Errorf ( "cannot not parse size: %s" , err )
}
return true , nil
}
// from may be "", in which case a full ZFS send is done
// May return BookmarkSizeEstimationNotSupported as err if from is a bookmark.
func ZFSSendDry ( fs string , from , to string , token string ) ( _ * DrySendInfo , err error ) {
if strings . Contains ( from , "#" ) {
2018-09-06 02:41:25 +02:00
/ * TODO :
* ZFS at the time of writing does not support dry - run send because size - estimation
* uses fromSnap ' s deadlist . However , for a bookmark , that deadlist no longer exists .
* Redacted send & recv will bring this functionality , see
* https : //github.com/openzfs/openzfs/pull/484
* /
2019-03-22 19:41:12 +01:00
fromAbs , err := absVersion ( fs , from )
if err != nil {
return nil , fmt . Errorf ( "error building abs version for 'from': %s" , err )
}
toAbs , err := absVersion ( fs , to )
if err != nil {
return nil , fmt . Errorf ( "error building abs version for 'to': %s" , err )
}
return & DrySendInfo {
Type : DrySendTypeIncremental ,
Filesystem : fs ,
From : fromAbs ,
To : toAbs ,
2018-10-18 15:45:58 +02:00
SizeEstimate : - 1 } , nil
2018-09-06 02:41:25 +02:00
}
2018-08-29 23:29:45 +02:00
args := make ( [ ] string , 0 )
args = append ( args , "send" , "-n" , "-v" , "-P" )
2018-10-18 15:45:58 +02:00
sargs , err := buildCommonSendArgs ( fs , from , to , token )
if err != nil {
return nil , err
2018-08-29 23:29:45 +02:00
}
2018-10-18 15:45:58 +02:00
args = append ( args , sargs ... )
2018-08-29 23:29:45 +02:00
cmd := exec . Command ( ZFS_BINARY , args ... )
output , err := cmd . CombinedOutput ( )
if err != nil {
2018-10-18 15:45:58 +02:00
return nil , err
2018-08-29 23:29:45 +02:00
}
2018-10-18 15:45:58 +02:00
var si DrySendInfo
if err := si . unmarshalZFSOutput ( output ) ; err != nil {
return nil , fmt . Errorf ( "could not parse zfs send -n output: %s" , err )
2018-08-29 23:29:45 +02:00
}
2018-10-18 15:45:58 +02:00
return & si , nil
2018-08-29 23:29:45 +02:00
}
2018-12-11 22:01:50 +01:00
// StreamCopierError is the error type returned by StreamCopier.WriteStreamTo;
// it distinguishes failures of the read side (the stream source) from the
// write side (the destination writer).
type StreamCopierError interface {
	error
	IsReadError() bool
	IsWriteError() bool
}

// StreamCopier represents a single-use ZFS send stream.
type StreamCopier interface {
	// WriteStreamTo writes the stream represented by this StreamCopier
	// to the given io.Writer.
	WriteStreamTo(w io.Writer) StreamCopierError
	// Close must be called as soon as it is clear that no more data will
	// be read from the StreamCopier.
	// If StreamCopier gets its data from a connection, it might hold
	// a lock on the connection until Close is called. Only closing ensures
	// that the connection can be used afterwards.
	Close() error
}
2018-10-18 15:45:58 +02:00
2019-03-13 18:33:20 +01:00
// RecvOptions tunes the behavior of ZFSRecv.
type RecvOptions struct {
	// Rollback to the oldest snapshot, destroy it, then perform `recv -F`.
	// Note that this doesn't change property values, i.e. an existing local property value will be kept.
	RollbackAndForceRecv bool
}
2018-12-11 22:01:50 +01:00
2019-03-13 18:33:20 +01:00
func ZFSRecv ( ctx context . Context , fs string , streamCopier StreamCopier , opts RecvOptions ) ( err error ) {
2018-06-20 20:20:37 +02:00
if err := validateZFSFilesystem ( fs ) ; err != nil {
return err
}
2019-03-13 18:33:20 +01:00
fsdp , err := NewDatasetPath ( fs )
if err != nil {
return err
}
if opts . RollbackAndForceRecv {
// destroy all snapshots before `recv -F` because `recv -F`
// does not perform a rollback unless `send -R` was used (which we assume hasn't been the case)
var snaps [ ] FilesystemVersion
{
vs , err := ZFSListFilesystemVersions ( fsdp , nil )
if err != nil {
2019-03-22 20:45:27 +01:00
return fmt . Errorf ( "cannot list versions for rollback for forced receive: %s" , err )
2019-03-13 18:33:20 +01:00
}
for _ , v := range vs {
if v . Type == Snapshot {
snaps = append ( snaps , v )
}
}
sort . Slice ( snaps , func ( i , j int ) bool {
return snaps [ i ] . CreateTXG < snaps [ j ] . CreateTXG
} )
}
// bookmarks are rolled back automatically
if len ( snaps ) > 0 {
// use rollback to efficiently destroy all but the earliest snapshot
// then destroy that earliest snapshot
// afterwards, `recv -F` will work
rollbackTarget := snaps [ 0 ]
rollbackTargetAbs := rollbackTarget . ToAbsPath ( fsdp )
debug ( "recv: rollback to %q" , rollbackTargetAbs )
if err := ZFSRollback ( fsdp , rollbackTarget , "-r" ) ; err != nil {
return fmt . Errorf ( "cannot rollback %s to %s for forced receive: %s" , fsdp . ToString ( ) , rollbackTarget , err )
}
debug ( "recv: destroy %q" , rollbackTargetAbs )
if err := ZFSDestroy ( rollbackTargetAbs ) ; err != nil {
return fmt . Errorf ( "cannot destroy %s for forced receive: %s" , rollbackTargetAbs , err )
}
}
}
2017-05-07 12:22:57 +02:00
args := make ( [ ] string , 0 )
args = append ( args , "recv" )
2019-03-13 18:33:20 +01:00
if opts . RollbackAndForceRecv {
args = append ( args , "-F" )
2017-05-07 12:22:57 +02:00
}
2018-06-20 20:20:37 +02:00
args = append ( args , fs )
2017-05-07 12:22:57 +02:00
2018-12-11 22:01:50 +01:00
ctx , cancelCmd := context . WithCancel ( ctx )
defer cancelCmd ( )
2018-10-19 16:12:21 +02:00
cmd := exec . CommandContext ( ctx , ZFS_BINARY , args ... )
2017-05-07 12:22:57 +02:00
stderr := bytes . NewBuffer ( make ( [ ] byte , 0 , 1024 ) )
cmd . Stderr = stderr
// TODO report bug upstream
// Setup an unused stdout buffer.
// Otherwise, ZoL v0.6.5.9-1 3.16.0-4-amd64 writes the following error to stderr and exits with code 1
// cannot receive new filesystem stream: invalid backup stream
stdout := bytes . NewBuffer ( make ( [ ] byte , 0 , 1024 ) )
cmd . Stdout = stdout
2018-12-11 22:01:50 +01:00
stdin , stdinWriter , err := pipeWithCapacityHint ( ZFSRecvPipeCapacityHint )
if err != nil {
return err
}
2019-03-22 19:41:12 +01:00
2018-12-11 22:01:50 +01:00
cmd . Stdin = stdin
2017-05-07 12:22:57 +02:00
if err = cmd . Start ( ) ; err != nil {
2018-12-11 22:01:50 +01:00
stdinWriter . Close ( )
stdin . Close ( )
return err
}
stdin . Close ( )
defer stdinWriter . Close ( )
2019-03-22 19:41:12 +01:00
2018-12-11 22:01:50 +01:00
pid := cmd . Process . Pid
debug := func ( format string , args ... interface { } ) {
debug ( "recv: pid=%v: %s" , pid , fmt . Sprintf ( format , args ... ) )
2017-05-07 12:22:57 +02:00
}
2018-12-11 22:01:50 +01:00
debug ( "started" )
copierErrChan := make ( chan StreamCopierError )
go func ( ) {
copierErrChan <- streamCopier . WriteStreamTo ( stdinWriter )
} ( )
waitErrChan := make ( chan * ZFSError )
go func ( ) {
defer close ( waitErrChan )
if err = cmd . Wait ( ) ; err != nil {
waitErrChan <- & ZFSError {
Stderr : stderr . Bytes ( ) ,
WaitErr : err ,
}
return
2017-05-07 12:22:57 +02:00
}
2018-12-11 22:01:50 +01:00
} ( )
// streamCopier always fails before or simultaneously with Wait
// thus receive from it first
copierErr := <- copierErrChan
debug ( "copierErr: %T %s" , copierErr , copierErr )
if copierErr != nil {
2019-03-22 19:41:12 +01:00
cancelCmd ( )
2017-05-07 12:22:57 +02:00
}
2018-12-11 22:01:50 +01:00
waitErr := <- waitErrChan
debug ( "waitErr: %T %s" , waitErr , waitErr )
if copierErr == nil && waitErr == nil {
return nil
} else if waitErr != nil && ( copierErr == nil || copierErr . IsWriteError ( ) ) {
return waitErr // has more interesting info in that case
}
return copierErr // if it's not a write error, the copier error is more interesting
2018-10-18 15:45:58 +02:00
}
// ClearResumeTokenError reports a failed `zfs recv -A` (abort of resumable
// receive state), carrying the combined output of the zfs command.
type ClearResumeTokenError struct {
	ZFSOutput []byte
	CmdError  error
}

// Error implements the error interface.
// NOTE(review): CmdError is not included in the message — confirm intended.
func (e ClearResumeTokenError) Error() string {
	return fmt.Sprintf("could not clear resume token: %q", string(e.ZFSOutput))
}
// ZFSRecvClearResumeToken aborts resumable receive state (`zfs recv -A fs`).
// It is a no-op (returns nil) if fs has no resumable receive state.
// zfs command failures are returned as *ClearResumeTokenError; an invalid
// fs name yields the validation error instead.
func ZFSRecvClearResumeToken(fs string) (err error) {
	if err := validateZFSFilesystem(fs); err != nil {
		return err
	}

	cmd := exec.Command(ZFS_BINARY, "recv", "-A", fs)
	o, err := cmd.CombinedOutput()
	if err != nil {
		// treat "nothing to abort" (reported on stderr) as success
		if bytes.Contains(o, []byte("does not have any resumable receive state to abort")) {
			return nil
		}
		return &ClearResumeTokenError{o, err}
	}
	return nil
}
2018-01-05 18:42:10 +01:00
// ZFSProperties is a set of zfs property name/value pairs as used by
// `zfs set` and `zfs get`.
type ZFSProperties struct {
	m map[string]string
}

// NewZFSProperties returns an empty, ready-to-use property set.
func NewZFSProperties() *ZFSProperties {
	return &ZFSProperties{m: make(map[string]string, 4)}
}

// Set records val for property key, overwriting any previous value.
func (p *ZFSProperties) Set(key, val string) {
	p.m[key] = val
}

// Get returns the recorded value for key, or "" if unset.
func (p *ZFSProperties) Get(key string) string {
	return p.m[key]
}

// appendArgs appends "name=value" pairs to args for a `zfs set` command
// line. Property names must not contain '=', the name/value delimiter.
// Note: map iteration makes the argument order nondeterministic.
func (p *ZFSProperties) appendArgs(args *[]string) (err error) {
	for prop, val := range p.m {
		if strings.Contains(prop, "=") {
			return errors.New("prop contains rune '=' which is the delimiter between property name and value")
		}
		*args = append(*args, prop+"="+val)
	}
	return nil
}
2017-05-07 12:23:12 +02:00
2018-01-05 18:42:10 +01:00
// ZFSSet applies the given properties to filesystem fs via `zfs set`.
func ZFSSet(fs *DatasetPath, props *ZFSProperties) (err error) {
	return zfsSet(fs.ToString(), props)
}

// zfsSet runs `zfs set name=value ... path`, returning a *ZFSError (with
// captured stderr) if the command exits non-zero.
func zfsSet(path string, props *ZFSProperties) (err error) {
	args := make([]string, 0)
	args = append(args, "set")
	err = props.appendArgs(&args)
	if err != nil {
		return err
	}
	args = append(args, path)

	cmd := exec.Command(ZFS_BINARY, args...)

	stderr := bytes.NewBuffer(make([]byte, 0, 1024))
	cmd.Stderr = stderr

	if err = cmd.Start(); err != nil {
		return err
	}

	if err = cmd.Wait(); err != nil {
		err = &ZFSError{
			Stderr:  stderr.Bytes(),
			WaitErr: err,
		}
	}
	return
}
2017-07-01 18:21:18 +02:00
2018-06-20 20:20:37 +02:00
// ZFSGet reads the given properties of fs, accepting any property source.
func ZFSGet(fs *DatasetPath, props []string) (*ZFSProperties, error) {
	return zfsGet(fs.ToString(), props, sourceAny)
}

// ZFSGetRawAnySource is like ZFSGet but takes the dataset path as a raw string.
func ZFSGetRawAnySource(path string, props []string) (*ZFSProperties, error) {
	return zfsGet(path, props, sourceAny)
}

// zfsGetDatasetDoesNotExistRegexp matches the stderr message zfs prints when
// the target dataset is missing; capture group 1 is the dataset name.
// NOTE(review): the char class `[^)]+` looks like it was meant to be `[^']+` — verify.
var zfsGetDatasetDoesNotExistRegexp = regexp.MustCompile(`^cannot open '([^)]+)': (dataset does not exist|no such pool or dataset)`) // verified in platformtest
2018-06-20 20:20:37 +02:00
2018-08-30 11:51:47 +02:00
// DatasetDoesNotExist is returned by zfsGet when the requested dataset
// does not exist (detected by screen-scraping zfs stderr).
type DatasetDoesNotExist struct {
	Path string
}

// Error implements the error interface.
func (d *DatasetDoesNotExist) Error() string { return fmt.Sprintf("dataset %q does not exist", d.Path) }
2018-09-06 04:44:35 +02:00
// zfsPropertySource is a bit set of the values the `source` column of
// `zfs get` may report for a property.
type zfsPropertySource uint

const (
	sourceLocal zfsPropertySource = 1 << iota
	sourceDefault
	sourceInherited
	sourceNone
	sourceTemporary
	sourceReceived

	// sourceAny selects every source (all bits set).
	sourceAny zfsPropertySource = ^zfsPropertySource(0)
)

// zfsGetSourceFieldPrefixes returns the prefixes of the `zfs get` source
// column that correspond to the sources selected in s. For sourceAny the
// empty string is included as well, which prefix-matches any source field.
func (s zfsPropertySource) zfsGetSourceFieldPrefixes() []string {
	table := []struct {
		bit    zfsPropertySource
		prefix string
	}{
		{sourceLocal, "local"},
		{sourceDefault, "default"},
		{sourceInherited, "inherited"},
		{sourceNone, "-"},
		{sourceTemporary, "temporary"},
		{sourceReceived, "received"},
	}
	prefixes := make([]string, 0, 7)
	for _, entry := range table {
		if s&entry.bit != 0 {
			prefixes = append(prefixes, entry.prefix)
		}
	}
	if s == sourceAny {
		prefixes = append(prefixes, "")
	}
	return prefixes
}
func zfsGet ( path string , props [ ] string , allowedSources zfsPropertySource ) ( * ZFSProperties , error ) {
args := [ ] string { "get" , "-Hp" , "-o" , "property,value,source" , strings . Join ( props , "," ) , path }
2018-06-20 20:20:37 +02:00
cmd := exec . Command ( ZFS_BINARY , args ... )
2018-09-04 22:30:52 +02:00
stdout , err := cmd . Output ( )
2018-06-20 20:20:37 +02:00
if err != nil {
2018-08-30 11:51:47 +02:00
if exitErr , ok := err . ( * exec . ExitError ) ; ok {
if exitErr . Exited ( ) {
// screen-scrape output
2018-09-04 22:30:52 +02:00
if sm := zfsGetDatasetDoesNotExistRegexp . FindSubmatch ( exitErr . Stderr ) ; sm != nil {
2018-08-30 11:51:47 +02:00
if string ( sm [ 1 ] ) == path {
return nil , & DatasetDoesNotExist { path }
}
}
}
2019-06-08 23:00:00 +02:00
return nil , & ZFSError {
Stderr : exitErr . Stderr ,
WaitErr : exitErr ,
}
2018-08-30 11:51:47 +02:00
}
2018-06-20 20:20:37 +02:00
return nil , err
}
2018-09-04 22:30:52 +02:00
o := string ( stdout )
2018-06-20 20:20:37 +02:00
lines := strings . Split ( o , "\n" )
if len ( lines ) < 1 || // account for newlines
len ( lines ) - 1 != len ( props ) {
return nil , fmt . Errorf ( "zfs get did not return the number of expected property values" )
}
res := & ZFSProperties {
make ( map [ string ] string , len ( lines ) ) ,
}
2018-09-06 04:44:35 +02:00
allowedPrefixes := allowedSources . zfsGetSourceFieldPrefixes ( )
2018-06-20 20:20:37 +02:00
for _ , line := range lines [ : len ( lines ) - 1 ] {
2018-09-06 04:44:35 +02:00
fields := strings . FieldsFunc ( line , func ( r rune ) bool {
return r == '\t'
} )
if len ( fields ) != 3 {
return nil , fmt . Errorf ( "zfs get did not return property,value,source tuples" )
}
for _ , p := range allowedPrefixes {
2019-03-22 19:41:12 +01:00
if strings . HasPrefix ( fields [ 2 ] , p ) {
2018-09-06 04:44:35 +02:00
res . m [ fields [ 0 ] ] = fields [ 1 ]
break
}
2018-06-20 20:20:37 +02:00
}
}
return res , nil
}
2019-10-14 17:48:47 +02:00
// ZFSPropCreateTxgAndGuidProps bundles the numeric createtxg and guid
// properties of a dataset.
type ZFSPropCreateTxgAndGuidProps struct {
	CreateTXG, Guid uint64
}

// ZFSGetCreateTXGAndGuid fetches the createtxg and guid properties of ds,
// accepting any property source.
func ZFSGetCreateTXGAndGuid(ds string) (ZFSPropCreateTxgAndGuidProps, error) {
	props, err := zfsGetNumberProps(ds, []string{"createtxg", "guid"}, sourceAny)
	if err != nil {
		return ZFSPropCreateTxgAndGuidProps{}, err
	}
	res := ZFSPropCreateTxgAndGuidProps{
		CreateTXG: props["createtxg"],
		Guid:      props["guid"],
	}
	return res, nil
}
// zfsGetNumberProps fetches the given properties of ds from the given
// property sources and parses each value as a base-10 uint64.
//
// returns *DatasetDoesNotExist if the dataset does not exist
func zfsGetNumberProps(ds string, props []string, src zfsPropertySource) (map[string]uint64, error) {
	// BUG fix: src was ignored and sourceAny hard-coded, making the parameter
	// a silent no-op for any caller that restricted sources.
	sps, err := zfsGet(ds, props, src)
	if err != nil {
		if _, ok := err.(*DatasetDoesNotExist); ok {
			return nil, err // pass through as is
		}
		// generic message: the old "set replication cursor" text leaked a
		// specific caller's context into this general-purpose helper
		return nil, errors.Wrap(err, "zfs: get number properties")
	}
	r := make(map[string]uint64, len(props))
	for _, p := range props {
		v, err := strconv.ParseUint(sps.Get(p), 10, 64)
		if err != nil {
			return nil, errors.Wrapf(err, "zfs get: parse number property %q", p)
		}
		r[p] = v
	}
	return r, nil
}
2019-08-12 01:19:08 +02:00
// DestroySnapshotsError reports which snapshots of a batched `zfs destroy`
// could not be destroyed, and why.
type DestroySnapshotsError struct {
	RawLines      []string
	Filesystem    string
	Undestroyable []string // snapshot name only (filesystem@ stripped)
	Reason        []string
}

// Error implements the error interface. For a single undestroyable snapshot
// it renders a compact one-liner, otherwise the raw stderr lines.
// Panics on invariant violations (mismatched or empty slices).
func (e *DestroySnapshotsError) Error() string {
	if len(e.Undestroyable) != len(e.Reason) {
		panic(fmt.Sprintf("%v != %v", len(e.Undestroyable), len(e.Reason)))
	}
	switch len(e.Undestroyable) {
	case 0:
		panic(fmt.Sprintf("error must have one undestroyable snapshot, %q", e.Filesystem))
	case 1:
		return fmt.Sprintf("zfs destroy failed: %s@%s: %s", e.Filesystem, e.Undestroyable[0], e.Reason[0])
	default:
		return strings.Join(e.RawLines, "\n")
	}
}

var destroySnapshotsErrorRegexp = regexp.MustCompile(`^cannot destroy snapshot ([^@]+)@(.+): (.*)$`) // yes, datasets can contain `:`

// tryParseDestroySnapshotsError screen-scrapes stderr of a failed
// `zfs destroy <fs>@<snaps>` invocation. It returns nil (be conservative)
// unless every stderr line is a well-formed "cannot destroy snapshot" message
// for the filesystem named in arg.
func tryParseDestroySnapshotsError(arg string, stderr []byte) *DestroySnapshotsError {
	comps := strings.SplitN(arg, "@", 2)
	if len(comps) != 2 {
		return nil // not a snapshot argument
	}
	fs := comps[0]

	rawLines := []string{}
	undestroyable := []string{}
	reasons := []string{}
	scanner := bufio.NewScanner(bytes.NewReader(stderr))
	for scanner.Scan() {
		l := scanner.Text()
		rawLines = append(rawLines, l)
		m := destroySnapshotsErrorRegexp.FindStringSubmatch(l)
		if m == nil || m[1] != fs {
			return nil // unexpected line => be conservative
		}
		undestroyable = append(undestroyable, m[2])
		reasons = append(reasons, m[3])
	}
	if len(undestroyable) == 0 {
		return nil
	}
	return &DestroySnapshotsError{
		RawLines:      rawLines,
		Filesystem:    fs,
		Undestroyable: undestroyable,
		Reason:        reasons,
	}
}
func ZFSDestroy ( arg string ) ( err error ) {
2017-07-01 18:21:18 +02:00
2018-09-08 07:03:41 +02:00
var dstype , filesystem string
2019-08-12 01:19:08 +02:00
idx := strings . IndexAny ( arg , "@#" )
2018-09-08 07:03:41 +02:00
if idx == - 1 {
dstype = "filesystem"
2019-08-12 01:19:08 +02:00
filesystem = arg
2018-09-08 07:03:41 +02:00
} else {
2019-08-12 01:19:08 +02:00
switch arg [ idx ] {
2019-03-22 19:41:12 +01:00
case '@' :
dstype = "snapshot"
case '#' :
dstype = "bookmark"
2018-09-08 07:03:41 +02:00
}
2019-08-12 01:19:08 +02:00
filesystem = arg [ : idx ]
2018-09-08 07:03:41 +02:00
}
defer prometheus . NewTimer ( prom . ZFSDestroyDuration . WithLabelValues ( dstype , filesystem ) )
2019-08-12 01:19:08 +02:00
cmd := exec . Command ( ZFS_BINARY , "destroy" , arg )
2017-07-01 18:21:18 +02:00
2019-08-12 01:19:08 +02:00
var stderr bytes . Buffer
cmd . Stderr = & stderr
2017-07-01 18:21:18 +02:00
if err = cmd . Start ( ) ; err != nil {
return err
}
if err = cmd . Wait ( ) ; err != nil {
2018-12-11 22:01:50 +01:00
err = & ZFSError {
2017-07-01 18:21:18 +02:00
Stderr : stderr . Bytes ( ) ,
WaitErr : err ,
}
2019-08-12 01:19:08 +02:00
if dserr := tryParseDestroySnapshotsError ( arg , stderr . Bytes ( ) ) ; dserr != nil {
err = dserr
}
2017-07-01 18:21:18 +02:00
}
return
}
2017-07-01 20:28:46 +02:00
2017-11-12 23:05:18 +01:00
// zfsBuildSnapName builds the `fs@name` argument form for zfs commands.
func zfsBuildSnapName(fs *DatasetPath, name string) string { // TODO defensive
	return fs.ToString() + "@" + name
}
// zfsBuildBookmarkName builds the `fs#name` argument form for zfs commands.
func zfsBuildBookmarkName(fs *DatasetPath, name string) string { // TODO defensive
	return fs.ToString() + "#" + name
}
2017-08-06 13:04:29 +02:00
func ZFSSnapshot ( fs * DatasetPath , name string , recursive bool ) ( err error ) {
2017-07-01 20:28:46 +02:00
2018-04-05 22:12:25 +02:00
promTimer := prometheus . NewTimer ( prom . ZFSSnapshotDuration . WithLabelValues ( fs . ToString ( ) ) )
defer promTimer . ObserveDuration ( )
2017-11-12 23:05:18 +01:00
snapname := zfsBuildSnapName ( fs , name )
2017-07-01 20:28:46 +02:00
cmd := exec . Command ( ZFS_BINARY , "snapshot" , snapname )
stderr := bytes . NewBuffer ( make ( [ ] byte , 0 , 1024 ) )
cmd . Stderr = stderr
if err = cmd . Start ( ) ; err != nil {
return err
}
if err = cmd . Wait ( ) ; err != nil {
2018-12-11 22:01:50 +01:00
err = & ZFSError {
2017-07-01 20:28:46 +02:00
Stderr : stderr . Bytes ( ) ,
WaitErr : err ,
}
}
return
}
2017-11-12 23:05:18 +01:00
func ZFSBookmark ( fs * DatasetPath , snapshot , bookmark string ) ( err error ) {
2018-04-05 22:12:25 +02:00
promTimer := prometheus . NewTimer ( prom . ZFSBookmarkDuration . WithLabelValues ( fs . ToString ( ) ) )
defer promTimer . ObserveDuration ( )
2017-11-12 23:05:18 +01:00
snapname := zfsBuildSnapName ( fs , snapshot )
bookmarkname := zfsBuildBookmarkName ( fs , bookmark )
2019-03-21 16:59:08 +01:00
debug ( "bookmark: %q %q" , snapname , bookmarkname )
2017-11-12 23:05:18 +01:00
cmd := exec . Command ( ZFS_BINARY , "bookmark" , snapname , bookmarkname )
stderr := bytes . NewBuffer ( make ( [ ] byte , 0 , 1024 ) )
cmd . Stderr = stderr
if err = cmd . Start ( ) ; err != nil {
return err
}
if err = cmd . Wait ( ) ; err != nil {
2018-12-11 22:01:50 +01:00
err = & ZFSError {
2017-11-12 23:05:18 +01:00
Stderr : stderr . Bytes ( ) ,
WaitErr : err ,
}
}
return
}
2019-03-13 18:33:20 +01:00
// ZFSRollback rolls fs back to the given snapshot, passing rollbackArgs
// (e.g. "-r") through to `zfs rollback`. Only snapshot versions are accepted.
func ZFSRollback(fs *DatasetPath, snapshot FilesystemVersion, rollbackArgs ...string) (err error) {
	snapabs := snapshot.ToAbsPath(fs)
	if snapshot.Type != Snapshot {
		return fmt.Errorf("can only rollback to snapshots, got %s", snapabs)
	}

	args := append([]string{"rollback"}, rollbackArgs...)
	args = append(args, snapabs)

	cmd := exec.Command(ZFS_BINARY, args...)

	var stderr bytes.Buffer
	cmd.Stderr = &stderr

	if err = cmd.Start(); err != nil {
		return err
	}
	if err = cmd.Wait(); err != nil {
		err = &ZFSError{
			Stderr:  stderr.Bytes(),
			WaitErr: err,
		}
	}
	return err
}