Mirror of https://github.com/zrepl/zrepl.git (synced 2025-01-22 06:09:45 +01:00)
Implement placeholder filesystems.
Note the docs on the placeholder user property introduced with this commit. The solution is not really satisfying, but I couldn't think of a better one off the top of my head.
parent 8eb4a2ba44
commit 4732fdd4cc
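For orientation before the diff: the placeholder marker introduced here is not a boolean user property but a hash of the dataset's own path, because ZFS user properties are inherited by child datasets and a plain boolean would mark every replicated child as a placeholder, too. The following standalone sketch mirrors PlaceholderPropertyValue/IsPlaceholder from zfs/diff.go below; the dataset names are made up for illustration.

package main

import (
	"crypto/sha512"
	"encoding/hex"
	"fmt"
)

// placeholderValue mirrors PlaceholderPropertyValue: the property value is the
// hex-encoded SHA-512/256 of the dataset's full path.
func placeholderValue(datasetPath string) string {
	sum := sha512.Sum512_256([]byte(datasetPath))
	return hex.EncodeToString(sum[:])
}

func main() {
	parent := "storage/zrepl/pull" // hypothetical placeholder dataset
	child := parent + "/prod"      // hypothetical child created by replication

	prop := placeholderValue(parent) // value stored in zrepl:placeholder on the parent

	// The child inherits the parent's property value, but recomputing the hash
	// for the child's own path does not match, so only the parent is treated
	// as a placeholder.
	fmt.Println(prop == placeholderValue(parent)) // true
	fmt.Println(prop == placeholderValue(child))  // false
}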
@@ -243,19 +243,13 @@ func doPull(pull PullContext) (err error) {
 		return
 	}

 	// build mapping (local->RemoteLocalMapping) + traversal datastructure
 	type RemoteLocalMapping struct {
-		Remote      zfs.DatasetPath
-		Local       zfs.DatasetPath
-		LocalExists bool
+		Remote zfs.DatasetPath
+		Local  zfs.DatasetPath
 	}
 	replMapping := make(map[string]RemoteLocalMapping, len(remoteFilesystems))
 	localTraversal := zfs.NewDatasetPathForest()
-	localExists, err := zfs.ZFSListFilesystemExists()
-	if err != nil {
-		log.Printf("cannot get local filesystems map: %s", err)
-		return err
-	}

 	{

 		log.Printf("mapping using %#v\n", pull.Mapping)
@@ -270,13 +264,20 @@ func doPull(pull PullContext) (err error) {
 				}
 				continue
 			}
-			m := RemoteLocalMapping{remoteFilesystems[fs], localFs, localExists(localFs)}
+			m := RemoteLocalMapping{remoteFilesystems[fs], localFs}
 			replMapping[m.Local.ToString()] = m
 			localTraversal.Add(m.Local)
 		}

 	}

+	// get info about local filesystems
+	localFilesystemState, err := zfs.ZFSListFilesystemState()
+	if err != nil {
+		log.Printf("cannot get local filesystems map: %s", err)
+		return err
+	}
+
 	log.Printf("remoteFilesystems: %#v\nreplMapping: %#v\n", remoteFilesystems, replMapping)

 	// per fs sync, assume sorted in top-down order TODO
@@ -284,12 +285,18 @@ func doPull(pull PullContext) (err error) {
 	localTraversal.WalkTopDown(func(v zfs.DatasetPathVisit) bool {

 		if v.FilledIn {
-			if localExists(v.Path) {
+			if _, exists := localFilesystemState[v.Path.ToString()]; exists {
+				// No need to verify if this is a placeholder or not. It is sufficient
+				// to know we can add child filesystems to it
 				return true
 			}
-			log.Printf("aborting, don't know how to create fill-in dataset %s", v.Path)
-			err = fmt.Errorf("aborting, don't know how to create fill-in dataset: %s", v.Path)
-			return false
+			log.Printf("creating placeholder filesystem %s", v.Path)
+			err = zfs.ZFSCreatePlaceholderFilesystem(v.Path)
+			if err != nil {
+				err = fmt.Errorf("aborting, cannot create placeholder filesystem %s: %s", v.Path, err)
+				return false
+			}
+			return true
 		}

 		m, ok := replMapping[v.Path.ToString()]
@@ -303,8 +310,16 @@ func doPull(pull PullContext) (err error) {

 		log("mapping: %#v\n", m)

+		localState, localExists := localFilesystemState[m.Local.ToString()]
+
 		var versions []zfs.FilesystemVersion
-		if m.LocalExists {
+		switch {
+		case !localExists:
+			log("local filesystem does not exist")
+		case localState.Placeholder:
+			log("local filesystem is marked as placeholder")
+		default:
+			log("local filesystem exists, retrieving versions for diff")
 			if versions, err = zfs.ZFSListFilesystemVersions(m.Local, nil); err != nil {
 				log("cannot get filesystem versions, stopping...: %v\n", m.Local.ToString(), m, err)
 				return false
@@ -323,6 +338,10 @@ func doPull(pull PullContext) (err error) {
 		diff := zfs.MakeFilesystemDiff(versions, theirVersions)
 		log("diff: %#v\n", diff)

+		if localState.Placeholder && diff.Conflict != zfs.ConflictAllRight {
+			panic("internal inconsistency: local placeholder implies ConflictAllRight")
+		}
+
 		switch diff.Conflict {
 		case zfs.ConflictAllRight:
@@ -364,7 +383,13 @@ func doPull(pull PullContext) (err error) {
 			log("progress on receive operation: %v bytes received", p.TotalRX)
 		})

-		if err = zfs.ZFSRecv(m.Local, &watcher, "-u"); err != nil {
+		recvArgs := []string{"-u"}
+		if localState.Placeholder {
+			log("receive with forced rollback to replace placeholder filesystem")
+			recvArgs = append(recvArgs, "-F")
+		}
+
+		if err = zfs.ZFSRecv(m.Local, &watcher, recvArgs...); err != nil {
 			log("error receiving stream, stopping...: %s", err)
 			return false
 		}
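At the zfs(8) level, the changes above amount to roughly the following two operations (a sketch inferred from ZFSCreatePlaceholderFilesystem and the recvArgs handling; the dataset name and the literal hash value are placeholders for illustration):

package main

import "fmt"

func main() {
	path := "storage/zrepl/pull/foo"  // hypothetical fill-in dataset
	hash := "<hex(sha512_256(path))>" // see PlaceholderPropertyValue in zfs/diff.go below

	// Missing fill-in datasets are created as unmounted placeholder filesystems:
	create := []string{"zfs", "create",
		"-o", fmt.Sprintf("zrepl:placeholder=%s", hash),
		"-o", "mountpoint=none",
		path}

	// Receiving into a placeholder adds -F so the stream replaces it via forced rollback:
	recv := []string{"zfs", "recv", "-u", "-F", path}

	fmt.Println(create)
	fmt.Println(recv)
}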
zfs/diff.go | 84
@@ -1,6 +1,11 @@
 package zfs

 import (
+	"bytes"
+	"crypto/sha512"
+	"encoding/hex"
 	"fmt"
+	"os/exec"
 	"sort"
 )

@@ -158,23 +163,86 @@ outer:
 	return
 }

+const ZREPL_PLACEHOLDER_PROPERTY_NAME string = "zrepl:placeholder"
+
+type FilesystemState struct {
+	Placeholder bool
+	// TODO extend with resume token when that feature is finally added
+}
+
 // A somewhat efficient way to determine if a filesystem exists on this host.
 // Particularly useful if exists is called more than once (will only fork exec once and cache the result)
-func ZFSListFilesystemExists() (exists func(p DatasetPath) bool, err error) {
+func ZFSListFilesystemState() (localState map[string]FilesystemState, err error) {

 	var actual [][]string
-	if actual, err = ZFSList([]string{"name"}, "-t", "filesystem,volume"); err != nil {
+	if actual, err = ZFSList([]string{"name", ZREPL_PLACEHOLDER_PROPERTY_NAME}, "-t", "filesystem,volume"); err != nil {
 		return
 	}

-	filesystems := make(map[string]bool, len(actual))
+	localState = make(map[string]FilesystemState, len(actual))
 	for _, e := range actual {
-		filesystems[e[0]] = true
-	}
-
-	exists = func(p DatasetPath) bool {
-		return filesystems[p.ToString()]
+		dp, err := NewDatasetPath(e[0])
+		if err != nil {
+			fmt.Errorf("ZFS does not return parseable dataset path: %s", e[0])
+		}
+		placeholder, _ := IsPlaceholder(dp, e[1])
+		localState[e[0]] = FilesystemState{
+			placeholder,
+		}
 	}
 	return

 }
+
+// Computes the value for the ZREPL_PLACEHOLDER_PROPERTY_NAME ZFS user property
+// to mark the given DatasetPath p as a placeholder
+//
+// We cannot simply use booleans here since user properties are always
+// inherited.
+//
+// We hash the DatasetPath and use it to check for a given path if it is the
+// one originally marked as placeholder.
+//
+// However, this prohibits moving datasets around via `zfs rename`. The
+// placeholder attribute must be re-computed for the dataset path after the
+// move.
+//
+// TODO better solution available?
+func PlaceholderPropertyValue(p DatasetPath) string {
+	ps := []byte(p.ToString())
+	sum := sha512.Sum512_256(ps)
+	return hex.EncodeToString(sum[:])
+}
+
+func IsPlaceholder(p DatasetPath, placeholderPropertyValue string) (isPlaceholder bool, err error) {
+	expected := PlaceholderPropertyValue(p)
+	isPlaceholder = expected == placeholderPropertyValue
+	if !isPlaceholder {
+		err = fmt.Errorf("expected %s, has %s", expected, placeholderPropertyValue)
+	}
+	return
+}
+
+func ZFSCreatePlaceholderFilesystem(p DatasetPath) (err error) {
+	v := PlaceholderPropertyValue(p)
+	cmd := exec.Command(ZFS_BINARY, "create",
+		"-o", fmt.Sprintf("%s=%s", ZREPL_PLACEHOLDER_PROPERTY_NAME, v),
+		"-o", "mountpoint=none",
+		p.ToString())
+
+	stderr := bytes.NewBuffer(make([]byte, 0, 1024))
+	cmd.Stderr = stderr
+
+	if err = cmd.Start(); err != nil {
+		return err
+	}
+
+	if err = cmd.Wait(); err != nil {
+		err = ZFSError{
+			Stderr:  stderr.Bytes(),
+			WaitErr: err,
+		}
+	}
+
+	return
+}
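The comment block above also documents that placeholders do not survive `zfs rename`, since the stored value hashes the original path. Below is a test-style sketch of that limitation, written as if it sat next to these functions in package zfs; the dataset names are hypothetical and the test is not part of this commit.

package zfs

import "testing"

// The stored property value hashes the path the dataset had when it was
// created, so after a rename the dataset no longer validates as a placeholder
// until the property is recomputed for the new path.
func TestPlaceholderDoesNotSurviveRename(t *testing.T) {
	oldPath, _ := NewDatasetPath("storage/zrepl/pull/foo") // hypothetical original path
	newPath, _ := NewDatasetPath("storage/zrepl/pull/bar") // path after a hypothetical `zfs rename`

	stored := PlaceholderPropertyValue(oldPath) // what ZFSCreatePlaceholderFilesystem would set

	if is, _ := IsPlaceholder(oldPath, stored); !is {
		t.Error("original path must validate as a placeholder")
	}
	if is, _ := IsPlaceholder(newPath, stored); is {
		t.Error("renamed path must not validate until zrepl:placeholder is recomputed")
	}
}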