receiving side: placeholder as simple on|off property
This commit is contained in:
parent 6f7467e8d8
commit 2f2e6e6a00
client/migrate.go (new file, 108 lines)

@@ -0,0 +1,108 @@
package client

import (
    "context"
    "fmt"

    "github.com/pkg/errors"
    "github.com/spf13/pflag"
    "github.com/zrepl/zrepl/zfs"

    "github.com/zrepl/zrepl/cli"
    "github.com/zrepl/zrepl/config"
)

var (
    MigrateCmd = &cli.Subcommand{
        Use:   "migrate",
        Short: "perform migration of the on-disk / zfs properties",
        SetupSubcommands: func() []*cli.Subcommand {
            return migrations
        },
    }
)

type migration struct {
    name   string
    method func(config *config.Config, args []string) error
}

var migrations = []*cli.Subcommand{
    &cli.Subcommand{
        Use: "0.0.X:0.1:placeholder",
        Run: doMigratePlaceholder0_1,
        SetupFlags: func(f *pflag.FlagSet) {
            f.BoolVar(&migratePlaceholder0_1Args.dryRun, "dry-run", false, "dry run")
        },
    },
}

var migratePlaceholder0_1Args struct {
    dryRun bool
}

func doMigratePlaceholder0_1(sc *cli.Subcommand, args []string) error {
    if len(args) != 0 {
        return fmt.Errorf("migration does not take arguments, got %v", args)
    }

    cfg := sc.Config()

    ctx := context.Background()
    allFSS, err := zfs.ZFSListMapping(ctx, zfs.NoFilter())
    if err != nil {
        return errors.Wrap(err, "cannot list filesystems")
    }

    type workItem struct {
        jobName string
        rootFS  *zfs.DatasetPath
        fss     []*zfs.DatasetPath
    }
    var wis []workItem
    for i, j := range cfg.Jobs {
        var rfsS string
        switch job := j.Ret.(type) {
        case *config.SinkJob:
            rfsS = job.RootFS
        case *config.PullJob:
            rfsS = job.RootFS
        default:
            fmt.Printf("ignoring job %q (%d/%d, type %T)\n", j.Name(), i, len(cfg.Jobs), j.Ret)
            continue
        }
        rfs, err := zfs.NewDatasetPath(rfsS)
        if err != nil {
            return errors.Wrapf(err, "root fs for job %q is not a valid dataset path", j.Name())
        }
        var fss []*zfs.DatasetPath
        for _, fs := range allFSS {
            if fs.HasPrefix(rfs) {
                fss = append(fss, fs)
            }
        }
        wis = append(wis, workItem{j.Name(), rfs, fss})
    }

    for _, wi := range wis {
        fmt.Printf("job %q => migrate filesystems below root_fs %q\n", wi.jobName, wi.rootFS.ToString())
        if len(wi.fss) == 0 {
            fmt.Printf("\tno filesystems\n")
            continue
        }
        for _, fs := range wi.fss {
            fmt.Printf("\t%q ... ", fs.ToString())
            r, err := zfs.ZFSMigrateHashBasedPlaceholderToCurrent(fs, migratePlaceholder0_1Args.dryRun)
            if err != nil {
                fmt.Printf("error: %s\n", err)
            } else if !r.NeedsModification {
                fmt.Printf("unchanged (placeholder=%v)\n", r.OriginalState.IsPlaceholder)
            } else {
                fmt.Printf("migrate (placeholder=%v) (old value = %q)\n",
                    r.OriginalState.IsPlaceholder, r.OriginalState.RawLocalPropertyValue)
            }
        }
    }

    return nil
}
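Taken together, the migration enumerates every job's root_fs tree and rewrites each placeholder property. A minimal sketch of what it does to a single filesystem, using only the zfs package API introduced in this commit (the dataset path is made up, and the fmt/zfs imports from the file above are assumed):

func migrateOneExample() error {
    fs, err := zfs.NewDatasetPath("storage/zrepl/sink/host1/var") // hypothetical path
    if err != nil {
        return err
    }
    // dryRun=true: report what would change, but leave the property untouched
    r, err := zfs.ZFSMigrateHashBasedPlaceholderToCurrent(fs, true)
    if err != nil {
        return err
    }
    if r.NeedsModification {
        // the property still holds a 0.0.X hash value; a non-dry-run rewrites it to "on"
        fmt.Println("would migrate, old value:", r.OriginalState.RawLocalPropertyValue)
    }
    return nil
}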
client/migrate_test.go (new file, 16 lines)

@@ -0,0 +1,16 @@
package client

import "testing"

func TestMigrationsUnambiguousNames(t *testing.T) {
    names := make(map[string]bool)
    for _, mig := range migrations {
        if _, ok := names[mig.Use]; ok {
            t.Errorf("duplicate migration name %q", mig.Use)
            t.FailNow()
            return
        } else {
            names[mig.Use] = true
        }
    }
}
@@ -116,17 +116,14 @@ var testPlaceholderArgs struct {
 }
 
 var testPlaceholder = &cli.Subcommand{
-    Use:   "placeholder [--all | --dataset DATASET --action [compute | check [--placeholder PROP_VALUE]]]",
-    Short: fmt.Sprintf("list received placeholder filesystems & compute the ZFS property %q", zfs.ZREPL_PLACEHOLDER_PROPERTY_NAME),
+    Use:   "placeholder [--all | --dataset DATASET]",
+    Short: fmt.Sprintf("list received placeholder filesystems (zfs property %q)", zfs.PlaceholderPropertyName),
     Example: `
     placeholder --all
-    placeholder --dataset path/to/sink/clientident/fs --action compute
-    placeholder --dataset path/to/sink/clientident/fs --action check --placeholder 1671a61be44d32d1f3f047c5f124b06f98f54143d82900545ee529165060b859`,
+    placeholder --dataset path/to/sink/clientident/fs`,
     NoRequireConfig: true,
     SetupFlags: func(f *pflag.FlagSet) {
-        f.StringVar(&testPlaceholderArgs.action, "action", "", "check | compute")
         f.StringVar(&testPlaceholderArgs.ds, "dataset", "", "dataset path (not required to exist)")
-        f.StringVar(&testPlaceholderArgs.plv, "placeholder", "", "existing placeholder value to check against DATASET path")
         f.BoolVar(&testPlaceholderArgs.all, "all", false, "list tab-separated placeholder status of all filesystems")
     },
     Run: runTestPlaceholder,
@@ -134,58 +131,46 @@ var testPlaceholder = &cli.Subcommand{
 
 func runTestPlaceholder(subcommand *cli.Subcommand, args []string) error {
 
+    var checkDPs []*zfs.DatasetPath
+
     // all actions first
     if testPlaceholderArgs.all {
-        out, err := zfs.ZFSList([]string{"name", zfs.ZREPL_PLACEHOLDER_PROPERTY_NAME})
+        out, err := zfs.ZFSList([]string{"name"})
         if err != nil {
             return errors.Wrap(err, "could not list ZFS filesystems")
         }
-        fmt.Printf("IS_PLACEHOLDER\tDATASET\tPROPVALUE\tCOMPUTED\n")
         for _, row := range out {
             dp, err := zfs.NewDatasetPath(row[0])
             if err != nil {
                 panic(err)
             }
-            computedProp := zfs.PlaceholderPropertyValue(dp)
-            is := "yes"
-            if computedProp != row[1] {
-                is = "no"
-            }
-            fmt.Printf("%s\t%s\t%q\t%q\n", is, dp.ToString(), row[1], computedProp)
+            checkDPs = append(checkDPs, dp)
         }
-        return nil
+    } else {
+        dp, err := zfs.NewDatasetPath(testPlaceholderArgs.ds)
+        if err != nil {
+            return err
+        }
+        if dp.Empty() {
+            return fmt.Errorf("must specify --dataset DATASET or --all")
+        }
+        checkDPs = append(checkDPs, dp)
     }
 
-    // other actions
-
-    dp, err := zfs.NewDatasetPath(testPlaceholderArgs.ds)
-    if err != nil {
-        return err
-    }
-
-    computedProp := zfs.PlaceholderPropertyValue(dp)
-
-    switch testPlaceholderArgs.action {
-    case "check":
-        var isPlaceholder bool
-        if testPlaceholderArgs.plv != "" {
-            isPlaceholder = computedProp == testPlaceholderArgs.plv
-        } else {
-            isPlaceholder, err = zfs.ZFSIsPlaceholderFilesystem(dp)
-            if err != nil {
-                return err
-            }
+    fmt.Printf("IS_PLACEHOLDER\tDATASET\tzrepl:placeholder\n")
+    for _, dp := range checkDPs {
+        ph, err := zfs.ZFSGetFilesystemPlaceholderState(dp)
+        if err != nil {
+            return errors.Wrap(err, "cannot get placeholder state")
         }
-        if isPlaceholder {
-            fmt.Printf("%s is placeholder\n", dp.ToString())
-            return nil
-        } else {
-            return fmt.Errorf("%s is not a placeholder", dp.ToString())
+        if !ph.FSExists {
+            panic("placeholder state inconsistent: filesystem " + ph.FS + " must exist in this context")
         }
-    case "compute":
-        fmt.Printf("%s\n", computedProp)
-        return nil
+        is := "yes"
+        if !ph.IsPlaceholder {
+            is = "no"
+        }
+        fmt.Printf("%s\t%s\t%s\n", is, dp.ToString(), ph.RawLocalPropertyValue)
     }
 
-    return fmt.Errorf("unknown --action %q", testPlaceholderArgs.action)
+    return nil
 }
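With the --action and --placeholder flags gone, the command always prints one tab-separated table. Hypothetical output for a sink containing one migrated and one legacy placeholder (dataset names are made up; the hash is the example value removed from the help text above):

IS_PLACEHOLDER  DATASET               zrepl:placeholder
yes             pool/sink/client1     on
yes             pool/sink/client1/fs  1671a61be44d32d1f3f047c5f124b06f98f54143d82900545ee529165060b859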
@@ -3,6 +3,7 @@
 .. |bugfix| replace:: [BUG]
 .. |docs| replace:: [DOCS]
 .. |feature| replace:: [FEATURE]
+.. |mig| replace:: **[MIGRATION]**
 
 .. _changelog:
 
@@ -19,6 +20,7 @@ We use the following annotations for classifying changes:
 * |break| Change that breaks interoperability or persistent state representation with previous releases.
   As a package maintainer, make sure to warn your users about config breakage somehow.
   Note that even updating the package on both sides might not be sufficient, e.g. if persistent state needs to be migrated to a new format.
+* |mig| Migration that must be run by the user.
 * |feature| Change that introduces new functionality.
 * |bugfix| Change that fixes a bug, no regressions or incompatibilities expected.
 * |docs| Change to the documentation.
@@ -40,6 +42,7 @@ It breaks both configuration and transport format, and thus requires manual interaction
 Notes to Package Maintainers
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
+* Notify users about migrations (see changes attributed with |mig| below)
 * If the daemon crashes, the stack trace produced by the Go runtime and possibly diagnostic output of zrepl will be written to stderr.
   This behavior is independent from the ``stdout`` outlet type.
   Please make sure the stderr output of the daemon is captured somewhere.
@@ -52,6 +55,15 @@ Notes to Package Maintainers
 Changes
 ~~~~~~~
 
+* |break| |mig| Placeholder property representation changed
+
+  * The :ref:`placeholder property <replication-placeholder-property>` now uses ``on|off`` as values
+    instead of hashes of the dataset path. This permits renames of the sink filesystem without
+    updating all placeholder properties.
+  * Relevant for 0.0.X-0.1-rc* to 0.1 migrations
+  * Make sure your config is valid with ``zrepl configcheck``
+  * Run ``zrepl migrate 0.0.X:0.1:placeholder``
+
 * |feature| :issue:`55` : Push replication (see :ref:`push job <job-push>` and :ref:`sink job <job-sink>`)
 * |feature| :ref:`TCP Transport <transport-tcp>`
 * |feature| :ref:`TCP + TLS client authentication transport <transport-tcp+tlsclientauth>`
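For operators, the intended upgrade sequence is therefore a hypothetical session along these lines (the ``--dry-run`` flag is the one defined by the migration subcommand in client/migrate.go above)::

    zrepl configcheck
    zrepl migrate 0.0.X:0.1:placeholder --dry-run
    zrepl migrate 0.0.X:0.1:placeholder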
@@ -109,7 +109,9 @@ It is a bookmark of the most recent successfully replicated snapshot to the receiving side.
 It is used by the :ref:`not_replicated <prune-keep-not-replicated>` keep rule to identify all snapshots that have not yet been replicated to the receiving side.
 Regardless of whether that keep rule is used, the bookmark ensures that replication can always continue incrementally.
 
-**Placeholder filesystems** on the receiving side are regular ZFS filesystems with the placeholder property ``zrepl:placeholder``.
+.. _replication-placeholder-property:
+
+**Placeholder filesystems** on the receiving side are regular ZFS filesystems with the placeholder property ``zrepl:placeholder=on``.
 Placeholders allow the receiving side to mirror the sender's ZFS dataset hierarchy without replicating every filesystem at every intermediary dataset path component.
 Consider the following example: ``S/H/J`` shall be replicated to ``R/sink/job/S/H/J``, but neither ``S/H`` nor ``S`` shall be replicated.
 ZFS requires the existence of ``R/sink/job/S`` and ``R/sink/job/S/H`` in order to receive into ``R/sink/job/S/H/J``.
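To make the example concrete: zrepl creates the two intermediate datasets as placeholders (ZFSCreatePlaceholderFilesystem below sets ``zrepl:placeholder=on`` and ``mountpoint=none``), so the resulting hierarchy would look roughly like this sketch::

    R/sink/job/S      zrepl:placeholder=on   (placeholder, mountpoint=none)
    R/sink/job/S/H    zrepl:placeholder=on   (placeholder, mountpoint=none)
    R/sink/job/S/H/J                         (replicated filesystem)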
@@ -35,6 +35,9 @@ CLI Overview
   - manually abort current replication + pruning of JOB
 * - ``zrepl configcheck``
   - check if config can be parsed without errors
+* - ``zrepl migrate``
+  - | perform on-disk state / ZFS property migrations
+    | (see :ref:`changelog <changelog>` for details)
 
 .. _usage-zrepl-daemon:
 
@@ -247,20 +247,20 @@ func (s *Receiver) ListFilesystems(ctx context.Context, req *pdu.ListFilesystemReq
     // present filesystem without the root_fs prefix
     fss := make([]*pdu.Filesystem, 0, len(filtered))
     for _, a := range filtered {
-        ph, err := zfs.ZFSIsPlaceholderFilesystem(a)
+        l := getLogger(ctx).WithField("fs", a)
+        ph, err := zfs.ZFSGetFilesystemPlaceholderState(a)
         if err != nil {
-            getLogger(ctx).
-                WithError(err).
-                WithField("fs", a).
-                Error("inconsistent placeholder property")
-            return nil, errors.New("server error: inconsistent placeholder property") // don't leak path
+            l.WithError(err).Error("error getting placeholder state")
+            return nil, errors.Wrapf(err, "cannot get placeholder state for fs %q", a)
         }
+        l.WithField("placeholder_state", fmt.Sprintf("%#v", ph)).Debug("placeholder state")
+        if !ph.FSExists {
+            l.Error("inconsistent placeholder state: filesystem must exist")
+            err := errors.Errorf("inconsistent placeholder state: filesystem %q must exist in this context", a.ToString())
+            return nil, err
+        }
-        getLogger(ctx).
-            WithField("fs", a.ToString()).
-            WithField("is_placeholder", ph).
-            Debug("filesystem")
         a.TrimPrefix(root)
-        fss = append(fss, &pdu.Filesystem{Path: a.ToString(), IsPlaceholder: ph})
+        fss = append(fss, &pdu.Filesystem{Path: a.ToString(), IsPlaceholder: ph.IsPlaceholder})
     }
     if len(fss) == 0 {
         getLogger(ctx).Debug("no filesystems found")
@@ -331,17 +331,26 @@ func (s *Receiver) Receive(ctx context.Context, req *pdu.ReceiveReq, receive zfs
         if v.Path.Equal(lp) {
             return false
         }
-        _, err := zfs.ZFSGet(v.Path, []string{zfs.ZREPL_PLACEHOLDER_PROPERTY_NAME})
+        ph, err := zfs.ZFSGetFilesystemPlaceholderState(v.Path)
         if err != nil {
-            // interpret this as an early exit of the zfs binary due to the fs not existing
-            if err := zfs.ZFSCreatePlaceholderFilesystem(v.Path); err != nil {
-                getLogger(ctx).
-                    WithError(err).
-                    WithField("placeholder_fs", v.Path).
-                    Error("cannot create placeholder filesystem")
             visitErr = err
             return false
         }
-        }
+        getLogger(ctx).
+            WithField("fs", v.Path.ToString()).
+            WithField("placeholder_state", fmt.Sprintf("%#v", ph)).
+            Debug("placeholder state for filesystem")
+
+        if !ph.FSExists {
+            l := getLogger(ctx).WithField("placeholder_fs", v.Path)
+            l.Debug("create placeholder filesystem")
+            err := zfs.ZFSCreatePlaceholderFilesystem(v.Path)
+            if err != nil {
+                l.WithError(err).Error("cannot create placeholder filesystem")
+                visitErr = err
+                return false
+            }
+            return true
+        }
         getLogger(ctx).WithField("filesystem", v.Path.ToString()).Debug("exists")
         return true // leave this fs as is
@@ -352,17 +361,16 @@ func (s *Receiver) Receive(ctx context.Context, req *pdu.ReceiveReq, receive zfs
         return nil, visitErr
     }
 
+    // determine whether we need to rollback the filesystem / change its placeholder state
     var clearPlaceholderProperty bool
     var recvOpts zfs.RecvOptions
-    props, err := zfs.ZFSGet(lp, []string{zfs.ZREPL_PLACEHOLDER_PROPERTY_NAME})
-    if err == nil {
-        if isPlaceholder, _ := zfs.IsPlaceholder(lp, props.Get(zfs.ZREPL_PLACEHOLDER_PROPERTY_NAME)); isPlaceholder {
-            recvOpts.RollbackAndForceRecv = true
-            clearPlaceholderProperty = true
-        }
+    ph, err := zfs.ZFSGetFilesystemPlaceholderState(lp)
+    if err == nil && ph.FSExists && ph.IsPlaceholder {
+        recvOpts.RollbackAndForceRecv = true
+        clearPlaceholderProperty = true
     }
     if clearPlaceholderProperty {
-        if err := zfs.ZFSSetNoPlaceholder(lp); err != nil {
+        if err := zfs.ZFSSetPlaceholder(lp, false); err != nil {
             return nil, fmt.Errorf("cannot clear placeholder property for forced receive: %s", err)
         }
     }
main.go (1 addition)
@@ -16,6 +16,7 @@ func init() {
     cli.AddSubcommand(client.VersionCmd)
     cli.AddSubcommand(client.PprofCmd)
     cli.AddSubcommand(client.TestCmd)
+    cli.AddSubcommand(client.MigrateCmd)
 }
 
 func main() {
@@ -9,6 +9,16 @@ type DatasetFilter interface {
     Filter(p *DatasetPath) (pass bool, err error)
 }
 
+// NoFilter returns a DatasetFilter that does not filter (passes all paths).
+func NoFilter() DatasetFilter {
+    return noFilter{}
+}
+
+type noFilter struct{}
+
+var _ DatasetFilter = noFilter{}
+
+func (noFilter) Filter(p *DatasetPath) (pass bool, err error) { return true, nil }
+
 func ZFSListMapping(ctx context.Context, filter DatasetFilter) (datasets []*DatasetPath, err error) {
     res, err := ZFSListMappingProperties(ctx, filter, nil)
     if err != nil {
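A minimal usage sketch for the new NoFilter, matching how client/migrate.go above enumerates all filesystems (the surrounding function name and the context/fmt/zfs imports are assumptions):

func listAllExample(ctx context.Context) error {
    // pass every dataset through, unfiltered
    fss, err := zfs.ZFSListMapping(ctx, zfs.NoFilter())
    if err != nil {
        return err
    }
    for _, fs := range fss {
        fmt.Println(fs.ToString())
    }
    return nil
}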
@@ -5,87 +5,81 @@ import (
     "crypto/sha512"
     "encoding/hex"
     "fmt"
-    "io"
     "os/exec"
 )
 
-const ZREPL_PLACEHOLDER_PROPERTY_NAME string = "zrepl:placeholder"
+const (
+    // For a filesystem to be a placeholder, the property source must be local,
+    // i.e. not inherited.
+    PlaceholderPropertyName string = "zrepl:placeholder"
+    placeholderPropertyOn   string = "on"
+    placeholderPropertyOff  string = "off"
+)
 
-type FilesystemState struct {
-    Placeholder bool
-    // TODO extend with resume token when that feature is finally added
-}
-
-// A somewhat efficient way to determine if a filesystem exists on this host.
-// Particularly useful if exists is called more than once (will only fork exec once and cache the result)
-func ZFSListFilesystemState() (localState map[string]FilesystemState, err error) {
-
-    var actual [][]string
-    if actual, err = ZFSList([]string{"name", ZREPL_PLACEHOLDER_PROPERTY_NAME}, "-t", "filesystem,volume"); err != nil {
-        return
-    }
-
-    localState = make(map[string]FilesystemState, len(actual))
-    for _, e := range actual {
-        dp, err := NewDatasetPath(e[0])
-        if err != nil {
-            return nil, fmt.Errorf("ZFS does not return parseable dataset path: %s", e[0])
-        }
-        placeholder, _ := IsPlaceholder(dp, e[1])
-        localState[e[0]] = FilesystemState{
-            placeholder,
-        }
-    }
-    return
-
-}
-
-// Computes the value for the ZREPL_PLACEHOLDER_PROPERTY_NAME ZFS user property
-// to mark the given DatasetPath p as a placeholder
+// computeLegacyHashBasedPlaceholderPropertyValue is a legacy-compatibility function.
 //
-// We cannot simply use booleans here since user properties are always
-// inherited.
+// In the 0.0.x series, the value stored in the PlaceholderPropertyName user property
+// was a hash value of the dataset path.
+// A simple `on|off` value could not be used at the time because `zfs list` was used to
+// list all filesystems and their placeholder state with a single command: due to property
+// inheritance, `zfs list` would print the placeholder state for all (non-placeholder) children
+// of a dataset, so the hash value was used to distinguish whether the property was local or
+// inherited.
 //
-// We hash the DatasetPath and use it to check for a given path if it is the
-// one originally marked as placeholder.
+// One of the drawbacks of the above approach is that `zfs rename` renders a placeholder filesystem
+// a non-placeholder filesystem if any of the parent path components change.
 //
-// However, this prohibits moving datasets around via `zfs rename`. The
-// placeholder attribute must be re-computed for the dataset path after the
-// move.
-//
-// TODO better solution available?
-func PlaceholderPropertyValue(p *DatasetPath) string {
+// We `zfs get` nowadays, which returns the property source, making the hash value no longer
+// necessary. However, we want to keep legacy compatibility.
+func computeLegacyHashBasedPlaceholderPropertyValue(p *DatasetPath) string {
     ps := []byte(p.ToString())
     sum := sha512.Sum512_256(ps)
     return hex.EncodeToString(sum[:])
 }
 
-func IsPlaceholder(p *DatasetPath, placeholderPropertyValue string) (isPlaceholder bool, err error) {
-    expected := PlaceholderPropertyValue(p)
-    isPlaceholder = expected == placeholderPropertyValue
-    if !isPlaceholder {
-        err = fmt.Errorf("expected %s, has %s", expected, placeholderPropertyValue)
+// the caller asserts that placeholderPropertyValue is sourceLocal
+func isLocalPlaceholderPropertyValuePlaceholder(p *DatasetPath, placeholderPropertyValue string) (isPlaceholder bool) {
+    legacy := computeLegacyHashBasedPlaceholderPropertyValue(p)
+    switch placeholderPropertyValue {
+    case legacy:
+        return true
+    case placeholderPropertyOn:
+        return true
+    default:
+        return false
     }
-    return
 }
 
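The switch above accepts both representations. A sketch of the resulting truth table, as it could appear in a test inside package zfs (the dataset path is made up):

p, _ := NewDatasetPath("pool/sink/fs")                      // hypothetical path
legacy := computeLegacyHashBasedPlaceholderPropertyValue(p) // 64 hex characters
_ = isLocalPlaceholderPropertyValuePlaceholder(p, legacy)   // true: 0.0.X hash still accepted
_ = isLocalPlaceholderPropertyValuePlaceholder(p, "on")     // true: new representation
_ = isLocalPlaceholderPropertyValuePlaceholder(p, "off")    // false: explicit non-placeholder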
-// for nonexistent FS, isPlaceholder == false && err == nil
-func ZFSIsPlaceholderFilesystem(p *DatasetPath) (isPlaceholder bool, err error) {
-    props, err := zfsGet(p.ToString(), []string{ZREPL_PLACEHOLDER_PROPERTY_NAME}, sourceAny)
-    if err == io.ErrUnexpectedEOF {
-        // interpret this as an early exit of the zfs binary due to the fs not existing
-        return false, nil
+type FilesystemPlaceholderState struct {
+    FS                    string
+    FSExists              bool
+    IsPlaceholder         bool
+    RawLocalPropertyValue string
+}
+
+// ZFSGetFilesystemPlaceholderState is the authoritative way to determine whether a filesystem
+// is a placeholder. Note that the property source must be `local` for the returned value to be valid.
+//
+// For nonexistent FS, err == nil and state.FSExists == false
+func ZFSGetFilesystemPlaceholderState(p *DatasetPath) (state *FilesystemPlaceholderState, err error) {
+    state = &FilesystemPlaceholderState{FS: p.ToString()}
+    props, err := zfsGet(p.ToString(), []string{PlaceholderPropertyName}, sourceLocal)
+    var _ error = (*DatasetDoesNotExist)(nil) // weak assertion on zfsGet's interface
+    if _, ok := err.(*DatasetDoesNotExist); ok {
+        return state, nil
     } else if err != nil {
-        return false, err
+        return state, err
     }
-    isPlaceholder, _ = IsPlaceholder(p, props.Get(ZREPL_PLACEHOLDER_PROPERTY_NAME))
-    return
+    state.FSExists = true
+    state.RawLocalPropertyValue = props.Get(PlaceholderPropertyName)
+    state.IsPlaceholder = isLocalPlaceholderPropertyValuePlaceholder(p, state.RawLocalPropertyValue)
+    return state, nil
 }
 
 func ZFSCreatePlaceholderFilesystem(p *DatasetPath) (err error) {
-    v := PlaceholderPropertyValue(p)
     cmd := exec.Command(ZFS_BINARY, "create",
-        "-o", fmt.Sprintf("%s=%s", ZREPL_PLACEHOLDER_PROPERTY_NAME, v),
+        "-o", fmt.Sprintf("%s=%s", PlaceholderPropertyName, placeholderPropertyOn),
         "-o", "mountpoint=none",
         p.ToString())
@@ -106,8 +100,43 @@ func ZFSCreatePlaceholderFilesystem(p *DatasetPath) (err error) {
     return
 }
 
-func ZFSSetNoPlaceholder(p *DatasetPath) error {
+func ZFSSetPlaceholder(p *DatasetPath, isPlaceholder bool) error {
     props := NewZFSProperties()
-    props.Set(ZREPL_PLACEHOLDER_PROPERTY_NAME, "off")
+    prop := placeholderPropertyOff
+    if isPlaceholder {
+        prop = placeholderPropertyOn
+    }
+    props.Set(PlaceholderPropertyName, prop)
     return zfsSet(p.ToString(), props)
 }
+
+type MigrateHashBasedPlaceholderReport struct {
+    OriginalState     FilesystemPlaceholderState
+    NeedsModification bool
+}
+
+// fs must exist, will panic otherwise
+func ZFSMigrateHashBasedPlaceholderToCurrent(fs *DatasetPath, dryRun bool) (*MigrateHashBasedPlaceholderReport, error) {
+    st, err := ZFSGetFilesystemPlaceholderState(fs)
+    if err != nil {
+        return nil, fmt.Errorf("error getting placeholder state: %s", err)
+    }
+    if !st.FSExists {
+        panic("inconsistent placeholder state returned: fs must exist")
+    }
+
+    report := MigrateHashBasedPlaceholderReport{
+        OriginalState: *st,
+    }
+    report.NeedsModification = st.IsPlaceholder && st.RawLocalPropertyValue != placeholderPropertyOn
+
+    if dryRun || !report.NeedsModification {
+        return &report, nil
+    }
+
+    err = ZFSSetPlaceholder(fs, st.IsPlaceholder)
+    if err != nil {
+        return nil, fmt.Errorf("error re-writing placeholder property: %s", err)
+    }
+    return &report, nil
+}
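A hedged usage sketch tying the new API together, mirroring what the receiving endpoint does before a forced receive (the function name and calling context are assumptions, not part of the commit):

func clearPlaceholderBeforeRecv(lp *zfs.DatasetPath) error {
    ph, err := zfs.ZFSGetFilesystemPlaceholderState(lp)
    if err != nil {
        return err
    }
    if ph.FSExists && ph.IsPlaceholder {
        // about to receive real data into the placeholder:
        // clear the property so the filesystem is no longer treated as one
        if err := zfs.ZFSSetPlaceholder(lp, false); err != nil {
            return err
        }
    }
    return nil
}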