modularize setup scripts, make hibernation safe with ZFS, apply noexec FS option everywhere

Niklas Gollenstede 2022-07-29 12:49:55 +02:00
parent 8b0e200c73
commit 7ab9215b0a
28 changed files with 518 additions and 263 deletions

View File

@@ -3,6 +3,8 @@
"cSpell.words": [
"aarch64", // processor architecture
"acltype", // zfs
+ "acpi", // abbr
+ "ahci", // abbr
"ashift", // zfs
"askpass", // program
"attrset", "attrsets", // nix/abbr (attribute set)
@@ -70,6 +72,7 @@
"hostfwd", // cli arg
"hostiocache", // virtual box
"hostport", // cli arg
+ "hugepages", // linux
"inetutils", // package
"inodes", // plural
"internalcommands", // virtual box
@@ -109,6 +112,7 @@
"nofail", // cli arg
"nographic", // cli arg
"noheadings", // cli arg
+ "nohibernate", // kernel param
"nosuid", // mount option
"oneshot", // systemd
"optimise", // B/E
@@ -136,9 +140,11 @@
"refreservation", // zfs
"relatime", // mount option
"rpool", // zfs
+ "rprivate", // linux
"sata", // storage protocol
"sbabic", // name
"screenshotpng", // virtual box
+ "setsid", // program / function
"setuid", // cli arg
"sgdisk", // program
"showvminfo", // virtual box
@@ -149,6 +155,7 @@
"storageattach", // virtual box
"stty", // program / function
"sublist", // Nix
+ "swsuspend", // parameter
"syncoid", // program
"temproot", // abbr (temporary root (FS))
"timesync", // systemd

View File

@@ -28,7 +28,7 @@ The modules are inactive by default, and are, where possible, designed to be ind
Any `wip.preface.*` options have to be set in the first sub-module in these files (`## Hardware` section). \
This flake only defines a single [`example`](./hosts/example.nix.md) host meant to demonstrate how other flakes can use the (NixOS) flake library framework.
- [`example/`](./example/) contains an example of adjusting the [installation](./example/install.sh.md) script for the hosts and this flake's [default config](./example/defaultConfig/) (see [Namespacing](#namespacing-in-nixos)).
+ [`example/`](./example/) contains an example of adjusting the [installation](./example/install.sh.md) script for the hosts, and this flake's [default config](./example/defaultConfig/) (see [Namespacing](#namespacing-in-nixos)).
## Namespacing in NixOS
@@ -81,7 +81,6 @@ Technically, Nix (and most other code files) don't need to have any specific fil
pkgs = import <nixpkgs> { }
:lf . # load CWD's flake's outputs as variables
pkgs = nixosConfigurations.target.pkgs
- lib = lib { inherit pkgs; inherit (pkgs) lib; }
```

View File

@@ -15,7 +15,6 @@ See its [README](../lib/setup-scripts/README.md) for more documentation.
function install-system {( set -eu # 1: blockDevs
prepare-installer "$@"
do-disk-setup "${argv[0]}"
- init-or-restore-system
install-system-to $mnt
)}

Binary file not shown.

View File

@@ -1,9 +1,7 @@
{ description = (
"Work In Progress: a collection of Nix things that are used in more than one project, but aren't refined enough to be standalone libraries/modules/... (yet)."
- /**
-  * This flake file defines the main inputs (all except for some files/archives fetched by hardcoded hash) and exports almost all usable results.
-  * It should always pass »nix flake check« and »nix flake show --allow-import-from-derivation«, which means inputs and outputs comply with the flake convention.
-  */
+ # This flake file defines the inputs (other than except some files/archives fetched by hardcoded hash) and exports all results produced by this repository.
+ # It should always pass »nix flake check« and »nix flake show --allow-import-from-derivation«, which means inputs and outputs comply with the flake convention.
); inputs = {
# To update »./flake.lock«: $ nix flake update
@@ -12,25 +10,20 @@
}; outputs = inputs: let patches = {
- nixpkgs = [
+ nixpkgs = [ # Can define a list of patches for each input here:
- # ./patches/nixpkgs-test.patch # after »nix build«, check »result/inputs/nixpkgs/patched!« to see that these patches were applied
- # ./patches/nixpkgs-fix-systemd-boot-install.patch
+ # { url = "https://github.com/NixOS/nixpkgs/pull/###.diff"; sha256 = inputs.nixpkgs.lib.fakeSha256; } # Path from URL.
+ # ./patches/nixpkgs-fix-systemd-boot-install.patch # Local path file. (use long native / direct path to ensure it only changes if the content does)
+ # ./patches/nixpkgs-test.patch # After »nix build«, check »result/inputs/nixpkgs/patched!« to see that these patches were applied.
];
}; in (import "${./.}/lib/flakes.nix" "${./.}/lib" inputs).patchFlakeInputsAndImportRepo inputs patches ./. (inputs@{ self, nixpkgs, ... }: repo@{ overlays, lib, ... }: let
- systemsFlake = lib.wip.mkSystemsFlake (rec {
- #systems = { dir = "${self}/hosts"; exclude = [ ]; }; # (implicit)
- inherit inputs;
- scripts = (lib.attrValues lib.wip.setup-scripts) ++ [ ./example/install.sh.md ];
- });
in [ # Run »nix flake show --allow-import-from-derivation« to see what this merges to:
- repo
+ repo # lib.* nixosModules.* overlays.*
- (if true then systemsFlake else { })
+ (lib.wip.mkSystemsFlake { inherit inputs; }) # nixosConfigurations.* apps.*-linux.* devShells.*-linux.* packages.*-linux.all-systems
- (lib.wip.forEachSystem [ "aarch64-linux" "x86_64-linux" ] (localSystem: {
+ (lib.wip.forEachSystem [ "aarch64-linux" "x86_64-linux" ] (localSystem: { # packages.*-linux.* defaultPackage.*-linux
packages = builtins.removeAttrs (lib.wip.getModifiedPackages (lib.wip.importPkgs inputs { system = localSystem; }) overlays) [ "libblockdev" ];
- defaultPackage = systemsFlake.packages.${localSystem}.all-systems;
+ defaultPackage = self.packages.${localSystem}.all-systems;
}))
- { patches = (lib.wip.importWrapped inputs "${self}/patches").result; }
+ { patches = (lib.wip.importWrapped inputs "${self}/patches").result; } # patches.*
]); }
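As a hedged illustration of the verification hinted at in the comments above (assuming at least one nixpkgs patch is actually configured, and that `nix` has the flake features enabled):

```bash
nix flake update                     # refresh »./flake.lock«, as noted in the comment above
nix build                            # the default package links all (patched) inputs under »result/inputs/«
ls result/inputs/nixpkgs/patched!    # per the comment above, this marker indicates the nixpkgs patches were applied
```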

View File

@@ -39,6 +39,9 @@ in { imports = [ ({ ## Hardware
boot.loader.systemd-boot.enable = true; boot.loader.grub.enable = false;
+ # Example of adding and/or overwriting setup/maintenance functions:
+ wip.setup.scripts.install-overwrite = { path = ../example/install.sh.md; order = 1000; };
}) (lib.mkIf false { ## Minimal explicit FS setup
@@ -93,7 +96,7 @@ in { imports = [ ({ ## Hardware
}) (lib.mkIf (name == "example-raidz") { ## Multi-disk ZFS setup
- #wip.fs.disks.devices.primary.size = "16G"; # (default)
+ wip.fs.disks.devices = lib.genAttrs ([ "primary" "raidz1" "raidz2" "raidz3" ]) (name: { size = "16G"; });
wip.fs.boot.enable = true; wip.fs.boot.size = "512M";
wip.fs.keystore.enable = true;
@@ -126,7 +129,7 @@ in { imports = [ ({ ## Hardware
services.getty.autologinUser = "root"; users.users.root.password = "root";
- boot.kernelParams = lib.mkForce [ "console=tty1" "console=ttyS0" "boot.shell_on_fail" ];
+ boot.kernelParams = [ /* "console=tty1" */ "console=ttyS0" "boot.shell_on_fail" ]; wip.base.panic_on_fail = false;
wip.services.dropbear.enable = true;
#wip.services.dropbear.rootKeys = [ ''${lib.readFile "${dirname}/....pub"}'' ];
@@ -134,4 +137,6 @@ in { imports = [ ({ ## Hardware
#wip.fs.disks.devices.primary.gptOffset = 64;
#wip.fs.disks.devices.primary.size = "250059096K"; # 256GB Intel H10
+ boot.binfmt.emulatedSystems = [ "aarch64-linux" ];
}) ]; }

View File

@@ -80,7 +80,7 @@ in rec {
} // (args.specialArgs or { }) // {
inherit name; nodes = peers; # NixOPS
};
- in { inherit preface; } // (nixosSystem {
+ in let system = { inherit preface; } // (nixosSystem {
system = targetSystem;
modules = [ (
(importWrapped inputs entryPath).module
@@ -90,7 +90,24 @@ in rec {
} {
# List of host names to instantiate this host config for, instead of just for the file name.
options.${prefix}.preface.instances = lib.mkOption { type = lib.types.listOf lib.types.str; default = [ name ]; };
- } ({ config, ... }: {
+ } {
+ options.${prefix}.setup.scripts = lib.mkOption {
+ description = ''Attrset of bash scripts defining functions that do installation and maintenance operations. See »./setup-scripts/README.md« below for more information.'';
+ type = lib.types.attrsOf (lib.types.nullOr (lib.types.submodule ({ name, config, ... }: { options = {
+ name = lib.mkOption { description = "Name that this device is being referred to as in other places."; type = lib.types.str; default = name; readOnly = true; };
+ path = lib.mkOption { description = "Path of file for ».text« to be loaded from."; type = lib.types.nullOr lib.types.path; default = null; };
+ text = lib.mkOption { description = "Script text to process."; type = lib.types.str; default = builtins.readFile config.path; };
+ order = lib.mkOption { description = "Parsing order of the scripts. Higher orders will be parsed later, and can thus overwrite earlier definitions."; type = lib.types.int; default = 1000; };
+ }; })));
+ apply = lib.filterAttrs (k: v: v != null);
+ }; config.${prefix}.setup.scripts = lib.mapAttrs (name: path: lib.mkOptionDefault { inherit path; }) (setup-scripts);
+ } ({ config, pkgs, ... }: {
+ options.${prefix}.setup.appliedScripts = lib.mkOption {
+ type = lib.types.functionTo lib.types.str; readOnly = true;
+ default = context: substituteImplicit { inherit pkgs; scripts = lib.sort (a: b: a.order < b.order) (lib.attrValues config.${prefix}.setup.scripts); context = system // context; }; # inherit (builtins) trace;
+ };
+ }) ({ config, ... }: {
imports = modules; nixpkgs = { inherit overlays; }
// (if buildSystem != targetSystem then { localSystem.system = buildSystem; crossSystem.system = targetSystem; } else { system = targetSystem; });
@@ -103,7 +120,7 @@ in rec {
}) ];
specialArgs = specialArgs; # explicitly passing »pkgs« here breaks »config.nixpkgs.overlays«!
- });
+ }); in system;
# Given either a list (or attr set) of »files« (paths to ».nix« or ».nix.md« files for dirs with »default.nix« files in them) or a »dir« path (and optionally a list of file names to »exclude« from it), this builds the NixOS configuration for each host (per file) in the context of all configs provided.
# If »files« is an attr set, exactly one host with the attribute's name as hostname is built for each attribute. Otherwise the default is to build for one host per configuration file, named as the file name without extension or the sub-directory name. Setting »${prefix}.preface.instances« can override this to build the same configuration for those multiple names instead (the specific »name« is passed as additional »specialArgs« to the modules and can thus be used to adjust the config per instance).
@@ -137,10 +154,8 @@ in rec {
mkSystemsFlake = args@{
# An attrset of imported Nix flakes, for example the argument(s) passed to the flake »outputs« function. All other arguments are optional (and have reasonable defaults) if this is provided and contains »self« and the standard »nixpkgs«. This is also the second argument passed to the individual host's top level config files.
inputs ? { },
- # Root path of the NixOS configuration. »./.« in the »flake.nix«
- configPath ? inputs.self.outPath,
# Arguments »{ files, dir, exclude, }« to »mkNixosConfigurations«, see there for details. May also be a list of those attrsets, in which case those multiple sets of hosts will be built separately by »mkNixosConfigurations«, allowing for separate sets of »peers« passed to »mkNixosConfiguration«. Each call will receive all other arguments, and the resulting sets of hosts will be merged.
- systems ? ({ dir = "${configPath}/hosts"; exclude = [ ]; }),
+ systems ? ({ dir = "${inputs.self}/hosts"; exclude = [ ]; }),
# List of overlays to set as »config.nixpkgs.overlays«. Defaults to the ».overlay(s)« of all »overlayInputs«/»inputs« (incl. »inputs.self«).
overlays ? (builtins.concatLists (map (input: if input?overlay then [ input.overlay ] else if input?overlays then builtins.attrValues input.overlays else [ ]) (builtins.attrValues overlayInputs))),
# (Subset of) »inputs« that »overlays« will be used from. Example: »{ inherit (inputs) self flakeA flakeB; }«.
@@ -151,31 +166,35 @@ in rec {
moduleInputs ? (builtins.removeAttrs inputs [ "nixpkgs" ]),
# Additional arguments passed to each module evaluated for the host config (if that module is defined as a function).
specialArgs ? { },
- # Optional list of bash scripts defining functions that do installation and maintenance operations. See »./setup-scripts/README.md« below for more information. To define additional functions (or overwrite individual default ones), use »(lib.attrValues lib.wip.setup-scripts) ++ [ ]« and place any extra scripts in the array.
- scripts ? (lib.attrValues setup-scripts),
# The »nixosSystem« function defined in »<nixpkgs>/flake.nix«, or equivalent.
nixosSystem ? inputs.nixpkgs.lib.nixosSystem,
# If provided, then cross compilation is enabled for all hosts whose target architecture is different from this. Since cross compilation currently fails for (some stuff in) NixOS, better don't set »localSystem«. Without it, building for other platforms works fine (just slowly) if »boot.binfmt.emulatedSystems« on the building system is configured for the respective target(s).
localSystem ? null,
... }: let
- otherArgs = (builtins.removeAttrs args [ "systems" ]) // { inherit systems overlays modules specialArgs scripts inputs configPath nixosSystem localSystem; };
+ otherArgs = (builtins.removeAttrs args [ "systems" ]) // { inherit inputs systems overlays modules specialArgs nixosSystem localSystem; };
nixosConfigurations = if builtins.isList systems then mergeAttrsUnique (map (systems: mkNixosConfigurations (otherArgs // systems)) systems) else mkNixosConfigurations (otherArgs // systems);
in let outputs = {
inherit nixosConfigurations;
} // (forEachSystem [ "aarch64-linux" "x86_64-linux" ] (localSystem: let
pkgs = (import inputs.nixpkgs { inherit overlays; system = localSystem; });
- nix = if lib.versionOlder pkgs.nix.version "2.4" then pkgs.nix_2_4 else pkgs.nix;
- nix_wrapped = pkgs.writeShellScriptBin "nix" ''exec ${nix}/bin/nix --extra-experimental-features nix-command "$@"'';
- in (if scripts == [ ] || scripts == null then { } else {
- # E.g.: $ nix run .#$target -- install-system /tmp/system-$target.img
- # E.g.: $ nix run /etc/nixos/#$(hostname) -- sudo
- # If the first argument (after »--«) is »sudo«, then the program will re-execute itself with sudo as root (minus that »sudo« argument).
- # If the first/next argument is »bash«, it will execute an interactive shell with the variables and functions sourced (largely equivalent to »nix develop .#$host«).
- apps = lib.mapAttrs (name: system: let
- appliedScripts = substituteImplicit { inherit pkgs scripts; context = system // { native = pkgs; }; };
- in { type = "app"; program = "${pkgs.writeShellScript "scripts-${name}" ''
+ tools = lib.unique (map (p: p.outPath) (lib.filter lib.isDerivation pkgs.stdenv.allowedRequisites));
+ PATH = lib.concatMapStringsSep ":" (pkg: "${pkg}/bin") tools;
+ in {
+ # Do per-host setup and maintenance things:
+ # SYNOPSIS: nix run REPO#HOST [-- [sudo] [bash | -x [-c SCRIPT | FUNC ...ARGS]]]
+ # Where »REPO« is the path to a flake repo using »mkSystemsFlake« for it's »apps« output, and »HOST« is the name of a host it defines.
+ # If the first argument (after »--«) is »sudo«, then the program will re-execute itself as root using sudo (minus that »sudo« argument).
+ # If the (then) first argument is »bash«, or if there are no (more) arguments, it will execute an interactive shell with the variables and functions sourced (largely equivalent to »nix develop .#$host«).
+ # »-x« as next argument runs »set -x«. If the next argument is »-c«, it will evaluate (only) the following argument as bash script, otherwise the argument will be called as command, with all following arguments as arguments tot he command.
+ # Examples:
+ # Install the host named »$target« to the image file »/tmp/system-$target.img«:
+ # $ nix run .#$target -- install-system /tmp/system-$target.img
+ # Run an interactive bash session with the setup functions in the context of the current host:
+ # $ nix run /etc/nixos/#$(hostname)
+ # Run an root session in the context of a different host (useful if Nix is not installed for root on the current host):
+ # $ nix run /etc/nixos/#other-host -- sudo
+ apps = lib.mapAttrs (name: system: { type = "app"; program = "${pkgs.writeShellScript "scripts-${name}" ''
# if first arg is »sudo«, re-execute this script with sudo (as root)
if [[ $1 == sudo ]] ; then shift ; exec sudo --preserve-env=SSH_AUTH_SOCK -- "$0" "$@" ; fi
@@ -195,48 +214,41 @@ in rec {
fi
# provide installer tools (native to localSystem, not targetSystem)
- hostPath=$PATH
- PATH=${pkgs.nixos-install-tools}/bin:${nix_wrapped}/bin:${nix}/bin:${pkgs.util-linux}/bin:${pkgs.coreutils}/bin:${pkgs.gnused}/bin:${pkgs.gnugrep}/bin:${pkgs.findutils}/bin:${pkgs.tree}/bin:${pkgs.gawk}/bin:${pkgs.zfs}/bin
- ${appliedScripts}
+ hostPath=$PATH ; PATH=${PATH}
+ ${system.config.${prefix}.setup.appliedScripts { native = pkgs; }}
# either call »$1« with the remaining parameters as arguments, or if »$1« is »-c« eval »$2«.
if [[ ''${1:-} == -x ]] ; then shift ; set -x ; fi
if [[ ''${1:-} == -c ]] ; then eval "$2" ; else "$@" ; fi
''}"; }) nixosConfigurations;
# E.g.: $ nix develop /etc/nixos/#$(hostname)
- # ... and then call any of the functions in ./utils/functions.sh (in the context of »$(hostname)«, where applicable).
+ # ... and then call any of the functions in ./utils/setup-scripts/ (in the context of »$(hostname)«, where applicable).
# To get an equivalent root shell: $ nix run /etc/nixos/#functions-$(hostname) -- sudo bash
- devShells = lib.mapAttrs (name: system: pkgs.mkShell (let
- appliedScripts = substituteImplicit { inherit pkgs scripts; context = system // { native = pkgs; }; };
- in {
- nativeBuildInputs = [ pkgs.nixos-install-tools nix_wrapped pkgs.nix ];
+ devShells = lib.mapAttrs (name: system: pkgs.mkShell {
+ nativeBuildInputs = tools ++ [ pkgs.nixos-install-tools ];
shellHook = ''
- ${appliedScripts}
+ ${system.config.${prefix}.setup.appliedScripts { native = pkgs; }}
# add active »hostName« to shell prompt
PS1=''${PS1/\\$/\\[\\e[93m\\](${name})\\[\\e[97m\\]\\$}
'';
- })) nixosConfigurations;
+ }) nixosConfigurations;
- }) // {
# dummy that just pulls in all system builds
packages.all-systems = pkgs.runCommandLocal "all-systems" { } ''
- mkdir -p $out/systems
- ${lib.concatStringsSep "\n" (lib.mapAttrsToList (name: system: (
- "ln -sT ${system.config.system.build.toplevel} $out/systems/${name}"
- )) nixosConfigurations)}
+ ${''
+ mkdir -p $out/systems
+ ${lib.concatStringsSep "\n" (lib.mapAttrsToList (name: system: "ln -sT ${system.config.system.build.toplevel} $out/systems/${name}") nixosConfigurations)}
+ ''}
- ${lib.optionalString (scripts != [ ]) ''
+ ${''
mkdir -p $out/scripts
- ${lib.concatStringsSep "\n" (lib.mapAttrsToList (name: _: "ln -sT ${outputs.apps.${localSystem}.${name}.program} $out/scripts/${name}") nixosConfigurations)}
+ ${lib.concatStringsSep "\n" (lib.mapAttrsToList (name: system: "ln -sT ${outputs.apps.${localSystem}.${name}.program} $out/scripts/${name}") nixosConfigurations)}
''}
${lib.optionalString (inputs != { }) ''
mkdir -p $out/inputs
${lib.concatStringsSep "\n" (lib.mapAttrsToList (name: { outPath, ... }: "ln -sT ${outPath} $out/inputs/${name}") inputs)}
''}
- ${lib.optionalString (configPath != null) "ln -sT ${configPath} $out/config"}
'';
})); in outputs;
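To make the SYNOPSIS from the comments above concrete, a hedged usage sketch (the host name `example`, the flake path, and the image path are placeholders, not taken from this repository):

```bash
# Run one setup function for the host »example«, defined in the flake in the current directory:
nix run .#example -- install-system /tmp/system-example.img
# The same as root, with bash tracing (»set -x«) enabled:
nix run .#example -- sudo -x install-system /tmp/system-example.img
# Drop into an interactive shell with all setup functions and »@{...}« variables sourced:
nix run .#example -- bash
```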

View File

@@ -23,7 +23,7 @@
# Bash does not support any nested data structures. Lists or attrsets in within lists or attrsets are therefore (recursively) encoded and escaped as strings, such that calling »eval« on them is safe if (but only if) they are known to be encoded from nested lists/attrsets. Example: »eval 'declare -A fs='"@{config.fileSystems['/']}" ; root=${fs[device]}«.
# Any other value (functions), and things that »builtins.toString« doesn't like, will throw here.
substituteImplicit = args@{
- scripts, # List of paths to scripts to process and then source in the returned script. Can also be an attrset, of which (only) the values will be used, and each script may also be an attrset »{ name; text; }« instead of a path.
+ scripts, # List of paths to scripts to process and then source in the returned script. Each script may also be an attrset »{ name; text; }« instead of a path.
context, # The root attrset for the resolution of substitutions.
pkgs, # Instantiated »nixpkgs«, as fallback location for helpers, and to grab »writeScript« etc from.
helpers ? { }, # Attrset of (highest priority) helper functions.
@@ -32,7 +32,7 @@
scripts = map (source: rec {
text = if builtins.isAttrs source then source.text else builtins.readFile source; name = if builtins.isAttrs source then source.name else builtins.baseNameOf source;
parsed = builtins.split ''@\{([#!]?)([a-zA-Z][a-zA-Z0-9_.-]*[a-zA-Z0-9](![a-zA-Z][a-zA-Z0-9_.-]*[a-zA-Z0-9])?)([:*@\[#%/^,\}])'' text; # (first part of a bash parameter expansion, with »@« instead of »$«)
- }) (if builtins.isAttrs args.scripts then builtins.attrValues args.scripts else args.scripts);
+ }) args.scripts;
decls = lib.unique (map (match: builtins.elemAt match 1) (builtins.filter builtins.isList (builtins.concatMap (script: script.parsed) scripts)));
vars = pkgs.writeText "vars" ("#!/usr/bin/env bash\n" + (lib.concatMapStringsSep "\n" (decl: let
call = let split = builtins.split "!" decl; in if (builtins.length split) == 1 then null else builtins.elemAt split 2;
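To illustrate the encoding convention described in the comment above, a hedged sketch (the attribute names are only examples) of how a substituted script decodes a nested value at runtime:

```bash
# Sketch only: »substituteImplicit« is assumed to have replaced the »@{...}«
# placeholder below with an escaped string that is safe to »eval«.
eval 'declare -A fs='"@{config.fileSystems['/']}" # decode the attrset into a bash associative array
root=${fs[device]} ; fsType=${fs[fsType]}
eval 'declare -a opts='"${fs[options]}" # nested lists are encoded the same way, one »eval« per nesting level
printf '%s is mounted as %s with options: %s\n' "$root" "$fsType" "${opts[*]}"
```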

View File

@@ -3,16 +3,15 @@
This is a library of bash functions, mostly for NixOS system installation.
- The (paths to these) scripts are meant to be passed in the `scripts` argument to [`mkSystemsFlake`](../flakes.nix#mkSystemsFlake) (see [`flake.nix`](../../flake.nix) for an example), which makes their functions available in the per-host [`devShells`/`apps`](../flakes.nix#mkSystemsFlake).
+ The (paths to these) scripts are meant to be (and by default are) set as `config.wip.setup.scripts.*` (see [`../flakes.nix`](../flakes.nix)), which makes their functions available in the per-host [`devShells`/`apps`](../flakes.nix#mkSystemsFlake).
Host-specific nix variables are available to the bash functions as `@{...}` through [`substituteImplicit`](../scripts.nix#substituteImplicit) with the respective host as root context.
Any script passed later in `scripts` can overwrite the functions of these (earlier) default scripts.
- With the functions from here, [a simple four-liner](../install.sh) is enough to do a completely automated NixOS installation:
+ With the functions from here, [a simple three-liner](../install.sh) is enough to do a completely automated NixOS installation:
```bash
function install-system {( set -eu # 1: diskPaths
prepare-installer "$@"
do-disk-setup "${argv[0]}"
- init-or-restore-system
install-system-to $mnt
)}
```
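For illustration only (not part of the repository), a sketch of what an additional setup function set via `config.wip.setup.scripts.*` might look like; the function name is hypothetical, and the `@{...}` placeholders are resolved per host as described above:

```bash
## Hypothetical extra maintenance function; the »@{...}« placeholders are filled
## in per host by »substituteImplicit« before this script is sourced.
function show-host-summary {( set -eu # (void)
    echo "host:      @{config.networking.hostName}"
    echo "system:    @{config.wip.preface.hardware}-linux"
    echo "top-level: @{config.system.build.toplevel}"
)}
```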

View File

@@ -2,109 +2,114 @@
##
# Key Generation Methods
# See »../../modules/fs/keystore.nix.md« for more documentation.
+ # It is probably generally advisable that these functions output ASCII strings.
+ # Keys used as ZFS encryption keys (with the implicit »keyformat = hex«) must be 64 (lowercase?) hex digits.
##
- ## Puts an empty key in the keystore, causing that ZFS dataset to be unencrypted, even if it's parent is encrypted.
- function add-key-unencrypted {( set -eu # 1: usage
- keystore=/run/keystore-@{config.networking.hostName!hashString.sha256:0:8} ; usage=$1
- : | write-secret "$keystore"/"$usage".key
+ ## Outputs nothing (/ an empty key), causing that ZFS dataset to be unencrypted, even if it's parent is encrypted.
+ function gen-key-unencrypted {( set -eu # 1: usage
+ :
)}
- ## Adds a key by copying the hostname to the keystore.
- function add-key-hostname {( set -eu # 1: usage
- keystore=/run/keystore-@{config.networking.hostName!hashString.sha256:0:8} ; usage=$1
- if [[ ! "$usage" =~ ^(luks/keystore-@{config.networking.hostName!hashString.sha256:0:8}/.*)$ ]] ; then printf '»trivial« key mode is only available for the keystore itself.\n' ; exit 1 ; fi
- printf %s "@{config.networking.hostName}" | write-secret "$keystore"/"$usage".key
+ ## Uses the hostname as a trivial key.
+ function gen-key-hostname {( set -eu # 1: usage
+ usage=$1
+ if [[ ! "$usage" =~ ^(luks/keystore-@{config.networking.hostName!hashString.sha256:0:8}/.*)$ ]] ; then printf '»trivial« key mode is only available for the keystore itself.\n' 1>&2 ; exit 1 ; fi
+ printf %s "@{config.networking.hostName}"
)}
- ## Adds a key by copying it from a bootkey partition (see »add-bootkey-to-keydev«) to the keystore.
- function add-key-usb-part {( set -eu # 1: usage
- keystore=/run/keystore-@{config.networking.hostName!hashString.sha256:0:8} ; usage=$1
- if [[ ! "$usage" =~ ^(luks/keystore-[^/]+/[1-8])$ ]] ; then printf '»usb-part« key mode is only available for the keystore itself.\n' ; exit 1 ; fi
+ ## Obtains a key by reading it from a bootkey partition (see »add-bootkey-to-keydev«).
+ function gen-key-usb-part {( set -eu # 1: usage
+ usage=$1
+ if [[ ! "$usage" =~ ^(luks/keystore-[^/]+/[1-8])$ ]] ; then printf '»usb-part« key mode is only available for the keystore itself.\n' 1>&2 ; exit 1 ; fi
bootkeyPartlabel=bootkey-"@{config.networking.hostName!hashString.sha256:0:8}"
- cat /dev/disk/by-partlabel/"$bootkeyPartlabel" | write-secret "$keystore"/"$usage".key
+ cat /dev/disk/by-partlabel/"$bootkeyPartlabel"
)}
- ## Adds a key by copying a different key from the keystore to the keystore.
- function add-key-copy {( set -eu # 1: usage, 2: source
- keystore=/run/keystore-@{config.networking.hostName!hashString.sha256:0:8} ; usage=$1 ; source=$2
- cat "$keystore"/"$source".key | write-secret "$keystore"/"$usage".key
+ ## Outputs a key by simply printing an different keystore entry (that must have been generated before).
+ function gen-key-copy {( set -eu # 1: _, 2: source
+ keystore=/run/keystore-@{config.networking.hostName!hashString.sha256:0:8} ; source=$2
+ cat "$keystore"/"$source".key
)}
- ## Adds a key by writing a constant value to the keystore.
- function add-key-constant {( set -eu # 1: usage, 2: value
- keystore=/run/keystore-@{config.networking.hostName!hashString.sha256:0:8} ; usage=$1 ; value=$2
- printf %s "$value" | write-secret "$keystore"/"$usage".key
+ ## Outputs a key by simply using the constant »$value« passed in.
+ function gen-key-constant {( set -eu # 1: _, 2: value
+ value=$2
+ printf %s "$value"
)}
- ## Adds a key by prompting for a password and saving it to the keystore.
- function add-key-password {( set -eu # 1: usage
- keystore=/run/keystore-@{config.networking.hostName!hashString.sha256:0:8} ; usage=$1
- (prompt-new-password "as key for @{config.networking.hostName}/$usage" || exit 1) \
- | write-secret "$keystore"/"$usage".key
+ ## Obtains a key by prompting for a password.
+ function gen-key-password {( set -eu # 1: usage
+ usage=$1
+ ( prompt-new-password "as key for @{config.networking.hostName}/$usage" || exit 1 )
)}
- ## Generates a key by prompting for a password, combining it with »$keystore/home/$user.key«, and saving it to the keystore.
- function add-key-home-pw {( set -eu # 1: usage, 2: user
+ ## Generates a key by prompting for (or reusing) a »$user«'s password, combining it with »$keystore/home/$user.key«.
+ function gen-key-home-composite {( set -eu # 1: usage, 2: user
keystore=/run/keystore-@{config.networking.hostName!hashString.sha256:0:8} ; usage=$1 ; user=$2
if [[ ${!userPasswords[@]} && ${userPasswords[$user]:-} ]] ; then
password=${userPasswords[$user]}
else
password=$(prompt-new-password "that will be used as component of the key for @{config.networking.hostName}/$usage")
fi
- ( cat "$keystore"/home/"$user".key && cat <<<"$password" ) | sha256sum | head -c 64 \
- | write-secret "$keystore"/"$usage".key
+ ( cat "$keystore"/home/"$user".key && cat <<<"$password" ) | sha256sum | head -c 64
)}
- ## Generates a reproducible secret for a certain »$use«case by prompting for a pin/password and then challenging slot »$slot« of YubiKey »$serial«, and saves it to the »$keystore«.
- function add-key-yubikey-pin {( set -eu # 1: usage, 2: serialAndSlot(as »serial:slot«)
+ ## Generates a reproducible, host-independent key by challenging slot »$slot« of YubiKey »$serial« with »$user«'s password.
+ function gen-key-home-yubikey {( set -eu # 1: usage, 2: serialAndSlotAndUser(as »serial:slot:user«)
+ usage=$1 ; args=$2
+ serial=$( <<<"$args" cut -d: -f1 ) ; slot=$( <<<"$args" cut -d: -f2 )
+ user=${args/$serial:$slot:/}
+ if [[ ${!userPasswords[@]} && ${userPasswords[$user]:-} ]] ; then
+ password=${userPasswords[$user]}
+ else
+ password=$(prompt-new-password "as YubiKey challenge for @{config.networking.hostName}/$usage")
+ fi
+ gen-key-yubikey-challenge "$usage" "$serial:$slot:home-$password" true "${user}'s password for ${usage}"
+ )}
+ ## Generates a reproducible secret by prompting for a pin/password and then challenging slot »$slot« of YubiKey »$serial«.
+ function gen-key-yubikey-pin {( set -eu # 1: usage, 2: serialAndSlot(as »serial:slot«)
usage=$1 ; serialAndSlot=$2
password=$(prompt-new-password "/ pin as challenge to YubiKey »$serialAndSlot« as key for @{config.networking.hostName}/$usage")
- add-key-yubikey-challenge "$usage" "$serialAndSlot:$password" true "pin for ${usage}"
+ gen-key-yubikey-challenge "$usage" "$serialAndSlot:$password" true "pin for ${usage}"
)}
- ## Generates a reproducible secret for a certain »$use«case on a »$host« by challenging slot »$slot« of YubiKey »$serial«, and saves it to the »$keystore«.
- function add-key-yubikey {( set -eu # 1: usage, 2: serialAndSlotAndSalt(as »serial:slot:salt«)
- usage=$1 ; IFS=':' read -ra serialAndSlotAndSalt <<< "$2"
- usage_="$usage" ; if [[ "$usage" =~ ^(luks/.*/[0-8])$ ]] ; then usage_="${usage:0:(-2)}" ; fi # produce the same secret, regardless of the target luks slot
- challenge="@{config.networking.hostName}:$usage_${serialAndSlotAndSalt[2]:+:${serialAndSlotAndSalt[2]:-}}"
- add-key-yubikey-challenge "$usage" "${serialAndSlotAndSalt[0]}:${serialAndSlotAndSalt[1]}:$challenge"
+ ## Generates a reproducible secret for a certain »$use«case and optionally »$salt« on a »$host« by challenging slot »$slot« of YubiKey »$serial«.
+ function gen-key-yubikey {( set -eu # 1: usage, 2: serialAndSlotAndSalt(as »serial:slot:salt«)
+ usage=$1 ; args=$2
+ serial=$( <<<"$args" cut -d: -f1 ) ; slot=$( <<<"$args" cut -d: -f2 )
+ salt=${args/$serial:$slot:/}
+ usagE="$usage" ; if [[ "$usage" =~ ^(luks/.*/[0-8])$ ]] ; then usagE="${usage:0:(-2)}" ; fi # produce the same secret, regardless of the target luks slot
+ challenge="@{config.networking.hostName}:$usagE${salt:+:$salt}"
+ gen-key-yubikey-challenge "$usage" "$serial:$slot:$challenge"
)}
- ## Generates a reproducible secret for a certain »$use«case by challenging slot »$slot« of YubiKey »$serial« with the fixed »$challenge«, and saves it to the »$keystore«.
- # If »$sshArgs« is set as (env) var, generate the secret locally, then use »ssh $sshArgs« to write the secret on the other end.
- # E.g.: # sshArgs='installerIP' add-key-yubikey /run/keystore/ zfs/rpool/remote 1234567:2:customChallenge
- function add-key-yubikey-challenge {( set -eu # 1: usage, 2: serialAndSlotAndChallenge(as »$serial:$slot:$challenge«), 3?: onlyOnce, 4?: message
- keystore=/run/keystore-@{config.networking.hostName!hashString.sha256:0:8} ; usage=$1 ; args=$2 ; message=${4:-}
- serial=$(<<<"$args" cut -d: -f1)
- slot=$(<<<"$args" cut -d: -f2)
+ ## Generates a reproducible secret by challenging slot »$slot« of YubiKey »$serial« with the fixed »$challenge«.
+ function gen-key-yubikey-challenge {( set -eu # 1: _, 2: serialAndSlotAndChallenge(as »$serial:$slot:$challenge«), 3?: onlyOnce, 4?: message
+ args=$2 ; message=${4:-}
+ serial=$( <<<"$args" cut -d: -f1 ) ; slot=$( <<<"$args" cut -d: -f2 )
challenge=${args/$serial:$slot:/}
- if [[ "$serial" != "$(@{native.yubikey-personalization}/bin/ykinfo -sq)" ]] ; then printf 'Please insert / change to YubiKey with serial %s!\n' "$serial" ; fi
+ if [[ "$serial" != "$(@{native.yubikey-personalization}/bin/ykinfo -sq)" ]] ; then printf 'Please insert / change to YubiKey with serial %s!\n' "$serial" 1>&2 ; fi
if [[ ! "${3:-}" ]] ; then
read -p 'Challenging YubiKey '"$serial"' slot '"$slot"' twice with '"${message:-challenge »"$challenge":1/2«}"'. Enter to continue, or Ctrl+C to abort:'
else
read -p 'Challenging YubiKey '"$serial"' slot '"$slot"' once with '"${message:-challenge »"$challenge"«}"'. Enter to continue, or Ctrl+C to abort:'
fi
- if [[ "$serial" != "$(@{native.yubikey-personalization}/bin/ykinfo -sq)" ]] ; then printf 'YubiKey with serial %s not present, aborting.\n' "$serial" ; exit 1 ; fi
+ if [[ "$serial" != "$(@{native.yubikey-personalization}/bin/ykinfo -sq)" ]] ; then printf 'YubiKey with serial %s not present, aborting.\n' "$serial" 1>&2 ; exit 1 ; fi
if [[ ! "${3:-}" ]] ; then
secret="$(@{native.yubikey-personalization}/bin/ykchalresp -"$slot" "$challenge":1)""$(@{native.yubikey-personalization}/bin/ykchalresp -2 "$challenge":2)"
- if [[ ${#secret} != 80 ]] ; then printf 'YubiKey challenge failed, aborting.\n' "$serial" ; exit 1 ; fi
+ if [[ ${#secret} != 80 ]] ; then printf 'YubiKey challenge failed, aborting.\n' "$serial" 1>&2 ; exit 1 ; fi
else
secret="$(@{native.yubikey-personalization}/bin/ykchalresp -"$slot" "$challenge")"
- if [[ ${#secret} != 40 ]] ; then printf 'YubiKey challenge failed, aborting.\n' "$serial" ; exit 1 ; fi
+ if [[ ${#secret} != 40 ]] ; then printf 'YubiKey challenge failed, aborting.\n' "$serial" 1>&2 ; exit 1 ; fi
fi
- if [[ ! "${sshArgs:-}" ]] ; then
- printf %s "$secret" | ( head -c 64 | write-secret "$keystore"/"$usage".key )
- else
- read -p 'Uploading secret with »ssh '"$sshArgs"'«. Enter to continue, or Ctrl+C to abort:'
- printf %s "$secret" | ( head -c 64 | ssh $sshArgs /etc/nixos/utils/functions.sh write-secret "$keystore"/"$usage".key )
- fi
+ printf %s "$secret" | head -c 64
)}
- ## Generates a random secret key and saves it to the keystore.
- function add-key-random {( set -eu # 1: usage
- keystore=/run/keystore-@{config.networking.hostName!hashString.sha256:0:8} ; usage=$1
- </dev/urandom tr -dc 0-9a-f | head -c 64 | write-secret "$keystore"/"$usage".key
+ ## Generates a random secret key.
+ function gen-key-random {( set -eu # 1: usage
+ </dev/urandom tr -dc 0-9a-f | head -c 64
)}
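As a purely illustrative sketch (not part of this commit), an additional generator following the same convention (take the key's usage as `$1`, print the key material to stdout, and emit 64 lowercase hex digits so it also works as an implicit-`hex` ZFS key) could look like this:

```bash
## Hypothetical example: derive a key from an arbitrary (secret) file.
function gen-key-file {( set -eu # 1: usage, 2: sourceFile
    sourceFile=$2
    # hash the file and keep the first 64 (lowercase) hex digits, as the header above suggests
    sha256sum <"$sourceFile" | head -c 64
)}
```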

View File

@@ -52,9 +52,11 @@ function partition-disks { { # 1: diskPaths
install -o root -g root -m 640 -T /dev/null "$outFile" && truncate -s "${disk[size]}" "$outFile" &&
blockDevs[$name]=$(losetup --show -f "$outFile") && prepend_trap "losetup -d '${blockDevs[$name]}'" EXIT # NOTE: this must not be inside a sub-shell!
else
- local size=$( blockdev --getsize64 "${blockDevs[$name]}" || : )
+ local size=$( blockdev --getsize64 "${blockDevs[$name]}" || : ) ; local waste=$(( size - ${disk[size]} ))
if [[ ! $size ]] ; then echo "Block device $name does not exist at ${blockDevs[$name]}" ; exit 1 ; fi
- if [[ $size != "${disk[size]}" ]] ; then echo "Block device ${blockDevs[$name]}'s size $size does not match the size ${disk[size]} declared for $name" ; exit 1 ; fi
+ if (( waste < 0 )) ; then echo "Block device ${blockDevs[$name]}'s size $size is smaller than the size ${disk[size]} declared for $name" ; exit 1 ; fi
+ if (( waste > 0 )) && [[ ! ${disk[allowLarger]:-} ]] ; then echo "Block device ${blockDevs[$name]}'s size $size is bigger than the size ${disk[size]} declared for $name" ; exit 1 ; fi
+ if (( waste > 0 )) ; then echo "Wasting $(( waste / 1024))K of ${blockDevs[$name]} due to the size declared for $name (should be ${size}b)" ; fi
blockDevs[$name]=$(realpath "${blockDevs[$name]}")
fi
done
@@ -73,7 +75,7 @@ function partition-disks { { # 1: diskPaths
if [[ ${disk[serial]} != "$actual" ]] ; then echo "Block device $blockDev's serial ($actual) does not match the serial (${disk[serial]}) declared for ${disk[name]}" ; exit 1 ; fi
fi
# can (and probably should) restore the backup:
- ( PATH=@{native.gptfdisk}/bin ; set -x ; sgdisk --zap-all --load-backup=@{config.wip.fs.disks.partitioning}/"${disk[name]}".backup "${blockDevs[${disk[name]}]}" >$beQuiet )
+ ( PATH=@{native.gptfdisk}/bin ; set -x ; sgdisk --zap-all --load-backup=@{config.wip.fs.disks.partitioning}/"${disk[name]}".backup ${disk[allowLarger]:+--move-second-header} "${blockDevs[${disk[name]}]}" >$beQuiet )
#partition-disk "${disk[name]}" "${blockDevs[${disk[name]}]}"
done
@{native.parted}/bin/partprobe "${blockDevs[@]}"

View File

@@ -7,11 +7,10 @@
function install-system {( set -eu # 1: blockDev
prepare-installer "$@"
do-disk-setup "${argv[0]}"
- init-or-restore-system
install-system-to $mnt
)}
- ## Does very simple argument passing and validation, performs some sanity checks, includes a hack to make installation work when nix isn't installed for root, and enables debugging (if requested).
+ ## Does very simple argument paring and validation, performs some sanity checks, includes a hack to make installation work when nix isn't installed for root, and enables debugging (if requested).
function prepare-installer { # ...
generic-arg-parse "$@"
@@ -30,32 +29,21 @@ function prepare-installer { # ...
if [[ -e "/dev/mapper/$luksName" ]] ; then echo "LUKS device mapping »$luksName« is already open. Close it before running the installer." ; exit 1 ; fi
done
local poolName ; for poolName in "@{!config.wip.fs.zfs.pools[@]}" ; do
- if zfs get -o value -H name "$poolName" &>/dev/null ; then echo "ZFS pool »$poolName« is already imported. Export the pool before running the installer." ; exit 1 ; fi
+ if @{native.zfs}/bin/zfs get -o value -H name "$poolName" &>/dev/null ; then echo "ZFS pool »$poolName« is already imported. Export the pool before running the installer." ; exit 1 ; fi
done
if [[ ${SUDO_USER:-} ]] ; then function nix {( set +x ; declare -a args=("$@") ; PATH=$hostPath su - "$SUDO_USER" -c "$(declare -p args)"' ; nix "${args[@]}"' )} ; fi
if [[ ${args[debug]:-} ]] ; then set +e ; set -E ; trap 'code= ; cat 2>/dev/null || true ; @{native.bashInteractive}/bin/bash --init-file @{config.environment.etc.bashrc.source} || code=$? ; if [[ $code ]] ; then exit $code ; fi' ERR ; fi # On error, instead of exiting straight away, open a shell to allow diagnosing/fixing the issue. Only exit if that shell reports failure (e.g. CtrlC + CtrlD). Unfortunately, the exiting has to be repeated for level of each nested sub-shells. The cat eats anything lined up on stdin, which would otherwise be run in the shell (TODO: but it blocks if there is nothing on stdin, requiring Ctrl+D to be pressed).
+ export PATH=$PATH:@{native.util-linux}/bin # Doing a system installation requires a lot of stuff from »util-linux«. This should probably be moved into the individual functions that actually use the tools ...
}
- ## Depending on the presence or absence of the »--restore« CLI flag, either runs the system's initialization or restore commands.
- # The initialization commands are expected to create files that can't be stored in the (host's or target system's) nix store (i.e. secrets).
- # The restore commands are expected to pull in a backup of the systems secrets and state from somewhere, and need to acknowledge that something happened by running »restore-supported-callback«.
- function init-or-restore-system {( set -eu # (void)
- if [[ ! ${args[restore]:-} ]] ; then
- run-hook-script 'System Initialization' @{config.wip.fs.disks.initSystemCommands!writeText.initSystemCommands} # TODO: Do this later inside the chroot?
- return # usually, this would be it ...
- fi
- requiresRestoration=$(mktemp) ; trap "rm -f '$requiresRestoration'" EXIT ; function restore-supported-callback {( rm -f "$requiresRestoration" )}
- run-hook-script 'System Restoration' @{config.wip.fs.disks.restoreSystemCommands!writeText.restoreSystemCommands}
- if [[ -e $requiresRestoration ]] ; then echo 'The »restoreSystemCommands« did not call »restore-supported-callback« to mark backup restoration as supported for this system. Assuming incomplete configuration.' 1>&2 ; exit 1 ; fi
- )}
## The default command that will activate the system and install the bootloader. In a separate function to make it easy to replace.
function nixos-install-cmd {( set -eu # 1: mnt, 2: topLevel
# »nixos-install« by default does some stateful things (see the »--no« options below), builds and copies the system config (but that's already done), and then calls »NIXOS_INSTALL_BOOTLOADER=1 nixos-enter -- $topLevel/bin/switch-to-configuration boot«, which is essentially the same as »NIXOS_INSTALL_BOOTLOADER=1 nixos-enter -- @{config.system.build.installBootLoader} $targetSystem«, i.e. the side effects of »nixos-enter« and then calling the bootloader-installer.
- PATH=@{config.systemd.package}/bin:$PATH TMPDIR=/tmp LC_ALL=C nixos-install --system "$2" --no-root-passwd --no-channel-copy --root "$1" #--debug
+ PATH=@{config.systemd.package}/bin:@{native.nix}/bin:$PATH TMPDIR=/tmp LC_ALL=C @{native.nixos-install-tools}/bin/nixos-install --system "$2" --no-root-passwd --no-channel-copy --root "$1" #--debug
)}
## Copies the system's dependencies to the disks mounted at »$mnt« and installs the bootloader. If »$inspect« is set, a root shell will be opened in »$mnt« afterwards.
@@ -65,14 +53,8 @@ function install-system-to {( set -eu # 1: mnt, 2?: topLevel
targetSystem=@{config.system.build.toplevel}
trap - EXIT # start with empty traps for sub-shell
- # Copy system closure to new nix store:
- mkdir -p -m 755 $mnt/nix/var/nix ; mkdir -p -m 1775 $mnt/nix/store
- if [[ ${SUDO_USER:-} ]] ; then chown -R $SUDO_USER: $mnt/nix/store $mnt/nix/var ; fi
- ( set -x ; time nix copy --no-check-sigs --to $mnt ${topLevel:-$targetSystem} ) ; rm -rf $mnt/nix/var/nix/gcroots
- # TODO: if the target has @{config.nix.autoOptimiseStore} and the host doesn't (there is no .links dir?), optimize now
- if [[ ${SUDO_USER:-} ]] ; then chown -R root:root $mnt/nix $mnt/nix/var ; chown :30000 $mnt/nix/store ; fi
# Link/create files that some tooling expects:
+ mkdir -p -m 755 $mnt/nix/var/nix ; mkdir -p -m 1775 $mnt/nix/store
mkdir -p $mnt/etc $mnt/run ; mkdir -p -m 1777 $mnt/tmp
mount tmpfs -t tmpfs $mnt/run ; prepend_trap "umount -l $mnt/run" EXIT # If there isn't anything mounted here, »activate« will mount a tmpfs (inside »nixos-enter«'s private mount namespace). That would hide the additions below.
[[ -e $mnt/etc/NIXOS ]] || touch $mnt/etc/NIXOS # for »switch-to-configuration«
@ -80,19 +62,32 @@ function install-system-to {( set -eu # 1: mnt, 2?: topLevel
ln -sT $(realpath $targetSystem) $mnt/run/current-system ln -sT $(realpath $targetSystem) $mnt/run/current-system
#mkdir -p /nix/var/nix/db # »nixos-containers« requires this but nothing creates it before nix is used. BUT »nixos-enter« screams: »/nix/var/nix/db exists and is not a regular file.« #mkdir -p /nix/var/nix/db # »nixos-containers« requires this but nothing creates it before nix is used. BUT »nixos-enter« screams: »/nix/var/nix/db exists and is not a regular file.«
# If the system configuration is supposed to be somewhere on the system, might as well initialize that:
if [[ @{config.environment.etc.nixos.source:-} && @{config.environment.etc.nixos.source} != /nix/store/* && @{config.environment.etc.nixos.source} != /run/current-system/config && ! -e $mnt/@{config.environment.etc.nixos.source} && -e $targetSystem/config ]] ; then
mkdir -p -- $mnt/@{config.environment.etc.nixos.source} ; cp -at $mnt/@{config.environment.etc.nixos.source} -- $targetSystem/config/*
chown -R 0:0 $mnt/@{config.environment.etc.nixos.source} ; chmod -R u+w $mnt/@{config.environment.etc.nixos.source}
fi
# Set this as the initial system generation: # Set this as the initial system generation:
mkdir -p -m 755 $mnt/nix/var/nix/profiles ; ln -sT $(realpath $targetSystem) $mnt/nix/var/nix/profiles/system-1-link ; ln -sT system-1-link $mnt/nix/var/nix/profiles/system mkdir -p -m 755 $mnt/nix/var/nix/profiles ; ln -sT $(realpath $targetSystem) $mnt/nix/var/nix/profiles/system-1-link ; ln -sT system-1-link $mnt/nix/var/nix/profiles/system
# Support cross architecture installation (not sure if this is actually required) # Support cross architecture installation (not sure if this is actually required)
if [[ $(cat /run/current-system/system 2>/dev/null || echo "x86_64-linux") != "@{config.wip.preface.hardware}"-linux ]] ; then if [[ $(cat /run/current-system/system 2>/dev/null || echo "x86_64-linux") != "@{config.wip.preface.hardware}"-linux ]] ; then
mkdir -p $mnt/run/binfmt ; cp -a {,$mnt}/run/binfmt/"@{config.wip.preface.hardware}"-linux || true mkdir -p $mnt/run/binfmt ; [[ ! -e /run/binfmt/"@{config.wip.preface.hardware}"-linux ]] || cp -a {,$mnt}/run/binfmt/"@{config.wip.preface.hardware}"-linux
# Ubuntu (by default) expects the "interpreter" at »/usr/bin/qemu-@{config.wip.preface.hardware}-static«. # Ubuntu (by default) expects the "interpreter" at »/usr/bin/qemu-@{config.wip.preface.hardware}-static«.
fi fi
# Copy system closure to new nix store:
if [[ ${SUDO_USER:-} ]] ; then chown -R $SUDO_USER: $mnt/nix/store $mnt/nix/var ; fi
( set -x ; time nix --extra-experimental-features nix-command copy --no-check-sigs --to $mnt ${topLevel:-$targetSystem} ) ; rm -rf $mnt/nix/var/nix/gcroots
# TODO: if the target has @{config.nix.autoOptimiseStore} and the host doesn't (there is no .links dir?), optimize now
if [[ ${SUDO_USER:-} ]] ; then chown -R root:root $mnt/nix $mnt/nix/var ; chown :30000 $mnt/nix/store ; fi
# Run the main install command (primarily for the bootloader): # Run the main install command (primarily for the bootloader):
mount -o bind,ro /nix/store $mnt/nix/store ; prepend_trap '! mountpoint -q $mnt/nix/store || umount -l $mnt/nix/store' EXIT # all the things required to _run_ the system are copied, but (may) need some more things to initially install it mount -o bind,ro /nix/store $mnt/nix/store ; prepend_trap '! mountpoint -q $mnt/nix/store || umount -l $mnt/nix/store' EXIT # all the things required to _run_ the system are copied, but (may) need some more things to initially install it and/or enter the chroot (like qemu, see above)
run-hook-script 'Pre Installation' @{config.wip.fs.disks.preInstallCommands!writeText.preInstallCommands}
code=0 ; nixos-install-cmd $mnt "${topLevel:-$targetSystem}" || code=$? code=0 ; nixos-install-cmd $mnt "${topLevel:-$targetSystem}" || code=$?
#umount -l $mnt/nix/store # »nixos-enter« below still needs the bind mount, if installing cross-arch run-hook-script 'Post Installation' @{config.wip.fs.disks.postInstallCommands!writeText.postInstallCommands}
# Done! # Done!
if [[ ! ${args[no-inspect]:-} ]] ; then if [[ ! ${args[no-inspect]:-} ]] ; then
@ -101,7 +96,7 @@ function install-system-to {( set -eu # 1: mnt, 2?: topLevel
else else
( set +x ; echo "Installation done! This shell is in a chroot in the mounted system for inspection. Exiting the shell will unmount the system." ) ( set +x ; echo "Installation done! This shell is in a chroot in the mounted system for inspection. Exiting the shell will unmount the system." )
fi fi
PATH=@{config.systemd.package}/bin:$PATH nixos-enter --root $mnt PATH=@{config.systemd.package}/bin:$PATH @{native.nixos-install-tools}/bin/nixos-enter --root $mnt # TODO: construct path as it would be at login
#( cd $mnt ; mnt=$mnt @{native.bashInteractive}/bin/bash --init-file @{config.environment.etc.bashrc.source} ) #( cd $mnt ; mnt=$mnt @{native.bashInteractive}/bin/bash --init-file @{config.environment.etc.bashrc.source} )
elif (( code != 0 )) ; then elif (( code != 0 )) ; then
exit $code exit $code

View File

@ -34,16 +34,16 @@ function populate-keystore { { # (void)
methods[$usage]=${methods[$from]} ; options[$usage]=${options[$from]} methods[$usage]=${methods[$from]} ; options[$usage]=${options[$from]}
done done
for usage in "${!methods[@]}" ; do for usage in "${!methods[@]}" ; do
if [[ "${methods[$usage]}" == home-pw || "${methods[$usage]}" == copy ]] ; then continue ; fi if [[ "${methods[$usage]}" == home-composite || "${methods[$usage]}" == copy ]] ; then continue ; fi
add-key-"${methods[$usage]}" "$usage" "${options[$usage]}" || return 1 gen-key-"${methods[$usage]}" "$usage" "${options[$usage]}" | write-secret "$keystore"/"$usage".key || return 1
done done
for usage in "${!methods[@]}" ; do for usage in "${!methods[@]}" ; do
if [[ "${methods[$usage]}" != home-pw ]] ; then continue ; fi if [[ "${methods[$usage]}" != home-composite ]] ; then continue ; fi
add-key-"${methods[$usage]}" "$usage" "${options[$usage]}" || return 1 gen-key-"${methods[$usage]}" "$usage" "${options[$usage]}" | write-secret "$keystore"/"$usage".key || return 1
done done
for usage in "${!methods[@]}" ; do for usage in "${!methods[@]}" ; do
if [[ "${methods[$usage]}" != copy ]] ; then continue ; fi if [[ "${methods[$usage]}" != copy ]] ; then continue ; fi
add-key-"${methods[$usage]}" "$usage" "${options[$usage]}" || return 1 gen-key-"${methods[$usage]}" "$usage" "${options[$usage]}" | write-secret "$keystore"/"$usage".key || return 1
done done
)} )}

View File

@ -26,7 +26,7 @@ function register-vbox {( set -eu # 1: diskImages, 2?: bridgeTo
$VBoxManage modifyvm "$vmName" --nic2 bridged --bridgeadapter2 $bridgeTo $VBoxManage modifyvm "$vmName" --nic2 bridged --bridgeadapter2 $bridgeTo
fi fi
# TODO: The serial settings between qemu and vBox seem incompatible. With a simple »console=ttyS0«, vBox hangs on start. So just disable this for now and use qemu for headless setups. The UX here is awful anyway. # The serial settings between qemu and vBox seem incompatible. With a simple »console=ttyS0«, vBox hangs on start. So just disable this for now and use qemu for headless setups. The UX here is awful anyway.
#$VBoxManage modifyvm "$vmName" --uart1 0x3F8 4 --uartmode1 server /run/user/$(id -u)/$vmName.socket # (guest sets speed) #$VBoxManage modifyvm "$vmName" --uart1 0x3F8 4 --uartmode1 server /run/user/$(id -u)/$vmName.socket # (guest sets speed)
set +x # avoid double-echoing set +x # avoid double-echoing
@ -63,8 +63,14 @@ function run-qemu {( set -eu # 1: diskImages
#qemu+=( -M virt -m 1024 -smp 4 -cpu cortex-a53 ) ; args[no-nat]=1 #qemu+=( -M virt -m 1024 -smp 4 -cpu cortex-a53 ) ; args[no-nat]=1
fi # else things are going to be quite slow fi # else things are going to be quite slow
for decl in ${diskImages//:/ } ; do disks=( ${diskImages//:/ } ) ; for index in ${!disks[@]} ; do
qemu+=( -drive format=raw,file="${decl/*=/}" ) #,if=none,index=0,media=disk,id=disk0 -device "virtio-blk-pci,drive=disk0,disable-modern=on,disable-legacy=off" ) # qemu+=( -drive format=raw,if=ide,file="${disks[$index]/*=/}" ) # »if=ide« is the default, which these days isn't great for driver support inside the VM
qemu+=( # not sure how correct the interpretations of these commands are, and whether this works for more than one disk
-drive format=raw,file="${disks[$index]/*=/}",media=disk,if=none,index=${index},id=drive${index} # create the disk drive, without attaching it, name it driveX
-device ahci,acpi-index=${index},id=ahci${index} # create an (ich9-)AHCI bus named »ahciX«
-device ide-hd,drive=drive${index},bus=ahci${index}.${index} # attach IDE?! disk driveX as device X on bus »ahciX«
#-device virtio-blk-pci,drive=drive${index},disable-modern=on,disable-legacy=off # alternative to the two lines above (implies to be faster, but seems to require guest drivers)
)
done done
if [[ @{config.boot.loader.systemd-boot.enable} || ${args[efi]:-} ]] ; then # UEFI. Otherwise it boots something much like a classic BIOS? if [[ @{config.boot.loader.systemd-boot.enable} || ${args[efi]:-} ]] ; then # UEFI. Otherwise it boots something much like a classic BIOS?
@ -78,11 +84,18 @@ function run-qemu {( set -eu # 1: diskImages
qemu+=( -kernel @{config.system.build.kernel}/Image -initrd @{config.system.build.initialRamdisk}/initrd -append "$(echo -n "@{config.boot.kernelParams[@]}")" ) qemu+=( -kernel @{config.system.build.kernel}/Image -initrd @{config.system.build.initialRamdisk}/initrd -append "$(echo -n "@{config.boot.kernelParams[@]}")" )
fi fi
for param in "@{config.boot.kernelParams[@]}" ; do if [[ $param == 'console=ttyS0' || $param == 'console=ttyS0',* ]] ; then # Add »config.boot.kernelParams = [ "console=tty1" "console=ttyS0" ]« to log to serial (»ttyS0«) and/or the display (»tty1«), preferring the last »console« option for the initrd shell (if enabled and requested).
qemu+=( -nographic ) # »-nographic« by default only shows output once th system reaches the login prompt. Add »config.boot.kernelParams = [ "console=tty1" "console=ttyS0" ]« to log to serial (»-nographic«) and the display (if there is one), preferring the last »console« option for the initrd shell (if enabled and requested). logSerial= ; if [[ ' '"@{config.boot.kernelParams[@]}"' ' == *' console=ttyS0'@( |,)* ]] ; then logSerial=1 ; fi
fi ; done logScreen= ; if [[ ' '"@{config.boot.kernelParams[@]}"' ' == *' console=tty1 '* ]] ; then logScreen=1 ; fi
if [[ $logSerial ]] ; then
if [[ $logScreen || ${args[graphic]:-} ]] ; then
qemu+=( -serial mon:stdio )
else
qemu+=( -nographic ) # Without »console=tty1« (or with no »console=...« parameter at all), boot messages won't show on the screen.
fi
fi
if [[ ! ${args[no-nat]:-} ]] ; then if [[ ! ${args[no-nat]:-} ]] ; then # e.g. --nat-fw=8000-:8000,8001-:8001
qemu+=( -nic user,model=virtio-net-pci${args[nat-fw]:+,hostfwd=tcp::${args[nat-fw]//,/,hostfwd=tcp::}} ) # NATed, IPs: 10.0.2.15+/32, gateway: 10.0.2.2 qemu+=( -nic user,model=virtio-net-pci${args[nat-fw]:+,hostfwd=tcp::${args[nat-fw]//,/,hostfwd=tcp::}} ) # NATed, IPs: 10.0.2.15+/32, gateway: 10.0.2.2
fi fi

View File

@ -33,6 +33,11 @@ function prepend_trap { # 1: command, ...: trapNames
} }
declare -f -t prepend_trap # required to modify DEBUG or RETURN traps declare -f -t prepend_trap # required to modify DEBUG or RETURN traps
## Given the name to an existing bash function, this creates a copy of that function with a new name (in the current scope).
function copy-function { # 1: existingName, 2: newName
local original=$(declare -f "${1?existingName not provided}") ; if [[ ! $original ]] ; then echo "Function $1 is not defined" ; return 1 ; fi
eval "${original/$1/${2?newName not provided}}" # run the code declaring the function again, replacing only the first occurrence of the name
}
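As a sketch of how »copy-function« can be used (the wrapper shown below is hypothetical, not part of this repo): it lets a script keep the original definition of a function available under a new name, so that a same-named override can delegate to it.

```bash
# Hypothetical example: keep the original »run-qemu« available under a new name, then wrap it.
copy-function run-qemu run-qemu-original
function run-qemu { # same-named override
    echo "starting qemu VM with args: $*" >&2
    run-qemu-original "$@" # delegate to the copied original
}
```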
## Writes a »$name«d secret from stdin to »$targetDir«, ensuring proper file permissions. ## Writes a »$name«d secret from stdin to »$targetDir«, ensuring proper file permissions.
function write-secret {( set -eu # 1: path, 2?: owner[:[group]], 3?: mode function write-secret {( set -eu # 1: path, 2?: owner[:[group]], 3?: mode
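A usage sketch for »write-secret«, mirroring how »populate-keystore« pipes generated key material into it via stdin (the paths below are made up; the optional owner/mode arguments follow the signature above):

```bash
# Hypothetical example: generate a 64-digit hex key and store it with restrictive permissions.
</dev/urandom tr -dc 0-9a-f | head -c 64 | write-secret /run/keystore-01234567/zfs/rpool/remote.key
# Owner/group and mode could be passed explicitly:
#   some-key-source | write-secret /run/keystore-01234567/luks/data-01234567/0.key root:root 400
```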

View File

@ -1,6 +1,4 @@
# These functions have »pkgs.zfs« as undeclared dependency (so that they can alternatively use initramfs' »extraUtils«).
## Creates the system's ZFS pools and their datasets. ## Creates the system's ZFS pools and their datasets.
function create-zpools { # 1: mnt function create-zpools { # 1: mnt
local mnt=$1 ; local poolName ; for poolName in "@{!config.wip.fs.zfs.pools[@]}" ; do ( set -eu local mnt=$1 ; local poolName ; for poolName in "@{!config.wip.fs.zfs.pools[@]}" ; do ( set -eu
@ -22,9 +20,9 @@ function create-zpools { # 1: mnt
if ! is-partition-on-disks "$part" "${blockDevs[@]}" ; then echo "Partition alias $part used by zpool ${pool[name]} does not point at one of the target disks ${blockDevs[@]}" ; exit 1 ; fi if ! is-partition-on-disks "$part" "${blockDevs[@]}" ; then echo "Partition alias $part used by zpool ${pool[name]} does not point at one of the target disks ${blockDevs[@]}" ; exit 1 ; fi
fi fi
done done
( set -x ; zpool create "${args[@]}" -R "$mnt" "${pool[name]}" "${vdevs[@]}" ) ( PATH=@{native.zfs}/bin ; set -x ; zpool create "${args[@]}" -R "$mnt" "${pool[name]}" "${vdevs[@]}" )
) && { ) && {
prepend_trap "zpool export '$poolName'" EXIT prepend_trap "@{native.zfs}/bin/zpool export '$poolName'" EXIT
} ; done && } ; done &&
ensure-datasets $mnt ensure-datasets $mnt
@ -38,6 +36,7 @@ function ensure-datasets {( set -eu # 1: mnt, 2?: filterExp
mnt=$1 ; while [[ "$mnt" == */ ]] ; do mnt=${mnt:0:(-1)} ; done # (remove any trailing slashes) mnt=$1 ; while [[ "$mnt" == */ ]] ; do mnt=${mnt:0:(-1)} ; done # (remove any trailing slashes)
filterExp=${2:-'^'} filterExp=${2:-'^'}
tmpMnt=$(mktemp -d) ; trap "rmdir $tmpMnt" EXIT tmpMnt=$(mktemp -d) ; trap "rmdir $tmpMnt" EXIT
zfs=@{native.zfs}/bin/zfs
: 'Step-through is very verbose and breaks the loop, disabling it for this function' ; trap - debug : 'Step-through is very verbose and breaks the loop, disabling it for this function' ; trap - debug
printf '%s\0' "@{!config.wip.fs.zfs.datasets[@]}" | LC_ALL=C sort -z | while IFS= read -r -d $'\0' name ; do printf '%s\0' "@{!config.wip.fs.zfs.datasets[@]}" | LC_ALL=C sort -z | while IFS= read -r -d $'\0' name ; do
@ -49,10 +48,10 @@ function ensure-datasets {( set -eu # 1: mnt, 2?: filterExp
explicitKeylocation=${props[keylocation]:-} explicitKeylocation=${props[keylocation]:-}
get-zfs-crypt-props "${dataset[name]}" props cryptKey cryptRoot get-zfs-crypt-props "${dataset[name]}" props cryptKey cryptRoot
if zfs get -o value -H name "${dataset[name]}" &>/dev/null ; then # dataset exists: check its properties if $zfs get -o value -H name "${dataset[name]}" &>/dev/null ; then # dataset exists: check its properties
if [[ ${props[mountpoint]:-} ]] ; then # don't set the current mount point again (no-op), cuz that fails if the dataset is mounted if [[ ${props[mountpoint]:-} ]] ; then # don't set the current mount point again (no-op), cuz that fails if the dataset is mounted
current=$(zfs get -o value -H mountpoint "${dataset[name]}") ; current=${current/$mnt/} current=$($zfs get -o value -H mountpoint "${dataset[name]}") ; current=${current/$mnt/}
if [[ ${props[mountpoint]} == "${current:-/}" ]] ; then unset props[mountpoint] ; fi if [[ ${props[mountpoint]} == "${current:-/}" ]] ; then unset props[mountpoint] ; fi
fi fi
if [[ ${props[keyformat]:-} == ephemeral ]] ; then if [[ ${props[keyformat]:-} == ephemeral ]] ; then
@ -61,54 +60,54 @@ function ensure-datasets {( set -eu # 1: mnt, 2?: filterExp
if [[ $explicitKeylocation ]] ; then props[keylocation]=$explicitKeylocation ; fi if [[ $explicitKeylocation ]] ; then props[keylocation]=$explicitKeylocation ; fi
unset props[encryption] ; unset props[keyformat] # can't change these anyway unset props[encryption] ; unset props[keyformat] # can't change these anyway
names=$(IFS=, ; echo "${!props[*]}") ; values=$(IFS=$'\n' ; echo "${props[*]}") names=$(IFS=, ; echo "${!props[*]}") ; values=$(IFS=$'\n' ; echo "${props[*]}")
if [[ $values != "$(zfs get -o value -H "$names" "${dataset[name]}")" ]] ; then ( if [[ $values != "$($zfs get -o value -H "$names" "${dataset[name]}")" ]] ; then (
declare -a args=( ) ; for name in "${!props[@]}" ; do args+=( "${name}=${props[$name]}" ) ; done declare -a args=( ) ; for name in "${!props[@]}" ; do args+=( "${name}=${props[$name]}" ) ; done
( set -x ; zfs set "${args[@]}" "${dataset[name]}" ) ( PATH=@{native.zfs}/bin ; set -x ; zfs set "${args[@]}" "${dataset[name]}" )
) ; fi ) ; fi
if [[ $cryptRoot && $(zfs get -o value -H encryptionroot "${dataset[name]}") != "$cryptRoot" ]] ; then ( # inherit key from parent (which the parent would also already have done if necessary) if [[ $cryptRoot && $($zfs get -o value -H encryptionroot "${dataset[name]}") != "$cryptRoot" ]] ; then ( # inherit key from parent (which the parent would also already have done if necessary)
if [[ $(zfs get -o value -H keystatus "$cryptRoot") != available ]] ; then if [[ $($zfs get -o value -H keystatus "$cryptRoot") != available ]] ; then
zfs load-key -L file://"$cryptKey" "$cryptRoot" ; trap "zfs unload-key $cryptRoot || true" EXIT $zfs load-key -L file://"$cryptKey" "$cryptRoot" ; trap "$zfs unload-key $cryptRoot || true" EXIT
fi fi
if [[ $(zfs get -o value -H keystatus "${dataset[name]}") != available ]] ; then if [[ $($zfs get -o value -H keystatus "${dataset[name]}") != available ]] ; then
zfs load-key -L file://"$cryptKey" "${dataset[name]}" # will unload with cryptRoot $zfs load-key -L file://"$cryptKey" "${dataset[name]}" # will unload with cryptRoot
fi fi
( set -x ; zfs change-key -i "${dataset[name]}" ) ( PATH=@{native.zfs}/bin ; set -x ; zfs change-key -i "${dataset[name]}" )
) ; fi ) ; fi
else ( # create dataset else ( # create dataset
if [[ ${props[keyformat]:-} == ephemeral ]] ; then if [[ ${props[keyformat]:-} == ephemeral ]] ; then
props[encryption]=aes-256-gcm ; props[keyformat]=hex ; props[keylocation]=file:///dev/stdin ; explicitKeylocation=file:///dev/null props[encryption]=aes-256-gcm ; props[keyformat]=hex ; props[keylocation]=file:///dev/stdin ; explicitKeylocation=file:///dev/null
declare -a args=( ) ; for name in "${!props[@]}" ; do args+=( -o "${name}=${props[$name]}" ) ; done declare -a args=( ) ; for name in "${!props[@]}" ; do args+=( -o "${name}=${props[$name]}" ) ; done
</dev/urandom tr -dc 0-9a-f | head -c 64 | ( set -x ; zfs create "${args[@]}" "${dataset[name]}" ) </dev/urandom tr -dc 0-9a-f | head -c 64 | ( PATH=@{native.zfs}/bin ; set -x ; zfs create "${args[@]}" "${dataset[name]}" )
zfs unload-key "${dataset[name]}" $zfs unload-key "${dataset[name]}"
else else
if [[ $cryptRoot && $cryptRoot != ${dataset[name]} && $(zfs get -o value -H keystatus "$cryptRoot") != available ]] ; then if [[ $cryptRoot && $cryptRoot != ${dataset[name]} && $($zfs get -o value -H keystatus "$cryptRoot") != available ]] ; then
zfs load-key -L file://"$cryptKey" "$cryptRoot" ; trap "zfs unload-key $cryptRoot || true" EXIT $zfs load-key -L file://"$cryptKey" "$cryptRoot" ; trap "$zfs unload-key $cryptRoot || true" EXIT
fi fi
declare -a args=( ) ; for name in "${!props[@]}" ; do args+=( -o "${name}=${props[$name]}" ) ; done declare -a args=( ) ; for name in "${!props[@]}" ; do args+=( -o "${name}=${props[$name]}" ) ; done
( set -x ; zfs create "${args[@]}" "${dataset[name]}" ) ( PATH=@{native.zfs}/bin ; set -x ; zfs create "${args[@]}" "${dataset[name]}" )
fi fi
if [[ ${props[canmount]} != off ]] ; then ( if [[ ${props[canmount]} != off ]] ; then (
mount -t zfs -o zfsutil "${dataset[name]}" $tmpMnt ; trap "umount '${dataset[name]}'" EXIT mount -t zfs -o zfsutil "${dataset[name]}" $tmpMnt ; trap "umount '${dataset[name]}'" EXIT
chmod 000 "$tmpMnt" ; ( chown "${dataset[uid]}:${dataset[gid]}" -- "$tmpMnt" ; chmod "${dataset[mode]}" -- "$tmpMnt" ) chmod 000 "$tmpMnt" ; ( chown "${dataset[uid]}:${dataset[gid]}" -- "$tmpMnt" ; chmod "${dataset[mode]}" -- "$tmpMnt" )
) ; fi ) ; fi
if [[ $explicitKeylocation && $explicitKeylocation != "${props[keylocation]:-}" ]] ; then if [[ $explicitKeylocation && $explicitKeylocation != "${props[keylocation]:-}" ]] ; then
( set -x ; zfs set keylocation="$explicitKeylocation" "${dataset[name]}" ) ( PATH=@{native.zfs}/bin ; set -x ; zfs set keylocation="$explicitKeylocation" "${dataset[name]}" )
fi fi
zfs snapshot -r "${dataset[name]}"@empty $zfs snapshot -r "${dataset[name]}"@empty
) ; fi ) ; fi
eval 'declare -A allows='"${dataset[permissions]}" eval 'declare -A allows='"${dataset[permissions]}"
for who in "${!allows[@]}" ; do for who in "${!allows[@]}" ; do
# »zfs allow $dataset« seems to be the only way to view permissions, and that is not very parsable -.- # »zfs allow $dataset« seems to be the only way to view permissions, and that is not very parsable -.-
( set -x ; zfs allow -$who "${allows[$who]}" "${dataset[name]}" >&2 ) ( PATH=@{native.zfs}/bin ; set -x ; zfs allow -$who "${allows[$who]}" "${dataset[name]}" >&2 )
done done
done done
)} )}
## TODO ## Given the name (»datasetPath«) of a ZFS dataset, this derives crypto-related options from the declared keys (»config.wip.fs.keystore.keys."zfs/..."«).
function get-zfs-crypt-props { # 1: datasetPath, 2: name_cryptProps, 3: name_cryptKey, 4: name_cryptRoot function get-zfs-crypt-props { # 1: datasetPath, 2: name_cryptProps, 3: name_cryptKey, 4: name_cryptRoot
local hash=@{config.networking.hostName!hashString.sha256:0:8} local hash=@{config.networking.hostName!hashString.sha256:0:8}
local keystore=/run/keystore-$hash local keystore=/run/keystore-$hash
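A usage sketch matching the call in »ensure-datasets« above (the dataset name is made up):

```bash
# Hypothetical example: query the crypto-related properties for one dataset.
declare -A props=( ) ; cryptKey='' ; cryptRoot=''
get-zfs-crypt-props rpool/local/nix props cryptKey cryptRoot
# »props« now holds the encryption-related ZFS properties to set, »cryptKey« the key file in the
# keystore (if any), and »cryptRoot« the dataset acting as encryption root (if any).
```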

View File

@ -75,9 +75,8 @@ in rec {
captures = map ctxify (lib.sublist 3 (builtins.length matches) matches); captures = map ctxify (lib.sublist 3 (builtins.length matches) matches);
after = ctxify (get 2); after = ctxify (get 2);
without = ctxify (before + after); without = ctxify (before + after);
}; # (TODO: The string context stuff is actually required, but why? Shouldn't »builtins.split« propagate the context?) }; # (The string context stuff is actually required, but why? Shouldn't »builtins.split« propagate the context?)
extractLine = exp: text: extractLineAnchored exp false false text; extractLine = exp: text: extractLineAnchored exp false false text;
#extractLine = exp: text: let split = builtins.split "([^\n]*${exp}[^\n]*\n)" (builtins.unsafeDiscardStringContext (if (lastChar text) == "\n" then text else text + "\n")); get = builtins.elemAt split; ctxify = str: lib.addContextFrom text str; in if builtins.length split != 3 then null else rec { before = ctxify (get 0); line = ctxify (builtins.head (get 1)); captures = map ctxify (builtins.tail (get 1)); after = ctxify (get 2); without = ctxify (before + after); }; # (TODO: The string context stuff is actually required, but why? Shouldn't »builtins.split« propagate the context?)
# Given a string, returns its first/last char (or last utf-8(?) byte?). # Given a string, returns its first/last char (or last utf-8(?) byte?).
firstChar = string: builtins.substring (0) 1 string; firstChar = string: builtins.substring (0) 1 string;

View File

@ -17,6 +17,8 @@ in {
options.${prefix} = { base = { options.${prefix} = { base = {
enable = lib.mkEnableOption "saner defaults"; enable = lib.mkEnableOption "saner defaults";
includeInputs = lib.mkOption { description = "Whether to include all build inputs to the configuration in the final system, such that they are available for self-rebuilds, in the flake registry, and on the »NIX_PATH« entry (e.g. as »pkgs« on the CLI)."; type = lib.types.bool; default = specialArgs?inputs && specialArgs.inputs?self && specialArgs.inputs?nixpkgs; }; includeInputs = lib.mkOption { description = "Whether to include all build inputs to the configuration in the final system, such that they are available for self-rebuilds, in the flake registry, and on the »NIX_PATH« entry (e.g. as »pkgs« on the CLI)."; type = lib.types.bool; default = specialArgs?inputs && specialArgs.inputs?self && specialArgs.inputs?nixpkgs; };
panic_on_fail = lib.mkEnableOption "Kernel parameter »boot.panic_on_fail«" // { default = true; }; # It's stupidly hard to remove items from lists ...
makeNoExec = lib.mkEnableOption "(almost) all filesystems being mounted as »noexec« (and »nosuid« and »nodev«)" // { default = false; };
}; }; }; };
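As a sketch, a host configuration might set these options as follows (assuming the default »wip« prefix; the concrete values are made up):

```nix
{ # e.g. in a host's »## Hardware« module
  wip.base.enable = true;
  wip.base.includeInputs = true; # defaults to »true« when »inputs.self«/»inputs.nixpkgs« are passed via »specialArgs«
  wip.base.panic_on_fail = false; # drop »boot.panic_on_fail« from the kernel params
  wip.base.makeNoExec = true; # mount (almost) all filesystems »noexec,nosuid,nodev«
}
```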
# Bugfix: # Bugfix:
@ -36,10 +38,54 @@ in {
documentation.man.enable = lib.mkDefault config.documentation.enable; documentation.man.enable = lib.mkDefault config.documentation.enable;
}) (lib.mkIf cfg.makeNoExec { ## Hardening
# This was the only "special" mount that did not have »nosuid« and »nodev« set:
systemd.packages = [ (lib.wip.mkSystemdOverride pkgs "dev-hugepages.mount" "[Mount]\nOptions=nosuid,nodev,noexec\n") ];
# And these were missing »noexec«:
boot.specialFileSystems."/dev".options = [ "noexec" ];
boot.specialFileSystems."/dev/shm".options = [ "noexec" ];
boot.specialFileSystems."/run/keys".options = [ "noexec" ];
# "Exceptions":
# /dev /dev/pts need »dev«
# /run/wrappers needs »exec« »suid«
# /run/binfmt needs »exec«
# /run /run/user/* may need »exec« (TODO: test)
# Ensure that the /nix/store is not »noexec«, even if the FS it is on is:
boot.initrd.postMountCommands = ''
if ! mountpoint -q $targetRoot/nix/store ; then
mount --bind $targetRoot/nix/store $targetRoot/nix/store
fi
mount -o remount,exec $targetRoot/nix/store
'';
# Nix has no (direct) settings to change where the builders have their »/build« bound to, but many builds will need it to be »exec«:
systemd.services.nix-daemon = { # TODO: while noexec on /tmp is the problem, neither of these solves it:
serviceConfig.PrivateTmp = true;
#serviceConfig.PrivateMounts = true; serviceConfig.ExecStartPre = "/run/wrappers/bin/mount -o remount,exec /tmp";
};
# And now make all "real" FSs »noexec« (if »wip.fs.temproot.enable = true«):
${prefix}.fs.temproot = let
it = { mountOptions = { nosuid = true; noexec = true; nodev = true; }; };
in { temp = it; local = it; remote = it; };
nix.allowedUsers = [ "root" "@wheel" ]; # This goes hand-in-hand with setting mounts as »noexec«. Cases where a user other than root should build stuff are probably fairly rare. A "real" user might want to, but such a user is either already in the wheel (sudo) group, or explicitly adding them is pretty reasonable.
boot.postBootCommands = ''
# Make the /nix/store non-iterable, to make it harder for unprivileged programs to search the store for programs they should not have access to:
unshare --fork --mount --uts --mount-proc --pid -- ${pkgs.bash}/bin/bash -euc '
mount --make-rprivate / ; mount --bind /nix/store /nix/store ; mount -o remount,rw /nix/store
chmod -f 1771 /nix/store
chmod -f 751 /nix/store/.links
'
'';
}) ({ }) ({
# Robustness/debugging: # Robustness/debugging:
boot.kernelParams = [ "panic=10" "boot.panic_on_fail" ]; # Reboot on kernel panic (showing the printed messages for 10s), panic if boot fails. boot.kernelParams = [ "panic=10" ] ++ (lib.optional cfg.panic_on_fail "boot.panic_on_fail"); # Reboot on kernel panic (showing the printed messages for 10s), panic if boot fails.
# might additionally want to do this: https://stackoverflow.com/questions/62083796/automatic-reboot-on-systemd-emergency-mode # might additionally want to do this: https://stackoverflow.com/questions/62083796/automatic-reboot-on-systemd-emergency-mode
systemd.extraConfig = "StatusUnitFormat=name"; # Show unit names instead of descriptions during boot. systemd.extraConfig = "StatusUnitFormat=name"; # Show unit names instead of descriptions during boot.
@ -55,7 +101,6 @@ in {
''; # (use this indirection so that all open shells update automatically) ''; # (use this indirection so that all open shells update automatically)
nix.nixPath = [ "nixpkgs=/run/current-system/pkgs" ]; # this intentionally replaces the defaults: nixpkgs is here, /etc/nixos/flake.nix is implicit, channels are impure nix.nixPath = [ "nixpkgs=/run/current-system/pkgs" ]; # this intentionally replaces the defaults: nixpkgs is here, /etc/nixos/flake.nix is implicit, channels are impure
# TODO: decide whether to put any other flake inputs also on the nix path: con: they may very well not even have a »./default.nix«
nix.autoOptimiseStore = true; # because why not ... nix.autoOptimiseStore = true; # because why not ...
environment.shellAliases = { "with" = ''nix-shell --run "bash --login" -p''; }; # »with« doesn't seem to be a common linux command yet, and it makes sense here: with $package => do stuff in shell environment.shellAliases = { "with" = ''nix-shell --run "bash --login" -p''; }; # »with« doesn't seem to be a common linux command yet, and it makes sense here: with $package => do stuff in shell

View File

@ -32,7 +32,7 @@ in {
a;1 # active/boot ; part1 a;1 # active/boot ; part1
''; }; }; ''; }; };
}; };
fileSystems.${cfg.mountpoint} = { fsType = "vfat"; device = "/dev/disk/by-partlabel/boot-${hash}"; neededForBoot = true; options = [ "noatime" "umask=0022" ]; formatOptions = "-F 32"; }; fileSystems.${cfg.mountpoint} = { fsType = "vfat"; device = "/dev/disk/by-partlabel/boot-${hash}"; neededForBoot = true; options = [ "nosuid" "nodev" "noexec" "noatime" "umask=0022" ]; formatOptions = "-F 32"; };
}) ]); }) ]);

View File

@ -12,6 +12,7 @@ Options to declare devices and partitions to be picked up by the installer scrip
dirname: inputs: { config, pkgs, lib, ... }: let inherit (inputs.self) lib; in let dirname: inputs: { config, pkgs, lib, ... }: let inherit (inputs.self) lib; in let
prefix = inputs.config.prefix; prefix = inputs.config.prefix;
cfg = config.${prefix}.fs.disks; cfg = config.${prefix}.fs.disks;
globalConfig = config;
types.guid = lib.types.strMatching ''^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$''; types.guid = lib.types.strMatching ''^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$'';
in { in {
@ -20,8 +21,9 @@ in {
description = "Set of disk devices that this host will be installed on."; description = "Set of disk devices that this host will be installed on.";
type = lib.types.attrsOf (lib.types.nullOr (lib.types.submodule ({ name, ... }: { options = { type = lib.types.attrsOf (lib.types.nullOr (lib.types.submodule ({ name, ... }: { options = {
name = lib.mkOption { description = "Name that this device is being referred to as in other places."; type = lib.types.str; default = name; readOnly = true; }; name = lib.mkOption { description = "Name that this device is being referred to as in other places."; type = lib.types.str; default = name; readOnly = true; };
guid = lib.mkOption { description = "GPT disk GUID of the disk."; type = types.guid; default = lib.wip.sha256guid ("gpt-disk:${name}"+":${config.networking.hostName}"); }; guid = lib.mkOption { description = "GPT disk GUID of the disk."; type = types.guid; default = lib.wip.sha256guid ("gpt-disk:${name}"+":${globalConfig.networking.hostName}"); };
size = lib.mkOption { description = "The size of the disk, either as number in bytes or as argument to »parseSizeSuffix«. When installing to a physical device, its size must match; images are created with this size."; type = lib.types.either lib.types.ints.unsigned lib.types.str; apply = lib.wip.parseSizeSuffix; default = "16G"; }; size = lib.mkOption { description = "The size of the disk, either as number in bytes or as argument to »parseSizeSuffix«. When installing to a physical device, its size must match; images are created with this size."; type = lib.types.either lib.types.ints.unsigned lib.types.str; apply = lib.wip.parseSizeSuffix; default = "16G"; };
allowLarger = lib.mkOption { description = "Whether to allow installation to a physical disk that is larger than the declared size."; type = lib.types.bool; default = true; };
serial = lib.mkOption { description = "Serial number of the specific hardware device to use. If set the device path passed to the installer must point to the device with this serial. Use » udevadm info --query=property --name=$DISK | grep -oP 'ID_SERIAL_SHORT=\K.*' || echo '<none>' « to get the serial."; type = lib.types.nullOr lib.types.str; default = null; }; serial = lib.mkOption { description = "Serial number of the specific hardware device to use. If set the device path passed to the installer must point to the device with this serial. Use » udevadm info --query=property --name=$DISK | grep -oP 'ID_SERIAL_SHORT=\K.*' || echo '<none>' « to get the serial."; type = lib.types.nullOr lib.types.str; default = null; };
alignment = lib.mkOption { description = "Default alignment quantifier for partitions on this device. Should be at least the optimal physical write size of the device, but going larger at worst wastes this many disk sectors times the number of partitions."; type = lib.types.int; default = 16384; }; alignment = lib.mkOption { description = "Default alignment quantifier for partitions on this device. Should be at least the optimal physical write size of the device, but going larger at worst wastes this many disk sectors times the number of partitions."; type = lib.types.int; default = 16384; };
gptOffset = lib.mkOption { description = "Offset of the partition tables, inwards from where (third / 2nd last) they usually are."; type = lib.types.ints.unsigned; default = 0; }; gptOffset = lib.mkOption { description = "Offset of the partition tables, inwards from where (third / 2nd last) they usually are."; type = lib.types.ints.unsigned; default = 0; };
@ -31,21 +33,21 @@ in {
a;1 # active/boot ; part1 a;1 # active/boot ; part1
''; }; ''; };
}; }))); }; })));
default = { }; default = { primary = { }; };
apply = lib.filterAttrs (k: v: v != null); apply = lib.filterAttrs (k: v: v != null);
}; };
partitions = lib.mkOption { partitions = lib.mkOption {
description = "Set of disks disk partitions that the system will need/use. Partitions will be created on their respective ».disk«s in ».order« using »sgdisk -n X:+0+$size«."; description = "Set of disks disk partitions that the system will need/use. Partitions will be created on their respective ».disk«s in ».order« using »sgdisk -n X:+0+$size«.";
type = lib.types.attrsOf (lib.types.nullOr (lib.types.submodule ({ name, ... }: { options = { type = lib.types.attrsOf (lib.types.nullOr (lib.types.submodule ({ name, config, ... }: { options = {
name = lib.mkOption { description = "Name/partlabel that this partition can be referred to as once created."; type = lib.types.str; default = name; readOnly = true; }; name = lib.mkOption { description = "Name/partlabel that this partition can be referred to as once created."; type = lib.types.str; default = name; readOnly = true; };
guid = lib.mkOption { description = "GPT partition GUID of the partition."; type = types.guid; default = lib.wip.sha256guid ("gpt-part:${name}"+":${config.networking.hostName}"); }; guid = lib.mkOption { description = "GPT partition GUID of the partition."; type = types.guid; default = lib.wip.sha256guid ("gpt-part:${name}"+":${globalConfig.networking.hostName}"); };
disk = lib.mkOption { description = "Name of the disk that this partition resides on, which will automatically be declared with default options."; type = lib.types.str; default = "primary"; }; disk = lib.mkOption { description = "Name of the disk that this partition resides on, which will automatically be declared with default options."; type = lib.types.str; default = "primary"; };
type = lib.mkOption { description = "»gdisk« partition type of this partition."; type = lib.types.str; }; type = lib.mkOption { description = "»gdisk« partition type of this partition."; type = lib.types.str; };
size = lib.mkOption { description = "Partition size, either as integer suffixed with »K«, »M«, »G«, etc for sizes in XiB, or an integer suffixed with »%« for that portion of the size of the actual disk the partition gets created on. Or »null« to fill the remaining disk space."; type = lib.types.nullOr lib.types.str; default = null; }; size = lib.mkOption { description = "Partition size, either as integer suffixed with »K«, »M«, »G«, etc for sizes in XiB, or an integer suffixed with »%« for that portion of the size of the actual disk the partition gets created on. Or »null« to fill the remaining disk space."; type = lib.types.nullOr lib.types.str; default = null; };
position = lib.mkOption { description = "Position at which to create the partition. The default »+0« means the beginning of the largest free block."; type = lib.types.str; default = "+0"; }; position = lib.mkOption { description = "Position at which to create the partition. The default »+0« means the beginning of the largest free block."; type = lib.types.str; default = "+0"; };
alignment = lib.mkOption { description = "Adjusted alignment quantifier for this partition only."; type = lib.types.nullOr lib.types.int; default = null; example = 1; }; alignment = lib.mkOption { description = "Adjusted alignment quantifier for this partition only."; type = lib.types.nullOr lib.types.int; default = null; example = 1; };
index = lib.mkOption { description = "Optionally explicit partition table index to place this partition in. Use ».order« to make sure that this index hasn't been used yet."; type = lib.types.nullOr lib.types.int; default = null; }; index = lib.mkOption { description = "Optionally explicit partition table index to place this partition in. Use ».order« to make sure that this index hasn't been used yet."; type = lib.types.nullOr lib.types.int; default = null; };
order = lib.mkOption { description = "Creation order ranking of this partition. Higher orders will be created first, and will thus be placed earlier in the partition table (if ».index« isn't explicitly set) and also further to the front of the disk space."; type = lib.types.int; default = 1000; }; order = lib.mkOption { description = "Creation order ranking of this partition. Higher orders will be created first, and will thus be placed earlier in the partition table (if ».index« isn't explicitly set) and also further to the front of the disk space."; type = lib.types.int; default = if config.size == null then 500 else (1000 - (if config.index == null then 0 else 256 - config.index)); };
}; }))); }; })));
default = { }; default = { };
apply = lib.filterAttrs (k: v: v != null); apply = lib.filterAttrs (k: v: v != null);
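For illustration, a disk and a partition might be declared with these options as follows (assuming the default »wip« prefix; the names and sizes are made up):

```nix
{ wip.fs.disks.devices.primary = { size = "64G"; }; # must match the physical disk's size (or be smaller, since »allowLarger« defaults to »true«)
  wip.fs.disks.partitions."data-example" = {
    disk = "primary"; type = "8300"; # generic Linux filesystem partition
    size = "20%"; order = 900; # sized relative to the disk, created after higher-order partitions
  };
}
```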
@ -58,13 +60,13 @@ in {
postPartitionCommands = lib.mkOption { description = ""; type = lib.types.lines; default = ""; }; postPartitionCommands = lib.mkOption { description = ""; type = lib.types.lines; default = ""; };
postFormatCommands = lib.mkOption { description = ""; type = lib.types.lines; default = ""; }; postFormatCommands = lib.mkOption { description = ""; type = lib.types.lines; default = ""; };
postMountCommands = lib.mkOption { description = ""; type = lib.types.lines; default = ""; }; postMountCommands = lib.mkOption { description = ""; type = lib.types.lines; default = ""; };
initSystemCommands = lib.mkOption { description = ""; type = lib.types.lines; default = ""; }; preInstallCommands = lib.mkOption { description = ""; type = lib.types.lines; default = ""; };
restoreSystemCommands = lib.mkOption { description = ""; type = lib.types.lines; default = ""; }; postInstallCommands = lib.mkOption { description = ""; type = lib.types.lines; default = ""; };
}; }; }; };
config.${prefix} = { config.${prefix} = {
# Create all devices referenced by partitions: # (Don't) create all devices referenced by partitions: (The problem with this is that all device attributes depend on the partition attributes, and it would thus be impossible to have a dependency in reverse (e.g. a partition's size based on the disk size).)
fs.disks.devices = lib.wip.mapMerge (name: { ${name} = { }; }) (lib.catAttrs "disk" config.${prefix}.fs.disks.partitionList); #fs.disks.devices = lib.genAttrs (lib.catAttrs "disk" config.${prefix}.fs.disks.partitionList) (name: { });
fs.disks.partitioning = let fs.disks.partitioning = let
partition-disk = { name = "partition-disk"; text = lib.wip.extractBashFunction (builtins.readFile lib.wip.setup-scripts.disk) "partition-disk"; }; partition-disk = { name = "partition-disk"; text = lib.wip.extractBashFunction (builtins.readFile lib.wip.setup-scripts.disk) "partition-disk"; };

View File

@ -12,11 +12,11 @@ The default functions in [`lib/setup-scripts`](../../lib/setup-scripts/README.md
What keys are used for is derived from the attribute name in the `.keys` specification, which (plus a `.key` suffix) also becomes their storage path in the keystore: What keys are used for is derived from the attribute name in the `.keys` specification, which (plus a `.key` suffix) also becomes their storage path in the keystore:
* Keys in `luks/` are used for LUKS devices, where the second path label is both the target device name and source device GPT partition label, and the third and final label is the LUKS key slot (`0` is required to be specified, `1` to `7` are optional). * Keys in `luks/` are used for LUKS devices, where the second path label is both the target device name and source device GPT partition label, and the third and final label is the LUKS key slot (`0` is required to be specified, `1` to `7` are optional).
* Keys in `zfs/` are used for ZFS datasets, where the further path is that of the dataset. Datasets implicitly inherit their parent's encryption by default. An empty key (created by method `unencrypted`) explicitly disables encryption on a dataset. Other keys are by default used with `keyformat=hex` and must thus be exactly 64 (lowercase) hex digits. * Keys in `zfs/` are used for ZFS datasets, where the further path is that of the dataset. Datasets implicitly inherit their parent's encryption by default. An empty key (created by method `unencrypted`) explicitly disables encryption on a dataset. Other keys are by default used with `keyformat=hex` and must thus be exactly 64 (lowercase) hex digits.
* Keys in `home/` are used as composites for home directory encryption, where the second and only other path label is the user name. TODO: this is not completely implemented yet. * Keys in `home/` are meant to be used as composites for home directory encryption, where the second and only other path label is the user name.
The attribute value in the `.keys` specification dictates how the key is acquired, primarily initially during installation, but (depending on the key's usage) also during boot unlocking, etc. The attribute value in the `.keys` specification dictates how the key is acquired, primarily initially during installation, but (depending on the key's usage) also during boot unlocking, etc.
The format of the key specification is `method[=args]`, where `method` is the suffix of a bash function call `gen-key-<method>` (the default functions are in [`add-key.sh`](../../lib/setup-scripts/add-key.sh), but others could be added to the installer), and `args` is the second argument to the respective function (often a `:` separated list of arguments, but some methods don't need any arguments at all). The format of the key specification is `method[=args]`, where `method` is the suffix of a bash function call `gen-key-<method>` (the default functions are in [`add-key.sh`](../../lib/setup-scripts/add-key.sh), but others could be added to the installer), and `args` is the second argument to the respective function (often a `:` separated list of arguments, but some methods don't need any arguments at all).
Most key generation methods only make sense in some key usage contexts. A `random` key is impossible to provide to unlock the keystore (which it is stored in), but is well suited to unlock other devices (if the keystore has backups (TODO!)); conversely a USB-partition can be used to headlessly unlock the keystore, but would be redundant for any further devices, as it would also be copied in the keystore. Most key generation methods only make sense in some key usage contexts. A `random` key is impossible to provide to unlock the keystore (which it is stored in), but is well suited to unlock other devices (if the keystore has backups); conversely a USB-partition can be used to headlessly unlock the keystore, but would be redundant for any further devices, as it would also be copied into the keystore.
If the module is `enable`d, a partition and LUKS device `keystore-...` get configured and the contents of the installation-time keystore are copied to it (in its entirety, including intermediate or derived keys and those unlocking the keystore itself (TODO: this could be optimized)). If the module is `enable`d, a partition and LUKS device `keystore-...` get configured and the contents of the installation-time keystore are copied to it (in its entirety, including intermediate or derived keys and those unlocking the keystore itself (TODO: this could be optimized)).
This LUKS device is then configured to be unlocked (using any of the key methods specified for it) before anything else during boot, and closed before leaving the initramfs phase. This LUKS device is then configured to be unlocked (using any of the key methods specified for it) before anything else during boot, and closed before leaving the initramfs phase.
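As a hypothetical sketch of such a key specification (the dataset paths are made up, and »usb-part« only stands in for whatever »gen-key-*« method is supposed to unlock the keystore itself):

```nix
{ config, lib, ... }: let
  hash = builtins.substring 0 8 (builtins.hashString "sha256" config.networking.hostName);
in {
  wip.fs.keystore.enable = true;
  wip.fs.keystore.keys."luks/keystore-${hash}/0" = "usb-part"; # hypothetical method name; key slot »0« is required
  wip.fs.keystore.keys."zfs/rpool/remote" = "random"; # 64 hex digits, generated at installation
  wip.fs.keystore.keys."zfs/rpool/local" = "unencrypted"; # explicitly disable encryption for this subtree
}
```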
@ -70,7 +70,6 @@ in let module = {
}) ({ }) ({
boot.initrd.supportedFilesystems = [ "vfat" ]; boot.initrd.supportedFilesystems = [ "vfat" ];
#boot.supportedFilesystems = [ "vfat" ]; # TODO: this should not be necessary
boot.initrd.luks.devices."keystore-${hash}" = { boot.initrd.luks.devices."keystore-${hash}" = {
device = "/dev/disk/by-partlabel/keystore-${hash}"; device = "/dev/disk/by-partlabel/keystore-${hash}";
@ -89,7 +88,7 @@ in let module = {
}; };
# Create and populate keystore during installation: # Create and populate keystore during installation:
fileSystems.${keystore} = { fsType = "vfat"; device = "/dev/mapper/keystore-${hash}"; options = [ "ro" "noatime" "umask=0277" "noauto" ]; formatOptions = ""; }; fileSystems.${keystore} = { fsType = "vfat"; device = "/dev/mapper/keystore-${hash}"; options = [ "ro" "nosuid" "nodev" "noexec" "noatime" "umask=0277" "noauto" ]; formatOptions = ""; };
${prefix} = { ${prefix} = {
fs.disks.partitions."keystore-${hash}" = { type = lib.mkDefault "8309"; order = lib.mkDefault 1375; disk = lib.mkDefault "primary"; size = lib.mkDefault "32M"; }; fs.disks.partitions."keystore-${hash}" = { type = lib.mkDefault "8309"; order = lib.mkDefault 1375; disk = lib.mkDefault "primary"; size = lib.mkDefault "32M"; };

View File

@ -48,7 +48,7 @@ let hash = builtins.substring 0 8 (builtins.hashString "sha256" config.networkin
This completely configures the disks, partitions, pool, datasets, and mounts for a ZFS `rpool` on a three-disk `raidz1` with read and write cache on an additional SSD, which also holds the boot partition and swap: This completely configures the disks, partitions, pool, datasets, and mounts for a ZFS `rpool` on a three-disk `raidz1` with read and write cache on an additional SSD, which also holds the boot partition and swap:
```nix ```nix
{ wip.fs.disks.devices.primary.size = "16G"; # The default, and only relevant when installing to disk images. The »primary« disk will hold all implicitly created partitions and those not stating a »disk«. { wip.fs.disks.devices = lib.genAttrs ([ "primary" "raidz1" "raidz2" "raidz3" ]) (name: { size = "16G"; }); # Need more than one disk, so must declare them. When installing to a physical disk, the declared size must match the actual size (or be smaller). The »primary« disk will hold all implicitly created partitions and those not stating a »disk«.
wip.fs.boot.enable = true; wip.fs.boot.size = "512M"; # See »./boot.nix.md«. Creates a FAT boot partition. wip.fs.boot.enable = true; wip.fs.boot.size = "512M"; # See »./boot.nix.md«. Creates a FAT boot partition.
wip.fs.keystore.enable = true; # See »./keystore.nix.md«. With this enabled, »remote« will automatically be encrypted, with a random key by default. wip.fs.keystore.enable = true; # See »./keystore.nix.md«. With this enabled, »remote« will automatically be encrypted, with a random key by default.
@ -113,14 +113,14 @@ dirname: inputs: specialArgs@{ config, pkgs, lib, utils, ... }: let inherit (inp
uid = lib.mkOption { description = "UID owning the mounted target."; type = lib.types.ints.unsigned; default = 0; }; uid = lib.mkOption { description = "UID owning the mounted target."; type = lib.types.ints.unsigned; default = 0; };
gid = lib.mkOption { description = "GID owning the mounted target."; type = lib.types.ints.unsigned; default = 0; }; gid = lib.mkOption { description = "GID owning the mounted target."; type = lib.types.ints.unsigned; default = 0; };
mode = lib.mkOption { description = "Permission mode of the mounted target."; type = lib.types.str; default = "750"; }; mode = lib.mkOption { description = "Permission mode of the mounted target."; type = lib.types.str; default = "750"; };
options = lib.mkOption { description = "Additional mount options to set. Note that not all mount types support all options, they may be silently ignored or cause errors. »bind« supports setting »nosuid«, »nodev«, »noexec«, »noatime«, »nodiratime«, and »relatime«. »zfs« will only heed »noauto« and otherwise use the ».zfsProps«."; type = lib.types.listOf lib.types.str; default = [ ]; }; options = lib.mkOption { description = "Additional mount options to set. Note that not all mount types support all options, they may be silently ignored or cause errors. »bind« supports setting »nosuid«, »nodev«, »noexec«, »noatime«, »nodiratime«, and »relatime«. »zfs« will explicitly heed »noauto«, the other options are applied but may conflict with the ones implied by the ».zfsProps«."; type = lib.types.attrsOf (lib.types.either lib.types.bool lib.types.str); default = { }; };
extraFsOptions = lib.mkOption { description = "Extra options to set on the generated »fileSystems.*« entry (unless this mount is forced to »null«)."; type = lib.types.attrsOf lib.types.anything; default = { }; }; extraFsConfig = lib.mkOption { description = "Extra config options to set on the generated »fileSystems.*« entry (unless this mount is forced to »null«)."; type = lib.types.attrsOf lib.types.anything; default = { }; };
zfsProps = lib.mkOption { description = "ZFS properties to set on the dataset, if mode type is »zfs«."; type = lib.types.attrsOf (lib.types.nullOr lib.types.str); default = { }; }; zfsProps = lib.mkOption { description = "ZFS properties to set on the dataset, if mode type is »zfs«."; type = lib.types.attrsOf (lib.types.nullOr lib.types.str); default = { }; };
}; }))); }; })));
default = { }; default = { };
apply = lib.filterAttrs (k: v: v != null); apply = lib.filterAttrs (k: v: v != null);
}; };
mountOptions = lib.mkOption { description = "Mount options that will be placed before ».mounts.*.options«."; type = lib.types.listOf lib.types.str; default = [ "noatime" "nodev" "nosuid" ]; }; mountOptions = lib.mkOption { description = "Mount options that will be merged under ».mounts.*.options«."; type = lib.types.attrsOf (lib.types.either lib.types.bool lib.types.str); default = { nosuid = true; nodev = true; noatime = true; }; };
}; };
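A sketch of declaring an additional mount with per-mount options under the new attrset-based »options« (assuming the »wip« prefix; the path and values are made up):

```nix
{ wip.fs.temproot.local.mounts."/var/log" = {
    source = "logs"; mode = "750";
    options = { noexec = true; noatime = false; }; # merged over the shared ».mountOptions« defaults
    extraFsConfig = { neededForBoot = true; };
  };
}
```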
zfsNoSyncProps = { sync = "disabled"; logbias = "throughput"; }; # According to the documentation, »logbias« should be irrelevant without sync (i.e. no logging), but some claim setting it to »throughput« still improves performance. zfsNoSyncProps = { sync = "disabled"; logbias = "throughput"; }; # According to the documentation, »logbias« should be irrelevant without sync (i.e. no logging), but some claim setting it to »throughput« still improves performance.
@ -166,6 +166,9 @@ in {
}; }; }; };
config = let config = let
optionsToList = attrs: lib.mapAttrsToList (k: v: if v == true then k else "${k}=${v}") (lib.filterAttrs (k: v: v != false) attrs);
in lib.mkIf cfg.enable (lib.mkMerge ([ ({ in lib.mkIf cfg.enable (lib.mkMerge ([ ({
${prefix} = { ${prefix} = {
@ -179,13 +182,20 @@ in {
# »/swap« is used by »cfg.swap.asPartition = false« # »/swap« is used by »cfg.swap.asPartition = false«
}; };
fs.temproot.remote.mounts = { fs.temproot.remote.mounts = {
"/remote" = { source = "system"; mode = "755"; extraFsOptions = { neededForBoot = true; }; }; # if any secrets need to be picked up by »activate«, they should be here "/remote" = { source = "system"; mode = "755"; extraFsConfig = { neededForBoot = true; }; }; # if any secrets need to be picked up by »activate«, they should be here
}; };
}; };
boot.tmpOnTmpfs = false; # This would create a systemd mount unit for »/tmp«. boot.tmpOnTmpfs = false; # This would create a systemd mount unit for »/tmp«.
}) ({ # Make each individual attribute on »wip.fs.temproot.*.mountOptions« a default, instead of having them be the default as a set:
${prefix}.fs.temproot = let
it = { mountOptions = { nosuid = lib.mkOptionDefault true; nodev = lib.mkOptionDefault true; noatime = lib.mkOptionDefault true; }; };
in { temp = it; local = it; remote = it; };
}) (lib.mkIf cfg.persistenceFixes { # Cope with the consequences of having »/« (including »/{etc,var,root,...}«) cleared on every reboot. }) (lib.mkIf cfg.persistenceFixes { # Cope with the consequences of having »/« (including »/{etc,var,root,...}«) cleared on every reboot.
environment.etc.nixos.source = "/local/etc/nixos"; environment.etc.nixos.source = "/local/etc/nixos";
@ -209,15 +219,17 @@ in {
security.sudo.extraConfig = "Defaults lecture=never"; # default is »once«, but we'd forget that we did that security.sudo.extraConfig = "Defaults lecture=never"; # default is »once«, but we'd forget that we did that
}) (lib.mkIf (cfg.swap.size != null && cfg.swap.asPartition) (let # Convenience option to create a local F2FS optimized to host the nix store: }) (lib.mkIf (cfg.swap.size != null && cfg.swap.asPartition) (let
useLuks = config.${prefix}.fs.keystore.keys?"luks/swap-${hash}/0"; useLuks = config.${prefix}.fs.keystore.keys?"luks/swap-${hash}/0";
device = "/dev/${if useLuks then "mapper" else "disk/by-partlabel"}/swap-${hash}";
in { in {
${prefix} = { ${prefix} = {
fs.disks.partitions."swap-${hash}" = { type = lib.mkDefault "8200"; size = lib.mkDefault cfg.swap.size; order = lib.mkDefault 1250; }; fs.disks.partitions."swap-${hash}" = { type = lib.mkDefault "8200"; size = lib.mkDefault cfg.swap.size; order = lib.mkDefault 1250; };
fs.keystore.keys."luks/swap-${hash}/0" = lib.mkIf cfg.swap.encrypted (lib.mkOptionDefault "random"); fs.keystore.keys."luks/swap-${hash}/0" = lib.mkIf cfg.swap.encrypted (lib.mkOptionDefault "random");
}; };
swapDevices = [ { device = "/dev/${if useLuks then "mapper" else "disk/by-partlabel"}/swap-${hash}"; } ]; swapDevices = [ { inherit device; } ];
boot.resumeDevice = device;
})) (lib.mkIf (cfg.swap.size != null && !cfg.swap.asPartition) { })) (lib.mkIf (cfg.swap.size != null && !cfg.swap.asPartition) {
@ -225,14 +237,17 @@ in {
swapDevices = [ { device = "${cfg.local.bind.source}/swap"; size = (lib.wip.parseSizeSuffix cfg.swap.size) / 1024 / 1024; } ]; swapDevices = [ { device = "${cfg.local.bind.source}/swap"; size = (lib.wip.parseSizeSuffix cfg.swap.size) / 1024 / 1024; } ];
}) (lib.mkIf (cfg.temp.type == "tmpfs") { # (only temp can be of type tmpfs) }) (lib.mkIf (cfg.temp.type == "tmpfs") (let type = "temp"; in { # (only temp can be of type tmpfs)
fileSystems = lib.mapAttrs (target: { options, uid, gid, mode, extraFsOptions, ... }: (extraFsOptions // { # TODO: this would probably be better implemented by creating a single /.temp tmpfs with a decent size restriction, and then bind-mounting all other mount points into that pool (or at least do that for any locations that are non-root writable?)
fsType = "tmpfs"; device = "tmpfs"; options = (extraFsOptions.options or [ ]) ++ cfg.temp.mountOptions ++ options ++ [ "uid=${toString uid}" "gid=${toString gid}" "mode=${mode}" ];
})) ({ "/" = { options = [ ]; uid = 0; gid = 0; mode = "755"; extraFsOptions = { }; }; } // cfg.temp.mounts); fileSystems = lib.mapAttrs (target: args@{ uid, gid, mode, extraFsConfig, ... }: (extraFsConfig // {
fsType = "tmpfs"; device = "tmpfs";
options = (extraFsConfig.options or [ ]) ++ (optionsToList (cfg.${type}.mountOptions // args.options // { uid = toString uid; gid = toString gid; mode = mode; }));
})) ({ "/" = { options = { }; uid = 0; gid = 0; mode = "755"; extraFsConfig = { }; }; } // cfg.${type}.mounts);
}) (lib.mkIf (cfg.temp.type == "zfs") { })) (lib.mkIf (cfg.temp.type == "zfs") {
boot.initrd.postDeviceCommands = lib.mkAfter '' boot.initrd.postDeviceCommands = lib.mkAfter ''
echo 'Clearing root ZFS' echo 'Clearing root ZFS'
@ -256,29 +271,31 @@ in {
type = "8300"; order = lib.mkDefault 1000; disk = lib.mkDefault "primary"; size = lib.mkDefault (if cfg.remote.type == "none" then null else "50%"); type = "8300"; order = lib.mkDefault 1000; disk = lib.mkDefault "primary"; size = lib.mkDefault (if cfg.remote.type == "none" then null else "50%");
}; };
}; };
fileSystems.${cfg.local.bind.source} = { fsType = "f2fs"; device = "/dev/${if useLuks then "mapper" else "disk/by-partlabel"}/local-${hash}"; formatOptions = lib.mkDefault (lib.concatStrings [ fileSystems.${cfg.local.bind.source} = { fsType = "f2fs"; device = "/dev/${if useLuks then "mapper" else "disk/by-partlabel"}/local-${hash}"; formatOptions = (lib.concatStrings [
"-O extra_attr" # required by other options "-O extra_attr" # required by other options
",inode_checksum" # enable inode checksum ",inode_checksum" # enable inode checksum
",sb_checksum" # enable superblock checksum ",sb_checksum" # enable superblock checksum
",compression" # allow compression ",compression" # allow compression
#"-w ?" # "sector size in bytes" #"-w ?" # "sector size in bytes"
# sector ? segments < section < zone # sector ? segments < section < zone
]); options = lib.mkDefault ([ ]); options = optionsToList (cfg.temp.mountOptions // {
# F2FS compresses only for performance and wear. The whole uncompressed space is still reserved (in case the file content needs to get replaced by incompressible data in-place). To free the gained space, »ioctl(fd, F2FS_IOC_RELEASE_COMPRESS_BLOCKS)« needs to be called per file, making the file immutable. Nix could do that when moving stuff into the store. # F2FS compresses only for performance and wear. The whole uncompressed space is still reserved (in case the file content needs to get replaced by incompressible data in-place). To free the gained space, »ioctl(fd, F2FS_IOC_RELEASE_COMPRESS_BLOCKS)« needs to be called per file, making the file immutable. Nix could do that when moving stuff into the store.
"compress_mode=fs" # enable compression for all files compress_mode = "fs"; # enable compression for all files
"compress_algorithm=lz4" # compress using lz4 compress_algorithm = "lz4"; # compress using lz4
"compress_chksum" # verify checksums (when decompressing data blocks?) compress_chksum = true; # verify checksums (when decompressing data blocks?)
"lazytime" # update timestamps asynchronously lazytime = true; # update timestamps asynchronously
] ++ cfg.local.mountOptions); }; }); };
# TODO: "F2FS and its tools support various parameters not only for configuring on-disk layout, but also for selecting allocation and cleaning algorithms." # TODO: "F2FS and its tools support various parameters not only for configuring on-disk layout, but also for selecting allocation and cleaning algorithms."
boot.initrd.kernelModules = [ "f2fs" ]; # This is not generally, but sometimes, required to boot. Strange. (Kernel message: »request_module fs-f2fs succeeded, but still no fs?«)
})) ] ++ (map (type: (lib.mkIf (cfg.${type}.type == "bind") { })) ] ++ (map (type: (lib.mkIf (cfg.${type}.type == "bind") {
fileSystems = (lib.mapAttrs (target: args@{ source, uid, gid, mode, extraFsOptions, ... }: extraFsOptions // (rec { fileSystems = (lib.mapAttrs (target: args@{ source, uid, gid, mode, extraFsConfig, ... }: extraFsConfig // (rec {
device = "${cfg.${type}.bind.source}/${source}"; options = (extraFsOptions.options or [ ]) ++ [ "bind" ] ++ cfg.${type}.mountOptions ++ args.options; device = "${cfg.${type}.bind.source}/${source}";
options = (extraFsConfig.options or [ ]) ++ (optionsToList (cfg.${type}.mountOptions // args.options // { bind = true; }));
preMountCommands = '' preMountCommands = ''
${extraFsOptions.preMountCommands or ""} ${extraFsConfig.preMountCommands or ""}
mkdir -pm 000 -- ${lib.escapeShellArg target} mkdir -pm 000 -- ${lib.escapeShellArg target}
mkdir -pm 000 -- ${lib.escapeShellArg device} mkdir -pm 000 -- ${lib.escapeShellArg device}
chown ${toString uid}:${toString gid} -- ${lib.escapeShellArg device} chown ${toString uid}:${toString gid} -- ${lib.escapeShellArg device}
@ -308,15 +325,15 @@ in {
}; };
} else { }) // (lib.wip.mapMerge (target: { source, options, zfsProps, uid, gid, mode, ... }: { } else { }) // (lib.wip.mapMerge (target: { source, options, zfsProps, uid, gid, mode, ... }: {
"${dataset}/${source}" = { "${dataset}/${source}" = {
mount = if lib.elem "noauto" options then "noauto" else true; inherit uid gid mode; mount = if (options.noauto or false) == true then "noauto" else true; inherit uid gid mode;
props = { canmount = "noauto"; mountpoint = target; } // zfsProps; props = { canmount = "noauto"; mountpoint = target; } // zfsProps;
}; };
}) cfg.${type}.mounts); }) cfg.${type}.mounts);
}; };
fileSystems = lib.mapAttrs (target: args@{ extraFsOptions, ... }: ( fileSystems = lib.mapAttrs (target: args@{ extraFsConfig, ... }: extraFsConfig // {
extraFsOptions options = (extraFsConfig.options or [ ]) ++ (optionsToList (cfg.${type}.mountOptions // args.options));
)) cfg.${type}.mounts; }) ((if type == "temp" then { "/" = { options = { }; extraFsConfig = { }; }; } else { }) // cfg.${type}.mounts);
}))) [ "temp" "local" "remote" ]))); }))) [ "temp" "local" "remote" ])));

View File

@ -65,6 +65,7 @@ in let module = {
services.zfs.autoScrub.enable = true; services.zfs.autoScrub.enable = true;
services.zfs.trim.enable = true; # (default) services.zfs.trim.enable = true; # (default)
## Implement »cfg.datasets.*.mount«:
fileSystems = lib.wip.mapMerge (path: { props, mount, ... }: if mount != false then { fileSystems = lib.wip.mapMerge (path: { props, mount, ... }: if mount != false then {
"${props.mountpoint}" = { fsType = "zfs"; device = path; options = [ "zfsutil" ] ++ (lib.optionals (mount == "noauto") [ "noauto" ]); }; "${props.mountpoint}" = { fsType = "zfs"; device = path; options = [ "zfsutil" ] ++ (lib.optionals (mount == "noauto") [ "noauto" ]); };
} else { }) cfg.datasets; } else { }) cfg.datasets;
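
For illustration, with this mapping a dataset that sets a »mountpoint« and »mount = "noauto"« ends up as a `noauto` ZFS entry in `fileSystems`; a hedged sketch (the »wip« option prefix and the dataset name are assumptions, not taken from this commit):

```nix
{ # Hypothetical host config; names are placeholders:
  wip.fs.zfs.datasets."rpool/local/nix" = {
    mount = "noauto";
    props = { canmount = "noauto"; mountpoint = "/nix"; };
  };
  # ...which the mapping above translates to roughly:
  # fileSystems."/nix" = { fsType = "zfs"; device = "rpool/local/nix"; options = [ "zfsutil" "noauto" ]; };
}
```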
@ -95,39 +96,78 @@ in let module = {
}; }) (lib.wip.filterMismatching ''/|^(mirror|raidz[123]?|draid[123]?.*|spare|log|dedup|special|cache)$'' (lib.concatLists (lib.catAttrs "vdevArgs" (lib.attrValues cfg.pools)))); }; }) (lib.wip.filterMismatching ''/|^(mirror|raidz[123]?|draid[123]?.*|spare|log|dedup|special|cache)$'' (lib.concatLists (lib.catAttrs "vdevArgs" (lib.attrValues cfg.pools))));
}; };
}) ({ ## Implement »cfg.extraInitrdPools«:
boot.initrd.postDeviceCommands = (lib.mkAfter '' boot.initrd.postDeviceCommands = (lib.mkAfter ''
${lib.concatStringsSep "\n" (map verbose.initrd-import-zpool cfg.extraInitrdPools)} ${lib.concatStringsSep "\n" (map verbose.initrd-import-zpool cfg.extraInitrdPools)}
${verbose.initrd-load-keys} ${verbose.initrd-load-keys}
''); '');
}) (let
}) (lib.mkIf (config.boot.resumeDevice == "") { ## Disallow hibernation without fixed »resumeDevice«:
boot.kernelParams = [ "nohibernate" "hibernate=no" ];
assertions = [ { # Ensure that nothing else overrides the above:
assertion = builtins.elem "nohibernate" config.boot.kernelParams;
message = ''Hibernation with ZFS (and NixOS' initrd) without fixed »resumeDevice« can/will lead to pool corruption. Disallow it by setting »boot.kernelParams = [ "nohibernate" ]«'';
} ];
}) (lib.mkIf (config.boot.resumeDevice != "") { ## Make resuming after hibernation safe with ZFS:
boot.kernelParams = [ "resume=${config.boot.resumeDevice}" ];
assertions = [ { # Just making sure ...
assertion = builtins.elem "resume=${config.boot.resumeDevice}" config.boot.kernelParams;
message = "When using ZFS and not disabling hibernation, make sure to set the »resume=« kernel parameter!";
} ];
boot.initrd.postDeviceCommands = let
inherit (config.system.build) extraUtils;
in (lib.mkBefore ''
# After hibernation, the pools MUST NOT be imported before resuming, as doing so can corrupt them.
# NixOS' mess of an initrd script only resumes after mounting the FSs (which seems unnecessarily late; resuming from a "file" shouldn't require the FS to be mounted, should it?).
# This should generally run after all (including mapped) devices have been created, but before any ZFS action, so do the hibernation resume here.
# But also only support a fixed »resumeDevice«. No guessing:
resumeInfo="$(udevadm info -q property "${config.boot.resumeDevice}" )"
if [ "$(echo "$resumeInfo" | sed -n 's/^ID_FS_TYPE=//p')" = "swsuspend" ]; then
resumeMajor="$(echo "$resumeInfo" | sed -n 's/^MAJOR=//p')"
resumeMinor="$(echo "$resumeInfo" | sed -n 's/^MINOR=//p')"
echo -n "Attempting to resume from hibernation (device $resumeMajor:$resumeMinor as ${config.boot.resumeDevice}) ..."
echo "$resumeMajor:$resumeMinor" > /sys/power/resume 2> /dev/null || echo "failed to wake from hibernation, continuing normal boot!"
fi
'');
#setsid ${extraUtils}/bin/ash -c "exec ${extraUtils}/bin/ash < /dev/$console >/dev/$console 2>/dev/$console"
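
Put differently, hibernation remains available only when the host pins a fixed »boot.resumeDevice«; a minimal, hypothetical host-side sketch (the partition path is a placeholder, not from this repo):

```nix
{ # Hypothetical host configuration; the swap partition path is an example only:
  boot.resumeDevice = "/dev/disk/by-partlabel/swap-example";
  # The section above then appends »resume=...« to the kernel parameters and
  # attempts the resume in the initrd before any ZFS pool gets imported.
}
```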
}) (let ## Implement »cfg.pools.*.autoApplyDuringBoot« and »cfg.pools.*.autoApplyOnActivation«:
inherit (config.system.build) extraUtils; inherit (config.system.build) extraUtils;
anyPool = filterBy: lib.any (pool: pool.${filterBy}) (lib.attrValues cfg.pools); anyPool = filterBy: lib.any (pool: pool.${filterBy}) (lib.attrValues cfg.pools);
poolNames = filterBy: lib.attrNames (lib.filterAttrs (name: pool: pool.${filterBy}) cfg.pools); poolNames = filterBy: lib.attrNames (lib.filterAttrs (name: pool: pool.${filterBy}) cfg.pools);
filter = pool: "^${pool}($|[/])"; filter = pool: "^${pool}($|[/])";
ensure-datasets = pkgs.writeShellScript "ensure-datasets" '' ensure-datasets = zfsPackage: pkgs.writeShellScript "ensure-datasets" ''
${lib.wip.substituteImplicit { inherit pkgs; scripts = { inherit (lib.wip.setup-scripts) zfs utils; }; context = { inherit config; }; }} ${lib.wip.substituteImplicit { inherit pkgs; scripts = lib.attrValues { inherit (lib.wip.setup-scripts) zfs utils; }; context = { inherit config; native = pkgs // { zfs = zfsPackage; }; }; }}
set -eu ; ensure-datasets "$@" set -eu ; ensure-datasets "$@"
''; '';
ensure-datasets-for = filterBy: zfs: ''( if [ ! "''${IN_NIXOS_ENTER:-}" ] && [ -e ${zfs} ] ; then ensure-datasets-for = filterBy: zfsPackage: ''( if [ ! "''${IN_NIXOS_ENTER:-}" ] && [ -e ${zfsPackage}/bin/zfs ] ; then
PATH=$(dirname $(realpath ${zfs})):$PATH # (want to use the version that the kernel module uses)
${lib.concatStrings (map (pool: '' ${lib.concatStrings (map (pool: ''
expected=${lib.escapeShellArg (builtins.toJSON (lib.mapAttrs (n: v: v.props) (lib.filterAttrs (path: _: path == pool || lib.wip.startsWith "${pool}/" path) cfg.datasets)))} expected=${lib.escapeShellArg (builtins.toJSON (lib.mapAttrs (n: v: v.props) (lib.filterAttrs (path: _: path == pool || lib.wip.startsWith "${pool}/" path) cfg.datasets)))}
if [ "$(zfs get -H -o value nixos-${prefix}:applied-datasets ${pool})" != "$expected" ] ; then if [ "$(${zfsPackage}/bin/zfs get -H -o value nixos-${prefix}:applied-datasets ${pool})" != "$expected" ] ; then
${ensure-datasets} / ${lib.escapeShellArg (filter pool)} && zfs set nixos-${prefix}:applied-datasets="$expected" ${pool} ${ensure-datasets zfsPackage} / ${lib.escapeShellArg (filter pool)} && ${zfsPackage}/bin/zfs set nixos-${prefix}:applied-datasets="$expected" ${pool}
fi fi
'') (poolNames filterBy))} '') (poolNames filterBy))}
fi )''; fi )'';
in { in {
boot.initrd.postDeviceCommands = lib.mkIf (anyPool "autoApplyDuringBoot") (lib.mkOrder 2000 '' boot.initrd.postDeviceCommands = lib.mkIf (anyPool "autoApplyDuringBoot") (lib.mkOrder 2000 ''
${ensure-datasets-for "autoApplyDuringBoot" "${extraUtils}/bin/zfs"} ${ensure-datasets-for "autoApplyDuringBoot" extraUtils}
''); '');
boot.initrd.supportedFilesystems = lib.mkIf (anyPool "autoApplyDuringBoot") [ "zfs" ]; boot.initrd.supportedFilesystems = lib.mkIf (anyPool "autoApplyDuringBoot") [ "zfs" ];
${prefix}.fs.zfs.extraInitrdPools = (poolNames "autoApplyDuringBoot"); ${prefix}.fs.zfs.extraInitrdPools = (poolNames "autoApplyDuringBoot");
system.activationScripts.A_ensure-datasets = lib.mkIf (anyPool "autoApplyOnActivation") { system.activationScripts.A_ensure-datasets = lib.mkIf (anyPool "autoApplyOnActivation") {
text = ensure-datasets-for "autoApplyOnActivation" "/run/booted-system/sw/bin/zfs"; text = ensure-datasets-for "autoApplyOnActivation" (pkgs.runCommandLocal "booted-system-link" { } ''ln -sT /run/booted-system/sw $out''); # (want to use the version of ZFS that the kernel module uses, also it's convenient that this does not yet exist during activation at boot)
}; # these are sorted alphabetically, unless one gets "lifted up" by some other one depending on it via its ».deps« field }; # these are sorted alphabetically, unless one gets "lifted up" by some other one depending on it via its ».deps« field
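
As a usage sketch for the auto-apply switches handled here (the »wip« option prefix and the pool/dataset names are assumptions, not from this commit):

```nix
{ # Hypothetical usage; names are placeholders:
  wip.fs.zfs.pools.rpool.autoApplyDuringBoot = true;   # re-ensure datasets from the initrd
  wip.fs.zfs.pools.rpool.autoApplyOnActivation = true; # ...and on system activation
  wip.fs.zfs.datasets."rpool/local/state".props = { compression = "lz4"; };
}
```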

View File

@ -33,7 +33,7 @@ in {
]; ];
in lib.mkIf cfg.enable (lib.mkMerge [ ({ in lib.mkIf cfg.enable (lib.mkMerge [ ({
environment.systemPackages = (with pkgs; [ dropbear ]); environment.systemPackages = [ pkgs.dropbear ];
networking.firewall.allowedTCPPorts = [ 22 ]; networking.firewall.allowedTCPPorts = [ 22 ];

View File

@ -88,8 +88,8 @@ in {
homepage = "https://github.com/rfc1036/udptunnel"; homepage = "https://github.com/rfc1036/udptunnel";
description = "Tunnel UDP packets in a TCP connection "; description = "Tunnel UDP packets in a TCP connection ";
license = lib.licenses.gpl2; license = lib.licenses.gpl2;
maintainers = with lib.maintainers; [ ]; maintainers = [ ];
platforms = with lib.platforms; linux; platforms = lib.platforms.linux;
}; };
}; };
} }

View File

@ -38,8 +38,8 @@ in {
homepage = "https://github.com/sbabic/libubootenv"; homepage = "https://github.com/sbabic/libubootenv";
description = "Generic library and tools to access and modify U-Boot environment from User Space"; description = "Generic library and tools to access and modify U-Boot environment from User Space";
license = [ lib.licenses.lgpl21Plus lib.licenses.mit lib.licenses.cc0 ]; license = [ lib.licenses.lgpl21Plus lib.licenses.mit lib.licenses.cc0 ];
maintainers = with lib.maintainers; [ ]; maintainers = [ ];
platforms = with lib.platforms; linux; platforms = lib.platforms.linux;
}; };
}; };
} }

View File

@ -8,14 +8,14 @@ Patches for `nixpkgs` are applied in `../flake.nix`.
To create/"commit" a patch of the current directory vs its latest commit: To create/"commit" a patch of the current directory vs its latest commit:
```bash ```bash
git diff >.../overlays/patches/....patch git diff >.../patches/....patch
``` ```
To test a patch against the repo in CWD, or to "check it out" to edit and then "commit" again: To test a patch against the repo in CWD, or to "check it out" to edit and then "commit" again:
```bash ```bash
git reset --hard HEAD # destructively reset the working tree to the current commit git reset --hard HEAD # destructively reset the working tree to the current commit
patch --dry-run -p1 <.../overlays/patches/....patch # test only patch --dry-run -p1 <.../patches/....patch # test only
patch -p1 <.../overlays/patches/....patch # apply to CWD patch -p1 <.../patches/....patch # apply to CWD
``` ```

View File

@ -163,21 +163,121 @@ index 34c9421..232285a 100644
case 'l': case 'l':
LoadBackupFile(backupFile, saveData, neverSaveData); LoadBackupFile(backupFile, saveData, neverSaveData);
free(backupFile); free(backupFile);
diff --git a/gpttext.cc b/gpttext.cc
index 170a169..43be9e5 100644
--- a/gpttext.cc
+++ b/gpttext.cc
@@ -197,6 +197,24 @@ void GPTDataTextUI::MoveMainTable(void) {
} // if
} // GPTDataTextUI::MoveMainTable()
+// Move the backup partition table.
+void GPTDataTextUI::MoveSecondTable(void) {
+ uint64_t newStart, pteSize = GetTableSizeInSectors();
+ uint64_t minValue = FindLastUsedLBA();
+ uint64_t maxValue = diskSize - 1 - pteSize;
+ ostringstream prompt;
+
+ cout << "Currently, backup partition table begins at sector " << secondHeader.partitionEntriesLBA
+ << " and ends at sector " << secondHeader.partitionEntriesLBA + pteSize - 1 << "\n";
+ prompt << "Enter new starting location (" << minValue << " to " << maxValue << "; default is " << minValue << "; 1 to abort): ";
+ newStart = GetNumber(1, maxValue, minValue, prompt.str());
+ if (newStart != 1) {
+ GPTData::MoveSecondTable(newStart);
+ } else {
+ cout << "Aborting change!\n";
+ } // if
+} // GPTDataTextUI::MoveSecondTable()
+
// Interactively create a partition
void GPTDataTextUI::CreatePartition(void) {
uint64_t firstBlock, firstInLargest, lastBlock, sector, origSector, lastAligned;
@@ -698,7 +716,7 @@ void GPTDataTextUI::ShowCommands(void) {
void GPTDataTextUI::RecoveryMenu(string filename) {
uint32_t numParts;
int goOn = 1, temp1;
-
+
do {
cout << "\nRecovery/transformation command (? for help): ";
switch (ReadString()[0]) {
@@ -824,7 +842,7 @@ void GPTDataTextUI::ExpertsMenu(string filename) {
string guidStr, device;
GUIDData aGUID;
ostringstream prompt;
-
+
do {
cout << "\nExpert command (? for help): ";
switch (ReadString()[0]) {
@@ -873,6 +891,9 @@ void GPTDataTextUI::ExpertsMenu(string filename) {
case 'j': case 'J':
MoveMainTable();
break;
+ case 'k': case 'K':
+ MoveSecondTable();
+ break;
case 'l': case 'L':
prompt.seekp(0);
prompt << "Enter the sector alignment value (1-" << MAX_ALIGNMENT << ", default = "
@@ -946,6 +967,7 @@ void GPTDataTextUI::ShowExpertCommands(void) {
cout << "h\trecompute CHS values in protective/hybrid MBR\n";
cout << "i\tshow detailed information on a partition\n";
cout << "j\tmove the main partition table\n";
+ cout << "k\tmove the backup partition table\n";
cout << "l\tset the sector alignment value\n";
cout << "m\treturn to main menu\n";
cout << "n\tcreate a new protective MBR\n";
@@ -1007,4 +1029,4 @@ UnicodeString ReadUString(void) {
return ReadString().c_str();
} // ReadUString()
#endif
-
+
diff --git a/gpttext.h b/gpttext.h
index 32e2f88..8ed6274 100644
--- a/gpttext.h
+++ b/gpttext.h
@@ -41,6 +41,7 @@ class GPTDataTextUI : public GPTData {
uint32_t GetPartNum(void);
void ResizePartitionTable(void);
void MoveMainTable(void);
+ void MoveSecondTable(void);
void CreatePartition(void);
void DeletePartition(void);
void ChangePartType(void);
diff --git a/sgdisk.8 b/sgdisk.8 diff --git a/sgdisk.8 b/sgdisk.8
index b966a13..dad877b 100644 index b966a13..6f8b375 100644
--- a/sgdisk.8 --- a/sgdisk.8
+++ b/sgdisk.8 +++ b/sgdisk.8
@@ -304,7 +304,7 @@ with the current final partition being aligned, and if \fBsgdisk\fR is asked @@ -304,13 +304,23 @@ with the current final partition being aligned, and if \fBsgdisk\fR is asked
to create a partition in that space, then it will \fBnot\fR be end\-aligned. to create a partition in that space, then it will \fBnot\fR be end\-aligned.
.TP .TP
-.B \-j, \-\-adjust\-main\-table=sector -.B \-j, \-\-adjust\-main\-table=sector
-Adjust the location of the main partition table. This value is normally 2,
+.B \-j, \-\-move\-main\-table=sector +.B \-j, \-\-move\-main\-table=sector
Adjust the location of the main partition table. This value is normally 2, +Sets the start sector of the main partition table. This value is normally 2,
but it may need to be increased in some cases, such as when a but it may need to be increased in some cases, such as when a
system\-on\-chip (SoC) is hard\-coded to read boot code from sector 2. I system\-on\-chip (SoC) is hard\-coded to read boot code from sector 2. I
recommend against adjusting this value unless doing so is absolutely
necessary.
+.TP
+.B \-k, \-\-move\-backup\-table=sector
+Sets the start sector of the second/backup partition table. The backup table
+is usually placed just before the last sector, which holds the backup header.
+The default value is thus the size of the disk, minus one, minus the total
+size of the partition table (in sectors, usually 32).
+There are probably very few reasons to ever change this, and while the EFI
+standard does not mandate it, most tooling assumes the backup table to be at
+the very end of the disk.
+
.TP
.B \-l, \-\-load\-backup=file
Load partition data from a backup file. This option is the reverse of the
diff --git a/sgdisk.html b/sgdisk.html diff --git a/sgdisk.html b/sgdisk.html
index 36a28bc..ec0f505 100644 index 36a28bc..98c20be 100644
--- a/sgdisk.html --- a/sgdisk.html
+++ b/sgdisk.html +++ b/sgdisk.html
@@ -195,7 +195,7 @@ when using this option. The others require a partition number. The @@ -195,7 +195,7 @@ when using this option. The others require a partition number. The
@ -189,7 +289,7 @@ index 36a28bc..ec0f505 100644
<B>sgdisk -A 4:set:2 /dev/sdc</B> to set the bit 2 attribute (legacy BIOS <B>sgdisk -A 4:set:2 /dev/sdc</B> to set the bit 2 attribute (legacy BIOS
bootable) on partition 4 on <I>/dev/sdc</I>. bootable) on partition 4 on <I>/dev/sdc</I>.
<P> <P>
@@ -344,7 +344,7 @@ if the free space at the end of a disk is less than the alignment value, @@ -344,15 +344,26 @@ if the free space at the end of a disk is less than the alignment value,
with the current final partition being aligned, and if <B>sgdisk</B> is asked with the current final partition being aligned, and if <B>sgdisk</B> is asked
to create a partition in that space, then it will <B>not</B> be end-aligned. to create a partition in that space, then it will <B>not</B> be end-aligned.
<P> <P>
@ -197,4 +297,24 @@ index 36a28bc..ec0f505 100644
+<DT><B>-j, --move-main-table=sector</B> +<DT><B>-j, --move-main-table=sector</B>
<DD> <DD>
Adjust the location of the main partition table. This value is normally 2, -Adjust the location of the main partition table. This value is normally 2,
+Sets the start sector of the main partition table. This value is normally 2,
but it may need to be increased in some cases, such as when a
system-on-chip (SoC) is hard-coded to read boot code from sector 2. I
recommend against adjusting this value unless doing so is absolutely
necessary.
<P>
+<DT><B>-k, --move-backup-table=sector</B>
+
+<DD>
+Sets the start sector of the second/backup partition table. The backup table
+is usually placed just before the last sector, which holds the backup header.
+The default value is thus the size of the disk, minus one, minus the total
+size of the partition table (in sectors, usually 32).
+There are probably very few reasons to ever change this, and while the EFI
+standard does not mandate it, most tooling assumes the backup table to be at
+the very end of the disk.
+<P>
<DT><B>-l, --load-backup=file</B>
<DD>