lots of fixes and tweaks,

generate partition tables in nix,
add open-system maintenance function
Niklas Gollenstede 2022-06-04 20:54:09 +02:00
parent f56db19b5e
commit e46b671f8f
19 changed files with 233 additions and 112 deletions

View File

@ -67,7 +67,8 @@ As a local experiment, the result of running this in a `nix repl` is sufficient:
Often, the concept expressed by a source code file is at least as important as the concrete implementation of it.
`nix` unfortunately isn't super readable and also does not have documentation tooling support nearly on par with languages like TypeScript.
Embedding the source code "file" within a MarkDown file emphasizes the importance of textual expressions of the motivation and context of each piece of source code, and should thus incentivize writing sufficient documentation.
Having the documentation right next to the code should also help against documentation rot.
Technically, Nix (and most other code) files don't need to have any specific file extension. By embedding the MarkDown header in a block comment, the file can still be a valid source code file, while the MarkDown header ending in a typed code block ensures proper syntax highlighting of the source code in editors or online repos.

View File

@ -10,12 +10,13 @@ Just to provide an example of what a host configuration using this set of librar
To prepare a virtual machine disk, as `sudo` user with `nix` installed, run in `..`:
```bash
nix run '.#example' -- sudo install-system /home/$(id -un)/vm/disks/example.img && sudo chown $(id -un): /home/$(id -un)/vm/disks/example.img
nix run '.#example-raidz' -- sudo install-system /tmp/nixos-main.img:raidz1=/tmp/nixos-rz1.img:raidz2=/tmp/nixos-rz2.img:raidz3=/tmp/nixos-rz3.img
```
Then to boot the system in a qemu VM with KVM:
```bash
nix run '.#example' -- sudo run-qemu /home/$(id -un)/vm/disks/example.img
```
Or, as a user with vBox access, run this and use the UI or the printed commands:
```bash
nix run '.#example' -- register-vbox /home/$(id -un)/vm/disks/example.img
```

View File

@ -185,7 +185,7 @@ in rec {
# provide installer tools (native to localSystem, not targetSystem)
hostPath=$PATH
PATH=${pkgs.nixos-install-tools}/bin:${nix_wrapped}/bin:${nix}/bin:${pkgs.util-linux}/bin:${pkgs.coreutils}/bin:${pkgs.gnused}/bin:${pkgs.gnugrep}/bin:${pkgs.findutils}/bin:${pkgs.tree}/bin:${pkgs.gawk}/bin:${pkgs.zfs}/bin
${appliedScripts}

View File

@ -1,5 +1,6 @@
dirname: inputs@{ self, nixpkgs, ...}: let
inherit (nixpkgs) lib;
inherit (import "${dirname}/vars.nix" dirname inputs) extractLineAnchored;
in rec {
# Turns an attr set into a bash dictionary (associative array) declaration, e.g.:
@ -62,6 +63,13 @@ in rec {
)}") scripts}
'';
## Given a bash »script« as string and a function »name«, this finds and extracts the definition of that function in and from the script.
# The function definition has to start at the beginning of a line, and must end with the next »}« or »)}« that is at the beginning of a line and followed by nothing but (optionally) a comment on that line.
extractBashFunction = script: name: let
inherit (extractLineAnchored ''${name}[ ]*[(][ ]*[)]|function[ ]+${name}[ ]'' true false script) line after;
body = builtins.split "(\n[)]?[}])([ ]*[#][^\n]*)?\n" after;
in if (builtins.length body) < 3 then null else line + (builtins.head body) + (builtins.head (builtins.elemAt body 1));
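To make the intended use concrete, here is a rough sketch (not part of the commit) of evaluating »extractBashFunction« the same way the disks module further below does, to pull »partition-disk« out of the setup scripts. The »(builtins.getFlake ...).lib.wip« attribute path is an assumption; adjust it to wherever this flake actually exports its library.
```bash
# Hypothetical ad-hoc evaluation from the repository root (»--impure« is needed for »getFlake« on »./.«):
nix eval --impure --raw --expr '
  let lib = (builtins.getFlake (toString ./.)).lib.wip; in
  lib.extractBashFunction (builtins.readFile ./lib/setup-scripts/disk.sh) "partition-disk"
'
```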
# Used as a »system.activationScripts« snippet, this performs substitutions on a »text« before writing it to »path«.
# For each name-value pair in »substitutes«, all verbatim occurrences of the attribute name in »text« are replaced by the content of the file with path of the attribute value.
# Since this happens one by one in no defined order, the attribute values should be chosen such that they don't appear in any of the files that are substituted in.

View File

@ -20,7 +20,7 @@ function add-key-hostname {( set -eu # 1: usage
## Adds a key by copying it from a bootkey partition (see »add-bootkey-to-keydev«) to the keystore.
function add-key-usb-part {( set -eu # 1: usage
keystore=/run/keystore-@{config.networking.hostName!hashString.sha256:0:8} ; usage=$1
if [[ ! "$usage" =~ ^(luks/keystore-[^/]+/[1-8])$ ]] ; then printf '»usb-part« key mode is only available for the keystore itself.\n' ; exit 1 ; fi
bootkeyPartlabel=bootkey-"@{config.networking.hostName!hashString.sha256:0:8}"
cat /dev/disk/by-partlabel/"$bootkeyPartlabel" | write-secret "$keystore"/"$usage".key
)}
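For reference, the tightened check only accepts the keystore's own key slots 1 through 8 now; with a hypothetical host hash »abcd1234«:
```bash
add-key-usb-part luks/keystore-abcd1234/1   # accepted: matches »^luks/keystore-[^/]+/[1-8]$«
add-key-usb-part luks/data-abcd1234/0       # rejected: »usb-part« is only for the keystore itself
```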

View File

@ -33,7 +33,8 @@ function do-disk-setup { # 1: diskPaths
# * So alignment at the default »align=8MiB« actually seems a decent choice.
## Partitions the »diskPaths« instances of all »config.wip.fs.disks.devices« to ensure that all specified »config.wip.fs.disks.partitions« exist.
# Parses »diskPaths«, creates and loop-mounts images for non-/dev/ paths, and tries to abort if any partition already exists on the host.
function partition-disks { { # 1: diskPaths
beQuiet=/dev/null ; if [[ ${args[debug]:-} ]] ; then beQuiet=/dev/stdout ; fi
declare -g -A blockDevs=( ) # this ends up in the caller's scope
@ -45,11 +46,11 @@ function partition-disks { { # 1: diskPaths
local name ; for name in "@{!config.wip.fs.disks.devices[@]}" ; do
if [[ ! ${blockDevs[$name]:-} ]] ; then echo "Path for block device $name not provided" ; exit 1 ; fi
if [[ ${blockDevs[$name]} != /dev/* ]] ; then
local outFile=${blockDevs[$name]} ; ( set -eu
eval 'declare -A disk='"@{config.wip.fs.disks.devices[$name]}"
install -o root -g root -m 640 -T /dev/null "$outFile" && truncate -s "${disk[size]}" "$outFile"
) && blockDevs[$name]=$(losetup --show -f "$outFile") && prepend_trap "losetup -d '${blockDevs[$name]}'" EXIT # NOTE: this must not be inside a sub-shell!
else
if [[ ! "$(blockdev --getsize64 "${blockDevs[$name]}")" ]] ; then echo "Block device $name does not exist at ${blockDevs[$name]}" ; exit 1 ; fi
blockDevs[$name]=$(realpath "${blockDevs[$name]}")
@ -60,22 +61,35 @@ function partition-disks { { # 1: diskPaths
for partDecl in "@{config.wip.fs.disks.partitionList[@]}" ; do
eval 'declare -A part='"$partDecl"
if [[ -e /dev/disk/by-partlabel/"${part[name]}" ]] && ! is-partition-on-disks /dev/disk/by-partlabel/"${part[name]}" "${blockDevs[@]}" ; then echo "Partition /dev/disk/by-partlabel/${part[name]} already exists on this host and does not reside on one of the target disks ${blockDevs[@]}. Refusing to create another partition with the same partlabel!" ; exit 1 ; fi
done
for name in "@{!config.wip.fs.disks.devices[@]}" ; do
eval 'declare -A disk='"@{config.wip.fs.disks.devices[$name]}"
if [[ ${disk[serial]:-} ]] ; then
actual=$(udevadm info --query=property --name="$blockDev" | grep -oP 'ID_SERIAL_SHORT=\K.*')
if [[ ${disk[serial]} != "$actual" ]] ; then echo "Block device $blockDev does not match the serial declared for ${disk[name]}" ; exit 1 ; fi
fi
partition-disk "${disk[name]}" "${blockDevs[${disk[name]}]}"
done
@{native.parted}/bin/partprobe "${blockDevs[@]}"
@{native.systemd}/bin/udevadm settle -t 15 || true # sometimes partitions aren't quite made available yet
# ensure that filesystem creation does not complain about the devices already being occupied by a previous filesystem
wipefs --all "@{config.wip.fs.disks.partitions!attrNames[@]/#/'/dev/disk/by-partlabel/'}" >$beQuiet
)}
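A rough usage sketch (names and paths made up): »diskPaths« entries are colon-separated »name=path« pairs, as also shown in the README example above; image paths get loop-mounted, »/dev/« paths are used directly.
```bash
# Partition a loop-mounted image for the »primary« disk and a real block device for »data«:
partition-disks 'primary=/tmp/primary.img:data=/dev/sdb'
```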
## Given a declared disk device's »name« and a path to an actual »blockDev« (or image) file, partitions the device as declared in the config.
function partition-disk {( set -eu # 1: name, 2: blockDev, 3?: devSize
name=$1 ; blockDev=$2
eval 'declare -A disk='"@{config.wip.fs.disks.devices[$name]}"
devSize=${3:-$(@{native.util-linux}/bin/blockdev --getsize64 "$blockDev")}
declare -a sgdisk=( --zap-all ) # delete existing part tables
for partDecl in "@{config.wip.fs.disks.partitionList[@]}" ; do
eval 'declare -A part='"$partDecl"
if [[ ${part[disk]} != "${disk[name]}" ]] ; then continue ; fi
if [[ ${part[size]:-} =~ ^[0-9]+%$ ]] ; then
part[size]=$(( $devSize / 1024 * ${part[size]:0:(-1)} / 100 ))K
fi
sgdisk+=( -a "${part[alignment]:-${disk[alignment]}}" -n "${part[index]:-0}":"${part[position]}":+"${part[size]:-}" -t 0:"${part[type]}" -c 0:"${part[name]}" )
@ -85,7 +99,7 @@ function partition-disks { { # 1: diskPaths
sgdisk+=( --hybrid "${disk[mbrParts]}" ) # --hybrid: create MBR in addition to GPT; ${disk[mbrParts]}: make these GPT part 1 MBR parts 2[3[4]]
fi
( PATH=@{native.gptfdisk}/bin ; set -x ; sgdisk "${sgdisk[@]}" "$blockDev" >$beQuiet ) # running all at once is much faster
if [[ ${disk[mbrParts]:-} ]] ; then
printf "
@ -94,7 +108,7 @@ function partition-disks { { # 1: diskPaths
# move the selected »mbrParts« to slots 1[2[3]] instead of 2[3[4]] (by re-creating part1 in the last sector, then sorting)
n;p;1 # new ; primary ; part1
$(( ($devSize/512) - 1)) # start (size 1sec)
x;f;r # expert mode ; fix order ; return
d;$(( (${#disk[mbrParts]} + 1) / 2 + 1 )) # delete ; part(last)
@ -105,15 +119,8 @@ function partition-disks { { # 1: diskPaths
${disk[extraFDiskCommands]}
p;w;q # print ; write ; quit
" | @{native.gnused}/bin/sed -E 's/^ *| *(#.*)?$//g' | @{native.gnused}/bin/sed -E 's/\n\n+| *; */\n/g' | tee >((echo -n '++ ' ; tr $'\n' '|' ; echo) 1>&2) | ( PATH=@{native.util-linux}/bin ; set -x ; fdisk "$blockDev" &>$beQuiet )
fi
)}
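For a single EFI system partition, the accumulated »sgdisk« call would look roughly like this (alignment, partlabel, and device are illustrative only; »0« for the start lets sgdisk pick the first free sector):
```bash
sgdisk --zap-all -a 16384 -n 1:0:+512M -t 0:ef00 -c 0:boot-abcd1234 /dev/loop0
```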
## Checks whether a »partition« resides on one of the provided »blockDevs«. ## Checks whether a »partition« resides on one of the provided »blockDevs«.
@ -152,31 +159,35 @@ function format-partitions {( set -eu
## Mounts all file systems as it would happen during boot, but at path prefix »$mnt« (instead of »/«).
function mount-system {( set -eu # 1: mnt, 2?: fstabPath
# TODO: »config.system.build.fileSystems« is a dependency-sorted list. Could use that ...
# mount --all --fstab @{config.system.build.toplevel}/etc/fstab --target-prefix "$1" -o X-mount.mkdir # (»--target-prefix« is not supported on Ubuntu 20.04)
mnt=$1 ; fstabPath=${2:-"@{config.system.build.toplevel}/etc/fstab"}
PATH=@{native.e2fsprogs}/bin:@{native.f2fs-tools}/bin:@{native.xfsprogs}/bin:@{native.dosfstools}/bin:$PATH
<$fstabPath grep -v '^#' | LC_ALL=C sort -k2 | while read source target type options numbers ; do
if [[ ! $target || $target == none ]] ; then continue ; fi
options=,$options, ; options=${options//,ro,/,}
if [[ $options =~ ,r?bind, ]] || [[ $type == overlay ]] ; then continue ; fi
if ! mountpoint -q "$mnt"/"$target" ; then (
mkdir -p "$mnt"/"$target"
[[ $type == tmpfs || $type == */* ]] || @{native.kmod}/bin/modprobe --quiet $type || true # (this does help sometimes)
mount -t $type -o "${options:1:(-1)}" "$source" "$mnt"/"$target"
) || [[ $options == *,nofail,* ]] ; fi # (actually, nofail already makes mount fail silently)
done
# Since bind mounts may depend on other mounts not only for the target (which the sort takes care of) but also for the source, do all bind mounts last. This would break if there was a different bind mountpoint within a bind-mounted target.
<$fstabPath grep -v '^#' | LC_ALL=C sort -k2 | while read source target type options numbers ; do
if [[ ! $target || $target == none ]] ; then continue ; fi
options=,$options, ; options=${options//,ro,/,}
if [[ $options =~ ,r?bind, ]] || [[ $type == overlay ]] ; then : ; else continue ; fi
if ! mountpoint -q "$mnt"/"$target" ; then (
mkdir -p "$mnt"/"$target"
if [[ $type == overlay ]] ; then
options=${options//,workdir=/,workdir=$mnt\/} ; options=${options//,upperdir=/,upperdir=$mnt\/} # Work and upper dirs must be in target.
workdir=$(<<<"$options" grep -o -P ',workdir=\K[^,]+' || true) ; if [[ $workdir ]] ; then mkdir -p "$workdir" ; fi
upperdir=$(<<<"$options" grep -o -P ',upperdir=\K[^,]+' || true) ; if [[ $upperdir ]] ; then mkdir -p "$upperdir" ; fi
lowerdir=$(<<<"$options" grep -o -P ',lowerdir=\K[^,]+' || true) # TODO: test the lowerdir stuff
options=${options//,lowerdir=$lowerdir,/,lowerdir=$mnt/${lowerdir//:/:$mnt\/},} ; source=overlay
else
if [[ $source == /nix/store/* ]] ; then options=,ro$options ; fi
source=$mnt/$source ; if [[ ! -e $source ]] ; then mkdir -p "$source" ; fi
fi
mount -t $type -o "${options:1:(-1)}" "$source" "$mnt"/"$target"
@ -187,13 +198,10 @@ function mount-system {( set -eu # 1: mnt, 2?: fstabPath
## Unmounts all file systems (that would be mounted during boot / by »mount-system«).
function unmount-system {( set -eu # 1: mnt, 2?: fstabPath
mnt=$1 ; fstabPath=${2:-"@{config.system.build.toplevel}/etc/fstab"}
<$fstabPath grep -v '^#' | LC_ALL=C sort -k2 -r | while read source target rest ; do
if [[ ! $target || $target == none ]] ; then continue ; fi
if mountpoint -q "$mnt"/"$target" ; then
umount "$mnt"/"$target"
fi
done
)}
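A sketch of using these two helpers directly (mount point and host name are assumptions; »open-system« below wraps this together with the LUKS and ZFS steps):
```bash
mnt=/tmp/nixos-install-example ; mkdir -p "$mnt"
mount-system "$mnt"         # defaults to the fstab of the configured toplevel
nixos-enter --root "$mnt"   # inspect or repair the mounted installation
unmount-system "$mnt"
```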
## Given a block device path, returns the number of 512byte sectors it can hold.
function blockSectorCount { printf %s "$(( $(blockdev --getsize64 "$1") / 512 ))" ; }

View File

@ -33,9 +33,9 @@ function prepare-installer { # ...
if zfs get -o value -H name "$poolName" &>/dev/null ; then echo "ZFS pool »$poolName« is already imported. Export the pool before running the installer." ; exit 1 ; fi
done
if [[ ${SUDO_USER:-} ]] ; then function nix {( set +x ; declare -a args=("$@") ; PATH=$hostPath su - "$SUDO_USER" -c "$(declare -p args)"' ; nix "${args[@]}"' )} ; fi
if [[ ${args[debug]:-} ]] ; then set +e ; set -E ; trap 'code= ; cat 2>/dev/null || true ; @{native.bashInteractive}/bin/bash --init-file @{config.environment.etc.bashrc.source} || code=$? ; if [[ $code ]] ; then exit $code ; fi' ERR ; fi # On error, instead of exiting straight away, open a shell to allow diagnosing/fixing the issue. Only exit if that shell reports failure (e.g. CtrlC + CtrlD). Unfortunately, the exiting has to be repeated for each level of nested sub-shells. The cat eats anything lined up on stdin, which would otherwise be run in the shell (TODO: but it blocks if there is nothing on stdin, requiring Ctrl+D to be pressed).
}
@ -44,11 +44,11 @@ function prepare-installer { # ...
# The restore commands are expected to pull in a backup of the system's secrets and state from somewhere, and need to acknowledge that something happened by running »restore-supported-callback«.
function init-or-restore-system {( set -eu # (void)
if [[ ! ${args[restore]:-} ]] ; then
run-hook-script 'System Initialization' @{config.wip.fs.disks.initSystemCommands!writeText.initSystemCommands} # TODO: Do this later inside the chroot?
return # usually, this would be it ...
fi
requiresRestoration=$(mktemp) ; trap "rm -f '$requiresRestoration'" EXIT ; function restore-supported-callback {( rm -f "$requiresRestoration" )}
run-hook-script 'System Restoration' @{config.wip.fs.disks.restoreSystemCommands!writeText.restoreSystemCommands}
if [[ -e $requiresRestoration ]] ; then echo 'The »restoreSystemCommands« did not call »restore-supported-callback« to mark backup restoration as supported for this system. Assuming incomplete configuration.' 1>&2 ; exit 1 ; fi
)}
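As a hedged sketch of a »restoreSystemCommands« hook (backup host and paths are entirely made up), the one hard requirement imposed above is that it calls »restore-supported-callback«:
```bash
# Pull the system's state and secrets from some backup location into the mounted target ...
rsync -aHAX backup-host:/backups/example/latest/ /tmp/nixos-install-example/remote/
# ... then acknowledge that restoration is actually implemented for this host:
restore-supported-callback
```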

View File

@ -7,7 +7,7 @@ function prompt-for-user-passwords { # (void)
userPasswords[$user]=@{config.users.users!catAttrSets.password[$user]}
done
for user in "@{!config.users.users!catAttrSets.passwordFile[@]}" ; do
if ! userPasswords[$user]=$(prompt-new-password "for the user account »$user«") ; then return 1 ; fi
done
}
@ -35,15 +35,15 @@ function populate-keystore { { # (void)
done
for usage in "${!methods[@]}" ; do
if [[ "${methods[$usage]}" == home-pw || "${methods[$usage]}" == copy ]] ; then continue ; fi
add-key-"${methods[$usage]}" "$usage" "${options[$usage]}" || return 1
done
for usage in "${!methods[@]}" ; do
if [[ "${methods[$usage]}" != home-pw ]] ; then continue ; fi
add-key-"${methods[$usage]}" "$usage" "${options[$usage]}" || return 1
done
for usage in "${!methods[@]}" ; do
if [[ "${methods[$usage]}" != copy ]] ; then continue ; fi
add-key-"${methods[$usage]}" "$usage" "${options[$usage]}" || return 1
done
)}
@ -70,6 +70,7 @@ function create-luks-layers {( set -eu # (void)
function open-luks-layers { # (void)
keystore=/run/keystore-@{config.networking.hostName!hashString.sha256:0:8}
for luksName in "@{!config.boot.initrd.luks.devices!catAttrSets.device[@]}" ; do
if [[ -e /dev/mapper/$luksName ]] ; then continue ; fi
rawDev=@{config.boot.initrd.luks.devices!catAttrSets.device[$luksName]}
primaryKey="$keystore"/luks/"$luksName"/0.key

View File

@ -106,3 +106,62 @@ function add-bootkey-to-keydev {( set -eu # 1: blockDev, 2?: hostHash
@{native.parted}/bin/partprobe "$blockDev" ; @{native.systemd}/bin/udevadm settle -t 15 # wait for partitions to update
</dev/urandom tr -dc 0-9a-f | head -c 512 >/dev/disk/by-partlabel/"$bootkeyPartlabel"
)}
## Tries to open and mount the system's keystore from its LUKS partition. If successful, adds the traps to close it when the parent shell exits.
# See »open-system«'s implementation for some example calls to this function.
function mount-keystore-luks { # ...: cryptsetupOptions
# (For the traps to work, this can't run in a sub shell. The function therefore can't use »( set -eu ; ... )« internally and instead has to use »&&« after every command and in place of most »;«, and the function can't be called from a pipeline.)
keystore=keystore-@{config.networking.hostName!hashString.sha256:0:8} &&
mkdir -p -- /run/$keystore &&
@{native.cryptsetup}/bin/cryptsetup open "$@" /dev/disk/by-partlabel/$keystore $keystore &&
mount -o nodev,umask=0077,fmask=0077,dmask=0077,ro /dev/mapper/$keystore /run/$keystore &&
prepend_trap "umount /run/$keystore ; @{native.cryptsetup}/bin/cryptsetup close $keystore ; rmdir /run/$keystore" EXIT
}
## Performs any steps necessary to mount the target system at »/tmp/nixos-install-@{config.networking.hostName}« on the current host.
# For any steps taken, it also adds the traps to undo them on exit from the calling shell, and it always adds the exit trap to do the unmounting itself.
# »diskImages« may be passed in the same format as to the installer. If so, any image files are ensured to be loop-mounted.
# Perfect to inspect/update/amend/repair a system's installation afterwards, e.g.:
# $ source ${config_wip_fs_disks_initSystemCommands1writeText_initSystemCommands}
# $ source ${config_wip_fs_disks_restoreSystemCommands1writeText_restoreSystemCommands}
# $ install-system-to /tmp/nixos-install-${config_networking_hostName}
# $ nixos-enter --root /tmp/nixos-install-${config_networking_hostName}
function open-system { # 1?: diskImages
# (for the traps to work, this can't run in a sub shell, so also can't »set -eu«, so use »&&« after every command and in place of most »;«)
local diskImages=${1:-} # If »diskImages« were specified and they point at files that aren't loop-mounted yet, then loop-mount them now:
local images=$( losetup --list --all --raw --noheadings --output BACK-FILE )
local decl && for decl in ${diskImages//:/ } ; do
local image=${decl/*=/} && if [[ $image != /dev/* ]] && ! <<<$images grep -xF $image ; then
local blockDev=$( losetup --show -f "$image" ) && prepend_trap "losetup -d '$blockDev'" EXIT &&
@{native.parted}/bin/partprobe "$blockDev" &&
:;fi &&
:;done &&
( @{native.systemd}/bin/udevadm settle -t 15 || true ) && # sometimes partitions aren't quite made available yet
if [[ @{config.wip.fs.keystore.enable} && ! -e /dev/mapper/keystore-@{config.networking.hostName!hashString.sha256:0:8} ]] ; then # Try a bunch of approaches for opening the keystore:
mount-keystore-luks --key-file=<(printf %s "@{config.networking.hostName}") ||
mount-keystore-luks --key-file=/dev/disk/by-partlabel/bootkey-@{config.networking.hostName!hashString.sha256:0:8} ||
mount-keystore-luks --key-file=<( read -s -p PIN: pin && echo ' touch!' >&2 && ykchalresp -2 "$pin" ) ||
# TODO: try static yubikey challenge
mount-keystore-luks
fi &&
local mnt=/tmp/nixos-install-@{config.networking.hostName} && if [[ ! -e $mnt ]] ; then mkdir -p "$mnt" && prepend_trap "rmdir '$mnt'" EXIT ; fi &&
open-luks-layers && # Load crypt layers and zfs pools:
if [[ $( LC_ALL=C type -t ensure-datasets ) == 'function' ]] ; then
local poolName && for poolName in "@{!config.wip.fs.zfs.pools[@]}" ; do
if ! zfs get -o value -H name "$poolName" &>/dev/null ; then
zpool import -f -N -R "$mnt" "$poolName" && prepend_trap "zpool export '$poolName'" EXIT &&
:;fi &&
: | zfs load-key -r "$poolName" || true &&
:;done &&
ensure-datasets "$mnt" &&
:;fi &&
prepend_trap "unmount-system '$mnt'" EXIT && mount-system "$mnt" &&
true # (success)
}
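Putting it together, a maintenance session on the README's example image could look like this (the user path and the »primary« disk name are assumptions; »open-system« must run in the current shell so its exit traps can clean up):
```bash
open-system 'primary=/home/user/vm/disks/example.img'
nixos-enter --root /tmp/nixos-install-example
# leaving the shell afterwards runs the accumulated traps: unmount, export pools, close LUKS, detach loop devices
```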

View File

@ -21,7 +21,7 @@ function generic-arg-parse { # ...
## Prepends a command to a trap. Especially useful to define »finally« commands via »prepend_trap '<command>' EXIT«.
# NOTE: When calling this in a sub-shell whose parent already has traps installed, make sure to do »trap - trapName« first. On a new shell, this should be a no-op, but without it, the parent shell's traps will be added to the sub-shell as well (due to strange behavior of »trap -p« (in bash ~5.1.8)).
function prepend_trap { # 1: command, ...: trapNames
fatal() { printf "ERROR: $@\n" >&2 ; return 1 ; }
local cmd=$1 ; shift || fatal "${FUNCNAME} usage error"
local name ; for name in "$@" ; do
@ -30,7 +30,8 @@ prepend_trap() { # 1: command, ...: trapNames
p3() { printf '%s\n' "${3:-}" ; } ; eval "p3 $(trap -p "${name}")"
)" "${name}" || fatal "unable to add to trap ${name}"
done
}
declare -f -t prepend_trap # required to modify DEBUG or RETURN traps
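The pattern used throughout these scripts: acquire a resource, then immediately register its cleanup (the partlabel is made up for the sketch):
```bash
tmp=$(mktemp -d) ; prepend_trap "rmdir '$tmp'" EXIT
mount /dev/disk/by-partlabel/example-part "$tmp" ; prepend_trap "umount '$tmp'" EXIT
# on EXIT, the most recently prepended command runs first: umount, then rmdir
```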
## Writes a »$name«d secret from stdin to »$targetDir«, ensuring proper file permissions.

View File

@ -31,8 +31,8 @@ function create-zpools { # 1: mnt
}
## Ensures that the system's datasets exist and have the defined properties (but not that they don't have properties that aren't defined).
# The pool(s) must exist, be imported with root prefix »$mnt«, and (if datasets are to be created or encryption roots to be inherited) the system's keystore must be open (see »mount-keystore-luks«) or the keys be loaded.
# »keystatus« and »mounted« of existing datasets should remain unchanged, newly created datasets will not be mounted but will have their keys loaded.
function ensure-datasets {( set -eu # 1: mnt, 2?: filterExp
if (( @{#config.wip.fs.zfs.datasets[@]} == 0 )) ; then return ; fi
mnt=$1 ; while [[ "$mnt" == */ ]] ; do mnt=${mnt:0:(-1)} ; done # (remove any trailing slashes)
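As called from »open-system« in the setup scripts, a typical sequence is (pool name and mount prefix are illustrative):
```bash
mnt=/tmp/nixos-install-example
zpool import -f -N -R "$mnt" rpool   # import under the target prefix, without mounting
: | zfs load-key -r rpool || true    # load whatever keys can be loaded non-interactively
ensure-datasets "$mnt"               # create missing datasets and align their properties
```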
@ -58,6 +58,7 @@ function ensure-datasets {( set -eu # 1: mnt, 2?: filterExp
if [[ ${props[keyformat]:-} == ephemeral ]] ; then
cryptRoot=${dataset[name]} ; unset props[keyformat] ; props[keylocation]=file:///dev/null
fi
if [[ $explicitKeylocation ]] ; then props[keylocation]=$explicitKeylocation ; fi
unset props[encryption] ; unset props[keyformat] # can't change these anyway
names=$(IFS=, ; echo "${!props[*]}") ; values=$(IFS=$'\n' ; echo "${props[*]}")
if [[ $values != "$(zfs get -o value -H "$names" "${dataset[name]}")" ]] ; then (
@ -66,27 +67,28 @@ function ensure-datasets {( set -eu # 1: mnt, 2?: filterExp
) ; fi
if [[ $cryptRoot && $(zfs get -o value -H encryptionroot "${dataset[name]}") != "$cryptRoot" ]] ; then ( # inherit key from parent (which the parent would also already have done if necessary)
if [[ $(zfs get -o value -H keystatus "$cryptRoot") != available ]] ; then
zfs load-key -L file://"$cryptKey" "$cryptRoot" ; trap "zfs unload-key $cryptRoot || true" EXIT
fi
if [[ $(zfs get -o value -H keystatus "${dataset[name]}") != available ]] ; then
zfs load-key -L file://"$cryptKey" "${dataset[name]}" # will unload with cryptRoot
fi
( set -x ; zfs change-key -i "${dataset[name]}" )
) ; fi
else ( # create dataset
if [[ ${props[keyformat]:-} == ephemeral ]] ; then
props[encryption]=aes-256-gcm ; props[keyformat]=hex ; props[keylocation]=file:///dev/stdin ; explicitKeylocation=file:///dev/null
declare -a args=( ) ; for name in "${!props[@]}" ; do args+=( -o "${name}=${props[$name]}" ) ; done
</dev/urandom tr -dc 0-9a-f | head -c 64 | ( set -x ; zfs create "${args[@]}" "${dataset[name]}" )
zfs unload-key "${dataset[name]}"
else
if [[ $cryptRoot && $cryptRoot != ${dataset[name]} && $(zfs get -o value -H keystatus "$cryptRoot") != available ]] ; then
zfs load-key -L file://"$cryptKey" "$cryptRoot" ; trap "zfs unload-key $cryptRoot || true" EXIT
fi
declare -a args=( ) ; for name in "${!props[@]}" ; do args+=( -o "${name}=${props[$name]}" ) ; done
( set -x ; zfs create "${args[@]}" "${dataset[name]}" )
fi
if [[ ${props[canmount]} != off ]] ; then (
mount -t zfs -o zfsutil "${dataset[name]}" $tmpMnt ; trap "umount '${dataset[name]}'" EXIT
chmod 000 "$tmpMnt" ; ( chown "${dataset[uid]}:${dataset[gid]}" -- "$tmpMnt" ; chmod "${dataset[mode]}" -- "$tmpMnt" )
@ -95,7 +97,7 @@ function ensure-datasets {( set -eu # 1: mnt, 2?: filterExp
( set -x ; zfs set keylocation="$explicitKeylocation" "${dataset[name]}" )
fi
zfs snapshot -r "${dataset[name]}"@empty
) ; fi
eval 'declare -A allows='"${dataset[permissions]}"
for who in "${!allows[@]}" ; do

View File

@ -61,8 +61,23 @@ in rec {
matches = exp: string: builtins.match exp string != null;
extractChars = exp: string: let match = (builtins.match "^.*(${exp}).*$" string); in if match == null then null else builtins.head match;
# If »exp« (which mustn't match across »\n«) matches (a part of) exactly one line in »text«, return that »line« including its trailing »\n«, plus the text part »before« and »after«, the text »without« the line, and any »captures« made by »exp«. If »text« does not end in a »\n«, then one will be added (since this function operates on lines).
# The »*Anchored« version allows requiring the expression to match from the »start« and/or to the »end« of its line, by passing the respective bool(s) as »true«.
extractLineAnchored = exp: start: end: text: let
exp' = "(${if start then "^|\n" else ""})(${if start then "" else "[^\n]*"}(${exp})${if end then "" else "[^\n]*"}\n)"; # First capture group is the optional start anchor, the second one the line itself.
text' = (builtins.unsafeDiscardStringContext (if (lastChar text) == "\n" then text else text + "\n")); # Ensure tailing newline and drop context (since it needs to be added again anyway).
split = builtins.split exp' text';
get = builtins.elemAt split; matches = get 1;
ctxify = str: lib.addContextFrom text str;
in if builtins.length split != 3 then null else rec { # length < 3 => no match ; length > 3 => multiple matches
before = ctxify ((get 0) + (builtins.head matches));
line = ctxify (builtins.elemAt matches 1);
captures = map ctxify (lib.sublist 3 (builtins.length matches) matches);
after = ctxify (get 2);
without = ctxify (before + after);
}; # (TODO: The string context stuff is actually required, but why? Shouldn't »builtins.split« propagate the context?)
extractLine = exp: text: extractLineAnchored exp false false text;
#extractLine = exp: text: let split = builtins.split "([^\n]*${exp}[^\n]*\n)" (builtins.unsafeDiscardStringContext (if (lastChar text) == "\n" then text else text + "\n")); get = builtins.elemAt split; ctxify = str: lib.addContextFrom text str; in if builtins.length split != 3 then null else rec { before = ctxify (get 0); line = ctxify (builtins.head (get 1)); captures = map ctxify (builtins.tail (get 1)); after = ctxify (get 2); without = ctxify (before + after); }; # (TODO: The string context stuff is actually required, but why? Shouldn't »builtins.split« propagate the context?)
# Given a string, returns its first/last char (or last utf-8(?) byte?).
firstChar = string: builtins.substring (0) 1 string;
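A quick sanity check of the anchoring behaviour (a sketch; it assumes the library is reachable as the flake's »lib.wip«, and the config text is made up):
```bash
nix eval --impure --raw --expr '
  let lib = (builtins.getFlake (toString ./.)).lib.wip; in
  (lib.extractLineAnchored "PermitRootLogin" true false "#PermitRootLogin yes\nPermitRootLogin no\n").line
'  # prints »PermitRootLogin no« (plus newline); the commented line does not match, since the match must start its line
```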

View File

@ -26,8 +26,8 @@ in {
in lib.mkIf cfg.enable (lib.mkMerge [ ({
${prefix} = {
fs.disks.partitions."boot-${hash}" = { type = lib.mkDefault "ef00"; size = lib.mkDefault cfg.size; index = lib.mkDefault 1; order = lib.mkDefault 1500; disk = lib.mkOptionDefault "primary"; }; # require it to be part1, and create it early
fs.disks.devices = lib.mkIf cfg.createMbrPart { primary = { mbrParts = lib.mkDefault "1"; extraFDiskCommands = ''
t;1;c # type ; part1 ; W95 FAT32 (LBA)
a;1 # active/boot ; part1
''; }; };

View File

@ -17,9 +17,9 @@ in {
options.${prefix} = { fs.disks = {
devices = lib.mkOption {
description = "Set of disk devices that this host will be installed on.";
type = lib.types.attrsOf (lib.types.nullOr (lib.types.submodule ({ name, ... }: { options = {
name = lib.mkOption { description = "Name that this device is being referred to as in other places."; type = lib.types.str; default = name; readOnly = true; };
size = lib.mkOption { description = "The size of the image to create, when using an image for this device, as argument to »truncate -s«."; type = lib.types.str; default = "16G"; };
serial = lib.mkOption { description = "Serial number of the specific hardware device to use. If set the device path passed to the installer must point to the device with this serial. Use »udevadm info --query=property --name=$DISK | grep -oP 'ID_SERIAL_SHORT=\K.*'« to get the serial."; type = lib.types.nullOr lib.types.str; default = null; };
alignment = lib.mkOption { description = "Default alignment quantifier for partitions on this device. Should be at least the optimal physical write size of the device, but going larger at worst wastes this many times the number of partitions disk sectors."; type = lib.types.int; default = 16384; };
mbrParts = lib.mkOption { description = "Up to three colon-separated (GPT) partition numbers that will be made available in a hybrid MBR."; type = lib.types.nullOr lib.types.str; default = null; };
@ -27,12 +27,13 @@ in {
t;1;b # type ; part1 ; W95 FAT32
a;1 # active/boot ; part1
''; };
}; })));
default = { };
apply = lib.filterAttrs (k: v: v != null);
};
partitions = lib.mkOption {
description = "Set of disk partitions that the system will need/use. Partitions will be created on their respective ».disk«s in ».order« using »sgdisk -n X:+0+$size«.";
type = lib.types.attrsOf (lib.types.nullOr (lib.types.submodule ({ name, ... }: { options = {
name = lib.mkOption { description = "Name/partlabel that this partition can be referred to as once created."; type = lib.types.str; default = name; readOnly = true; };
disk = lib.mkOption { description = "Name of the disk that this partition resides on, which will automatically be declared with default options."; type = lib.types.str; default = "primary"; };
type = lib.mkOption { description = "»gdisk« partition type of this partition."; type = lib.types.str; };
@ -41,10 +42,12 @@ in {
alignment = lib.mkOption { description = "Adjusted alignment quantifier for this partition only."; type = lib.types.nullOr lib.types.int; default = null; example = 1; };
index = lib.mkOption { description = "Optional explicit partition table index to place this partition in. Use ».order« to make sure that this index hasn't been used yet."; type = lib.types.nullOr lib.types.int; default = null; };
order = lib.mkOption { description = "Creation order ranking of this partition. Higher orders will be created first, and will thus be placed earlier in the partition table (if ».index« isn't explicitly set) and also further to the front of the disk space."; type = lib.types.int; default = 1000; };
}; })));
default = { };
apply = lib.filterAttrs (k: v: v != null);
};
partitionList = lib.mkOption { description = "Partitions as a sorted list."; type = lib.types.listOf (lib.types.attrsOf lib.types.anything); default = lib.sort (before: after: before.order >= after.order) (lib.attrValues cfg.partitions); readOnly = true; internal = true; };
partitioning = lib.mkOption { description = "The resulting disk partitioning as »sgdisk --backup --print« per disk."; type = lib.types.package; readOnly = true; internal = true; };
# These are less disk-state-describing and more installation-imperative ...
# Also, these are run as root and there are no security or safety checks ...
@ -55,7 +58,27 @@ in {
restoreSystemCommands = lib.mkOption { description = ""; type = lib.types.lines; default = ""; };
}; };
config.${prefix} = {
# Create all devices referenced by partitions:
fs.disks.devices = lib.wip.mapMerge (name: { ${name} = { }; }) (lib.catAttrs "disk" config.${prefix}.fs.disks.partitionList);
fs.disks.partitioning = let
partition-disk = builtins.toFile "partition-disk" (lib.wip.extractBashFunction (builtins.readFile "${dirname}/../../lib/setup-scripts/disk.sh") "partition-disk");
esc = lib.escapeShellArg;
in pkgs.runCommand "partitioning-${config.networking.hostName}" { } ''
${lib.wip.substituteImplicit { inherit pkgs; scripts = [ partition-disk ]; context = { inherit config; native = pkgs; }; }} # inherit (builtins) trace;
mkdir $out ; beQuiet=/dev/stdout
${lib.concatStrings (lib.mapAttrsToList (name: disk: ''
name=${esc name} ; img=$name.img
${pkgs.coreutils}/bin/truncate -s ${esc disk.size} $img
partition-disk $name $img ${toString (lib.wip.parseSizeSuffix disk.size)}
${pkgs.gptfdisk}/bin/sgdisk --backup=$out/$name.backup $img
${pkgs.gptfdisk}/bin/sgdisk --print $img >$out/$name.gpt
${if disk.mbrParts != null then ''
${pkgs.util-linux}/bin/fdisk --type mbr --list $img >$out/$name.mbr
'' else ""}
'') cfg.devices)}
'';
};
}
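The new »partitioning« artifact can be built and inspected on its own, e.g. for the README's example host (the »nixosConfigurations.example« attribute path is an assumption; »--print-out-paths« needs a reasonably recent nix):
```bash
out=$(nix build --no-link --print-out-paths '.#nixosConfigurations.example.config.wip.fs.disks.partitioning')
cat "$out"/primary.gpt   # the »sgdisk --print« output for the default »primary« disk
ls "$out"                # plus a ».backup« per disk, and a ».mbr« where a hybrid MBR is configured
```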

View File

@ -92,7 +92,7 @@ in let module = {
fileSystems.${keystore} = { fsType = "vfat"; device = "/dev/mapper/keystore-${hash}"; options = [ "ro" "noatime" "umask=0277" "noauto" ]; formatOptions = ""; };
${prefix} = {
fs.disks.partitions."keystore-${hash}" = { type = lib.mkDefault "8309"; order = lib.mkDefault 1375; disk = lib.mkDefault "primary"; size = lib.mkDefault "32M"; };
fs.disks.postFormatCommands = ''
( : 'Copy the live keystore to its primary persistent location:'
tmp=$(mktemp -d) ; mount "/dev/mapper/keystore-${hash}" $tmp ; trap "umount $tmp ; rmdir $tmp" EXIT

View File

@ -23,16 +23,16 @@ in {
config = let config = let
in ({ in ({
systemd.services = lib.wip.mapMerge (target: { device, preMountCommands, ... }: if (preMountCommands != null) then let systemd.services = lib.wip.mapMerge (target: { device, preMountCommands, depends, ... }: if (preMountCommands != null) then let
isDevice = lib.wip.startsWith "/dev/" device; isDevice = lib.wip.startsWith "/dev/" device;
target' = utils.escapeSystemdPath target; target' = utils.escapeSystemdPath target;
device' = utils.escapeSystemdPath device; device' = utils.escapeSystemdPath device;
mountPoint = "${target'}.mount";
in { "pre-mount-${target'}" = { in { "pre-mount-${target'}" = {
description = "Prepare mounting (to) ${target}"; description = "Prepare mounting (to) ${target}";
wantedBy = [ mountPoint ]; before = [ mountPoint ] ++ (lib.optional isDevice "systemd-fsck@${device'}.service"); wantedBy = [ "${target'}.mount" ]; before = [ "${target'}.mount" ]
++ (lib.optional isDevice "systemd-fsck@${device'}.service"); # TODO: Does this exist for every device? Does depending on it instantiate the template?
requires = lib.optional isDevice "${device'}.device"; after = lib.optional isDevice "${device'}.device"; requires = lib.optional isDevice "${device'}.device"; after = lib.optional isDevice "${device'}.device";
unitConfig.RequiresMountsFor = [ (builtins.dirOf device) (builtins.dirOf target) ]; unitConfig.RequiresMountsFor = depends ++ [ (builtins.dirOf device) (builtins.dirOf target) ];
unitConfig.DefaultDependencies = false; unitConfig.DefaultDependencies = false;
serviceConfig.Type = "oneshot"; script = preMountCommands; serviceConfig.Type = "oneshot"; script = preMountCommands;
}; } else { }) config.fileSystems; }; } else { }) config.fileSystems;
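For context, this generator consumes two per-file-system attributes: »preMountCommands« (this module's own option; it runs as a oneshot unit ordered before the ».mount« unit and, for »/dev/*« devices, before its fsck) and now also »depends«, whose entries are added to »RequiresMountsFor«. A usage sketch under those assumptions:
```nix
{
  fileSystems."/var/lib/example" = {
    device = "/dev/mapper/example"; fsType = "ext4";
    depends = [ "/keystore" ]; # now also ends up in RequiresMountsFor of the pre-mount unit
    preMountCommands = ''
      # runs before mounting (and before fsck of the device), e.g. to open or prepare the device
      echo "preparing /var/lib/example"
    '';
  };
}
```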

View File

@@ -173,12 +173,12 @@ in {
}; };
fs.temproot.local.mounts = { fs.temproot.local.mounts = {
"/nix" = { zfsProps = zfsNoSyncProps; mode = "755"; }; # this (or /nix/store) is required "/nix" = { zfsProps = zfsNoSyncProps; mode = "755"; }; # this (or /nix/store) is required
"/var/log" = { source = "logs"; }; "/var/log" = { source = "logs"; mode = "755"; };
"/local" = { source = "system"; }; "/local" = { source = "system"; mode = "755"; };
# »/swap« is used by »cfg.swap.asPartition = false« # »/swap« is used by »cfg.swap.asPartition = false«
}; };
fs.temproot.remote.mounts = { fs.temproot.remote.mounts = {
"/remote" = { source = "system"; extraFsOptions = { neededForBoot = true; }; }; # if any secrets need to be picked up by »activate«, they should be here "/remote" = { source = "system"; mode = "755"; extraFsOptions = { neededForBoot = true; }; }; # if any secrets need to be picked up by »activate«, they should be here
}; };
}; };
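The »mode« added here sets the permission mode of each mount's root directory (previously only given for »/nix«). Additional temproot mounts follow the same shape; a sketch, where the »source« value and mount point are made-up names:
```nix
{
  # hypothetical extra mount kept with the other »local« (per-machine, non-replicated) state
  fs.temproot.local.mounts."/var/cache" = { source = "cache"; mode = "755"; };
}
```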
@@ -213,7 +213,7 @@ in {
in { in {
${prefix} = { ${prefix} = {
fs.disks.partitions."swap-${hash}" = { type = "8200"; size = cfg.swap.size; order = 1250; }; fs.disks.partitions."swap-${hash}" = { type = lib.mkDefault "8200"; size = lib.mkDefault cfg.swap.size; order = lib.mkDefault 1250; };
fs.keystore.keys."luks/swap-${hash}/0" = lib.mkIf cfg.swap.encrypted (lib.mkOptionDefault "random"); fs.keystore.keys."luks/swap-${hash}/0" = lib.mkIf cfg.swap.encrypted (lib.mkOptionDefault "random");
}; };
swapDevices = [ { device = "/dev/${if useLuks then "mapper" else "disk/by-partlabel"}/swap-${hash}"; } ]; swapDevices = [ { device = "/dev/${if useLuks then "mapper" else "disk/by-partlabel"}/swap-${hash}"; } ];

View File

@@ -21,7 +21,7 @@ in let module = {
pools = lib.mkOption { pools = lib.mkOption {
description = "ZFS pools created during this host's installation."; description = "ZFS pools created during this host's installation.";
type = lib.types.attrsOf (lib.types.submodule ({ name, ... }: { options = { type = lib.types.attrsOf (lib.types.nullOr (lib.types.submodule ({ name, ... }: { options = {
name = lib.mkOption { description = "Attribute name as name of the pool."; type = lib.types.str; default = name; readOnly = true; }; name = lib.mkOption { description = "Attribute name as name of the pool."; type = lib.types.str; default = name; readOnly = true; };
vdevArgs = lib.mkOption { description = "List of arguments that specify the virtual devices (vdevs) used when initially creating the pool. Can consist of the device type keywords and partition labels. The latter are prefixed with »/dev/mapper/« if a mapping with that name is configured or »/dev/disk/by-partlabel/« otherwise, and then the resulting argument sequence is used verbatim in »zpool create«."; type = lib.types.listOf lib.types.str; default = [ name ]; example = [ "raidz1 data1-..." "data2-..." "data3-..." "cache" "cache-..." ]; }; vdevArgs = lib.mkOption { description = "List of arguments that specify the virtual devices (vdevs) used when initially creating the pool. Can consist of the device type keywords and partition labels. The latter are prefixed with »/dev/mapper/« if a mapping with that name is configured or »/dev/disk/by-partlabel/« otherwise, and then the resulting argument sequence is used verbatim in »zpool create«."; type = lib.types.listOf lib.types.str; default = [ name ]; example = [ "raidz1 data1-..." "data2-..." "data3-..." "cache" "cache-..." ]; };
props = lib.mkOption { description = "Zpool properties to pass when creating the pool. May also set »feature@...« and »compatibility«."; type = lib.types.attrsOf (lib.types.nullOr lib.types.str); default = { }; }; props = lib.mkOption { description = "Zpool properties to pass when creating the pool. May also set »feature@...« and »compatibility«."; type = lib.types.attrsOf (lib.types.nullOr lib.types.str); default = { }; };
@@ -31,13 +31,14 @@ in let module = {
props.ashift = lib.mkOptionDefault "12"; # be explicit props.ashift = lib.mkOptionDefault "12"; # be explicit
props.comment = lib.mkOptionDefault "hostname=${config.networking.hostName};"; # This is just nice to know without needing to inspect the datasets. props.comment = lib.mkOptionDefault "hostname=${config.networking.hostName};"; # This is just nice to know without needing to inspect the datasets.
props.cachefile = lib.mkOptionDefault "none"; # If it works on first boot without (stateful) cachefile, then it will also do so later. props.cachefile = lib.mkOptionDefault "none"; # If it works on first boot without (stateful) cachefile, then it will also do so later.
}; })); }; })));
default = { }; default = { };
apply = lib.filterAttrs (k: v: v != null);
}; };
datasets = lib.mkOption { datasets = lib.mkOption {
description = "ZFS datasets managed and mounted on this host."; description = "ZFS datasets managed and mounted on this host.";
type = lib.types.attrsOf (lib.types.submodule ({ name, ... }: { options = { type = lib.types.attrsOf (lib.types.nullOr (lib.types.submodule ({ name, ... }: { options = {
name = lib.mkOption { description = "Attribute name as name of the dataset."; type = lib.types.str; default = name; readOnly = true; }; name = lib.mkOption { description = "Attribute name as name of the dataset."; type = lib.types.str; default = name; readOnly = true; };
props = lib.mkOption { description = "ZFS properties to set on the dataset."; type = lib.types.attrsOf (lib.types.nullOr lib.types.str); default = { }; }; props = lib.mkOption { description = "ZFS properties to set on the dataset."; type = lib.types.attrsOf (lib.types.nullOr lib.types.str); default = { }; };
mount = lib.mkOption { description = "Whether to create a »fileSystems« entry to mount the dataset. »noauto« creates an entry with that option set."; type = lib.types.enum [ true "noauto" false ]; default = false; }; mount = lib.mkOption { description = "Whether to create a »fileSystems« entry to mount the dataset. »noauto« creates an entry with that option set."; type = lib.types.enum [ true "noauto" false ]; default = false; };
@@ -47,8 +48,9 @@ in let module = {
mode = lib.mkOption { description = "Permission mode of the dataset's root directory."; type = lib.types.str; default = "750"; }; mode = lib.mkOption { description = "Permission mode of the dataset's root directory."; type = lib.types.str; default = "750"; };
}; config = { }; config = {
props.canmount = lib.mkOptionDefault "off"; # (need to know this explicitly for each dataset) props.canmount = lib.mkOptionDefault "off"; # (need to know this explicitly for each dataset)
}; })); }; })));
default = { }; default = { };
apply = lib.filterAttrs (k: v: v != null);
}; };
extraInitrdPools = lib.mkOption { description = "Additional pools that are imported in the initrd."; type = lib.types.listOf lib.types.str; default = [ ]; apply = lib.unique; }; extraInitrdPools = lib.mkOption { description = "Additional pools that are imported in the initrd."; type = lib.types.listOf lib.types.str; default = [ ]; apply = lib.unique; };
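Both »pools« and »datasets« now accept »null« per entry, which the new »apply = lib.filterAttrs …« strips out again, so a more specific configuration can remove a pool or dataset that a shared module declared instead of only adding to the set. A hedged example of that, together with the »vdevArgs« semantics described above (labels get prefixed with »/dev/mapper/« or »/dev/disk/by-partlabel/« and are passed to »zpool create« verbatim); the option path in front of »pools«/»datasets« is an assumption:
```nix
{ ... }: { # hypothetical: pool and dataset names are illustrative
  fs.zfs.pools.rpool.vdevArgs = [ "raidz1" "rpool-disk1" "rpool-disk2" "rpool-disk3" ];
  fs.zfs.datasets."rpool/local/unwanted" = null; # dropped again by the »apply« filter
}
```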

View File

@@ -35,7 +35,7 @@ in {
serviceConfig.ExecStart = lib.concatStringsSep "" [ serviceConfig.ExecStart = lib.concatStringsSep "" [
"${pkgs.dropbear}/bin/dropbear" "${pkgs.dropbear}/bin/dropbear"
" -F -E" # don't fork, use stderr " -F -E" # don't fork, use stderr
" -p 22" # handle a single connection on stdio " -p 22" # listen on TCP/22
" -R" # generate host keys on connection " -R" # generate host keys on connection
#" -r .../dropbear_rsa_host_key" #" -r .../dropbear_rsa_host_key"
]; ];