diff --git a/.vscode/settings.json b/.vscode/settings.json index bfc2ff9..3af5579 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -8,6 +8,7 @@ "cSpell.diagnosticLevel": "Information", // to find spelling mistakes "cSpell.words": [ "aarch64", // processor architecture + "AAVMF", // qemu aarch64 "acltype", // zfs "acpi", // abbr "ahci", // abbr diff --git a/example/install.sh.md b/example/install.sh.md index 1b6234b..49593ff 100644 --- a/example/install.sh.md +++ b/example/install.sh.md @@ -12,10 +12,10 @@ See its [README](../lib/setup-scripts/README.md) for more documentation. ```bash # Replace the entry point with the same function: -function install-system {( set -o pipefail -u # (void) +function install-system {( # 1: diskPaths trap - EXIT # start with empty traps for sub-shell - prepare-installer || exit - do-disk-setup "${argv[0]}" || exit + prepare-installer "$@" || exit + do-disk-setup "$1" || exit install-system-to $mnt || exit )} diff --git a/lib/flakes.nix b/lib/flakes.nix index b549c1f..68acbbc 100644 --- a/lib/flakes.nix +++ b/lib/flakes.nix @@ -1,6 +1,6 @@ dirname: inputs@{ self, nixpkgs, ...}: let inherit (nixpkgs) lib; - inherit (import "${dirname}/vars.nix" dirname inputs) mapMerge mapMergeUnique mergeAttrsUnique flipNames; + inherit (import "${dirname}/vars.nix" dirname inputs) namesToAttrs mapMerge mapMergeUnique mergeAttrsUnique flipNames; inherit (import "${dirname}/imports.nix" dirname inputs) getNixFiles importWrapped getOverlaysFromInputs getModulesFromInputs; inherit (import "${dirname}/scripts.nix" dirname inputs) substituteImplicit extractBashFunction; setup-scripts = (import "${dirname}/setup-scripts" "${dirname}/setup-scripts" inputs); @@ -9,36 +9,39 @@ dirname: inputs@{ self, nixpkgs, ...}: let in rec { # Simplified implementation of »flake-utils.lib.eachSystem«. 
- forEachSystem = systems: do: flipNames (mapMerge (arch: { ${arch} = do arch; }) systems); + forEachSystem = systems: getSystemOutputs: flipNames (namesToAttrs getSystemOutputs systems); # Sooner or later this should be implemented in nix itself, for now require »inputs.nixpkgs« and a system that can run »x86_64-linux« (native or through qemu). patchFlakeInputs = inputs: patches: outputs: let inherit ((import inputs.nixpkgs { overlays = [ ]; config = { }; system = "x86_64-linux"; }).pkgs) applyPatches fetchpatch; in outputs (builtins.mapAttrs (name: input: if name != "self" && patches?${name} && patches.${name} != [ ] then (let patched = applyPatches { - name = "${name}-patched"; src = "${input}"; + name = "${name}-patched"; src = "${input.sourceInfo or input}"; patches = map (patch: if patch ? url then fetchpatch patch else patch) patches.${name}; }; - sourceInfo = (input.sourceInfo or input) // patched; + sourceInfo = (builtins.removeAttrs (input.sourceInfo or input) [ "narHash"]) // patched; # (keeps (short)rev, which is not really correct) + dir = if input?sourceInfo.outPath && lib.hasPrefix input.outPath input.sourceInfo.outPath then lib.removePrefix input.sourceInfo.outPath input.outPath else ""; # this should work starting with nix version 2.14 (before, they are the same path) in ( - # sourceInfo = { lastModified; narHash; rev; lastModifiedDate; outPath; shortRev; } + # sourceInfo = { lastModified; lastModifiedDate; narHash; outPath; rev?; shortRev?; } # A non-flake has only the attrs of »sourceInfo«. # A flake has »{ inputs; outputs; sourceInfo; } // outputs // sourceInfo«, where »inputs« is what's passed to the outputs function without »self«, and »outputs« is the result of calling the outputs function. Don't know the merge priority. + # Since nix v2.14, the direct »outPath« has the relative location of the »dir« containing the »flake.nix« as suffix (if not ""). 
if (!input?sourceInfo) then sourceInfo else (let - outputs = (import "${patched.outPath}/flake.nix").outputs ({ self = sourceInfo // outputs; } // input.inputs); - in outputs // sourceInfo // { inherit (input) inputs; inherit outputs; inherit sourceInfo; }) + outputs = (import "${patched.outPath}${dir}/flake.nix").outputs ({ self = sourceInfo // outputs; } // input.inputs); + in outputs // sourceInfo // { outPath = "${patched.outPath}${dir}"; inherit (input) inputs; inherit outputs; inherit sourceInfo; }) )) else input) inputs); # Generates implicit flake outputs by importing conventional paths in the local repo. E.g.: # outputs = inputs@{ self, nixpkgs, wiplib, ... }: wiplib.lib.wip.importRepo inputs ./. (repo@{ overlays, lib, ... }: let ... in [ repo ... ]) - importRepo = inputs: repoPath': outputs: let - repoPath = builtins.path { path = repoPath'; name = "source"; }; # referring to the current flake directory as »./.« is quite intuitive (and »inputs.self.outPath« causes infinite recursion), but without this it adds another hash to the path (because it copies it) + importRepo = inputs: flakePath': outputs: let + flakePath = builtins.path { path = flakePath'; name = "source"; }; # Referring to the current flake directory as »./.« is quite intuitive (and »inputs.self.outPath« causes infinite recursion), but without this it adds another hash to the path (because it copies it). For flakes with »dir != ""«, this includes only the ».« directory, making references to »./..« invalid, but ensuring that »./flake.nix« exists (there), and the below default paths are relative to that (and not whatever nix thought is the root of the repo). 
+ # TODO: should _not_ do the above if it is not a direct store path in let result = (outputs ( - (let it = importWrapped inputs "${repoPath}/lib"; in if it.exists then { + (let it = importWrapped inputs "${flakePath}/lib"; in if it.exists then { lib = it.result; - } else { }) // (let it = importWrapped inputs "${repoPath}/overlays"; in if it.exists then { + } else { }) // (let it = importWrapped inputs "${flakePath}/overlays"; in if it.exists then { overlays = { default = final: prev: builtins.foldl' (prev: overlay: prev // (overlay final prev)) prev (builtins.attrValues it.result); } // it.result; - } else { }) // (let it = importWrapped inputs "${repoPath}/modules"; in if it.exists then { + } else { }) // (let it = importWrapped inputs "${flakePath}/modules"; in if it.exists then { nixosModules = { default = { imports = builtins.attrValues it.result; }; } // it.result; } else { }) )); in if (builtins.isList result) then mergeOutputs result else result; @@ -50,9 +53,9 @@ in rec { # # local: ./overlays/patches/nixpkgs-###.patch # (use long native path to having the path change if any of the other files in ./. change) # ]; # ... # }; in inputs.wiplib.lib.wip.patchFlakeInputsAndImportRepo inputs patches ./. (inputs@{ self, nixpkgs, ... }: repo@{ nixosModules, overlays, lib, ... }: let ... in [ repo ... ]) - patchFlakeInputsAndImportRepo = inputs: patches: repoPath: outputs: ( - patchFlakeInputs inputs patches (inputs: importRepo inputs repoPath (outputs (inputs // { - self = inputs.self // { outPath = builtins.path { path = repoPath; name = "source"; }; }; # If the »flake.nix is in a sub dir of a repo, "${inputs.self}" would otherwise refer to the parent. (?) 
+ patchFlakeInputsAndImportRepo = inputs: patches: flakePath: outputs: ( + patchFlakeInputs inputs patches (inputs: importRepo inputs flakePath (outputs (inputs // { + self = inputs.self // { outPath = builtins.path { path = flakePath; name = "source"; }; }; # If the »flake.nix is in a sub dir of a repo, "${inputs.self}" would otherwise refer to the parent. (?) }))) ); @@ -235,7 +238,7 @@ in rec { # Where »REPO« is the path to a flake repo using »mkSystemsFlake« for it's »apps« output, and »HOST« is the name of a host it defines. # If the first argument (after »--«) is »sudo«, then the program will re-execute itself as root using sudo (minus that »sudo« argument). # If the (then) first argument is »bash«, or if there are no (more) arguments or options, it will execute an interactive shell with the variables and functions sourced. - # If an option »--command« is supplied, then the first argument evaluated as bash instructions, otherwise the first argument is called as a function (or program). + # If an option »--command« is supplied, then the first positional argument is `eval`ed as bash instructions, otherwise the first argument is called as a function (or program). # Either way, the remaining arguments and options have been parsed by »generic-arg-parse« and are available in »argv« and »args«. 
# Examples: # Install the host named »$target« to the image file »/tmp/system-$target.img«: @@ -252,23 +255,21 @@ in rec { in pkgs.writeShellScript "scripts-${name}" '' # if first arg is »sudo«, re-execute this script with sudo (as root) - if [[ $1 == sudo ]] ; then shift ; exec sudo --preserve-env=SSH_AUTH_SOCK -- "$0" "$@" ; fi + if [[ ''${1:-} == sudo ]] ; then shift ; exec sudo --preserve-env=SSH_AUTH_SOCK -- "$0" "$@" ; fi # if the (now) first arg is »bash« or there are no args, re-execute this script as bash »--init-file«, starting an interactive bash in the context of the script - if [[ $1 == bash ]] || [[ $# == 0 && $0 == *-scripts-${name} ]] ; then - exec ${pkgs.bashInteractive}/bin/bash --init-file <(cat << "EOS"${"\n"+'' + if [[ ''${1:-} == bash ]] || [[ $# == 0 && $0 == *-scripts-${name} ]] ; then + shift ; exec ${pkgs.bashInteractive}/bin/bash --init-file <(cat << "EOS"${"\n"+'' # prefix the script to also include the default init files ! [[ -e /etc/profile ]] || . /etc/profile for file in ~/.bash_profile ~/.bash_login ~/.profile ; do if [[ -r $file ]] ; then . $file ; break ; fi done ; unset $file - set -o pipefail -o nounset # (do not rely on errexit) - declare -A args=( ) ; declare -a argv=( ) # some functions expect these # add active »hostName« to shell prompt PS1=''${PS1/\\$/\\[\\e[93m\\](${name})\\[\\e[97m\\]\\$} ''}EOS - cat $0) -i + cat $0) -i -s ':' "$@" fi # provide installer tools (native to localSystem, not targetSystem) @@ -291,11 +292,11 @@ in rec { # either call »argv[0]« with the remaining parameters as arguments, or if »$1« is »-c« eval »$2«. 
if [[ ''${args[debug]:-} || ''${args[trace]:-} ]] ; then set -x ; fi if [[ ''${args[command]:-} ]] ; then - command=''${argv[0]:?'With --command, the first positional argument must specify the commands to run.'} - argv=( "''${argv[@]:1}" ) ; set -- "''${argv[@]}" ; eval "$command" + command=''${argv[0]:?'With --command, the first positional argument must specify the commands to run.'} || exit + argv=( "''${argv[@]:1}" ) ; set -- "''${argv[@]}" ; eval "$command" || exit else - entry=''${argv[0]:?} - argv=( "''${argv[@]:1}" ) ; "$entry" "''${argv[@]}" + entry=''${argv[0]:?} || exit + argv=( "''${argv[@]:1}" ) ; "$entry" "''${argv[@]}" || exit fi ''; diff --git a/lib/setup-scripts/README.md b/lib/setup-scripts/README.md index 5e6e515..cd52929 100644 --- a/lib/setup-scripts/README.md +++ b/lib/setup-scripts/README.md @@ -9,10 +9,10 @@ Any script passed later in `scripts` can overwrite the functions of these (earli With the functions from here, [a simple three-liner](./install.sh) is enough to do a completely automated NixOS installation: ```bash -function install-system {( set -eu # 1: diskPaths - prepare-installer "$@" - do-disk-setup "${argv[0]}" - install-system-to $mnt +function install-system {( # 1: diskPaths + prepare-installer "$@" || exit + do-disk-setup "${argv[0]}" || exit + install-system-to $mnt || exit )} ``` diff --git a/lib/setup-scripts/disk.sh b/lib/setup-scripts/disk.sh index 2861a11..2a87cb2 100644 --- a/lib/setup-scripts/disk.sh +++ b/lib/setup-scripts/disk.sh @@ -6,12 +6,12 @@ ## Prepares the disks of the target system for the copying of files. 
function do-disk-setup { # 1: diskPaths + ensure-disks "$1" || return prompt-for-user-passwords || return populate-keystore || return mnt=/tmp/nixos-install-@{config.networking.hostName} && mkdir -p "$mnt" && prepend_trap "rmdir $mnt" EXIT || return # »mnt=/run/user/0/...« would be more appropriate, but »nixos-install« does not like the »700« permissions on »/run/user/0« - ensure-disks "$1" || return partition-disks || return create-luks-layers && open-luks-layers || return # other block layers would go here too (but figuring out their dependencies would be difficult) run-hook-script 'Post Partitioning' @{config.wip.fs.disks.postPartitionCommands!writeText.postPartitionCommands} || return @@ -51,6 +51,7 @@ function ensure-disks { # 1: diskPaths, 2?: skipLosetup fi local name ; for name in "@{!config.wip.fs.disks.devices[@]}" ; do + if [[ ! @{config.wip.fs.disks.devices!catAttrSets.partitionDuringInstallation[$name]} ]] ; then continue ; fi if [[ ! ${blockDevs[$name]:-} ]] ; then echo "Path for block device $name not provided" 1>&2 ; \return 1 ; fi eval 'local -A disk='"@{config.wip.fs.disks.devices[$name]}" if [[ ${blockDevs[$name]} != /dev/* ]] ; then @@ -58,9 +59,9 @@ function ensure-disks { # 1: diskPaths, 2?: skipLosetup install -m 640 -T /dev/null "$outFile" && truncate -s "${disk[size]}" "$outFile" || return if [[ ${args[image-owner]:-} ]] ; then chown "${args[image-owner]}" "$outFile" || return ; fi if [[ ${2:-} ]] ; then continue ; fi - blockDevs[$name]=$( losetup --show -f "$outFile" ) && prepend_trap "losetup -d '${blockDevs[$name]}'" EXIT || return + blockDevs[$name]=$( @{native.util-linux}/bin/losetup --show -f "$outFile" ) && prepend_trap "@{native.util-linux}/bin/losetup -d '${blockDevs[$name]}'" EXIT || return else - local size=$( blockdev --getsize64 "${blockDevs[$name]}" || : ) ; local waste=$(( size - ${disk[size]} )) + local size=$( @{native.util-linux}/bin/blockdev --getsize64 "${blockDevs[$name]}" || : ) ; local waste=$(( size - ${disk[size]} 
)) if [[ ! $size ]] ; then echo "Block device $name does not exist at ${blockDevs[$name]}" 1>&2 ; \return 1 ; fi if (( waste < 0 )) ; then echo "Block device ${blockDevs[$name]}'s size $size is smaller than the size ${disk[size]} declared for $name" 1>&2 ; \return 1 ; fi if (( waste > 0 )) && [[ ! ${disk[allowLarger]:-} ]] ; then echo "Block device ${blockDevs[$name]}'s size $size is bigger than the size ${disk[size]} declared for $name" 1>&2 ; \return 1 ; fi @@ -83,9 +84,10 @@ function partition-disks { done for name in "@{!config.wip.fs.disks.devices[@]}" ; do + if [[ ! @{config.wip.fs.disks.devices!catAttrSets.partitionDuringInstallation[$name]} ]] ; then continue ; fi eval 'local -A disk='"@{config.wip.fs.disks.devices[$name]}" if [[ ${disk[serial]:-} ]] ; then - actual=$( udevadm info --query=property --name="$blockDev" | grep -oP 'ID_SERIAL_SHORT=\K.*' || echo '' ) + actual=$( @{native.systemd}/bin/udevadm info --query=property --name="$blockDev" | grep -oP 'ID_SERIAL_SHORT=\K.*' || echo '' ) if [[ ${disk[serial]} != "$actual" ]] ; then echo "Block device $blockDev's serial ($actual) does not match the serial (${disk[serial]}) declared for ${disk[name]}" 1>&2 ; \return 1 ; fi fi # can (and probably should) restore the backup: @@ -96,7 +98,8 @@ function partition-disks { @{native.systemd}/bin/udevadm settle -t 15 || true # sometimes partitions aren't quite made available yet # ensure that filesystem creation does not complain about the devices already being occupied by a previous filesystem - wipefs --all "@{config.wip.fs.disks.partitions!attrNames[@]/#/'/dev/disk/by-partlabel/'}" >$beLoud 2>$beSilent || return + local toWipe=( ) ; for part in "@{config.wip.fs.disks.partitions!attrNames[@]/#/'/dev/disk/by-partlabel/'}" ; do [[ ! 
-e "$part" ]] || toWipe+=( "$part" ) ; done + @{native.util-linux}/bin/wipefs --all "${toWipe[@]}" >$beLoud 2>$beSilent || return #/dev/null #for part in "@{config.wip.fs.disks.partitions!attrNames[@]/#/'/dev/disk/by-partlabel/'}" ; do @{native.util-linux}/bin/blkdiscard -f "$part" || return ; done } @@ -190,7 +193,7 @@ function format-partitions { elif [[ $swapDev == /dev/mapper/* ]] ; then if [[ ! @{config.boot.initrd.luks.devices!catAttrSets.device[${swapDev/'/dev/mapper/'/}]:-} ]] ; then echo "LUKS device $swapDev used for SWAP does not point at one of the device mappings @{!config.boot.initrd.luks.devices!catAttrSets.device[@]}" 1>&2 ; \return 1 ; fi else continue ; fi - ( ${_set_x:-:} ; mkswap "$swapDev" >$beLoud 2>$beSilent ) || return + ( PATH=@{native.util-linux}/bin ; ${_set_x:-:} ; mkswap "$swapDev" >$beLoud 2>$beSilent ) || return done } @@ -204,7 +207,7 @@ function fix-grub-install { device=$( eval 'declare -A fs='"@{config.fileSystems[$mount]}" ; echo "${fs[device]}" ) label=${device/\/dev\/disk\/by-partlabel\//} if [[ $label == "$device" || $label == *' '* || ' '@{config.wip.fs.disks.partitions!attrNames[@]}' ' != *' '$label' '* ]] ; then echo "" 1>&2 ; \return 1 ; fi - bootLoop=$( losetup --show -f /dev/disk/by-partlabel/$label ) || return ; prepend_trap "losetup -d $bootLoop" EXIT + bootLoop=$( @{native.util-linux}/bin/losetup --show -f /dev/disk/by-partlabel/$label ) || return ; prepend_trap "@{native.util-linux}/bin/losetup -d $bootLoop" EXIT ln -sfT ${bootLoop/\/dev/..\/..} /dev/disk/by-partlabel/$label || return done #umount $mnt/boot/grub || true ; umount $mnt/boot || true ; mount $mnt/boot || true ; mount $mnt/boot/grub || true @@ -213,7 +216,7 @@ function fix-grub-install { ## Mounts all file systems as it would happen during boot, but at path prefix »$mnt« (instead of »/«). 
-function mount-system {( set -eu # 1: mnt, 2?: fstabPath, 3?: allowFail +function mount-system {( # 1: mnt, 2?: fstabPath, 3?: allowFail # While not generally required for fstab, nixos uses the dependency-sorted »config.system.build.fileSystems« list (instead of plain »builtins.attrValues config.fileSystems«) to generate »/etc/fstab« (provided »config.fileSystems.*.depends« is set correctly, e.g. for overlay mounts). # This function depends on the file at »fstabPath« to be sorted like that. @@ -225,7 +228,7 @@ function mount-system {( set -eu # 1: mnt, 2?: fstabPath, 3?: allowFail if [[ ! $target || $target == none ]] ; then continue ; fi options=,$options, ; options=${options//,ro,/,} - if ! mountpoint -q "$mnt"/"$target" ; then ( + if ! @{native.util-linux}/bin/mountpoint -q "$mnt"/"$target" ; then ( mkdir -p "$mnt"/"$target" || exit [[ $type == tmpfs || $type == auto || $type == */* ]] || @{native.kmod}/bin/modprobe --quiet $type || true # (this does help sometimes) @@ -241,19 +244,19 @@ function mount-system {( set -eu # 1: mnt, 2?: fstabPath, 3?: allowFail source=$mnt/$source ; if [[ ! -e $source ]] ; then mkdir -p "$source" || exit ; fi fi - mount -t $type -o "${options:1:(-1)}" "$source" "$mnt"/"$target" || exit + @{native.util-linux}/bin/mount -t $type -o "${options:1:(-1)}" "$source" "$mnt"/"$target" || exit ) || [[ $options == *,nofail,* || $allowFail ]] || exit ; fi # (actually, nofail already makes mount fail silently) done 3< <( <$fstabPath grep -v '^#' ) )} ## Unmounts all file systems (that would be mounted during boot / by »mount-system«). -function unmount-system {( set -eu # 1: mnt, 2?: fstabPath - mnt=$1 ; fstabPath=${2:-"@{config.system.build.toplevel}/etc/fstab"} +function unmount-system { # 1: mnt, 2?: fstabPath + local mnt=$1 ; local fstabPath=${2:-"@{config.system.build.toplevel}/etc/fstab"} while read -u3 source target rest ; do if [[ ! 
$target || $target == none ]] ; then continue ; fi - if mountpoint -q "$mnt"/"$target" ; then - umount "$mnt"/"$target" + if @{native.util-linux}/bin/mountpoint -q "$mnt"/"$target" ; then + @{native.util-linux}/bin/umount "$mnt"/"$target" || return fi done 3< <( { <$fstabPath grep -v '^#' ; echo ; } | tac ) -)} +} diff --git a/lib/setup-scripts/install.sh b/lib/setup-scripts/install.sh index f4ba3b9..efcaf37 100644 --- a/lib/setup-scripts/install.sh +++ b/lib/setup-scripts/install.sh @@ -4,25 +4,25 @@ ## ## Entry point to the installation, see »./README.md«. -function install-system {( set -o pipefail -u # (void) +function install-system {( # 1: diskPaths trap - EXIT # start with empty traps for sub-shell - prepare-installer || exit - do-disk-setup "${argv[0]}" || exit + prepare-installer "$@" || exit + do-disk-setup "$1" || exit install-system-to $mnt || exit )} ## Does some argument validation, performs some sanity checks, includes a hack to make installation work when nix isn't installed for root, and runs the installation in qemu (if requested). -function prepare-installer { # (void) +function prepare-installer { # 1: diskPaths - : ${argv[0]:?"Required: Target disk or image paths."} + : ${1:?"The first positional argument must specify the path(s) to the disk(s) and/or image file(s) to install to"} umask g-w,o-w # Ensure that files created without explicit permissions are not writable for group and other (0022). if [[ "$(id -u)" != '0' ]] ; then - if [[ ! ${args[no-vm]:-} ]] ; then reexec-in-qemu || return ; \exit 0 ; fi + if [[ ! ${args[no-vm]:-} ]] ; then reexec-in-qemu "$@" || return ; \exit 0 ; fi echo 'Script must be run as root or in qemu (without »--no-vm«).' 
1>&2 ; \return 1 fi - if [[ ${args[vm]:-} ]] ; then reexec-in-qemu || return ; \exit 0 ; fi + if [[ ${args[vm]:-} ]] ; then reexec-in-qemu "$@" || return ; \exit 0 ; fi if [[ -e "/run/keystore-@{config.networking.hostName!hashString.sha256:0:8}" ]] ; then echo "Keystore »/run/keystore-@{config.networking.hostName!hashString.sha256:0:8}/« is already open. Close it and remove the mountpoint before running the installer." 1>&2 ; \return 1 ; fi @@ -42,10 +42,6 @@ function prepare-installer { # (void) _set_x='set -x' ; if [[ ${args[quiet]:-} ]] ; then _set_x=: ; fi - #if [[ ${args[debug]:-} ]] ; then set +e ; set -E ; trap 'code= ; timeout .2s cat &>/dev/null || true ; @{native.bashInteractive}/bin/bash --init-file @{config.environment.etc.bashrc.source} || code=$? ; if [[ $code ]] ; then exit $code ; fi' ERR ; fi # On error, instead of exiting straight away, open a shell to allow diagnosing/fixing the issue. Only exit if that shell reports failure (e.g. CtrlC + CtrlD). Unfortunately, the exiting has to be repeated for each level of each nested sub-shells. The »timeout cat« eats anything lined up on stdin, which would otherwise be sent to bash and interpreted as commands. - - export PATH=$PATH:@{native.util-linux}/bin # Doing a system installation requires a lot of stuff from »util-linux«. This should probably be moved into the individual functions that actually use the tools ... - } ## Re-executes the current system's installation in a qemu VM. @@ -54,7 +50,7 @@ function reexec-in-qemu { if [[ @{pkgs.buildPackages.system} != "@{native.system}" ]] ; then echo "VM installation (implicit when not running as root) of a system built on a different ISA than the current host's is not supported (yet)." 
1>&2 ; \return 1 ; fi # (not sure whether this works for block devices) - ensure-disks "${argv[0]}" 1 || return + ensure-disks "$1" 1 || return qemu=( -m 2048 ) ; declare -A qemuDevs=( ) local index=2 ; local name ; for name in "${!blockDevs[@]}" ; do #if [[ ${blockDevs[$name]} != /dev/* ]] ; then @@ -71,12 +67,12 @@ function reexec-in-qemu { args[vm]='' ; args[no-vm]=1 newArgs=( ) ; for arg in "${!args[@]}" ; do newArgs+=( --"$arg"="${args[$arg]}" ) ; done devSpec= ; for name in "${!qemuDevs[@]}" ; do devSpec+="$name"="${qemuDevs[$name]}": ; done - newArgs+=( ${devSpec%:} ) ; (( ${#argv[@]} > 1 )) && args+=( "${argv[@]:1}" ) + newArgs+=( ${devSpec%:} ) ; shift ; (( $# == 0 )) || args+=( "$@" ) # (( ${#argv[@]} > 1 )) && args+=( "${argv[@]:1}" ) #local output=@{inputs.self}'#'nixosConfigurations.@{outputName:?}.config.system.build.vmExec local output=@{config.system.build.vmExec.drvPath!unsafeDiscardStringContext} # this is more accurate, but also means another system needs to get evaluated every time local scripts=$0 ; if [[ @{pkgs.system} != "@{native.system}" ]] ; then - scripts=$( build-lazy @{inputs.self}'#'apps.@{pkgs.system}.@{outputName:?}.derivation ) + scripts=$( build-lazy @{inputs.self}'#'apps.@{pkgs.system}.@{outputName:?}.derivation ) || return fi local command="$scripts install-system $( printf '%q ' "${newArgs[@]}" ) || exit" @@ -87,13 +83,14 @@ function reexec-in-qemu { ## The default command that will activate the system and install the bootloader. In a separate function to make it easy to replace. 
-function nixos-install-cmd {( set -eu # 1: mnt, 2: topLevel +function nixos-install-cmd {( # 1: mnt, 2: topLevel # »nixos-install« by default does some stateful things (see »--no-root-passwd« »--no-channel-copy«), builds and copies the system config, registers the system (»nix-env --profile /nix/var/nix/profiles/system --set $targetSystem«), and then calls »NIXOS_INSTALL_BOOTLOADER=1 nixos-enter -- $topLevel/bin/switch-to-configuration boot«, which is essentially the same as »NIXOS_INSTALL_BOOTLOADER=1 nixos-enter -- @{config.system.build.installBootLoader} $targetSystem«, i.e. the side effects of »nixos-enter« and then calling the bootloader-installer. - #PATH=@{config.systemd.package}/bin:@{native.nix}/bin:$PATH TMPDIR=/tmp LC_ALL=C @{native.nixos-install-tools}/bin/nixos-install --system "$2" --no-root-passwd --no-channel-copy --root "$1" || exit # We did most of this, so just install the bootloader: + #PATH=@{native.nix}/bin:$PATH:@{config.systemd.package}/bin TMPDIR=/tmp LC_ALL=C @{native.nixos-install-tools}/bin/nixos-install --system "$2" --no-root-passwd --no-channel-copy --root "$1" || exit # We did most of this, so just install the bootloader: export NIXOS_INSTALL_BOOTLOADER=1 # tells some bootloader installers (systemd & grub) to not skip parts of the installation - @{native.nixos-install-tools}/bin/nixos-enter --silent --root "$1" -- @{config.system.build.installBootLoader} "$2" || exit + #( export LC_ALL=C ; PATH=$PATH:@{native.util-linux}/bin:@{native.nixos-install-tools}/bin/ ; ${_set_x:-:} ; nixos-enter --silent --root "$1" -- @{config.system.build.installBootLoader} "$2" ) || exit + LC_ALL=C PATH=$PATH:@{native.util-linux}/bin @{native.nixos-install-tools}/bin/nixos-enter --silent --root "$1" -c "${_set_x:-:} ; @{config.system.build.installBootLoader} $2" || exit )} ## Copies the system's dependencies to the disks mounted at »$mnt« and installs the bootloader. If »$inspect« is set, a root shell will be opened in »$mnt« afterwards. 
@@ -108,10 +105,10 @@ function install-system-to {( set -u # 1: mnt # Link/create files that some tooling expects: mkdir -p -m 755 $mnt/nix/var/nix || exit ; mkdir -p -m 1775 $mnt/nix/store || exit mkdir -p $mnt/etc $mnt/run || exit ; mkdir -p -m 1777 $mnt/tmp || exit - mount tmpfs -t tmpfs $mnt/run || exit ; prepend_trap "umount -l $mnt/run" EXIT || exit # If there isn't anything mounted here, »activate« will mount a tmpfs (inside »nixos-enter«'s private mount namespace). That would hide the additions below. + @{native.util-linux}/bin/mount tmpfs -t tmpfs $mnt/run || exit ; prepend_trap "@{native.util-linux}/bin/umount -l $mnt/run" EXIT || exit # If there isn't anything mounted here, »activate« will mount a tmpfs (inside »nixos-enter«'s private mount namespace). That would hide the additions below. [[ -e $mnt/etc/NIXOS ]] || touch $mnt/etc/NIXOS || exit # for »switch-to-configuration« [[ -e $mnt/etc/mtab ]] || ln -sfn /proc/mounts $mnt/etc/mtab || exit - ln -sT $(realpath $targetSystem) $mnt/run/current-system || exit + ln -sT $( realpath $targetSystem ) $mnt/run/current-system || exit #mkdir -p /nix/var/nix/db # »nixos-containers« requires this but nothing creates it before nix is used. BUT »nixos-enter« screams: »/nix/var/nix/db exists and is not a regular file.« # If the system configuration is supposed to be somewhere on the system, might as well initialize that: @@ -150,7 +147,7 @@ function install-system-to {( set -u # 1: mnt ln -sT /nix/var/nix/profiles $mnt/nix/var/nix/gcroots/profiles || exit # Run the main install command (primarily for the bootloader): - mount -o bind,ro /nix/store $mnt/nix/store || exit ; prepend_trap '! mountpoint -q $mnt/nix/store || umount -l $mnt/nix/store' EXIT || exit # all the things required to _run_ the system are copied, but (may) need some more things to initially install it and/or enter the chroot (like qemu, see above) + @{native.util-linux}/bin/mount -o bind,ro /nix/store $mnt/nix/store || exit ; prepend_trap '! 
@{native.util-linux}/bin/mountpoint -q $mnt/nix/store || @{native.util-linux}/bin/umount -l $mnt/nix/store' EXIT || exit # all the things required to _run_ the system are copied, but (may) need some more things to initially install it and/or enter the chroot (like qemu, see above) run-hook-script 'Pre Installation' @{config.wip.fs.disks.preInstallCommands!writeText.preInstallCommands} || exit code=0 ; nixos-install-cmd $mnt "${topLevel:-$targetSystem}" >$beLoud 2>$beSilent || code=$? run-hook-script 'Post Installation' @{config.wip.fs.disks.postInstallCommands!writeText.postInstallCommands} || exit @@ -167,7 +164,7 @@ function install-system-to {( set -u # 1: mnt else ( set +x ; echo "Installation done! This shell is in a chroot in the mounted system for inspection. Exiting the shell will unmount the system." 1>&2 ) fi - PATH=@{config.systemd.package}/bin:$PATH @{native.nixos-install-tools}/bin/nixos-enter --root $mnt -- /nix/var/nix/profiles/system/sw/bin/bash --login || exit # +o monitor + LC_ALL=C PATH=$PATH:@{native.util-linux}/bin @{native.nixos-install-tools}/bin/nixos-enter --root $mnt -- /nix/var/nix/profiles/system/sw/bin/bash -c 'source /etc/set-environment ; exec bash --login' || exit # +o monitor fi mkdir -p $mnt/var/lib/systemd/timesync && touch $mnt/var/lib/systemd/timesync/clock || true # save current time diff --git a/lib/setup-scripts/keys.sh b/lib/setup-scripts/keys.sh index 19a3680..f61757c 100644 --- a/lib/setup-scripts/keys.sh +++ b/lib/setup-scripts/keys.sh @@ -3,10 +3,10 @@ ## Prompts for the password of every user that uses a »passwordFile«, to later use that password for home encryption and/or save it in the »passwordFile«. function prompt-for-user-passwords { # (void) declare -g -A userPasswords=( ) # (this ends up in the caller's scope) - for user in "@{!config.users.users!catAttrSets.password[@]}" ; do # Also grab any plaintext passwords for testing setups. 
+ local user ; for user in "@{!config.users.users!catAttrSets.password[@]}" ; do # Also grab any plaintext passwords for testing setups. userPasswords[$user]=@{config.users.users!catAttrSets.password[$user]} done - for user in "@{!config.users.users!catAttrSets.passwordFile[@]}" ; do + local user ; for user in "@{!config.users.users!catAttrSets.passwordFile[@]}" ; do if ! userPasswords[$user]=$(prompt-new-password "for the user account »$user«") ; then true ; \return 1 ; fi done } @@ -14,54 +14,52 @@ function prompt-for-user-passwords { # (void) ## Mounts a ramfs as the host's keystore and populates it with keys as requested by »config.wip.fs.keystore.keys«. # Depending on the specified key types/sources, this may prompt for user input. -function populate-keystore { { # (void) +function populate-keystore { # (void) local keystore=/run/keystore-@{config.networking.hostName!hashString.sha256:0:8} - mkdir -p $keystore && chmod 750 $keystore && prepend_trap "rmdir $keystore" EXIT - mount ramfs -t ramfs $keystore && prepend_trap "umount $keystore" EXIT -} && ( set -eu + mkdir -p $keystore && chmod 750 $keystore && prepend_trap "rmdir $keystore" EXIT || return + @{native.util-linux}/bin/mount ramfs -t ramfs $keystore && prepend_trap "@{native.util-linux}/bin/umount $keystore" EXIT || return - declare -A methods=( ) ; declare -A options=( ) - for usage in "@{!config.wip.fs.keystore.keys[@]}" ; do - methodAndOptions="@{config.wip.fs.keystore.keys[$usage]}" - method=$(<<<"$methodAndOptions" cut -d= -f1) - methods[$usage]=$method ; options[$usage]=${methodAndOptions/$method=/} # TODO: if no options are provided, this passes the method string as options (use something like ${methodAndOptions:(- $(( ${#method} + 1 ))}) + local -A methods=( ) ; local -A options=( ) + local usage ; for usage in "@{!config.wip.fs.keystore.keys[@]}" ; do + methods[$usage]=@{config.wip.fs.keystore.keys[$usage]%%=*} + options[$usage]=@{config.wip.fs.keystore.keys[$usage]:$(( ${#methods[$usage]} + 1 
))} done - for usage in "${!methods[@]}" ; do + local usage ; for usage in "${!methods[@]}" ; do if [[ "${methods[$usage]}" != inherit ]] ; then continue ; fi - from=${options[$usage]} + local from=${options[$usage]} methods[$usage]=${methods[$from]} ; options[$usage]=${options[$from]} done - for usage in "${!methods[@]}" ; do + local usage ; for usage in "${!methods[@]}" ; do if [[ "${methods[$usage]}" == home-composite || "${methods[$usage]}" == copy ]] ; then continue ; fi - for attempt in 2 3 x ; do + local attempt ; for attempt in 2 3 x ; do if gen-key-"${methods[$usage]}" "$usage" "${options[$usage]}" | write-secret "$keystore"/"$usage".key ; then break ; fi - if [[ $attempt == x ]] ; then \exit 1 ; fi ; echo "Retrying ($attempt/3):" + if [[ $attempt == x ]] ; then \return 1 ; fi ; echo "Retrying ($attempt/3):" done done - for usage in "${!methods[@]}" ; do + local usage ; for usage in "${!methods[@]}" ; do if [[ "${methods[$usage]}" != home-composite ]] ; then continue ; fi - gen-key-"${methods[$usage]}" "$usage" "${options[$usage]}" | write-secret "$keystore"/"$usage".key || \exit 1 + gen-key-"${methods[$usage]}" "$usage" "${options[$usage]}" | write-secret "$keystore"/"$usage".key || return done - for usage in "${!methods[@]}" ; do + local usage ; for usage in "${!methods[@]}" ; do if [[ "${methods[$usage]}" != copy ]] ; then continue ; fi - gen-key-"${methods[$usage]}" "$usage" "${options[$usage]}" | write-secret "$keystore"/"$usage".key || \exit 1 + gen-key-"${methods[$usage]}" "$usage" "${options[$usage]}" | write-secret "$keystore"/"$usage".key || return done -)} +} ## Creates the LUKS devices specified by the host using the keys created by »populate-keystore«. 
function create-luks-layers { # (void) - keystore=/run/keystore-@{config.networking.hostName!hashString.sha256:0:8} + local keystore=/run/keystore-@{config.networking.hostName!hashString.sha256:0:8} for luksName in "@{!config.boot.initrd.luks.devices!catAttrSets.device[@]}" ; do - rawDev=@{config.boot.initrd.luks.devices!catAttrSets.device[$luksName]} + local rawDev=@{config.boot.initrd.luks.devices!catAttrSets.device[$luksName]} if ! is-partition-on-disks "$rawDev" "${blockDevs[@]}" ; then echo "Partition alias $rawDev used by LUKS device $luksName does not point at one of the target disks ${blockDevs[@]}" 1>&2 ; \return 1 ; fi - primaryKey="$keystore"/luks/"$luksName"/0.key + local primaryKey="$keystore"/luks/"$luksName"/0.key - keyOptions=( --pbkdf=pbkdf2 --pbkdf-force-iterations=1000 ) + local keyOptions=( --pbkdf=pbkdf2 --pbkdf-force-iterations=1000 ) ( PATH=@{native.cryptsetup}/bin ; ${_set_x:-:} ; cryptsetup --batch-mode luksFormat --key-file="$primaryKey" "${keyOptions[@]}" -c aes-xts-plain64 -s 512 -h sha256 "$rawDev" ) || return - for index in 1 2 3 4 5 6 7 ; do + local index ; for index in 1 2 3 4 5 6 7 ; do if [[ -e "$keystore"/luks/"$luksName"/"$index".key ]] ; then ( PATH=@{native.cryptsetup}/bin ; ${_set_x:-:} ; cryptsetup luksAddKey --key-file="$primaryKey" "${keyOptions[@]}" "$rawDev" "$keystore"/luks/"$luksName"/"$index".key ) || return fi @@ -69,13 +67,13 @@ function create-luks-layers { # (void) done } -## Opens the LUKS devices specified by the host, using the opened host's keystore. +## Opens the LUKS devices specified by the host, using the host's (open) keystore. 
function open-luks-layers { # (void) - keystore=/run/keystore-@{config.networking.hostName!hashString.sha256:0:8} + local keystore=/run/keystore-@{config.networking.hostName!hashString.sha256:0:8} for luksName in "@{!config.boot.initrd.luks.devices!catAttrSets.device[@]}" ; do if [[ -e /dev/mapper/$luksName ]] ; then continue ; fi - rawDev=@{config.boot.initrd.luks.devices!catAttrSets.device[$luksName]} - primaryKey="$keystore"/luks/"$luksName"/0.key + local rawDev=@{config.boot.initrd.luks.devices!catAttrSets.device[$luksName]} + local primaryKey="$keystore"/luks/"$luksName"/0.key @{native.cryptsetup}/bin/cryptsetup --batch-mode luksOpen --key-file="$primaryKey" "$rawDev" "$luksName" || return prepend_trap "@{native.cryptsetup}/bin/cryptsetup close $luksName" EXIT || return done diff --git a/lib/setup-scripts/maintenance.sh b/lib/setup-scripts/maintenance.sh index 6f18076..c8e9520 100644 --- a/lib/setup-scripts/maintenance.sh +++ b/lib/setup-scripts/maintenance.sh @@ -4,27 +4,27 @@ ## ## On the host and for the user it is called by, creates/registers a VirtualBox VM meant to run the shells target host. Requires the path to the target host's »diskImage« as the result of running the install script. The image file may not be deleted or moved. If »bridgeTo« is set (to a host interface name, e.g. as »eth0«), it is added as bridged network "Adapter 2" (which some hosts need). -function register-vbox {( set -eu # 1: diskImages, 2?: bridgeTo +function register-vbox {( # 1: diskImages, 2?: bridgeTo diskImages=$1 ; bridgeTo=${2:-} vmName="nixos-@{config.networking.hostName}" - VBoxManage=$( PATH=$hostPath which VBoxManage ) # The host is supposed to run these anyway, and »pkgs.virtualbox« is marked broken on »aarch64«. + VBoxManage=$( PATH=$hostPath which VBoxManage ) || exit # The host is supposed to run these anyway, and »pkgs.virtualbox« is marked broken on »aarch64«. 
- $VBoxManage createvm --name "$vmName" --register --ostype Linux26_64 - $VBoxManage modifyvm "$vmName" --memory 2048 --pae off --firmware efi + $VBoxManage createvm --name "$vmName" --register --ostype Linux26_64 || exit + $VBoxManage modifyvm "$vmName" --memory 2048 --pae off --firmware efi || exit - $VBoxManage storagectl "$vmName" --name SATA --add sata --portcount 4 --bootable on --hostiocache on + $VBoxManage storagectl "$vmName" --name SATA --add sata --portcount 4 --bootable on --hostiocache on || exit index=0 ; for decl in ${diskImages//:/ } ; do diskImage=${decl/*=/} if [[ ! -e $diskImage.vmdk ]] ; then - $VBoxManage internalcommands createrawvmdk -filename $diskImage.vmdk -rawdisk $diskImage # pass-through + $VBoxManage internalcommands createrawvmdk -filename $diskImage.vmdk -rawdisk $diskImage || exit # pass-through #VBoxManage convertfromraw --format VDI $diskImage $diskImage.vmdk && rm $diskImage # convert fi - $VBoxManage storageattach "$vmName" --storagectl SATA --port $(( index++ )) --device 0 --type hdd --medium $diskImage.vmdk + $VBoxManage storageattach "$vmName" --storagectl SATA --port $(( index++ )) --device 0 --type hdd --medium $diskImage.vmdk || exit done if [[ $bridgeTo ]] ; then # VBoxManage list bridgedifs - $VBoxManage modifyvm "$vmName" --nic2 bridged --bridgeadapter2 $bridgeTo + $VBoxManage modifyvm "$vmName" --nic2 bridged --bridgeadapter2 $bridgeTo || exit fi # The serial settings between qemu and vBox seem incompatible. With a simple »console=ttyS0«, vBox hangs on start. So just disable this for now an use qemu for headless setups. The UX here is awful anyway. @@ -80,7 +80,7 @@ function run-qemu { # 1: diskImages, ...: qemuArgs #qemu+=( -bios ${ovmf}/FV/OVMF.fd ) # This works, but is a legacy fallback that stores the EFI vars in /NvVars on the EFI partition (which is really bad). 
local fwName=OVMF ; if [[ @{pkgs.system} == aarch64-* ]] ; then fwName=AAVMF ; fi # fwName=QEMU qemu+=( -drive file=${ovmf}/FV/${fwName}_CODE.fd,if=pflash,format=raw,unit=0,readonly=on ) - local efiVars=${args[efi-vars]:-${XDG_RUNTIME_DIR:-/run/user/$(id -u)}/qemu-@{outputName:-@{config.system.name}}-VARS.fd} + local efiVars=${args[efi-vars]:-"${XDG_RUNTIME_DIR:-/run/user/$(id -u)}/qemu-@{outputName:-@{config.system.name}}-VARS.fd"} qemu+=( -drive file="$efiVars",if=pflash,format=raw,unit=1 ) if [[ ! -e "$efiVars" ]] ; then mkdir -pm700 "$( dirname "$efiVars" )" ; cat ${ovmf}/FV/${fwName}_VARS.fd >"$efiVars" || return ; fi # https://lists.gnu.org/archive/html/qemu-discuss/2018-04/msg00045.html @@ -157,26 +157,26 @@ function run-qemu { # 1: diskImages, ...: qemuArgs ## Creates a random static key on a new key partition on the GPT partitioned »$blockDev«. The drive can then be used as headless but removable disk unlock method. # To create/clear the GPT: $ sgdisk --zap-all "$blockDev" -function add-bootkey-to-keydev {( set -eu # 1: blockDev, 2?: hostHash - blockDev=$1 ; hostHash=${2:-@{config.networking.hostName!hashString.sha256}} - bootkeyPartlabel=bootkey-${hostHash:0:8} - @{native.gptfdisk}/bin/sgdisk --new=0:0:+1 --change-name=0:"$bootkeyPartlabel" --typecode=0:0000 "$blockDev" # create new 1 sector (512b) partition - @{native.parted}/bin/partprobe "$blockDev" ; @{native.systemd}/bin/udevadm settle -t 15 # wait for partitions to update - /dev/disk/by-partlabel/"$bootkeyPartlabel" -)} +function add-bootkey-to-keydev { # 1: blockDev, 2?: hostHash + local blockDev=$1 ; local hostHash=${2:-@{config.networking.hostName!hashString.sha256}} + local bootkeyPartlabel=bootkey-${hostHash:0:8} + @{native.gptfdisk}/bin/sgdisk --new=0:0:+1 --change-name=0:"$bootkeyPartlabel" --typecode=0:0000 "$blockDev" || exit # create new 1 sector (512b) partition + @{native.parted}/bin/partprobe "$blockDev" && @{native.systemd}/bin/udevadm settle -t 15 || exit # wait for partitions to 
update + /dev/disk/by-partlabel/"$bootkeyPartlabel" || exit +} ## Tries to open and mount the systems keystore from its LUKS partition. If successful, adds the traps to close it when the parent shell exits. +# For the exit traps to trigger on exit from the calling script / shell, this can't run in a sub shell (and therefore can't be called from a pipeline). # See »open-system«'s implementation for some example calls to this function. function mount-keystore-luks { # ...: cryptsetupOptions - # (For the traps to work, this can't run in a sub shell. The function therefore can't use »( set -eu ; ... )« internally and instead has to use »&&« after every command and in place of most »;«, and the function can't be called from a pipeline.) local keystore=keystore-@{config.networking.hostName!hashString.sha256:0:8} mkdir -p -- /run/$keystore && prepend_trap "[[ ! -e /run/$keystore ]] || rmdir /run/$keystore" EXIT || return @{native.cryptsetup}/bin/cryptsetup open "$@" /dev/disk/by-partlabel/$keystore $keystore && prepend_trap "@{native.cryptsetup}/bin/cryptsetup close $keystore" EXIT || return - mount -o nodev,umask=0077,fmask=0077,dmask=0077,ro /dev/mapper/$keystore /run/$keystore && prepend_trap "umount /run/$keystore" EXIT || return + @{native.util-linux}/bin/mount -o nodev,umask=0077,fmask=0077,dmask=0077,ro /dev/mapper/$keystore /run/$keystore && prepend_trap "@{native.util-linux}/bin/umount /run/$keystore" EXIT || return } ## Performs any steps necessary to mount the target system at »/tmp/nixos-install-@{config.networking.hostName}« on the current host. -# For any steps taken, it also adds the steps to undo them on exit from the calling shell, and it always adds the exit trap to do the unmounting itself. +# For any steps taken, it also adds the steps to undo them on exit from the calling shell (so don't call this from a sub-shell that exits too early). # »diskImages« may be passed in the same format as to the installer. 
If so, any image files are ensured to be loop-mounted. # Perfect to inspect/update/amend/repair a system's installation afterwards, e.g.: # $ source ${config_wip_fs_disks_initSystemCommands1writeText_initSystemCommands} @@ -185,23 +185,20 @@ function mount-keystore-luks { # ...: cryptsetupOptions # $ nixos-install --system ${config_system_build_toplevel} --no-root-passwd --no-channel-copy --root $mnt # $ nixos-enter --root $mnt function open-system { # 1?: diskImages - # (for the traps to work, this can't run in a sub shell, so also can't »set -eu«, so use »&&« after every command and in place of most »;«) - local diskImages=${1:-} # If »diskImages« were specified and they point at files that aren't loop-mounted yet, then loop-mount them now: - local images=$( losetup --list --all --raw --noheadings --output BACK-FILE ) + local images=$( @{native.util-linux}/bin/losetup --list --all --raw --noheadings --output BACK-FILE ) local decl ; for decl in ${diskImages//:/ } ; do local image=${decl/*=/} ; if [[ $image != /dev/* ]] && ! <<<$images grep -xF $image ; then - local blockDev=$( losetup --show -f "$image" ) && prepend_trap "losetup -d '$blockDev'" EXIT || return + local blockDev=$( @{native.util-linux}/bin/losetup --show -f "$image" ) && prepend_trap "@{native.util-linux}/bin/losetup -d '$blockDev'" EXIT || return @{native.parted}/bin/partprobe "$blockDev" || return - fi done @{native.systemd}/bin/udevadm settle -t 15 || true # sometimes partitions aren't quite made available yet if [[ @{config.wip.fs.keystore.enable} && ! -e /dev/mapper/keystore-@{config.networking.hostName!hashString.sha256:0:8} ]] ; then # Try a bunch of approaches for opening the keystore: - mount-keystore-luks --key-file=<( printf %s "@{config.networking.hostName}" ) || - mount-keystore-luks --key-file=/dev/disk/by-partlabel/bootkey-@{config.networking.hostName!hashString.sha256:0:8} || - mount-keystore-luks --key-file=<( read -s -p PIN: pin && echo ' touch!' 
>&2 && ykchalresp -2 "$pin" ) || + mount-keystore-luks --key-file=<( printf %s "@{config.networking.hostName}" ) || return + mount-keystore-luks --key-file=/dev/disk/by-partlabel/bootkey-@{config.networking.hostName!hashString.sha256:0:8} || return + mount-keystore-luks --key-file=<( read -s -p PIN: pin && echo ' touch!' >&2 && @{native.yubikey-personalization}/bin/ykchalresp -2 "$pin" ) || return # TODO: try static yubikey challenge mount-keystore-luks || return fi @@ -212,12 +209,13 @@ function open-system { # 1?: diskImages open-luks-layers || return # Load crypt layers and zfs pools: if [[ $( LC_ALL=C type -t ensure-datasets ) == 'function' ]] ; then local poolName ; for poolName in "@{!config.wip.fs.zfs.pools[@]}" ; do - if ! zfs get -o value -H name "$poolName" &>/dev/null ; then - zpool import -f -N -R "$mnt" "$poolName" ; prepend_trap "zpool export '$poolName'" EXIT || return + if [[ ! @{config.wip.fs.zfs.pools!catAttrSets.createDuringInstallation[$poolName]} ]] ; then continue ; fi + if ! @{native.zfs}/bin/zfs get -o value -H name "$poolName" &>/dev/null ; then + @{native.zfs}/bin/zpool import -f -N -R "$mnt" "$poolName" && prepend_trap "@{native.zfs}/bin/zpool export '$poolName'" EXIT || return fi - : | zfs load-key -r "$poolName" || true + : | @{native.zfs}/bin/zfs load-key -r "$poolName" || true + ensure-datasets "$mnt" '^'"$poolName"'($|[/])' || return done - ensure-datasets "$mnt" || return fi prepend_trap "unmount-system '$mnt'" EXIT && mount-system "$mnt" '' 1 || return diff --git a/lib/setup-scripts/utils.sh b/lib/setup-scripts/utils.sh index 451d9f0..31af7b6 100644 --- a/lib/setup-scripts/utils.sh +++ b/lib/setup-scripts/utils.sh @@ -93,18 +93,30 @@ function prompt-new-password {( set -u # 1: usage )} ## Runs an installer hook script, optionally stepping through the script. 
-function run-hook-script {( set -eu # 1: title, 2: scriptPath +function run-hook-script {( # 1: title, 2: scriptPath trap - EXIT # start with empty traps for sub-shell if [[ ${args[inspectScripts]:-} && "$(cat "$2")" != $'' ]] ; then echo "Running $1 commands. For each command printed, press Enter to continue or Ctrl+C to abort the installation:" 1>&2 # (this does not help against intentionally malicious scripts, it's quite easy to trick this) BASH_PREV_COMMAND= ; set -o functrace ; trap 'if [[ $BASH_COMMAND != "$BASH_PREV_COMMAND" ]] ; then echo -n "> $BASH_COMMAND" >&2 ; read ; fi ; BASH_PREV_COMMAND=$BASH_COMMAND' debug fi + set -e # The called script snippets should not rely on this, but neither should this function rely on the scripts correctly exiting on errors. source "$2" )} ## Lazily builds a nix derivation at run time, instead of when building the script. # When maybe-using packages that take long to build, instead of »at{some.package.out}«, use: »$( build-lazy at{some.package.drvPath!unsafeDiscardStringContext} out )« function build-lazy { # 1: drvPath, 2?: output - PATH=$PATH:@{native.openssh}/bin @{native.nix}/bin/nix --extra-experimental-features nix-command build --no-link --json ${args[quiet]:+--quiet} $1 | @{native.jq}/bin/jq -r .[0].outputs.${2:-out} + # Nix v2.14 introduced a new syntax for selecting the output of multi-output derivations, v2.15 then changed the default when passing the path to an on-disk derivation. »--print-out-paths« is also not available in older versions. 
+ if version-gr-eq "@{native.nix.version}" '2.14' ; then + PATH=$PATH:@{native.openssh}/bin @{native.nix}/bin/nix --extra-experimental-features nix-command build --no-link --print-out-paths ${args[quiet]:+--quiet} "$1"'^'"${2:-out}" + else + PATH=$PATH:@{native.openssh}/bin @{native.nix}/bin/nix --extra-experimental-features nix-command build --no-link --json ${args[quiet]:+--quiet} "$1" | @{native.jq}/bin/jq -r .[0].outputs."${2:-out}" + fi } + +## Tests whether (returns 0/success if) the first version argument is greater/less than (or equal) the second version argument. +function version-gr-eq { printf '%s\n%s' "$1" "$2" | sort -C -V -r; } +function version-lt-eq { printf '%s\n%s' "$1" "$2" | sort -C -V ; } +function version-gt { ! version-gt-eq "$2" "$1" ; } +function version-lt { ! version-lt-eq "$2" "$1" ; } diff --git a/lib/setup-scripts/zfs.sh b/lib/setup-scripts/zfs.sh index 5cfcb79..acba11c 100644 --- a/lib/setup-scripts/zfs.sh +++ b/lib/setup-scripts/zfs.sh @@ -32,7 +32,7 @@ function create-zpool { # 1: mnt, 2: poolName fi done @{native.kmod}/bin/modprobe zfs || true - <$keySrc @{native.xxd}/bin/xxd -l 32 -c 64 -p | ( PATH=@{native.zfs}/bin ; ${_set_x:-:} ; zpool create "${zpoolCreate[@]}" -R "$mnt" "${pool[name]}" "${vdevs[@]}" ) || return + <$keySrc @{native.xxd}/bin/xxd -l 32 -c 64 -p | ( PATH=@{native.zfs}/bin ; ${_set_x:-:} ; zpool create ${args[zpool-force]:+-f} "${zpoolCreate[@]}" -R "$mnt" "${pool[name]}" "${vdevs[@]}" ) || return if [[ $keySrc == /dev/urandom ]] ; then @{native.zfs}/bin/zfs unload-key "$poolName" &>/dev/null ; fi prepend_trap "@{native.zfs}/bin/zpool export '$poolName'" EXIT || return @@ -43,14 +43,13 @@ function create-zpool { # 1: mnt, 2: poolName ## Ensures that the system's datasets exist and have the defined properties (but not that they don't have properties that aren't defined). 
# The pool(s) must exist, be imported with root prefix »$mnt«, and (if datasets are to be created or encryption roots to be inherited) the system's keystore must be open (see »mount-keystore-luks«) or the keys be loaded. # »keystatus« and »mounted« of existing datasets should remain unchanged, newly crated datasets will not be mounted but have their keys loaded. -function ensure-datasets {( set -eu # 1: mnt, 2?: filterExp +function ensure-datasets { # 1: mnt, 2?: filterExp if (( @{#config.wip.fs.zfs.datasets[@]} == 0 )) ; then \return ; fi local mnt=$1 ; while [[ "$mnt" == */ ]] ; do mnt=${mnt:0:(-1)} ; done # (remove any tailing slashes) local filterExp=${2:-'^'} local tmpMnt=$(mktemp -d) ; trap "rmdir $tmpMnt" EXIT local zfs=@{native.zfs}/bin/zfs - : 'Step-through is very verbose and breaks the loop, disabling it for this function' ; trap - debug local name ; while IFS= read -u3 -r -d $'\0' name ; do if [[ ! $name =~ $filterExp ]] ; then printf 'Skipping dataset »%s« since it does not match »%s«\n' "$name" "$filterExp" >&2 ; continue ; fi @@ -67,25 +66,37 @@ function ensure-datasets {( set -eu # 1: mnt, 2?: filterExp if [[ ${props[mountpoint]} == "${current:-/}" ]] ; then unset props[mountpoint] ; fi fi if [[ ${props[keyformat]:-} == ephemeral ]] ; then - cryptRoot=${dataset[name]} ; unset props[keyformat] ; props[keylocation]=file:///dev/null + cryptRoot= ; unset props[keyformat] ; props[keylocation]=file:///dev/null fi if [[ $explicitKeylocation ]] ; then props[keylocation]=$explicitKeylocation ; fi unset props[encryption] ; unset props[keyformat] # can't change these anyway - local propNames=$( IFS=, ; echo "${!props[*]}" ) - local propValues=$( IFS=$'\n' ; echo "${props[*]}" ) - if [[ $propValues != "$( $zfs get -o value -H "$propNames" "${dataset[name]}" )" ]] ; then - local -a zfsSet=( ) ; local propName ; for propName in "${!props[@]}" ; do zfsSet+=( "${propName}=${props[$propName]}" ) ; done - ( PATH=@{native.zfs}/bin ; ${_set_x:-:} ; zfs set 
"${zfsSet[@]}" "${dataset[name]}" ) || return + + function ensure-props { # 1: datasetName + local datasetName=$1 + local propNames=$( IFS=, ; echo "${!props[*]}" ) + local propValues=$( IFS=$'\n' ; echo "${props[*]}" ) + if [[ $propValues != "$( $zfs get -o value -H "$propNames" "$datasetName" )" ]] ; then + local -a zfsSet=( ) ; local propName ; for propName in "${!props[@]}" ; do zfsSet+=( "${propName}=${props[$propName]}" ) ; done + ( PATH=@{native.zfs}/bin ; ${_set_x:-:} ; zfs set "${zfsSet[@]}" "$datasetName" ) || return + fi + if [[ $cryptRoot && $( $zfs get -o value -H encryptionroot "$datasetName" ) != "$cryptRoot" ]] ; then ( # inherit key from parent (which the parent would also already have done if necessary) + if [[ $( $zfs get -o value -H keystatus "$cryptRoot" ) != available ]] ; then + $zfs load-key -L file://"$cryptKey" "$cryptRoot" || exit ; trap "$zfs unload-key $cryptRoot || true" EXIT + fi + if [[ $( $zfs get -o value -H keystatus "$datasetName" ) != available ]] ; then + $zfs load-key -L file://"$cryptKey" "$datasetName" || exit # will unload with cryptRoot + fi + ( PATH=@{native.zfs}/bin ; ${_set_x:-:} ; zfs change-key -i "$datasetName" ) || exit + ) || return ; fi + } + ensure-props "${dataset[name]}" || return + + if [[ ${dataset[recursiveProps]:-} ]] ; then + if [[ ${props[mountpoint]:-} != none ]] ; then unset props[mountpoint] ; fi + while IFS= read -u3 -r name ; do + ensure-props "$name" || return + done 3< <( $zfs list -H -o name -r "${dataset[name]}" | LC_ALL=C sort | tail -n +2 ) fi - if [[ $cryptRoot && $($zfs get -o value -H encryptionroot "${dataset[name]}") != "$cryptRoot" ]] ; then ( # inherit key from parent (which the parent would also already have done if necessary) - if [[ $($zfs get -o value -H keystatus "$cryptRoot") != available ]] ; then - $zfs load-key -L file://"$cryptKey" "$cryptRoot" || exit ; trap "$zfs unload-key $cryptRoot || true" EXIT - fi - if [[ $($zfs get -o value -H keystatus "${dataset[name]}") != available 
]] ; then - $zfs load-key -L file://"$cryptKey" "${dataset[name]}" || exit # will unload with cryptRoot - fi - ( PATH=@{native.zfs}/bin ; ${_set_x:-:} ; zfs change-key -i "${dataset[name]}" ) || exit - ) || return ; fi else ( # create dataset if [[ ${props[keyformat]:-} == ephemeral ]] ; then @@ -99,11 +110,11 @@ function ensure-datasets {( set -eu # 1: mnt, 2?: filterExp trap "$zfs unload-key $cryptRoot || true" EXIT fi declare -a zfsCreate=( ) ; for name in "${!props[@]}" ; do zfsCreate+=( -o "${name}=${props[$name]}" ) ; done - ( PATH=@{native.zfs}/bin ; ${_set_x:-:} ; zfs create "${zfsCreate[@]}" "${dataset[name]}" ) + ( PATH=@{native.zfs}/bin ; ${_set_x:-:} ; zfs create "${zfsCreate[@]}" "${dataset[name]}" ) || exit fi if [[ ${props[canmount]} != off ]] ; then ( - mount -t zfs -o zfsutil "${dataset[name]}" $tmpMnt && trap "umount '${dataset[name]}'" EXIT && - chmod 000 "$tmpMnt" && chown "${dataset[uid]}:${dataset[gid]}" -- "$tmpMnt" && chmod "${dataset[mode]}" -- "$tmpMnt" + @{native.util-linux}/bin/mount -t zfs -o zfsutil "${dataset[name]}" $tmpMnt && trap "@{native.util-linux}/bin/umount '${dataset[name]}'" EXIT && + chmod 000 -- "$tmpMnt" && chown "${dataset[uid]}:${dataset[gid]}" -- "$tmpMnt" && chmod "${dataset[mode]}" -- "$tmpMnt" ) || exit ; fi if [[ $explicitKeylocation && $explicitKeylocation != "${props[keylocation]:-}" ]] ; then ( PATH=@{native.zfs}/bin ; ${_set_x:-:} ; zfs set keylocation="$explicitKeylocation" "${dataset[name]}" ) || exit @@ -117,8 +128,7 @@ function ensure-datasets {( set -eu # 1: mnt, 2?: filterExp ( PATH=@{native.zfs}/bin ; ${_set_x:-:} ; zfs allow -$who "${allows[$who]}" "${dataset[name]}" >&2 ) || return done done 3< <( printf '%s\0' "@{!config.wip.fs.zfs.datasets[@]}" | LC_ALL=C sort -z ) - -)} +} ## Given the name (»datasetPath«) of a ZFS dataset, this deducts crypto-related options from the declared keys (»config.wip.fs.keystore.keys."zfs/..."«). 
function get-zfs-crypt-props { # 1: datasetPath, 2?: name_cryptProps, 3?: name_cryptKey, 4?: name_cryptRoot diff --git a/lib/vars.nix b/lib/vars.nix index 445bd3a..9bc8fdd 100644 --- a/lib/vars.nix +++ b/lib/vars.nix @@ -4,6 +4,9 @@ in rec { ## Data Structures + ## Given a function mapping a name to its value and a list of names, generate that mapping as attribute set. (This is the same as »lib.attrsets.genAttrs« with swapped arguments.) + namesToAttrs = toValue: names: builtins.listToAttrs (map (name: { inherit name; value = toValue name; }) names); + # Given a function and a list, calls the function for each list element, and returns the merge of all attr sets returned by the function # attrs = mapMerge (value: { "${newKey}" = newValue; }) list # attrs = mapMerge (key: value: { "${newKey}" = newValue; }) attrs @@ -43,9 +46,9 @@ in rec { flipNames = attrs: let l1names = builtins.attrNames attrs; l2names = builtins.concatMap builtins.attrNames (builtins.attrValues attrs); - in mapMerge (l2name: { - ${l2name} = mapMerge (l1name: if attrs.${l1name}?${l2name} then { ${l1name} = attrs.${l1name}.${l2name}; } else { }) l1names; - }) l2names; + in namesToAttrs (l2name: ( + mapMerge (l1name: if attrs.${l1name}?${l2name} then { ${l1name} = attrs.${l1name}.${l2name}; } else { }) l1names + )) l2names; # Like »builtins.catAttrs«, just for attribute sets instead of lists: Given an attribute set of attribute sets (»{ ${l1name}.${l2name} = value; }«) and the »name« of a second-level attribute, this returns the attribute set mapping directly from the first level's names to the second-level's values (»{ ${l1name} = value; }«), omitting any first-level attributes that lack the requested second-level attribute. 
catAttrSets = name: attrs: (builtins.mapAttrs (_: value: value.${name}) (lib.filterAttrs (_: value: value?${name}) attrs)); diff --git a/lib/vps-worker.nix.md b/lib/vps-worker.nix.md index 14ad96d..acde3b2 100644 --- a/lib/vps-worker.nix.md +++ b/lib/vps-worker.nix.md @@ -151,7 +151,7 @@ in ({ services.openssh.enable = true; services.openssh.extraConfig = lib.mkOrder (-1) "Include ${builtins.toFile "user-root.conf" ''Match User root - AuthorizedKeysFile /local/etc/ssh/login.pub + AuthorizedKeysFile /local/etc/ssh/loginKey.pub ''}"; networking.firewall.logRefusedConnections = false; # it's super spam-my and pretty irrelevant documentation.nixos.enable = lib.mkDefault false; # It just takes way to long to make these, and they rebuild way too often ... @@ -273,88 +273,44 @@ in ({ cerateCmd = '' ${prepend_trap} set -o pipefail -u${if debug then "x" else ""} - beQuiet=cat ; if [[ ''${quiet:-} ]] ; then beQuiet=: ; fi keys=${keysOutPath} ; rm -rf "$keys" && mkdir -p "$keys" && chmod 750 "$keys" || exit - for name in setupHost workerHost login ; do - ${pkgs.openssh}/bin/ssh-keygen -q -N "" -t ed25519 -f "$keys"/$name -C $name || exit + for ketName in hostKey loginKey ; do + ${pkgs.openssh}/bin/ssh-keygen -q -N "" -t ed25519 -f "$keys"/$ketName -C $ketName || exit done - echo 'Building the worker image' - image=$(mktemp -u) && prepend_trap "rm -f '$image'" EXIT - SUDO_USER= ${lib.wip.writeSystemScripts { inherit system pkgs; }} install-system --inspect-cmd=' - keys='$( printf %q "$keys" )' ; if [[ -r /tmp/shared/workerHost ]] ; then keys=/tmp/shared ; fi + SUDO_USER= ${lib.wip.writeSystemScripts { inherit system pkgs; }} deploy-system-to-hetzner-vps --inspect-cmd=' + keys='$( printf %q "$keys" )' ; if [[ ''${args[no-vm]:-} ]] ; then keys=/tmp/shared ; fi # "no-vm" is set inside the VM mkdir -p $mnt/local/etc/ssh/ || exit - cp -aT "$keys"/login.pub $mnt/local/etc/ssh/login.pub || exit - cp -aT "$keys"/workerHost $mnt/local/etc/ssh/ssh_host_ed25519_key || exit - cp -aT 
"$keys"/workerHost.pub $mnt/local/etc/ssh/ssh_host_ed25519_key.pub || exit + cp -aT "$keys"/loginKey.pub $mnt/local/etc/ssh/loginKey.pub || exit + cp -aT "$keys"/hostKey $mnt/local/etc/ssh/ssh_host_ed25519_key || exit + cp -aT "$keys"/hostKey.pub $mnt/local/etc/ssh/ssh_host_ed25519_key.pub || exit chown 0:0 $mnt/local/etc/ssh/* || exit - ' ''${forceVmBuild:+--vm} --vm-shared="$keys" ${if debug then "--trace" else "--quiet"} -- $image & buildPid=$! - wait $buildPid || exit - - echo 'Creating the VPS' - prepend_trap 'if [[ ! ''${buildSucceeded:-} ]] ; then ( '${esc killCmd}' ) ; fi' EXIT - cat ${ubuntu-init} | - ${pkgs.perl}/bin/perl -pe 's|[@]sshLoginPub[@]|'"$( cat "$keys"/login.pub )"'|' | - ${pkgs.perl}/bin/perl -pe 's|[@]sshSetupHostPub[@]|'"$( cat "$keys"/setupHost.pub )"'|' | - ${pkgs.perl}/bin/perl -pe 's|[@]sshSetupHostPriv_prefix8[@]|'"$( cat "$keys"/setupHost | ${pkgs.perl}/bin/perl -pe 's/^/ /' )"'|' | - ${hcloud} server create --image=ubuntu-22.04 --name=${esc name} --type=${esc serverType} --user-data-from-file - ${if suppressCreateEmail then "--ssh-key dummy" else ""} | $beQuiet || exit - # ${hcloud} server poweron ${esc name} || exit # --start-after-create=false + ' ''${forceVmBuild:+--vm} --vm-shared="$keys" ${if debug then "--trace" else "--quiet"} ${lib.optionalString ignoreKill "--vps-keep-on-build-failure"} ${lib.optionalString suppressCreateEmail "--vps-suppress-create-email"} "$@" -- ${esc name} ${esc serverType} || exit # --parallel-build-deploy + rm "$keys"/hostKey || exit # don't need this anymore ip=$( ${hcloud} server ip ${esc name} ) ; echo "$ip" >"$keys"/ip - printf "%s %s\n" "$ip" "$( cat "$keys"/setupHost.pub )" >"$keys"/known_hosts - - printf %s 'Preparing the VPS/worker for image transfer ' - sleep 5 ; for i in $(seq 20) ; do sleep 1 ; if ${sshCmd} -- true &>/dev/null ; then break ; fi ; printf . ; done ; printf ' ' - # The system takes a minimum of time to boot, so might as well chill first. 
Then the loop fails (loops) only before the VM is created, afterwards it blocks until sshd is up. - ${sshCmd} 'set -o pipefail -u -e - # echo u > /proc/sysrq-trigger # remount all FSes as r/o (did not cut it) - mkdir /tmp/tmp-root ; mount -t tmpfs -o size=100% none /tmp/tmp-root - umount /boot/efi ; rm -rf /var/lib/{apt,dpkg} /var/cache /usr/lib/firmware /boot ; printf . - cp -axT / /tmp/tmp-root/ ; printf . - mount --make-rprivate / ; mkdir -p /tmp/tmp-root/old-root - pivot_root /tmp/tmp-root /tmp/tmp-root/old-root - for i in dev proc run sys ; do mkdir -p /$i ; mount --move /old-root/$i /$i ; done - systemctl daemon-reexec ; systemctl restart sshd - ' || exit ; echo . - - wait $buildPid || exit ; echo 'Writing worker image to VPS' - cat $image | ${pkgs.zstd}/bin/zstd | ${sshCmd} 'set -o pipefail -u -e - &1 | '$beQuiet' ; sleep 2 - /dev/null - /dev/sda - "$keys"/known_hosts - - printf %s 'Waiting for the worker to boot ' - sleep 2 ; for i in $(seq 20) ; do sleep 1 ; if ${sshCmd} -- true &>/dev/null ; then buildSucceeded=1 ; break ; fi ; printf . ; done ; echo - - if [[ ! 
''${buildSucceeded:-} ]] ; then echo 'Unable to connect to VPS worker, it may not have booted correctly ' 1>&2 ; exit 1 ; fi - - echo '${sshCmd} "$@"' >"$keys"/ssh ; chmod 555 "$keys"/ssh + printf "%s %s\n" "$ip" "$( cat "$keys"/hostKey.pub )" >"$keys"/known_hosts + printf '%s\n' '#!${pkgs.bash}' 'exec ${sshCmd} "$@"' >"$keys"/ssh ; chmod 555 "$keys"/ssh echo ${remoteStore.urlArg} >"$keys"/store ; echo ${remoteStore.builderArg} >"$keys"/builder - echo 'nix ${lib.concatStringsSep " " remoteStore.builderArgs} "$@"' >"$keys"/remote ; chmod 555 "$keys"/remote + printf '%s\n' '#!${pkgs.bash}' 'exec nix ${lib.concatStringsSep " " remoteStore.builderArgs} "$@"' >"$keys"/remote ; chmod 555 "$keys"/remote ''; - sshCmd = ''${pkgs.openssh}/bin/ssh -oUserKnownHostsFile=${keysOutPath}/known_hosts -i ${keysOutPath}/login root@$( cat ${keysOutPath}/ip )''; + sshCmd = ''${pkgs.openssh}/bin/ssh -oUserKnownHostsFile=${keysOutPath}/known_hosts -i ${keysOutPath}/loginKey root@$( cat ${keysOutPath}/ip )''; killCmd = if ignoreKill then ''echo 'debug mode, keeping server '${esc name}'' else ''${hcloud} server delete ${esc name}''; remoteStore = rec { - urlArg = '''ssh://root@'$( cat ${keysOutPath}/ip )'?compress=true&ssh-key='${keysOutPath}'/login&base64-ssh-public-host-key='$( cat ${keysOutPath}/workerHost.pub | ${pkgs.coreutils}/bin/base64 -w0 )''; + urlArg = '''ssh://root@'$( cat ${keysOutPath}/ip )'?compress=true&ssh-key='${keysOutPath}'/loginKey&base64-ssh-public-host-key='$( cat ${keysOutPath}/hostKey.pub | ${pkgs.coreutils}/bin/base64 -w0 )''; builderArg = (lib.concatStringsSep "' '" [ "'ssh://root@'$( cat ${keysOutPath}/ip )'?compress=true'" # 1. URL (including the keys, the URL gets too ong to create the lockfile path) "i686-linux,x86_64-linux" # 2. platform type - "${keysOutPath}/login" # 3. SSH login key + "${keysOutPath}/loginKey" # 3. SSH login key "${toString (serverTypes.${serverType} or { cpu = 4; }).cpu}" # 4. max parallel builds "-" # 5. 
speed factor (relative to other builders, so irrelevant) "nixos-test,benchmark,big-parallel" # 6. builder supported features (no kvm) "-" # 7. job required features - ''$( cat ${keysOutPath}/workerHost.pub | ${pkgs.coreutils}/bin/base64 -w0 )'' # 8. builder host key + ''$( cat ${keysOutPath}/hostKey.pub | ${pkgs.coreutils}/bin/base64 -w0 )'' # 8. builder host key ]); builderArgs = [ "--max-jobs" "0" # don't build locally @@ -365,7 +321,7 @@ in ({ }; shell = pkgs.writeShellScriptBin "shell-${name}" '' - quiet=1 ${createScript} || exit ; trap ${killScript} EXIT || exit + ${createScript} "$@" || exit ; trap ${killScript} EXIT || exit ${pkgs.bashInteractive}/bin/bash --init-file ${pkgs.writeText "init-${name}" '' # Execute bash's default logic if no --init-file was provided (to inherit from a normal shell): diff --git a/modules/base.nix.md b/modules/base.nix.md index 2a67db7..16794e1 100644 --- a/modules/base.nix.md +++ b/modules/base.nix.md @@ -119,11 +119,16 @@ in { environment.shellAliases = { "with" = pkgs.writeShellScript "with" '' - help='Synopsys: With the Nix packages »PKGS« (as attribute path read from the imported »nixpkgs« specified on the »NIX_PATH«), run »CMD« with »ARGS«, or »bash --login« if no »CMD« is supplied. - Usage: with [-h] PKGS... [-- [CMD [ARGS...]]]' + help='Synopsys: With the Nix packages »PKGS« (as attribute path read from the imported »nixpkgs« specified on the »NIX_PATH«), run »CMD« with »ARGS«, or »bash --login« if no »CMD« is supplied. In the second form, »CMD« is the same as the last »PKGS« entry. + Usage: with [-h] PKGS... [-- [CMD [ARGS...]]] + with [-h] PKGS... [. [ARGS...]]' pkgs=( ) ; while (( "$#" > 0 )) ; do { if [[ $1 == -h ]] ; then echo "$help" ; exit 0 ; fi - if [[ $1 == -- ]] ; then shift ; break ; fi ; pkgs+=( "$1" ) + if [[ $1 == -- ]] ; then shift ; break ; fi + if [[ $1 == . 
]] ; then + shift ; (( ''${#pkgs[@]} == 0 )) || set -- "''${pkgs[-1]}" "$@" ; break + fi + pkgs+=( "$1" ) } ; shift ; done if (( ''${#pkgs[@]} == 0 )) ; then echo "$help" 1>&2 ; exit 1 ; fi if (( "$#" == 0 )) ; then set -- bash --login ; fi diff --git a/modules/fs/disks.nix.md b/modules/fs/disks.nix.md index af7aedf..e51c21b 100644 --- a/modules/fs/disks.nix.md +++ b/modules/fs/disks.nix.md @@ -33,6 +33,7 @@ in { t;1;c # type ; part1 ; W95 FAT32 (LBA) a;1 # active/boot ; part1 ''; }; + partitionDuringInstallation = (lib.mkEnableOption "partitioning of this disk during system installation. If disabled, the disk needs to be partitioned, and its filesystems formatted, already or manually. Declaring filesystems or LUKS mappings on unpartitioned devices via `/dev/disk/by-partlabel/...` will currently break the installation.") // { default = true; }; }; }))); default = { primary = { }; }; apply = lib.filterAttrs (k: v: v != null); diff --git a/modules/fs/keystore.nix.md b/modules/fs/keystore.nix.md index d2b5b3a..60f2444 100644 --- a/modules/fs/keystore.nix.md +++ b/modules/fs/keystore.nix.md @@ -92,7 +92,7 @@ in let module = { fs.disks.partitions."keystore-${hash}" = { type = lib.mkDefault "8309"; order = lib.mkDefault 1375; disk = lib.mkDefault "primary"; size = lib.mkDefault "32M"; }; fs.disks.postFormatCommands = '' ( : 'Copy the live keystore to its primary persistent location:' - tmp=$(mktemp -d) ; mount "/dev/mapper/keystore-${hash}" $tmp ; trap "umount $tmp ; rmdir $tmp" EXIT + tmp=$(mktemp -d) && ${pkgs.util-linux}/bin/mount "/dev/mapper/keystore-${hash}" $tmp && trap "${pkgs.util-linux}/bin/umount $tmp && rmdir $tmp" EXIT && ${pkgs.rsync}/bin/rsync -a ${keystore}/ $tmp/ ) ''; diff --git a/modules/fs/zfs.nix.md b/modules/fs/zfs.nix.md index 2121037..184bdb4 100644 --- a/modules/fs/zfs.nix.md +++ b/modules/fs/zfs.nix.md @@ -24,10 +24,10 @@ in let module = { type = lib.types.attrsOf (lib.types.nullOr (lib.types.submodule ({ name, ... 
}: { options = { name = lib.mkOption { description = "Attribute name as name of the pool."; type = lib.types.str; default = name; readOnly = true; }; vdevArgs = lib.mkOption { description = "List of arguments that specify the virtual devices (vdevs) used when initially creating the pool. Can consist of the device type keywords and partition labels. The latter are prefixed with »/dev/mapper/« if a mapping with that name is configured or »/dev/disk/by-partlabel/« otherwise, and then the resulting argument sequence is is used verbatim in »zpool create«."; type = lib.types.listOf lib.types.str; default = [ name ]; example = [ "raidz1" "data1-..." "data2-..." "data3-..." "cache" "cache-..." ]; }; - props = lib.mkOption { description = "Zpool properties to pass when creating the pool. May also set »feature@...« and »compatibility«."; type = lib.types.attrsOf (lib.types.nullOr lib.types.str); default = { }; }; - createDuringInstallation = (lib.mkEnableOption "creation of this pool during system installation. If disabled, the pool needs to exist already or be created manually and the pools disk devices are expected to be present from the first boot onwards") // { default = true; }; - autoApplyDuringBoot = (lib.mkEnableOption "automatically re-applying changed dataset properties and create missing datasets in the initramfs phase during boot for this pool. This can be useful since the keystore is open but no datasets are mounted at that time") // { default = true; }; - autoApplyOnActivation = (lib.mkEnableOption "automatically re-applying changed dataset properties and create missing datasets on system activation for this pool. This may fail for some changes since datasets may be mounted and the keystore is usually closed at this time. Enable ».autoApplyDuringBoot« and reboot to address this") // { default = true; }; + props = lib.mkOption { description = "Zpool properties to pass when creating the pool. 
May also set »feature@...« and »compatibility«."; type = lib.types.attrsOf (lib.types.nullOr lib.types.str); default = { }; apply = lib.filterAttrs (k: v: v != null); }; + createDuringInstallation = (lib.mkEnableOption "creation of this pool during system installation. If disabled, the pool needs to exist already or be created manually and the pool's disk devices are expected to be present from the first boot onwards") // { default = true; }; + autoApplyDuringBoot = lib.mkOption { description = "Whether to automatically re-apply dataset properties and create missing child datasets in the initramfs phase during boot after this pool's declared datasets changed. This does not get triggered by external changes to the ZFS pool, but when triggered by changes in the declaration, it may affect/revert/correct them. Doing this in the initrd can be useful since the keystore is open but no datasets are mounted at that time."; type = lib.types.bool; default = true; }; + autoApplyOnActivation = lib.mkOption { description = "Same as »autoApplyDuringBoot«, but during system activation, not in the initrd. This works without rebooting, but may fail to apply some changes since datasets may be mounted and the keystore is usually closed at this time."; type = lib.types.bool; default = true; }; }; config = { props.autotrim = lib.mkDefault "on"; # These days, there should be no reason not to trim. props.ashift = lib.mkOptionDefault "12"; # be explicit @@ -42,7 +42,8 @@ in let module = { description = "ZFS datasets managed and mounted on this host."; type = lib.types.attrsOf (lib.types.nullOr (lib.types.submodule ({ name, ... 
}: { options = { name = lib.mkOption { description = "Attribute name as name of the dataset."; type = lib.types.str; default = name; readOnly = true; }; - props = lib.mkOption { description = "ZFS properties to set on the dataset."; type = lib.types.attrsOf (lib.types.nullOr lib.types.str); default = { }; }; + props = lib.mkOption { description = "ZFS properties to set on the dataset."; type = lib.types.attrsOf (lib.types.nullOr lib.types.str); default = { }; apply = lib.filterAttrs (k: v: v != null); }; + recursiveProps = lib.mkOption { description = "Whether to apply this dataset's ».props« (but not ».permissions«) recursively to its children, even those that are not declared. This applies to invocations of the »ensure-dataset« function (called either explicitly or after changes by »...pools.*.autoApplyDuringBoot/autoApplyOnActivation«) and makes sense for declared leaf datasets that will have children that the NixOS configuration is not aware of (like receive targets)."; type = lib.types.bool; default = false; }; mount = lib.mkOption { description = "Whether to create a »fileSystems« entry to mount the dataset. »noauto« creates an entry with that option set."; type = lib.types.enum [ true "noauto" false ]; default = false; }; permissions = lib.mkOption { description = ''Permissions to set on the dataset via »zfs allow«. 
Attribute names should express propagation/who and match »/^[dl]?([ug]\d+|e)$/«, the values are the list of permissions granted.''; type = lib.types.attrsOf lib.types.commas; default = { }; }; uid = lib.mkOption { description = "UID owning the dataset's root directory."; type = lib.types.ints.unsigned; default = 0; }; diff --git a/modules/hardware/hetzner-deploy-vps.sh b/modules/hardware/hetzner-deploy-vps.sh new file mode 100644 index 0000000..41b5a18 --- /dev/null +++ b/modules/hardware/hetzner-deploy-vps.sh @@ -0,0 +1,76 @@ + +## Builds the current system's (single »partitionDuringInstallation«ed) disk image and calls »deploy-image-to-hetzner-vps«. The installation heeds any »args« / CLI flags set. +function deploy-system-to-hetzner-vps { # 1: name, 2: serverType + + if [[ ! ${args[quiet]:-} ]] ; then echo 'Building the worker image' ; fi + local image ; image=$( mktemp -u ) && prepend_trap "rm -f '$image'" EXIT || return + local buildPid ; install-system "$image" & buildPid=$! + if [[ ! ${args[parallel-build-deploy]:-} ]] ; then wait $buildPid || return ; fi + + deploy-image-to-hetzner-vps "$1" "$2" "$image" ${args[parallel-build-deploy]:+"$buildPid"} || return +} + +## Creates a new Hetzner Cloud VPS of name »name« and type/size »serverType«, optionally waits for »waitPid« to exit (successfully), copies the system image from the local »imagePath« to the new VPS, boots it, and waits until port 22 is open. +function deploy-image-to-hetzner-vps { # 1: name, 2: serverType, 3: imagePath, 4?: waitPid + local name=$1 serverType=$2 imagePath=$3 waitPid=${4:-} + local stdout=/dev/stdout ; if [[ ${args[quiet]:-} ]] ; then stdout=/dev/null ; fi + + local work ; work=$( mktemp -d ) && prepend_trap "rm -rf $work" EXIT || return + local keyName ; for keyName in host login ; do + @{native.openssh}/bin/ssh-keygen -q -N "" -t ed25519 -f $work/$keyName -C $keyName || return + done + + echo 'Creating the VPS' >$stdout + if [[ ! 
${args[vps-keep-on-build-failure]:-} ]] ; then prepend_trap "if [[ ! -e $work/buildSucceeded ]] ; then @{native.hcloud}/bin/hcloud server delete '$name' ; fi" EXIT || return ; fi + cat <$stdout || return + # @{native.hcloud}/bin/hcloud server poweron "$name" || return # --start-after-create=false + + local ip ; ip=$( @{native.hcloud}/bin/hcloud server ip "$name" ) && echo "$ip" >$work/ip || return + printf "%s %s\n" "$ip" "$( cat $work/host.pub )" >$work/known_hosts || return + local sshCmd ; sshCmd="@{native.openssh}/bin/ssh -oUserKnownHostsFile=$work/known_hosts -i $work/login root@$ip" + + printf %s 'Preparing the VPS/worker for image transfer ' >$stdout + sleep 5 ; local i ; for i in $(seq 20) ; do sleep 1 ; if $sshCmd -- true &>/dev/null ; then break ; fi ; printf . >$stdout ; done ; printf ' ' >$stdout + # The system takes a minimum of time to boot, so might as well chill first. Then the loop fails (loops) only before the VM is created, afterwards it blocks until sshd is up. + $sshCmd 'set -o pipefail -u -e + # echo u > /proc/sysrq-trigger # remount all FSes as r/o (did not cut it) + mkdir /tmp/tmp-root ; mount -t tmpfs -o size=100% none /tmp/tmp-root + umount /boot/efi ; rm -rf /var/lib/{apt,dpkg} /var/cache /usr/lib/firmware /boot ; printf . >'$stdout' + cp -axT / /tmp/tmp-root/ ; printf . >'$stdout' + mount --make-rprivate / ; mkdir -p /tmp/tmp-root/old-root + pivot_root /tmp/tmp-root /tmp/tmp-root/old-root + for i in dev proc run sys ; do mkdir -p /$i ; mount --move /old-root/$i /$i ; done + systemctl daemon-reexec ; systemctl restart sshd + ' || return ; echo . 
>$stdout + + if [[ $waitPid ]] ; then wait $waitPid || return ; fi + echo 'Writing worker image to VPS' >$stdout + @{native.zstd}/bin/zstd -c "$imagePath" | $sshCmd 'set -o pipefail -u -e + /dev/null ; sleep 2 + /dev/null + /dev/sda + $stdout || return + + printf %s 'Waiting for the worker to boot ' >$stdout + sleep 2 ; local i ; for i in $(seq 20) ; do sleep 1 ; if ( exec 2>&- ; echo >/dev/tcp/"$ip"/22 ) ; then touch $work/buildSucceeded ; break ; fi ; printf . >$stdout ; done ; echo >$stdout + + if [[ ! -e $work/buildSucceeded ]] ; then echo 'Unable to connect to VPS worker, it may not have booted correctly ' 1>&2 ; \return 1 ; fi +} diff --git a/modules/hardware/hetzner-vps.nix.md b/modules/hardware/hetzner-vps.nix.md index 995e9e4..3a48869 100644 --- a/modules/hardware/hetzner-vps.nix.md +++ b/modules/hardware/hetzner-vps.nix.md @@ -7,16 +7,22 @@ This is "device" type specific configuration for Hetzner's cloud VPS VMs. ## Installation / Testing -Hetzner Cloud unfortunately doesn't let one directly upload complete images to be deployed on a new server. -Since the VPSes are Qemu VMs, [installed](../../lib/setup-scripts/README.md#install-system-documentation) images can be tested locally in qemu: +Since the VPSes are qemu VMs, the systems can quite accurately be tested locally in qemu: ```bash - nix run '.#' -- sudo run-qemu $image + nix run '.#' -- run-qemu --install ``` -Once the system works locally, one can (for example) create a new server instance, boot it into rescue mode, and: +Once the system works locally, a fresh installation can be deployed to a new VPS: +```bash + HCLOUD_TOKEN=... nix run '.#' -- deploy-system-to-hetzner-vps '' '' +``` +Or deploy an existing image using `deploy-image-to-hetzner-vps`. The `HCLOUD_TOKEN` needs to be created in the cloud console, is specific to the cloud project, has to have write access, and can be revoked after the installation. 
+ +Alternatively, manually create a new server instance, boot it into rescue mode, and copy the [installed](../../lib/setup-scripts/README.md#install-system-documentation) image to it: ```bash cat $image | zstd | ssh $newServerIP 'zstdcat >/dev/sda && sync' ``` -If the image is very large, even if it is mostly empty and with compression, this can take quite a while. + +If the system image is very large, even if it is mostly empty and with compression, the copy process can take quite a while. Declaring a smaller image size and expanding it on boot may be a workaround, but (since it depends on the disk partitioning and filesystems used) is out of scope here. @@ -35,11 +41,14 @@ in { config = lib.mkIf cfg.enable ({ - ${prefix}.bootloader.extlinux.enable = true; + ${prefix} ={ + bootloader.extlinux.enable = true; + setup.scripts.hetzner-deploy.path = ./hetzner-deploy-vps.sh; + }; networking.interfaces.eth0.useDHCP = true; networking.interfaces.eth0.ipv6.routes = [ { address = "::"; prefixLength = 0; via = "fe80::1"; } ]; - networking.timeServers = [ "ntp1.hetzner.de" "ntp2.hetzner.com" "ntp3.hetzner.net" ]; # overwrite NTP + networking.timeServers = [ "ntp1.hetzner.de" "ntp2.hetzner.com" "ntp3.hetzner.net" ]; # (these should be most accurate) profiles.qemu-guest.enable = true; diff --git a/modules/patches/filesystem.nix.md b/modules/patches/filesystem.nix.md index 6c8ba80..98e1f7c 100644 --- a/modules/patches/filesystem.nix.md +++ b/modules/patches/filesystem.nix.md @@ -14,7 +14,11 @@ in { options = { fileSystems = lib.mkOption { type = lib.types.attrsOf (lib.types.submodule [ { options = { - preMountCommands = lib.mkOption { description = "Commands to be run as root every time before mounting this filesystem, but after all its dependents were mounted (TODO: or does this run just once per boot?). 
This does not order itself before or after `systemd-fsck@\${utils.escapeSystemdPath device}.service`."; type = lib.types.lines; default = ""; }; + preMountCommands = lib.mkOption { description = '' + Commands to be run as root every time before mounting this filesystem, but after all its dependents were mounted (TODO: or does this run just once per boot?). + This does not order itself before or after `systemd-fsck@''${utils.escapeSystemdPath device}.service`. + Note that if a symlink exists at a mount point when systemd's fstab-generator runs, it will read/resolve the symlink and use that as the mount point, resulting in mismatching unit names for that mount, effectively disabling its `preMountCommands`. + ''; type = lib.types.lines; default = ""; }; }; } ]); }; }; diff --git a/modules/vm-exec.nix.md b/modules/vm-exec.nix.md index c4bc312..0ca1296 100644 --- a/modules/vm-exec.nix.md +++ b/modules/vm-exec.nix.md @@ -138,11 +138,11 @@ in let hostModule = { fileSystems = lib.mkVMOverride { "/nix/var/nix/db.lower" = { fsType = "9p"; device = "nix-var-nix-db"; neededForBoot = true; - options = [ "trans=virtio" "version=9p2000.L" "msize=65536" "ro" ]; + options = [ "trans=virtio" "version=9p2000.L" "msize=4194304" "ro" ]; }; - "/nix/store".options = lib.mkAfter [ "ro" "msize=65536" ]; + "/nix/store".options = lib.mkAfter [ "ro" "msize=4194304" ]; "/nix/store".mountPoint = "/nix/store.lower"; - }; # mount -t 9p -o trans=virtio -o version=9p2000.L -o msize=65536 nix-var-nix-db /nix/var/nix/db + }; # mount -t 9p -o trans=virtio -o version=9p2000.L -o msize=4194304 nix-var-nix-db /nix/var/nix/db virtualisation.qemu.options = [ "-virtfs local,path=/nix/var/nix/db,security_model=none,mount_tag=nix-var-nix-db,readonly=on" ]; # (doing this manually to pass »readonly«, to not ever corrupt the host's Nix DBs) }) ({ @@ -155,9 +155,9 @@ in let hostModule = { }) ({ - virtualisation = if (lib.fileContents "${pkgs.path}/.version") > "22.05" then { host.pkgs = pkgs.buildPackages; } else 
{ }; + virtualisation = if (builtins.substring 0 5 pkgs.lib.version) > "22.05" then { host.pkgs = pkgs.buildPackages; } else { }; }) ({ - virtualisation.qemu.package = lib.mkIf (pkgs.buildPackages.system != pkgs.system) cfg.virtualisation.host.pkgs.qemu_full; + virtualisation.qemu.package = lib.mkIf (pkgs.buildPackages.system != pkgs.system) (cfg.virtualisation.host or { pkgs = pkgs.buildPackages; }).pkgs.qemu_full; }) ({