fix installation cleanup,

disable ZFS hibernation (workaround),
enable (very very slow) cross-ISA VM installation
Niklas Gollenstede 2023-06-27 12:53:04 +02:00
parent 5c44084024
commit 1a47c3fa75
11 changed files with 45 additions and 38 deletions

View File

@@ -2,15 +2,15 @@
# Automated NixOS CLI Installer
NixOS is traditionally either installed by creating and populating filesystems [by hand](https://nixos.org/manual/nixos/stable/index.html#sec-installation-manual-partitioning), or by scripts that each only support one or a limited set of filesystem setups (the graphical installer falls somewhere between the two).
The mounted filesystems and some drivers, etc, would then be captured in a `hardware-configuration.nix`.
The mounted filesystems and some hardware aspects are then captured in a `hardware-configuration.nix`.
Both of these approaches completely contradict the declarative nature and flexibility of Nix(OS).
In contrast to that, this flake implements a very flexible, yet fully automated, NixOS installer (framework).
In contrast to that, this flake implements a very flexible, declaratively driven, and fully automated NixOS installer (framework).
Hosts can define any number of [disks and partitions](./modules/setup/disks.nix.md) on them.
If the `fileSystems` use `partlabel`s to identify their devices, then they can be associated with their partitions even before they are formatted -- and can thus automatically be formatted during the installation.
ZFS [pools and datasets](./modules/setup/zfs.nix.md), and LUKS and ZFS [encryption](./modules/setup/keystore.nix.md) are also supported.
For setups with ephemeral `/` (root filesystem), [`modules/setup/temproot.nix.md`](./modules/setup/temproot.nix.md) provides various preconfigured setups.
This, together with convenient defaults for most of the options, means that
simple setups (see the `minimal` [example](./hosts/example.nix.md)) only require a handful of config lines, while complex multi-disk setups (see the `raidz` [example](./hosts/example.nix.md)) are just as possible.
This, together with convenient defaults for most of the options, means that simple setups (see the `minimal` [example](./hosts/example.nix.md)) only require a handful of config lines, while complex multi-disk setups (see the `raidz` [example](./hosts/example.nix.md)) are just as possible.
A set of composable [`setup-scripts`](./lib/setup-scripts/) can then [automatically](https://github.com/NiklasGollenstede/nix-functions/blob/master/lib/scripts.nix#substituteImplicit) grab this information and perform a completely automated installation.
The only things that the scripts will interactively prompt for are external secrets (e.g., passwords), iff required by the new host.
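
To make the above concrete, here is a minimal hedged sketch of such a declaration (hypothetical names; the exact option paths are defined in the linked `disks` module, whose `setup` prefix is configurable):

```nix
{ # hypothetical single-partition host (a sketch, not one of the shipped examples):
  setup.disks.partitions."system-example" = { size = "8G"; }; # partition name == GPT partlabel
  fileSystems."/system" = { # associated with the partition above via its partlabel,
    # even before it exists, and thus formatted automatically during installation:
    device = "/dev/disk/by-partlabel/system-example"; fsType = "ext4";
  };
}
```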
@@ -18,7 +18,7 @@ When using [`mkSystemsFlake`](./lib/nixos.nix#mkSystemsFlake), the installation
```bash
nix run .'#'hostname -- install-system /path/to/disk
```
Help output with information on available commands and flags is available via:
Help output with information on available commands and flags is [available here](https://github.com/NiklasGollenstede/nixos-installer/wiki/help-Output) or via:
```bash
nix run .'#'hostname -- --help
```

Binary file not shown.

View File

@@ -135,6 +135,4 @@ in { preface = { # (any »preface« options have to be defined here)
boot.kernelParams = [ /* "console=tty1" */ "console=ttyS0" "boot.shell_on_fail" ];
boot.zfs.allowHibernation = lib.mkForce false; # Ugh: https://github.com/NixOS/nixpkgs/commit/c70f0473153c63ad1cf6fbea19f290db6b15291f
}) ]; }

View File

@@ -168,17 +168,17 @@ in rec {
description = ''
Call per-host setup and maintenance commands. Most importantly, »install-system«.
'';
ownPath = if exported then "nix run REPO#${system.config.${installer}.outputName} --" else "$0";
ownPath = if (system.config.${installer}.outputName != null) then "nix run REPO#${system.config.${installer}.outputName} --" else "$0";
usageLine = ''
Usage:
%s [sudo] [bash] [--FLAG[=value]]... [--] [COMMAND [ARG]...]
${lib.optionalString exported ''
Where »REPO« is the path to a flake repo using »mkSystemsFlake« for it's »apps« output.
${lib.optionalString (system.config.${installer}.outputName != null) ''
Where »REPO« is the path to the flake repo exporting this system (»${system.config.${installer}.outputName}«) using »mkSystemsFlake«.
''} If the first argument (after the first »--«) is »sudo«, then the program will re-execute itself as root using sudo (minus that »sudo« argument).
If the (then) first argument is »bash«, or if there are no (more) arguments, it will execute an interactive shell with the »COMMAND«s (bash functions and exported Nix values used by them) sourced.
If a »FLAG« »--command« is supplied, then the first positional argument (»COMMAND«) is »eval«ed as bash instructions; otherwise, the first argument should be one of the »COMMAND«s below, which will be called with the positional CLI »ARG«s as arguments.
»FLAG«s may be set to customize the behavior of »COMMAND« or any sub-commands it or »SCRIPT« call.
»FLAG«s may be set to customize the behavior of »COMMAND« or any sub-commands it calls.
»COMMAND« should be one of:%s
@@ -188,17 +188,19 @@ in rec {
Examples:
Install the host named »$target« to the image file »/tmp/system-$target.img«:
$ nix run .#$target -- install-system /tmp/system-$target.img
Install the system »$host« to the image file »/tmp/system-$host.img«:
$ nix run .#$host -- install-system /tmp/system-$host.img
Test a fresh installation of »$host« in a qemu VM:
$ nix run .#$host -- run-qemu --install=always
Run an interactive bash session with the setup functions in the context of the current host:
$ nix run /etc/nixos/#$(hostname)
# Now run any of the »COMMAND«s above, or inspect/use the exported Nix variables (»declare -p config_<TAB><TAB>«).
Now run any of the »COMMAND«s above, or inspect/use the exported Nix variables (»declare -p config_<TAB><TAB>«).
Run a root session in the context of a different host (useful if Nix is not installed for root on the current host):
$ nix run .#other-host -- sudo
'';
exported = system.options.${installer}.outputName.isDefined;
tools = lib.unique (map (p: p.outPath) (lib.filter lib.isDerivation pkgs.stdenv.allowedRequisites));
esc = lib.escapeShellArg;
in pkgs.writeShellScript "scripts-${name}" ''

View File

@@ -15,8 +15,8 @@ Since the installation needs to format and mount (image files as) disks, it need
* be run with the »sudo« argument (see »--help« output; this runs »nix« commands as the original user, and the rest as root),
* or automatically perform the installation in a qemu VM (see »--vm« flag).
Installing inside the VM is safer (will definitely only write wi the supplied »diskPaths«), more secure (executes the VM), and does not require privilege elevation, but does currently only work for the same ISA, is significantly slower (painfully slow without KVM), and may break custom »*Commands« hooks (esp. those passing in secrets).
Without VM, installations across different ISAs (e.g. from an x64 desktop to a Raspberry Pi microSD) works if the installing host is NixOS and sets »boot.binfmt.emulatedSystems« for the target systems ISA, or on other Linux with a matching »binfmt_misc« registration with the preload (F) flag.
Installing inside the VM is safer (will definitely only write to the supplied »diskPaths«), more secure (executes everything inside the VM), and does not require privilege elevation, but is significantly slower (painfully slow without KVM) and may break custom »*Commands« hooks (esp. those passing in secrets). Across ISAs, the VM installation is even slower, taking many hours for even a simple system.
Without a VM, installation across different ISAs (e.g. from an x64 desktop to a Raspberry Pi microSD) works (even relatively fast) if the installing host is NixOS and sets »boot.binfmt.emulatedSystems« for the target system's ISA, or on another Linux with a matching »binfmt_misc« registration that has the preload (F) flag.
Once done, the disk(s) can be transferred -- or the image(s) be copied -- to the final system, and should boot there.
If the target host's hardware allows, a resulting image can also be passed to the »register-vbox« command to create a bootable VirtualBox instance for the current user, or to »run-qemu« to start it in a qemu VM.
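
The NixOS side of that non-VM cross-ISA requirement is a single option; a minimal sketch, assuming an x64 NixOS host installing for an aarch64 target:

```nix
{ # on the (NixOS) machine that performs the installation:
  boot.binfmt.emulatedSystems = [ "aarch64-linux" ]; # register qemu user-mode emulation via »binfmt_misc«
}
```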
@@ -77,8 +77,6 @@ declare-flag install-system vm-shared "" "When installing inside the VM, specifi
## Re-executes the current system's installation in a qemu VM.
function reexec-in-qemu {
if [[ @{pkgs.buildPackages.system} != "@{native.system}" ]] ; then echo "VM installation (implicit when not running as root) of a system built on a different ISA than the current host's is not supported (yet)." 1>&2 ; \return 1 ; fi
# (not sure whether this works for block devices)
ensure-disks "$1" 1 || return
qemu=( -m 3072 ) ; declare -A qemuDevs=( )
@@ -101,6 +99,10 @@ function reexec-in-qemu {
#local output=@{inputs.self}'#'nixosConfigurations.@{config.installer.outputName:?}.config.system.build.vmExec
local output=@{config.system.build.vmExec.drvPath!unsafeDiscardStringContext} # this is more accurate, but also means another system needs to get evaluated every time
if [[ @{pkgs.buildPackages.system} != "@{native.system}" ]] ; then
echo 'Performing the installation in a cross-ISA qemu system VM; this will be very, very slow (many hours) ...'
output=@{inputs.self}'#'nixosConfigurations.@{config.installer.outputName:?}.config.system.build.vmExec-@{pkgs.buildPackages.system}
fi
local scripts=$0 ; if [[ @{pkgs.system} != "@{native.system}" ]] ; then
scripts=$( build-lazy @{inputs.self}'#'apps.@{pkgs.system}.@{config.installer.outputName:?}.derivation ) || return
fi

View File

@@ -56,7 +56,7 @@ declare-flag run-qemu dry-run "" "Instead of running the (main) qemu (
declare-flag run-qemu efi "" "Treat the target system as an EFI system, even if not recognized as such automatically."
declare-flag run-qemu efi-vars "path" "For »--efi« systems, path to a file storing the EFI variables. The default is in »XDG_RUNTIME_DIR«, i.e. it does not persist across host reboots."
declare-flag run-qemu graphic "" "Open a graphical window even if the target system logs to serial and not (explicitly) to TTY1."
declare-flag run-qemu install "[always]" "If any of the guest system's disk images does not exist, perform the its installation before starting the VM. If set to »always«, always install before starting the VM. With this flag set, »diskImages« defaults to paths in »/tmp/."
declare-flag run-qemu install "[1|always]" "If any of the guest system's disk images does not exist, perform the its installation before starting the VM. If set to »always«, always install before starting the VM. With this flag set, »diskImages« defaults to paths in »/tmp/."
declare-flag run-qemu mem "num" "VM RAM in MiB (»qemu -m«)."
declare-flag run-qemu nat-fw "forwards" "Port forwards to the guest's NATed NIC. E.g: »--nat-fw=:8000-:8000,:8001-:8001,127.0.0.1:2022-:22«."
declare-flag run-qemu no-kvm "" "Do not try to use (or complain about the unavailability of) KVM."
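
For example (hypothetical host name, combining the flags above): »nix run .#example -- run-qemu --install=always --nat-fw=127.0.0.1:2022-:22« (re-)installs »example« to image files in »/tmp/« and forwards local port 2022 to the guest's SSH port 22.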

View File

@@ -53,7 +53,6 @@ function ensure-datasets {
if (( @{#config.setup.zfs.datasets[@]} == 0 )) ; then \return ; fi
local mnt=$1 ; while [[ "$mnt" == */ ]] ; do mnt=${mnt:0:(-1)} ; done # (remove any trailing slashes)
local filterExp=${2:-'^'}
local tmpMnt=$(mktemp -d) ; trap "rmdir $tmpMnt" EXIT
local zfs=@{native.zfs}/bin/zfs
local name ; while IFS= read -u3 -r -d $'\0' name ; do
@@ -119,7 +118,8 @@ function ensure-datasets {
( PATH=@{native.zfs}/bin ; ${_set_x:-:} ; zfs create "${zfsCreate[@]}" "${dataset[name]}" ) || exit
fi
if [[ ${props[canmount]} != off ]] ; then (
@{native.util-linux}/bin/mount -t zfs -o zfsutil "${dataset[name]}" $tmpMnt && trap "@{native.util-linux}/bin/umount '${dataset[name]}'" EXIT &&
tmpMnt=$(mktemp -d) ; trap "" EXIT && @{native.util-linux}/bin/mount -t zfs -o zfsutil "${dataset[name]}" $tmpMnt &&
trap "@{native.util-linux}/bin/umount '${dataset[name]}' ; rmdir $tmpMnt" EXIT &&
chmod 000 -- "$tmpMnt" && chown "${dataset[uid]}:${dataset[gid]}" -- "$tmpMnt" && chmod "${dataset[mode]}" -- "$tmpMnt"
) || exit ; fi
if [[ $explicitKeylocation && $explicitKeylocation != "${props[keylocation]:-}" ]] ; then
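
For reference, a hedged sketch of the kind of dataset declaration this function consumes -- the attribute names are inferred from the »dataset[...]«/»props[...]« lookups above; the authoritative schema lives in »modules/setup/zfs.nix.md«:

```nix
{
  setup.zfs.datasets."rpool/data" = {
    props = { canmount = "on"; mountpoint = "/data"; }; # »canmount != off« triggers the temp-mount & chown/chmod above
    uid = 1000; gid = 100; mode = "750"; # (hypothetical) ownership/permissions applied to the dataset's root directory
  };
}
```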

View File

@@ -43,8 +43,8 @@ in {
postInstall = lib.mkOption { description = desc "just before unmounting the new system" true; type = lib.types.lines; default = ""; };
};
outputName = lib.mkOption {
description = ''The name this system is exported as by its defining flake (as »nixosConfigurations.''${outputName}« and »apps.*-linux.''${outputName}«).'';
type = lib.types.nullOr lib.types.str; # This explicitly does not have a default, so that accessing it when it is not set creates an error.
description = ''The name this system is (/ should be) exported as by its defining flake (as »nixosConfigurations.''${outputName}« and »apps.*-linux.''${outputName}«).'';
type = lib.types.nullOr lib.types.str; default = null;
};
build.scripts = lib.mkOption {
type = lib.types.functionTo lib.types.str; internal = true; readOnly = true;
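
For context: with the new »null« default, a host opts into being exported -- and into the »REPO« wording in the help texts above -- by setting, e.g. (hypothetical host name, assuming the default »installer« prefix used as »config.installer.outputName« elsewhere in this repo):

```nix
{
  installer.outputName = "example"; # exported as »nixosConfigurations.example« and »apps.*-linux.example«
}
```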

View File

@@ -18,7 +18,7 @@ in {
options = { ${setup}.bootpart = {
enable = lib.mkEnableOption "configuration of a boot partition as GPT partition 1 on the »primary« disk and a FAT32 filesystem on it";
mountpoint = lib.mkOption { description = "Path at which to mount a vfat boot partition."; type = lib.types.str; default = "/boot"; };
createMbrPart = lib.mkOption { description = "Whether to create a hybrid MBR with (only) the boot partition listed as partition 1."; type = lib.types.bool; default = true; };
createMbr = lib.mkOption { description = "Whether to create a hybrid MBR with (only) the boot partition listed as partition 1."; type = lib.types.bool; default = true; };
size = lib.mkOption { description = "Size of the boot partition, should be *more* than 32M(iB)."; type = lib.types.str; default = "2G"; };
}; };
@@ -27,7 +27,7 @@ in {
${setup} = {
disks.partitions."boot-${hash}" = { type = lib.mkDefault "ef00"; size = lib.mkDefault cfg.size; index = lib.mkDefault 1; order = lib.mkDefault 1500; disk = lib.mkOptionDefault "primary"; }; # require it to be part1, and create it early
disks.devices = lib.mkIf cfg.createMbrPart { primary = { mbrParts = lib.mkDefault "1"; extraFDiskCommands = ''
disks.devices = lib.mkIf cfg.createMbr { primary = { mbrParts = lib.mkDefault "1"; extraFDiskCommands = ''
t;1;c # type ; part1 ; W95 FAT32 (LBA)
a;1 # active/boot ; part1
''; }; };
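
A hedged usage sketch of the renamed option (»setup« stands in for the module's configurable prefix):

```nix
{
  setup.bootpart.enable = true;
  setup.bootpart.size = "512M"; # overrides the »2G« default
  setup.bootpart.createMbr = false; # e.g. skip the hybrid MBR on pure-UEFI targets
}
```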

View File

@@ -121,7 +121,8 @@ in let module = {
} ];
}) (lib.mkIf (config.boot.resumeDevice != "") { ## Make resuming after hibernation safe with ZFS:
}) (lib.mkIf (false && (config.boot.resumeDevice != "")) { ## Make resuming after hibernation safe with ZFS:
# or not: https://github.com/NixOS/nixpkgs/commit/c70f0473153c63ad1cf6fbea19f290db6b15291f
boot.kernelParams = [ "resume=${config.boot.resumeDevice}" ];
assertions = [ { # Just making sure ...

View File

@@ -29,20 +29,20 @@ nix run .../nixos-config'#'nixosConfigurations.${hostName}.config.system.build.v
```nix
#*/# end of MarkDown, beginning of NixOS module:
dirname: inputs: { config, options, pkgs, lib, modulesPath, extendModules, ... }: let lib = inputs.self.lib.__internal__; in let
dirname: inputs: moduleArgs@{ config, options, pkgs, lib, modulesPath, extendModules, ... }: let lib = inputs.self.lib.__internal__; in let mainModule = (suffix: extraModule: let
prefix = inputs.config.prefix;
cfg = config.virtualisation.vmVariantExec;
cfg = config.virtualisation."vmVariant${suffix}";
in let hostModule = {
options = { virtualisation.vmVariantExec = lib.mkOption {
options = { virtualisation."vmVariant${suffix}" = lib.mkOption {
description = lib.mdDoc ''Machine configuration to be added to the system's qemu exec VM.'';
inherit (extendModules { modules = [ "${modulesPath}/virtualisation/qemu-vm.nix" vmModule ]; }) type;
inherit (extendModules { modules = [ "${modulesPath}/virtualisation/qemu-vm.nix" vmModule extraModule ]; }) type;
default = { }; visible = "shallow";
}; };
config = {
system.build.vmExec = (let hostPkgs = pkgs; in let
system.build."vm${suffix}" = (let hostPkgs = pkgs; in let
name = "run-${config.system.name}-vm-exec";
launch = "${cfg.system.build.vm}/bin/${cfg.system.build.vm.meta.mainProgram}";
pkgs = if cfg.virtualisation?host.pkgs then cfg.virtualisation.host.pkgs else hostPkgs;
@@ -158,7 +158,7 @@ in let hostModule = {
}) ({
virtualisation = if (builtins.substring 0 5 pkgs.lib.version) > "22.05" then { host.pkgs = pkgs.buildPackages; } else { };
virtualisation = if (builtins.substring 0 5 pkgs.lib.version) > "22.05" then { host.pkgs = lib.mkDefault pkgs.buildPackages; } else { };
}) ({
virtualisation.qemu.package = lib.mkIf (pkgs.buildPackages.system != pkgs.system) (cfg.virtualisation.host or { pkgs = pkgs.buildPackages; }).pkgs.qemu_full;
@@ -168,9 +168,13 @@ in let hostModule = {
services.qemuGuest.enable = lib.mkForce false;
# tag this to make clearer what's what
system.nixos.tags = [ "vmExec" ];
system.build.isVmExec = true;
system.nixos.tags = [ "vm${suffix}" ];
system.build."isVm${suffix}" = true;
}) ];
}; in hostModule
}; in hostModule); in { imports = [ (mainModule "Exec" { }) ] ++ (map (system: (
mainModule "Exec-${system}" {
virtualisation.host.pkgs = import (moduleArgs.inputs.nixpkgs or inputs.nixpkgs).outPath { inherit (pkgs) overlays config; inherit system; };
}
)) [ "aarch64-linux" "x86_64-linux" ]); }