From 1a47c3fa7546e180fc74756236151763dc6e6d7d Mon Sep 17 00:00:00 2001 From: Niklas Gollenstede Date: Tue, 27 Jun 2023 12:53:04 +0200 Subject: [PATCH] fix installation cleanup, disable ZFS hibernation (workaround), enable (very very slow) cross-ISA VM installation --- README.md | 10 +++++----- flake.lock | Bin 1457 -> 1457 bytes hosts/example.nix.md | 2 -- lib/nixos.nix | 22 ++++++++++++---------- lib/setup-scripts/install.sh | 10 ++++++---- lib/setup-scripts/maintenance.sh | 2 +- lib/setup-scripts/zfs.sh | 4 ++-- modules/installer.nix.md | 4 ++-- modules/setup/bootpart.nix.md | 4 ++-- modules/setup/zfs.nix.md | 3 ++- modules/vm-exec.nix.md | 22 +++++++++++++--------- 11 files changed, 45 insertions(+), 38 deletions(-) diff --git a/README.md b/README.md index 7ccaa37..0202888 100644 --- a/README.md +++ b/README.md @@ -2,15 +2,15 @@ # Automated NixOS CLI Installer NixOS is traditionally either installed by creating and populating filesystems [by hand](https://nixos.org/manual/nixos/stable/index.html#sec-installation-manual-partitioning), or by scripts that each only support one or a limited set of filesystem setups (the graphical installer falls somewhere between the two). -The mounted filesystems and some drivers, etc, would then be captured in a `hardware-configuration.nix`. +The mounted filesystems and some hardware aspects are then captured in a `hardware-configuration.nix`. +This completely contradicts both the declarative nature and the flexibility of Nix(OS). -In contrast to that, this flake implements a very flexible, yet fully automated, NixOS installer (framework). +In contrast to that, this flake implements a very flexible, declaratively driven and fully automated NixOS installer (framework). Hosts can define any number of [disks and partitions](./modules/setup/disks.nix.md) on them. 
If the `fileSystems` use `partlabel`s to identify their devices, then they can be associated with their partitions even before they are formatted -- and can thus automatically be formatted during the installation. ZFS [pools and datasets](./modules/setup/zfs.nix.md), and LUKS and ZFS [encryption](./modules/setup/keystore.nix.md) are also supported. For setups with ephemeral `/` (root filesystem), [`modules/setup/temproot.nix.md`](./modules/setup/temproot.nix.md) provides various preconfigured setups. -This, together with convenient defaults for most of the options, means that -simple setups (see the `minimal` [example](./hosts/example.nix.md)) only require a handful of config lines, while complex multi-disk setups (see the `raidz` [example](./hosts/example.nix.md)) are just as possible. +This, together with convenient defaults for most of the options, means that simple setups (see the `minimal` [example](./hosts/example.nix.md)) only require a handful of config lines, while complex multi-disk setups (see the `raidz` [example](./hosts/example.nix.md)) are just as possible. A set of composable [`setup-scripts`](./lib/setup-scripts/) can then [automatically](https://github.com/NiklasGollenstede/nix-functions/blob/master/lib/scripts.nix#substituteImplicit) grab this information and perform a completely automated installation. The only thing that the scripts will interactively prompt for are external secrets (e.g., passwords), iff required by the new host. 
@@ -18,7 +18,7 @@ When using [`mkSystemsFlake`](./lib/nixos.nix#mkSystemsFlake), the installation ```bash nix run .'#'hostname -- install-system /path/to/disk ``` -Help output with information on available commands and flags is available via: +Help output with information on available commands and flags is [available here](https://github.com/NiklasGollenstede/nixos-installer/wiki/−−help-Output) or via: ```bash nix run .'#'hostname -- --help ``` diff --git a/flake.lock b/flake.lock index caf4747d4aa76324851b2bef541cc3df1b6529b6..9c56b86435f437ef250148f3b66359a21662e356 100644 GIT binary patch delta 113 zcmdnUy^(uE4>P;Dxsj!*f#KxajI!FfAwJo@sR8*Oq2XmA&S}PerQS)Yk*=wU`X$Ag z*}P-&xv`n4spaI`jI!F1m7&35`TpLXE{?^?ArQI;tI zer|@AY2Mk|CdsCmMah$IGka>Jq@«). + $ nix run /etc/nixos/#$(hostname) + Now run any of the »COMMAND«s above, or inspect/use the exported Nix variables (»declare -p config_«). Run a root session in the context of a different host (useful if Nix is not installed for root on the current host): - $ nix run .#other-host -- sudo + $ nix run .#other-host -- sudo ''; - exported = system.options.${installer}.outputName.isDefined; tools = lib.unique (map (p: p.outPath) (lib.filter lib.isDerivation pkgs.stdenv.allowedRequisites)); esc = lib.escapeShellArg; in pkgs.writeShellScript "scripts-${name}" '' diff --git a/lib/setup-scripts/install.sh b/lib/setup-scripts/install.sh index fff3488..001b3a4 100644 --- a/lib/setup-scripts/install.sh +++ b/lib/setup-scripts/install.sh @@ -15,8 +15,8 @@ Since the installation needs to format and mount (image files as) disks, it need * be run with the »sudo« argument (see »--help« output; this runs »nix« commands as the original user, and the rest as root), * or automatically perform the installation in a qemu VM (see »--vm« flag). 
-Installing inside the VM is safer (will definitely only write wi the supplied »diskPaths«), more secure (executes the VM), and does not require privilege elevation, but does currently only work for the same ISA, is significantly slower (painfully slow without KVM), and may break custom »*Commands« hooks (esp. those passing in secrets). -Without VM, installations across different ISAs (e.g. from an x64 desktop to a Raspberry Pi microSD) works if the installing host is NixOS and sets »boot.binfmt.emulatedSystems« for the target systems ISA, or on other Linux with a matching »binfmt_misc« registration with the preload (F) flag. +Installing inside the VM is safer (will definitely only write to the supplied »diskPaths«), more secure (executes everything inside the VM), and does not require privilege elevation, but is significantly slower (painfully slow without KVM), and may break custom »*Commands« hooks (esp. those passing in secrets). Across ISAs, the VM installation is even slower, taking many hours for even a simple system. +Without VM, installations across different ISAs (e.g. from an x64 desktop to a Raspberry Pi microSD) work (even relatively fast) if the installing host is NixOS and sets »boot.binfmt.emulatedSystems« for the target system's ISA, or on other Linux with a matching »binfmt_misc« registration with the preload (F) flag. Once done, the disk(s) can be transferred -- or the image(s) be copied -- to the final system, and should boot there. If the target host's hardware target allows, a resulting image can also be passed to the »register-vbox« command to create a bootable VirtualBox instance for the current user, or to »run-qemu« to start it in a qemu VM. @@ -77,8 +77,6 @@ declare-flag install-system vm-shared "" "When installing inside the VM, specifi ## Re-executes the current system's installation in a qemu VM. 
function reexec-in-qemu { - if [[ @{pkgs.buildPackages.system} != "@{native.system}" ]] ; then echo "VM installation (implicit when not running as root) of a system built on a different ISA than the current host's is not supported (yet)." 1>&2 ; \return 1 ; fi - # (not sure whether this works for block devices) ensure-disks "$1" 1 || return qemu=( -m 3072 ) ; declare -A qemuDevs=( ) @@ -101,6 +99,10 @@ function reexec-in-qemu { #local output=@{inputs.self}'#'nixosConfigurations.@{config.installer.outputName:?}.config.system.build.vmExec local output=@{config.system.build.vmExec.drvPath!unsafeDiscardStringContext} # this is more accurate, but also means another system needs to get evaluated every time + if [[ @{pkgs.buildPackages.system} != "@{native.system}" ]] ; then + echo 'Performing the installation in a cross-ISA qemu system VM; this will be very, very slow (many hours) ...' + output=@{inputs.self}'#'nixosConfigurations.@{config.installer.outputName:?}.config.system.build.vmExec-@{pkgs.buildPackages.system} + fi local scripts=$0 ; if [[ @{pkgs.system} != "@{native.system}" ]] ; then scripts=$( build-lazy @{inputs.self}'#'apps.@{pkgs.system}.@{config.installer.outputName:?}.derivation ) || return fi diff --git a/lib/setup-scripts/maintenance.sh b/lib/setup-scripts/maintenance.sh index 263c580..357f3c3 100644 --- a/lib/setup-scripts/maintenance.sh +++ b/lib/setup-scripts/maintenance.sh @@ -56,7 +56,7 @@ declare-flag run-qemu dry-run "" "Instead of running the (main) qemu ( declare-flag run-qemu efi "" "Treat the target system as EFI system, even if not recognized as such automatically." declare-flag run-qemu efi-vars "path" "For »--efi« systems, path to a file storing the EFI variables. The default is in »XDG_RUNTIME_DIR«, i.e. it does not persist across host reboots." declare-flag run-qemu graphic "" "Open a graphical window even of the target system logs to serial and not (explicitly) TTY1." 
-declare-flag run-qemu install "[always]" "If any of the guest system's disk images does not exist, perform the its installation before starting the VM. If set to »always«, always install before starting the VM. With this flag set, »diskImages« defaults to paths in »/tmp/." +declare-flag run-qemu install "[1|always]" "If any of the guest system's disk images does not exist, perform its installation before starting the VM. If set to »always«, always install before starting the VM. With this flag set, »diskImages« defaults to paths in »/tmp/." declare-flag run-qemu mem "num" "VM RAM in MiB (»qemu -m«)." declare-flag run-qemu nat-fw "forwards" "Port forwards to the guest's NATed NIC. E.g: »--nat-fw=:8000-:8000,:8001-:8001,127.0.0.1:2022-:22«." declare-flag run-qemu no-kvm "" "Do not rey to use (or complain about the unavailability of) KVM." diff --git a/lib/setup-scripts/zfs.sh b/lib/setup-scripts/zfs.sh index ea94a0c..1205d8a 100644 --- a/lib/setup-scripts/zfs.sh +++ b/lib/setup-scripts/zfs.sh @@ -53,7 +53,6 @@ function ensure-datasets { if (( @{#config.setup.zfs.datasets[@]} == 0 )) ; then \return ; fi local mnt=$1 ; while [[ "$mnt" == */ ]] ; do mnt=${mnt:0:(-1)} ; done # (remove any tailing slashes) local filterExp=${2:-'^'} - local tmpMnt=$(mktemp -d) ; trap "rmdir $tmpMnt" EXIT local zfs=@{native.zfs}/bin/zfs local name ; while IFS= read -u3 -r -d $'\0' name ; do @@ -119,7 +118,8 @@ function ensure-datasets { ( PATH=@{native.zfs}/bin ; ${_set_x:-:} ; zfs create "${zfsCreate[@]}" "${dataset[name]}" ) || exit fi if [[ ${props[canmount]} != off ]] ; then ( - @{native.util-linux}/bin/mount -t zfs -o zfsutil "${dataset[name]}" $tmpMnt && trap "@{native.util-linux}/bin/umount '${dataset[name]}'" EXIT && + tmpMnt=$(mktemp -d) ; trap "" EXIT && @{native.util-linux}/bin/mount -t zfs -o zfsutil "${dataset[name]}" $tmpMnt && + trap "@{native.util-linux}/bin/umount '${dataset[name]}' ; rmdir $tmpMnt" EXIT && chmod 000 -- "$tmpMnt" && chown 
"${dataset[uid]}:${dataset[gid]}" -- "$tmpMnt" && chmod "${dataset[mode]}" -- "$tmpMnt" ) || exit ; fi if [[ $explicitKeylocation && $explicitKeylocation != "${props[keylocation]:-}" ]] ; then diff --git a/modules/installer.nix.md b/modules/installer.nix.md index 4910f80..7361ae8 100644 --- a/modules/installer.nix.md +++ b/modules/installer.nix.md @@ -43,8 +43,8 @@ in { postInstall = lib.mkOption { description = desc "just before unmounting the new system" true; type = lib.types.lines; default = ""; }; }; outputName = lib.mkOption { - description = ''The name this system is exported as by its defining flake (as »nixosConfigurations.''${outputName}« and »apps.*-linux.''${outputName}«).''; - type = lib.types.nullOr lib.types.str; # This explicitly does not have a default, so that accessing it when it is not set creates an error. + description = ''The name this system is (/ should be) exported as by its defining flake (as »nixosConfigurations.''${outputName}« and »apps.*-linux.''${outputName}«).''; + type = lib.types.nullOr lib.types.str; default = null; }; build.scripts = lib.mkOption { type = lib.types.functionTo lib.types.str; internal = true; readOnly = true; diff --git a/modules/setup/bootpart.nix.md b/modules/setup/bootpart.nix.md index aedd161..98945f1 100644 --- a/modules/setup/bootpart.nix.md +++ b/modules/setup/bootpart.nix.md @@ -18,7 +18,7 @@ in { options = { ${setup}.bootpart = { enable = lib.mkEnableOption "configuration of a boot partition as GPT partition 1 on the »primary« disk and a FAT32 filesystem on it"; mountpoint = lib.mkOption { description = "Path at which to mount a vfat boot partition."; type = lib.types.str; default = "/boot"; }; - createMbrPart = lib.mkOption { description = "Whether to create a hybrid MBR with (only) the boot partition listed as partition 1."; type = lib.types.bool; default = true; }; + createMbr = lib.mkOption { description = "Whether to create a hybrid MBR with (only) the boot partition listed as partition 1."; type = 
lib.types.bool; default = true; }; size = lib.mkOption { description = "Size of the boot partition, should be *more* than 32M(iB)."; type = lib.types.str; default = "2G"; }; }; }; @@ -27,7 +27,7 @@ in { ${setup} = { disks.partitions."boot-${hash}" = { type = lib.mkDefault "ef00"; size = lib.mkDefault cfg.size; index = lib.mkDefault 1; order = lib.mkDefault 1500; disk = lib.mkOptionDefault "primary"; }; # require it to be part1, and create it early - disks.devices = lib.mkIf cfg.createMbrPart { primary = { mbrParts = lib.mkDefault "1"; extraFDiskCommands = '' + disks.devices = lib.mkIf cfg.createMbr { primary = { mbrParts = lib.mkDefault "1"; extraFDiskCommands = '' t;1;c # type ; part1 ; W95 FAT32 (LBA) a;1 # active/boot ; part1 ''; }; }; diff --git a/modules/setup/zfs.nix.md b/modules/setup/zfs.nix.md index 7eafb4e..2c165c7 100644 --- a/modules/setup/zfs.nix.md +++ b/modules/setup/zfs.nix.md @@ -121,7 +121,8 @@ in let module = { } ]; - }) (lib.mkIf (config.boot.resumeDevice != "") { ## Make resuming after hibernation safe with ZFS: + }) (lib.mkIf (false && (config.boot.resumeDevice != "")) { ## Make resuming after hibernation safe with ZFS: + # or not: https://github.com/NixOS/nixpkgs/commit/c70f0473153c63ad1cf6fbea19f290db6b15291f boot.kernelParams = [ "resume=${config.boot.resumeDevice}" ]; assertions = [ { # Just making sure ... diff --git a/modules/vm-exec.nix.md b/modules/vm-exec.nix.md index 4b220dc..ffa442a 100644 --- a/modules/vm-exec.nix.md +++ b/modules/vm-exec.nix.md @@ -29,20 +29,20 @@ nix run .../nixos-config'#'nixosConfigurations.${hostName}.config.system.build.v ```nix #*/# end of MarkDown, beginning of NixOS module: -dirname: inputs: { config, options, pkgs, lib, modulesPath, extendModules, ... }: let lib = inputs.self.lib.__internal__; in let +dirname: inputs: moduleArgs@{ config, options, pkgs, lib, modulesPath, extendModules, ... 
}: let lib = inputs.self.lib.__internal__; in let mainModule = (suffix: extraModule: let prefix = inputs.config.prefix; - cfg = config.virtualisation.vmVariantExec; + cfg = config.virtualisation."vmVariant${suffix}"; in let hostModule = { - options = { virtualisation.vmVariantExec = lib.mkOption { + options = { virtualisation."vmVariant${suffix}" = lib.mkOption { description = lib.mdDoc ''Machine configuration to be added to the system's qemu exec VM.''; - inherit (extendModules { modules = [ "${modulesPath}/virtualisation/qemu-vm.nix" vmModule ]; }) type; + inherit (extendModules { modules = [ "${modulesPath}/virtualisation/qemu-vm.nix" vmModule extraModule ]; }) type; default = { }; visible = "shallow"; }; }; config = { - system.build.vmExec = (let hostPkgs = pkgs; in let + system.build."vm${suffix}" = (let hostPkgs = pkgs; in let name = "run-${config.system.name}-vm-exec"; launch = "${cfg.system.build.vm}/bin/${cfg.system.build.vm.meta.mainProgram}"; pkgs = if cfg.virtualisation?host.pkgs then cfg.virtualisation.host.pkgs else hostPkgs; @@ -158,7 +158,7 @@ in let hostModule = { }) ({ - virtualisation = if (builtins.substring 0 5 pkgs.lib.version) > "22.05" then { host.pkgs = pkgs.buildPackages; } else { }; + virtualisation = if (builtins.substring 0 5 pkgs.lib.version) > "22.05" then { host.pkgs = lib.mkDefault pkgs.buildPackages; } else { }; }) ({ virtualisation.qemu.package = lib.mkIf (pkgs.buildPackages.system != pkgs.system) (cfg.virtualisation.host or { pkgs = pkgs.buildPackages; }).pkgs.qemu_full; @@ -168,9 +168,13 @@ in let hostModule = { services.qemuGuest.enable = lib.mkForce false; # tag this to make clearer what's what - system.nixos.tags = [ "vmExec" ]; - system.build.isVmExec = true; + system.nixos.tags = [ "vm${suffix}" ]; + system.build."isVm${suffix}" = true; }) ]; -}; in hostModule +}; in hostModule); in { imports = [ (mainModule "Exec" { }) ] ++ (map (system: ( + mainModule "Exec-${system}" { + virtualisation.host.pkgs = import 
(moduleArgs.inputs.nixpkgs or inputs.nixpkgs).outPath { inherit (pkgs) overlays config; inherit system; }; + } +)) [ "aarch64-linux" "x86_64-linux" ]); }