From 93289c7c094c8076d30804f20ffeeed4a1f9f5ec Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 09:28:40 +0200 Subject: [PATCH 01/90] CI: Move CI code into a single script --- .travis.yml | 8 +------- ci/run_shellcheck.sh | 15 +++++++++++++++ 2 files changed, 16 insertions(+), 7 deletions(-) create mode 100755 ci/run_shellcheck.sh diff --git a/.travis.yml b/.travis.yml index 1194247..27cee52 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,10 +5,4 @@ cache: directories: - "/opt/shellcheck" script: -- mkdir -p /opt/shellcheck -- "[[ ! -e /opt/shellcheck/shellcheck ]] && wget -qO- https://github.com/koalaman/shellcheck/releases/download/stable/shellcheck-stable.linux.x86_64.tar.xz - | tar -xJv -O shellcheck-stable/shellcheck | sudo tee /opt/shellcheck/shellcheck - > /dev/null || true" -- sudo chmod +x /opt/shellcheck/shellcheck -- "/opt/shellcheck/shellcheck --version" -- "/opt/shellcheck/shellcheck $(grep -lzP '^#!/bin/\\w+sh' -r .)" +- ci/run_shellcheck diff --git a/ci/run_shellcheck.sh b/ci/run_shellcheck.sh new file mode 100755 index 0000000..0b32cbc --- /dev/null +++ b/ci/run_shellcheck.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +set -o pipefail +set -o errexit +set -o nounset +set -o errtrace +shopt -s inherit_errexit + +ci/run_shellcheck mkdir -p /opt/shellcheck +[[ ! -e /opt/shellcheck/shellcheck ]] && wget -qO- https://github.com/koalaman/shellcheck/releases/download/stable/shellcheck-stable.linux.x86_64.tar.xz + | tar -xJv -O shellcheck-stable/shellcheck | sudo tee /opt/shellcheck/shellcheck + > /dev/null || true +sudo chmod +x /opt/shellcheck/shellcheck +/opt/shellcheck/shellcheck --version +/opt/shellcheck/shellcheck $(grep -lzP '^#!/bin/\\w+sh' -r .) 
From 45def315942bd51c148e20035c601dd1253cd53c Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 09:30:46 +0200 Subject: [PATCH 02/90] Improvements to CI script --- ci/run_shellcheck.sh | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/ci/run_shellcheck.sh b/ci/run_shellcheck.sh index 0b32cbc..cab0bca 100755 --- a/ci/run_shellcheck.sh +++ b/ci/run_shellcheck.sh @@ -6,10 +6,16 @@ set -o nounset set -o errtrace shopt -s inherit_errexit -ci/run_shellcheck mkdir -p /opt/shellcheck -[[ ! -e /opt/shellcheck/shellcheck ]] && wget -qO- https://github.com/koalaman/shellcheck/releases/download/stable/shellcheck-stable.linux.x86_64.tar.xz - | tar -xJv -O shellcheck-stable/shellcheck | sudo tee /opt/shellcheck/shellcheck - > /dev/null || true -sudo chmod +x /opt/shellcheck/shellcheck +if [[ ! -e /opt/shellcheck/shellcheck ]]; then + mkdir -p /opt/shellcheck + + wget -qO- https://github.com/koalaman/shellcheck/releases/download/stable/shellcheck-stable.linux.x86_64.tar.xz \ + | tar xJv -O shellcheck-stable/shellcheck \ + > /opt/shellcheck/shellcheck + + chmod +x /opt/shellcheck/shellcheck +fi + /opt/shellcheck/shellcheck --version -/opt/shellcheck/shellcheck $(grep -lzP '^#!/bin/\\w+sh' -r .) 
+ +grep -lZP '^#!/bin/\w+sh' -R | xargs -0 /opt/shellcheck/shellcheck From 7456660ae42faf84331481264d049ca9f1bfe101 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 09:38:43 +0200 Subject: [PATCH 03/90] Replace Travis with GitHub actions --- .github/workflows/ci.yml | 11 +++++++++++ .travis.yml | 8 -------- 2 files changed, 11 insertions(+), 8 deletions(-) create mode 100644 .github/workflows/ci.yml delete mode 100644 .travis.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..e19289c --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,11 @@ +name: CI + +on: pull_request + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Run shellcheck + run: ci/run_shellcheck.sh diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 27cee52..0000000 --- a/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -dist: bionic -language: bash -cache: - directories: - - "/opt/shellcheck" -script: -- ci/run_shellcheck From b83243d3f290b8b9edb0ed8cf41edc9f317a6aa2 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 09:56:14 +0200 Subject: [PATCH 04/90] CI: Always download the latest Shellcheck version The previous logic was also semantically incorrect, as the file was never cached, therefore, the `if` was redundant. --- ci/run_shellcheck.sh | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/ci/run_shellcheck.sh b/ci/run_shellcheck.sh index cab0bca..a54d11b 100755 --- a/ci/run_shellcheck.sh +++ b/ci/run_shellcheck.sh @@ -6,15 +6,19 @@ set -o nounset set -o errtrace shopt -s inherit_errexit -if [[ ! -e /opt/shellcheck/shellcheck ]]; then - mkdir -p /opt/shellcheck +# Always download the latest version: +# +# - it's fast and stable enough not to worry about it; +# - the workflow is basically single-person, so there's no risk of a new dev encountering an error found +# by a new shellcheck version. 
- wget -qO- https://github.com/koalaman/shellcheck/releases/download/stable/shellcheck-stable.linux.x86_64.tar.xz \ - | tar xJv -O shellcheck-stable/shellcheck \ - > /opt/shellcheck/shellcheck +mkdir -p /opt/shellcheck - chmod +x /opt/shellcheck/shellcheck -fi +wget -qO- https://github.com/koalaman/shellcheck/releases/download/stable/shellcheck-stable.linux.x86_64.tar.xz \ + | tar xJv -O shellcheck-stable/shellcheck \ + > /opt/shellcheck/shellcheck + +chmod +x /opt/shellcheck/shellcheck /opt/shellcheck/shellcheck --version From 6d5a9ccbafb6fae2e80f84fc0066f16d07428f1d Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 12:06:04 +0200 Subject: [PATCH 05/90] Remove references to obsolete flag ZFS_ENCRYPT_RPOOL --- README.md | 1 - install-zfs.sh | 1 - 2 files changed, 2 deletions(-) diff --git a/README.md b/README.md index 77cf852..6654515 100644 --- a/README.md +++ b/README.md @@ -97,7 +97,6 @@ The procedure can be entirely automated via environment variables: - ZFS_OS_INSTALLATION_SCRIPT : path of a script to execute instead of Ubiquity (see dedicated section below) - ZFS_SELECTED_DISKS : full path of the devices to create the pool on, comma-separated -- ZFS_ENCRYPT_RPOOL : set 1 to encrypt the pool - ZFS_PASSPHRASE - ZFS_BPOOL_NAME - ZFS_RPOOL_NAME diff --git a/install-zfs.sh b/install-zfs.sh index 48bb080..20e8367 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -171,7 +171,6 @@ The procedure can be entirely automated via environment variables: - ZFS_OS_INSTALLATION_SCRIPT : path of a script to execute instead of Ubiquity (see dedicated section below) - ZFS_SELECTED_DISKS : full path of the devices to create the pool on, comma-separated - ZFS_BOOT_PARTITION_SIZE : integer number with `M` or `G` suffix (defaults to `'${c_default_boot_partition_size}M'`) -- ZFS_ENCRYPT_RPOOL : set 1 to encrypt the pool - ZFS_PASSPHRASE : set non-blank to encrypt the pool, and blank not to. if unset, it will be asked. 
- ZFS_DEBIAN_ROOT_PASSWORD - ZFS_BPOOL_NAME From 1f21b17b416ecfd41bc3c5cac3c7c2a351154e9f Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 12:12:56 +0200 Subject: [PATCH 06/90] Cosmetic cleanup: Convert apt `-y` to `--yes` --- install-zfs.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 20e8367..d808caa 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -721,7 +721,7 @@ function install_host_packages_elementary { if [[ ${ZFS_SKIP_LIVE_ZFS_MODULE_INSTALL:-} != "1" ]]; then apt update - apt install -y software-properties-common + apt install --yes software-properties-common fi install_host_packages @@ -752,7 +752,7 @@ function install_host_packages_UbuntuServer { # this will be a no-op. # apt update - apt install -y "linux-headers-$(uname -r)" + apt install --yes "linux-headers-$(uname -r)" install_host_packages else @@ -1156,7 +1156,7 @@ APT' function install_jail_zfs_packages_elementary { print_step_info_header - chroot_execute "apt install -y software-properties-common" + chroot_execute "apt install --yes software-properties-common" install_jail_zfs_packages } From f2043be75c0f6c6a3ab1a07540034e6426788380 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 12:50:06 +0200 Subject: [PATCH 07/90] Cosmetic: Clean variables subdivision Two groups were actually the same concept. --- install-zfs.sh | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index d808caa..443b748 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -12,11 +12,6 @@ set -o nounset # VARIABLES/CONSTANTS ########################################################## -# Variables set by the script - -v_linux_distribution= # Debian, Ubuntu, ... WATCH OUT: not necessarily from `lsb_release` (ie. 
UbuntuServer) -v_zfs_08_in_repository= # 1=true, false otherwise (applies only to Ubuntu-based) - # Variables set (indirectly) by the user # # The passphrase has a special workflow - it's sent to a named pipe (see create_passphrase_named_pipe()). @@ -38,6 +33,8 @@ v_free_tail_space= # integer # Variables set during execution +v_linux_distribution= # Debian, Ubuntu, ... WATCH OUT: not necessarily from `lsb_release` (ie. UbuntuServer) +v_zfs_08_in_repository= # 1=true, false otherwise (applies only to Ubuntu-based) v_temp_volume_device= # /dev/zdN; scope: setup_partitions -> sync_os_temp_installation_dir_to_rpool v_suitable_disks=() # (/dev/by-id/disk_id, ...); scope: find_suitable_disks -> select_disk From bf1c5d67accb23c838f4ee839820a43e5532629b Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 12:40:21 +0200 Subject: [PATCH 08/90] Simplification in ZFS package version logic --- install-zfs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install-zfs.sh b/install-zfs.sh index 443b748..0d71e2f 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -369,7 +369,7 @@ function find_zfs_package_requirements { apt update local zfs_package_version - zfs_package_version=$(apt show zfsutils-linux 2> /dev/null | perl -ne 'print $1 if /^Version: (\d+\.\d+)\./') + zfs_package_version=$(apt show zfsutils-linux 2> /dev/null | perl -ne 'print /^Version: (\d+\.\d+)/') if [[ -n $zfs_package_version ]]; then if [[ ! $zfs_package_version =~ ^0\. ]]; then From 9cfdb59a28940ca3f6eeafe71645242a74b61f49 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 12:42:40 +0200 Subject: [PATCH 09/90] Use more specific and generic way of checking the ZFS package version This is actually required for allowing OpenZFS v2.0. 
--- install-zfs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install-zfs.sh b/install-zfs.sh index 0d71e2f..b9e171b 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -375,7 +375,7 @@ function find_zfs_package_requirements { if [[ ! $zfs_package_version =~ ^0\. ]]; then >&2 echo "Unsupported ZFS version!: $zfs_package_version" exit 1 - elif (( $(echo "$zfs_package_version" | cut -d. -f2) >= 8 )); then + elif dpkg --compare-versions "$zfs_package_version" ge 0.8; then v_zfs_08_in_repository=1 fi fi From faa1c3998ff9b21758bdf76b146169422c4aa54d Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 12:45:53 +0200 Subject: [PATCH 10/90] Remove check for unsupported ZFS versions (and simplify logic) 2.0 is going to be supported in this branch, so non-0.x versions must not raise an error. --- install-zfs.sh | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index b9e171b..db28e31 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -371,13 +371,10 @@ function find_zfs_package_requirements { local zfs_package_version zfs_package_version=$(apt show zfsutils-linux 2> /dev/null | perl -ne 'print /^Version: (\d+\.\d+)/') - if [[ -n $zfs_package_version ]]; then - if [[ ! $zfs_package_version =~ ^0\. ]]; then - >&2 echo "Unsupported ZFS version!: $zfs_package_version" - exit 1 - elif dpkg --compare-versions "$zfs_package_version" ge 0.8; then - v_zfs_08_in_repository=1 - fi + # Test returns false if $zfs_package_version is blank. 
+ # + if dpkg --compare-versions "$zfs_package_version" ge 0.8; then + v_zfs_08_in_repository=1 fi } From a9dd106e08686bd006797e1360caddae2f71f281 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 12:56:05 +0200 Subject: [PATCH 11/90] Invert ZFS repository logic ("version 0.8 in repo" -> "PPA required") --- install-zfs.sh | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index db28e31..76a68e3 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -34,7 +34,7 @@ v_free_tail_space= # integer # Variables set during execution v_linux_distribution= # Debian, Ubuntu, ... WATCH OUT: not necessarily from `lsb_release` (ie. UbuntuServer) -v_zfs_08_in_repository= # 1=true, false otherwise (applies only to Ubuntu-based) +v_use_ppa= # 1=true, false otherwise (applies only to Ubuntu-based). v_temp_volume_device= # /dev/zdN; scope: setup_partitions -> sync_os_temp_installation_dir_to_rpool v_suitable_disks=() # (/dev/by-id/disk_id, ...); scope: find_suitable_disks -> select_disk @@ -363,7 +363,7 @@ If you think this is a bug, please open an issue on https://github.com/saveriomi # Fortunately, with Debian-specific logic isolated, we need conditionals based only on #2 - see # install_host_packages() and install_host_packages_UbuntuServer(). # -function find_zfs_package_requirements { +function set_zfs_ppa_requirement { print_step_info_header apt update @@ -371,14 +371,14 @@ function find_zfs_package_requirements { local zfs_package_version zfs_package_version=$(apt show zfsutils-linux 2> /dev/null | perl -ne 'print /^Version: (\d+\.\d+)/') - # Test returns false if $zfs_package_version is blank. + # Test returns true if $zfs_package_version is blank. 
# - if dpkg --compare-versions "$zfs_package_version" ge 0.8; then - v_zfs_08_in_repository=1 + if dpkg --compare-versions "$zfs_package_version" lt 0.8; then + v_use_ppa=1 fi } -function find_zfs_package_requirements_Debian { +function set_zfs_ppa_requirement_Debian { # Only update apt; in this case, ZFS packages are handled in a specific way. apt update @@ -387,12 +387,12 @@ function find_zfs_package_requirements_Debian { # Mint 20 has the CDROM repository enabled, but apt fails when updating due to it (or possibly due # to it being incorrectly setup). # -function find_zfs_package_requirements_Linuxmint { +function set_zfs_ppa_requirement_Linuxmint { print_step_info_header perl -i -pe 's/^(deb cdrom)/# $1/' /etc/apt/sources.list - find_zfs_package_requirements + set_zfs_ppa_requirement } # By using a FIFO, we avoid having to hide statements like `echo $v_passphrase | zpoool create ...` @@ -657,7 +657,7 @@ The option `-O devices=off` is already set, and must not be specified.' function install_host_packages { print_step_info_header - if [[ $v_zfs_08_in_repository != "1" ]]; then + if [[ $v_use_ppa == "1" ]]; then if [[ ${ZFS_SKIP_LIVE_ZFS_MODULE_INSTALL:-} != "1" ]]; then add-apt-repository --yes ppa:jonathonf/zfs apt update @@ -724,7 +724,7 @@ function install_host_packages_elementary { function install_host_packages_UbuntuServer { print_step_info_header - if [[ $v_zfs_08_in_repository == "1" ]]; then + if [[ $v_use_ppa != "1" ]]; then apt install --yes zfsutils-linux efibootmgr zfs --version > "$c_zfs_module_version_log" 2>&1 @@ -1103,7 +1103,7 @@ function prepare_jail { function install_jail_zfs_packages { print_step_info_header - if [[ $v_zfs_08_in_repository != "1" ]]; then + if [[ $v_use_ppa == "1" ]]; then chroot_execute "add-apt-repository --yes ppa:jonathonf/zfs" chroot_execute "apt update" @@ -1158,7 +1158,7 @@ function install_jail_zfs_packages_elementary { function install_jail_zfs_packages_UbuntuServer { print_step_info_header - if [[ 
$v_zfs_08_in_repository == "1" ]]; then + if [[ $v_use_ppa != "1" ]]; then chroot_execute "apt install --yes zfsutils-linux zfs-initramfs grub-efi-amd64-signed shim-signed" else install_jail_zfs_packages @@ -1421,7 +1421,7 @@ store_running_processes check_prerequisites display_intro_banner find_suitable_disks -distro_dependent_invoke "find_zfs_package_requirements" +distro_dependent_invoke "set_zfs_ppa_requirement" create_passphrase_named_pipe select_disks From 55acc65a13eb8b26421e4e0bbb32a023f16f8a65 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 13:09:53 +0200 Subject: [PATCH 12/90] Add ZFS_USE_PPA option --- install-zfs.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/install-zfs.sh b/install-zfs.sh index 76a68e3..9e42cf5 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -166,6 +166,7 @@ This script needs to be run with admin permissions, from a Live CD. The procedure can be entirely automated via environment variables: - ZFS_OS_INSTALLATION_SCRIPT : path of a script to execute instead of Ubiquity (see dedicated section below) +- ZFS_USE_PPA : set to 1 to use packages from `ppa:jonathonf/zfs` (automatically set to true if the O/S version doesn'\''t ship at least v0.8) - ZFS_SELECTED_DISKS : full path of the devices to create the pool on, comma-separated - ZFS_BOOT_PARTITION_SIZE : integer number with `M` or `G` suffix (defaults to `'${c_default_boot_partition_size}M'`) - ZFS_PASSPHRASE : set non-blank to encrypt the pool, and blank not to. if unset, it will be asked. @@ -373,7 +374,7 @@ function set_zfs_ppa_requirement { # Test returns true if $zfs_package_version is blank. 
# - if dpkg --compare-versions "$zfs_package_version" lt 0.8; then + if [[ ${ZFS_USE_PPA:-} == "1" ]] || dpkg --compare-versions "$zfs_package_version" lt 0.8; then v_use_ppa=1 fi } From 7a97c247ad57073a03b7d30c3e9f90dffac3e711 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 13:12:00 +0200 Subject: [PATCH 13/90] README: Remove `sh` code block formatting for options section Gets messy in some cases (e.g. with single quotes), and doesn't make much sense, since there is only one command. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 6654515..7f5d210 100644 --- a/README.md +++ b/README.md @@ -85,7 +85,7 @@ The installer itself can run over SSH (\[S\]Ubiquity of course needs to be still The program supports unattended installation, via environment variables. The program built-in help explains all the options: -```sh +``` $ wget -qO- https://git.io/JelI5 | bash /dev/stdin --help Usage: install-zfs.sh [-h|--help] From 46f9b652a6aac12704e15a27018c5b83037eebfa Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 13:14:20 +0200 Subject: [PATCH 14/90] README: Update in relation the the "Use PPA" new option --- README.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 7f5d210..b725bb2 100644 --- a/README.md +++ b/README.md @@ -43,10 +43,11 @@ The advantages of this project over the Ubuntu installer are: 2. it allows specifying the RAID type; 3. it allows customization of the disk partitions; 4. it supports additional features (e.g. encryption); -5. it supports many more operating systems; -6. it supports unattended installations, via custom scripts; -7. it installs a convenient trimming job for ZFS pools; -8. it's easy to extend. +5. it supports new OpenZFS versions, via PPA `jonathonf/zfs`. +6. it supports many more operating systems; +7. it supports unattended installations, via custom scripts; +8. 
it installs a convenient trimming job for ZFS pools; +9. it's easy to extend. The disadvantages are: @@ -96,6 +97,7 @@ This script needs to be run with admin permissions, from a Live CD. The procedure can be entirely automated via environment variables: - ZFS_OS_INSTALLATION_SCRIPT : path of a script to execute instead of Ubiquity (see dedicated section below) +- ZFS_USE_PPAS : set to 1 to use packages from `ppa:jonathonf/zfs` (automatically set to true if the O/S version doesn't ship at least v0.8) - ZFS_SELECTED_DISKS : full path of the devices to create the pool on, comma-separated - ZFS_PASSPHRASE - ZFS_BPOOL_NAME From 9b8e667e74b426de9c7bb1fdf4a826648904e485 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 14:39:22 +0200 Subject: [PATCH 15/90] README: Simplify markdown for list of advantages --- README.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index b725bb2..8fb1bed 100644 --- a/README.md +++ b/README.md @@ -40,14 +40,14 @@ As of 20.04, Canonical makes available an experimental ZFS installer on Ubuntu D The advantages of this project over the Ubuntu installer are: 1. it supports pools configuration; -2. it allows specifying the RAID type; -3. it allows customization of the disk partitions; -4. it supports additional features (e.g. encryption); -5. it supports new OpenZFS versions, via PPA `jonathonf/zfs`. -6. it supports many more operating systems; -7. it supports unattended installations, via custom scripts; -8. it installs a convenient trimming job for ZFS pools; -9. it's easy to extend. +1. it allows specifying the RAID type; +1. it allows customization of the disk partitions; +1. it supports additional features (e.g. encryption); +1. it supports new OpenZFS versions, via PPA `jonathonf/zfs`. +1. it supports many more operating systems; +1. it supports unattended installations, via custom scripts; +1. it installs a convenient trimming job for ZFS pools; +1. 
it's easy to extend. The disadvantages are: From 09c6fce0dc34b9aeea5f02db925cd3da4e02a492 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 14:40:55 +0200 Subject: [PATCH 16/90] Generalize reference to ZFS make logfile --- install-zfs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install-zfs.sh b/install-zfs.sh index 9e42cf5..4976017 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -664,7 +664,7 @@ function install_host_packages { apt update # Libelf-dev allows `CONFIG_STACK_VALIDATION` to be set - it's optional, but good to have. - # Module compilation log: `/var/lib/dkms/zfs/0.8.2/build/make.log` (adjust according to version). + # Module compilation log: `/var/lib/dkms/zfs/**/*/make.log` (adjust according to version). # echo "zfs-dkms zfs-dkms/note-incompatible-licenses note true" | debconf-set-selections apt install --yes libelf-dev zfs-dkms From 136b392612b0d27050605b745091bb139f8e9dc2 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 14:42:24 +0200 Subject: [PATCH 17/90] Convert PPA address strings to constant --- install-zfs.sh | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 4976017..1a236f7 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -43,6 +43,7 @@ v_suitable_disks=() # (/dev/by-id/disk_id, ...); scope: find_suitable_d # Note that Linux Mint is "Linuxmint" from v20 onwards. This actually helps, since some operations are # specific to it. +c_ppa=ppa:jonathonf/zfs c_efi_system_partition_size=512 # megabytes c_default_boot_partition_size=2048 # megabytes c_default_bpool_tweaks="-o ashift=12" @@ -166,7 +167,7 @@ This script needs to be run with admin permissions, from a Live CD. 
The procedure can be entirely automated via environment variables: - ZFS_OS_INSTALLATION_SCRIPT : path of a script to execute instead of Ubiquity (see dedicated section below) -- ZFS_USE_PPA : set to 1 to use packages from `ppa:jonathonf/zfs` (automatically set to true if the O/S version doesn'\''t ship at least v0.8) +- ZFS_USE_PPA : set to 1 to use packages from `'"$c_ppa"'` (automatically set to true if the O/S version doesn'\''t ship at least v0.8) - ZFS_SELECTED_DISKS : full path of the devices to create the pool on, comma-separated - ZFS_BOOT_PARTITION_SIZE : integer number with `M` or `G` suffix (defaults to `'${c_default_boot_partition_size}M'`) - ZFS_PASSPHRASE : set non-blank to encrypt the pool, and blank not to. if unset, it will be asked. @@ -660,7 +661,7 @@ function install_host_packages { if [[ $v_use_ppa == "1" ]]; then if [[ ${ZFS_SKIP_LIVE_ZFS_MODULE_INSTALL:-} != "1" ]]; then - add-apt-repository --yes ppa:jonathonf/zfs + add-apt-repository --yes "$c_ppa" apt update # Libelf-dev allows `CONFIG_STACK_VALIDATION` to be set - it's optional, but good to have. 
@@ -1105,7 +1106,7 @@ function install_jail_zfs_packages { print_step_info_header if [[ $v_use_ppa == "1" ]]; then - chroot_execute "add-apt-repository --yes ppa:jonathonf/zfs" + chroot_execute "add-apt-repository --yes $c_ppa" chroot_execute "apt update" From 7f239e7b483a5896a265e7176deec3889c08004d Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 17:53:46 +0200 Subject: [PATCH 18/90] Add memory check (when PPA is selected) --- install-zfs.sh | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/install-zfs.sh b/install-zfs.sh index 1a236f7..bb1ca06 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -46,6 +46,7 @@ v_suitable_disks=() # (/dev/by-id/disk_id, ...); scope: find_suitable_d c_ppa=ppa:jonathonf/zfs c_efi_system_partition_size=512 # megabytes c_default_boot_partition_size=2048 # megabytes +c_memory_warning_limit=2880 # megabytes; not set to 3072 because on some systems, some RAM is occupied/shared c_default_bpool_tweaks="-o ashift=12" c_default_rpool_tweaks="-o ashift=12 -O acltype=posixacl -O compression=lz4 -O dnodesize=auto -O relatime=on -O xattr=sa -O normalization=formD" c_zfs_mount_dir=/mnt @@ -377,6 +378,19 @@ function set_zfs_ppa_requirement { # if [[ ${ZFS_USE_PPA:-} == "1" ]] || dpkg --compare-versions "$zfs_package_version" lt 0.8; then v_use_ppa=1 + + local system_memory + system_memory=$(free -m | perl -lane 'print @F[1] if $. == 2') + + if [[ $system_memory -lt $c_memory_warning_limit && -z ${ZFS_NO_INFO_MESSAGES:-} ]]; then + local dialog_message='WARNING! The PPA is used, which requires compiling the ZFS module. + +On systems with relatively little RAM (less than around 3 GiB), the procedure may crash during the compilation. + +In case of crash due to low memory, no error message is displayed; the only traces are `Killed process` messages in the syslog.' 
+ + whiptail --msgbox "$dialog_message" 30 100 + fi fi } From 4b8a7aa7cbdd9405738df20df59bc79e82d76a78 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 21:46:53 +0200 Subject: [PATCH 19/90] Remove reference to old guide, and add a comment --- install-zfs.sh | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index bb1ca06..7f51d88 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -1002,8 +1002,8 @@ function create_pools { # POOLS CREATION ##################### - # See https://github.com/zfsonlinux/zfs/wiki/Ubuntu-18.04-Root-on-ZFS for the details. - # + # The root pool must be created first, since the boot pool mountpoint is inside it. + # `-R` creates an "Alternate Root Point", which is lost on unmount; it's just a convenience for a temporary mountpoint; # `-f` force overwrite partitions is existing - in some cases, even after wipefs, a filesystem is mistakenly recognized # `-O` set filesystem properties on a pool (pools and filesystems are distincted entities, however, a pool includes an FS by default). 
@@ -1018,8 +1018,6 @@ function create_pools { "$v_rpool_name" $v_pools_raid_type "${rpool_disks_partitions[@]}" \ < "$c_passphrase_named_pipe" - # `-d` disable all the pool features (not used here); - # # shellcheck disable=SC2086 # TODO: See above zpool create \ "${v_bpool_tweaks[@]}" \ From 796edbcec9bce1fd2426ae0279d6c164af217cd2 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 18:49:22 +0200 Subject: [PATCH 20/90] Refactoring: Convert the default pool tweak variables to array --- install-zfs.sh | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 7f51d88..3107c93 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -47,8 +47,18 @@ c_ppa=ppa:jonathonf/zfs c_efi_system_partition_size=512 # megabytes c_default_boot_partition_size=2048 # megabytes c_memory_warning_limit=2880 # megabytes; not set to 3072 because on some systems, some RAM is occupied/shared -c_default_bpool_tweaks="-o ashift=12" -c_default_rpool_tweaks="-o ashift=12 -O acltype=posixacl -O compression=lz4 -O dnodesize=auto -O relatime=on -O xattr=sa -O normalization=formD" +c_default_bpool_tweaks=( + -o ashift=12 +) +c_default_rpool_tweaks=( + -o ashift=12 + -O acltype=posixacl + -O compression=lz4 + -O dnodesize=auto + -O normalization=formD + -O relatime=on + -O xattr=sa +) c_zfs_mount_dir=/mnt c_installed_os_data_mount_dir=/target declare -A c_supported_linux_distributions=([Debian]=10 [Ubuntu]="18.04 20.04" [UbuntuServer]="18.04 20.04" [LinuxMint]="19.1 19.2 19.3" [Linuxmint]="20 20.1" [elementary]=5.1) @@ -175,8 +185,8 @@ The procedure can be entirely automated via environment variables: - ZFS_DEBIAN_ROOT_PASSWORD - ZFS_BPOOL_NAME - ZFS_RPOOL_NAME -- ZFS_BPOOL_TWEAKS : boot pool options to set on creation (defaults to `'$c_default_bpool_tweaks'`) -- ZFS_RPOOL_TWEAKS : root pool options to set on creation (defaults to `'$c_default_rpool_tweaks'`) +- ZFS_BPOOL_TWEAKS : boot pool options to set on 
creation (see defaults below) +- ZFS_RPOOL_TWEAKS : root pool options to set on creation (see defaults below) - ZFS_POOLS_RAID_TYPE : options: blank (striping), `mirror`, `raidz`, `raidz2`, `raidz3`; if unset, it will be asked. - ZFS_NO_INFO_MESSAGES : set 1 to skip informational messages - ZFS_SWAP_SIZE : swap size (integer); set 0 for no swap @@ -189,6 +199,10 @@ When installing the O/S via $ZFS_OS_INSTALLATION_SCRIPT, the root pool is mounte 1. the virtual filesystems must be mounted in `'$c_zfs_mount_dir'` (ie. `for vfs in proc sys dev; do mount --rbind /$vfs '$c_zfs_mount_dir'/$vfs; done`) 2. internet must be accessible while chrooting in `'$c_zfs_mount_dir'` (ie. `echo nameserver 8.8.8.8 >> '$c_zfs_mount_dir'/etc/resolv.conf`) 3. `'$c_zfs_mount_dir'` must be left in a dismountable state (e.g. no file locks, no swap etc.); + +Boot pool default tweaks: '"${c_default_bpool_tweaks[@]/#-/$'\n' -}"' + +Root pool default tweaks: '"${c_default_rpool_tweaks[*]/#-/$'\n' -}"' ' echo "$help" @@ -655,7 +669,7 @@ function ask_pool_tweaks { The option `-O devices=off` is already set, and must not be specified.' - local raw_bpool_tweaks=${ZFS_BPOOL_TWEAKS:-$(whiptail --inputbox "$bpool_tweaks_message" 30 100 -- "$c_default_bpool_tweaks" 3>&1 1>&2 2>&3)} + local raw_bpool_tweaks=${ZFS_BPOOL_TWEAKS:-$(whiptail --inputbox "$bpool_tweaks_message" 30 100 -- "${c_default_bpool_tweaks[*]}" 3>&1 1>&2 2>&3)} mapfile -d' ' -t v_bpool_tweaks < <(echo -n "$raw_bpool_tweaks") @@ -663,7 +677,7 @@ The option `-O devices=off` is already set, and must not be specified.' The option `-O devices=off` is already set, and must not be specified.' 
- local raw_rpool_tweaks=${ZFS_RPOOL_TWEAKS:-$(whiptail --inputbox "$rpool_tweaks_message" 30 100 -- "$c_default_rpool_tweaks" 3>&1 1>&2 2>&3)} + local raw_rpool_tweaks=${ZFS_RPOOL_TWEAKS:-$(whiptail --inputbox "$rpool_tweaks_message" 30 100 -- "${c_default_rpool_tweaks[*]}" 3>&1 1>&2 2>&3)} mapfile -d' ' -t v_rpool_tweaks < <(echo -n "$raw_rpool_tweaks") From 1333f86cd8469d2bf4d9df6954bf9bac0d160140 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 19:15:06 +0200 Subject: [PATCH 21/90] Refactoring: Rename the "tweaks" concept to "create options", and consolidate them This is in preparation for the procedure update. --- install-zfs.sh | 48 +++++++++++++++++++++++++----------------------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 3107c93..b2cd838 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -22,10 +22,10 @@ set -o nounset v_boot_partition_size= # Integer number with `M` or `G` suffix v_bpool_name= -v_bpool_tweaks= # array; see defaults below for format +v_bpool_create_options= # array; see defaults below for format v_root_password= # Debian-only v_rpool_name= -v_rpool_tweaks= # array; see defaults below for format +v_rpool_create_options= # array; see defaults below for format v_pools_raid_type= declare -a v_selected_disks # (/dev/by-id/disk_id, ...) 
v_swap_size= # integer @@ -47,10 +47,11 @@ c_ppa=ppa:jonathonf/zfs c_efi_system_partition_size=512 # megabytes c_default_boot_partition_size=2048 # megabytes c_memory_warning_limit=2880 # megabytes; not set to 3072 because on some systems, some RAM is occupied/shared -c_default_bpool_tweaks=( +c_default_bpool_create_options=( -o ashift=12 + -O devices=off ) -c_default_rpool_tweaks=( +c_default_rpool_create_options=( -o ashift=12 -O acltype=posixacl -O compression=lz4 @@ -58,6 +59,7 @@ c_default_rpool_tweaks=( -O normalization=formD -O relatime=on -O xattr=sa + -O devices=off ) c_zfs_mount_dir=/mnt c_installed_os_data_mount_dir=/target @@ -185,8 +187,8 @@ The procedure can be entirely automated via environment variables: - ZFS_DEBIAN_ROOT_PASSWORD - ZFS_BPOOL_NAME - ZFS_RPOOL_NAME -- ZFS_BPOOL_TWEAKS : boot pool options to set on creation (see defaults below) -- ZFS_RPOOL_TWEAKS : root pool options to set on creation (see defaults below) +- ZFS_BPOOL_CREATE_OPTIONS : boot pool options to set on creation (see defaults below) +- ZFS_RPOOL_CREATE_OPTIONS : root pool options to set on creation (see defaults below) - ZFS_POOLS_RAID_TYPE : options: blank (striping), `mirror`, `raidz`, `raidz2`, `raidz3`; if unset, it will be asked. - ZFS_NO_INFO_MESSAGES : set 1 to skip informational messages - ZFS_SWAP_SIZE : swap size (integer); set 0 for no swap @@ -200,9 +202,9 @@ When installing the O/S via $ZFS_OS_INSTALLATION_SCRIPT, the root pool is mounte 2. internet must be accessible while chrooting in `'$c_zfs_mount_dir'` (ie. `echo nameserver 8.8.8.8 >> '$c_zfs_mount_dir'/etc/resolv.conf`) 3. `'$c_zfs_mount_dir'` must be left in a dismountable state (e.g. 
no file locks, no swap etc.); -Boot pool default tweaks: '"${c_default_bpool_tweaks[@]/#-/$'\n' -}"' +Boot pool default create options: '"${c_default_bpool_create_options[*]/#-/$'\n' -}"' -Root pool default tweaks: '"${c_default_rpool_tweaks[*]/#-/$'\n' -}"' +Root pool default create options: '"${c_default_rpool_create_options[*]/#-/$'\n' -}"' ' echo "$help" @@ -662,26 +664,26 @@ function ask_pool_names { print_variables v_bpool_name v_rpool_name } -function ask_pool_tweaks { +function ask_pool_create_options { print_step_info_header - local bpool_tweaks_message='Insert the tweaks for the boot pool + local bpool_create_options_message='Insert the create options for the boot pool -The option `-O devices=off` is already set, and must not be specified.' +The mount-related options are automatically added, and must not be specified.' - local raw_bpool_tweaks=${ZFS_BPOOL_TWEAKS:-$(whiptail --inputbox "$bpool_tweaks_message" 30 100 -- "${c_default_bpool_tweaks[*]}" 3>&1 1>&2 2>&3)} + local raw_bpool_create_options=${ZFS_BPOOL_CREATE_OPTIONS:-$(whiptail --inputbox "$bpool_create_options_message" 30 100 -- "${c_default_bpool_create_options[*]}" 3>&1 1>&2 2>&3)} - mapfile -d' ' -t v_bpool_tweaks < <(echo -n "$raw_bpool_tweaks") + mapfile -d' ' -t v_bpool_create_options < <(echo -n "$raw_bpool_create_options") - local rpool_tweaks_message='Insert the tweaks for the root pool + local rpool_create_options_message='Insert the create options for the root pool -The option `-O devices=off` is already set, and must not be specified.' +The encryption/mount-related options are automatically added, and must not be specified.' 
- local raw_rpool_tweaks=${ZFS_RPOOL_TWEAKS:-$(whiptail --inputbox "$rpool_tweaks_message" 30 100 -- "${c_default_rpool_tweaks[*]}" 3>&1 1>&2 2>&3)} + local raw_rpool_create_options=${ZFS_RPOOL_CREATE_OPTIONS:-$(whiptail --inputbox "$rpool_create_options_message" 30 100 -- "${c_default_rpool_create_options[*]}" 3>&1 1>&2 2>&3)} - mapfile -d' ' -t v_rpool_tweaks < <(echo -n "$raw_rpool_tweaks") + mapfile -d' ' -t v_rpool_create_options < <(echo -n "$raw_rpool_create_options") - print_variables v_bpool_tweaks v_rpool_tweaks + print_variables v_bpool_create_options v_rpool_create_options } function install_host_packages { @@ -1027,15 +1029,15 @@ function create_pools { # shellcheck disable=SC2086 # TODO: convert v_pools_raid_type to array, and quote zpool create \ "${encryption_options[@]}" \ - "${v_rpool_tweaks[@]}" \ - -O devices=off -O mountpoint=/ -R "$c_zfs_mount_dir" -f \ + "${v_rpool_create_options[@]}" \ + -O mountpoint=/ -R "$c_zfs_mount_dir" -f \ "$v_rpool_name" $v_pools_raid_type "${rpool_disks_partitions[@]}" \ < "$c_passphrase_named_pipe" # shellcheck disable=SC2086 # TODO: See above zpool create \ - "${v_bpool_tweaks[@]}" \ - -O devices=off -O mountpoint=/boot -R "$c_zfs_mount_dir" -f \ + "${v_bpool_create_options[@]}" \ + -O mountpoint=/boot -R "$c_zfs_mount_dir" -f \ "$v_bpool_name" $v_pools_raid_type "${bpool_disks_partitions[@]}" } @@ -1460,7 +1462,7 @@ ask_boot_partition_size ask_swap_size ask_free_tail_space ask_pool_names -ask_pool_tweaks +ask_pool_create_options distro_dependent_invoke "install_host_packages" setup_partitions From 75793779dd8fc9338d91995767d9a204bdf900e3 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 22:35:08 +0200 Subject: [PATCH 22/90] Split install_and_configure_bootloader in separate functions --- install-zfs.sh | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index b2cd838..42a2f72 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ 
-1195,7 +1195,7 @@ function install_jail_zfs_packages_UbuntuServer { fi } -function install_and_configure_bootloader { +function prepare_efi_partition { print_step_info_header chroot_execute "echo PARTUUID=$(blkid -s PARTUUID -o value "${v_selected_disks[0]}-part1") /boot/efi vfat nofail,x-systemd.device-timeout=1 0 1 > /etc/fstab" @@ -1204,6 +1204,10 @@ function install_and_configure_bootloader { chroot_execute "mount /boot/efi" chroot_execute "grub-install" +} + +function configure_and_update_grub { + print_step_info_header chroot_execute "perl -i -pe 's/(GRUB_CMDLINE_LINUX=\")/\${1}root=ZFS=$v_rpool_name /' /etc/default/grub" @@ -1230,16 +1234,9 @@ function install_and_configure_bootloader { chroot_execute "update-grub" } -function install_and_configure_bootloader_Debian { +function configure_and_update_grub_Debian { print_step_info_header - chroot_execute "echo PARTUUID=$(blkid -s PARTUUID -o value "${v_selected_disks[0]}-part1") /boot/efi vfat nofail,x-systemd.device-timeout=1 0 1 > /etc/fstab" - - chroot_execute "mkdir -p /boot/efi" - chroot_execute "mount /boot/efi" - - chroot_execute "grub-install" - chroot_execute "perl -i -pe 's/(GRUB_CMDLINE_LINUX=\")/\${1}root=ZFS=$v_rpool_name /' /etc/default/grub" chroot_execute "perl -i -pe 's/(GRUB_CMDLINE_LINUX_DEFAULT=.*)quiet/\$1/' /etc/default/grub" chroot_execute "perl -i -pe 's/#(GRUB_TERMINAL=console)/\$1/' /etc/default/grub" @@ -1485,7 +1482,8 @@ fi prepare_jail distro_dependent_invoke "install_jail_zfs_packages" -distro_dependent_invoke "install_and_configure_bootloader" +prepare_efi_partition +distro_dependent_invoke "configure_and_update_grub" sync_efi_partitions configure_boot_pool_import update_initramfs From 1ceaff98c3e8aebc5d8bce9cab4504f7bdafaebf Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 19:18:10 +0200 Subject: [PATCH 23/90] Update pool creation features to updated procedure --- install-zfs.sh | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git 
a/install-zfs.sh b/install-zfs.sh index 42a2f72..c0df098 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -49,10 +49,29 @@ c_default_boot_partition_size=2048 # megabytes c_memory_warning_limit=2880 # megabytes; not set to 3072 because on some systems, some RAM is occupied/shared c_default_bpool_create_options=( -o ashift=12 + -o autotrim=on + -d + -o feature@async_destroy=enabled + -o feature@bookmarks=enabled + -o feature@embedded_data=enabled + -o feature@empty_bpobj=enabled + -o feature@enabled_txg=enabled + -o feature@extensible_dataset=enabled + -o feature@filesystem_limits=enabled + -o feature@hole_birth=enabled + -o feature@large_blocks=enabled + -o feature@lz4_compress=enabled + -o feature@spacemap_histogram=enabled + -O acltype=posixacl + -O compression=lz4 -O devices=off + -O normalization=formD + -O relatime=on + -O xattr=sa ) c_default_rpool_create_options=( -o ashift=12 + -o autotrim=on -O acltype=posixacl -O compression=lz4 -O dnodesize=auto From 09a1cbb97c9977b70f67462a2ccb88bba9499ed2 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 22:46:05 +0200 Subject: [PATCH 24/90] Add GRUB option `init_on_alloc` to address performance regression --- install-zfs.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/install-zfs.sh b/install-zfs.sh index c0df098..5619086 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -1228,6 +1228,8 @@ function prepare_efi_partition { function configure_and_update_grub { print_step_info_header + chroot_execute "perl -i -pe 's/GRUB_CMDLINE_LINUX_DEFAULT=\"\K/init_on_alloc=0 /' /etc/default/grub" + chroot_execute "perl -i -pe 's/(GRUB_CMDLINE_LINUX=\")/\${1}root=ZFS=$v_rpool_name /' /etc/default/grub" # Silence warning during the grub probe (source: https://git.io/JenXF). 
@@ -1256,6 +1258,8 @@ function configure_and_update_grub { function configure_and_update_grub_Debian { print_step_info_header + chroot_execute "perl -i -pe 's/GRUB_CMDLINE_LINUX_DEFAULT=\"\K/init_on_alloc=0 /' /etc/default/grub" + chroot_execute "perl -i -pe 's/(GRUB_CMDLINE_LINUX=\")/\${1}root=ZFS=$v_rpool_name /' /etc/default/grub" chroot_execute "perl -i -pe 's/(GRUB_CMDLINE_LINUX_DEFAULT=.*)quiet/\$1/' /etc/default/grub" chroot_execute "perl -i -pe 's/#(GRUB_TERMINAL=console)/\$1/' /etc/default/grub" From 4f1ba1ae2d5ff99702ffd3661c99e01f26107e41 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 22:48:41 +0200 Subject: [PATCH 25/90] Use UUID rather than PARTUUID for boot partitions in the fstab Should be essentially the same, but this is the new procedure. --- install-zfs.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 5619086..88e20bf 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -1217,7 +1217,9 @@ function install_jail_zfs_packages_UbuntuServer { function prepare_efi_partition { print_step_info_header - chroot_execute "echo PARTUUID=$(blkid -s PARTUUID -o value "${v_selected_disks[0]}-part1") /boot/efi vfat nofail,x-systemd.device-timeout=1 0 1 > /etc/fstab" + # The other mounts are configured/synced in the EFI partitions sync stage. 
+ # + chroot_execute "echo /dev/disk/by-uuid/$(blkid -s UUID -o value "${v_selected_disks[0]}"-part1) /boot/efi vfat defaults 0 0 > /etc/fstab" chroot_execute "mkdir -p /boot/efi" chroot_execute "mount /boot/efi" @@ -1273,7 +1275,7 @@ function sync_efi_partitions { for ((i = 1; i < ${#v_selected_disks[@]}; i++)); do local synced_efi_partition_path="/boot/efi$((i + 1))" - chroot_execute "echo PARTUUID=$(blkid -s PARTUUID -o value "${v_selected_disks[i]}-part1") $synced_efi_partition_path vfat nofail,x-systemd.device-timeout=1 0 1 >> /etc/fstab" + chroot_execute "echo /dev/disk/by-uuid/$(blkid -s UUID -o value "${v_selected_disks[i]}"-part1) $synced_efi_partition_path vfat defaults 0 0 >> /etc/fstab" chroot_execute "mkdir -p $synced_efi_partition_path" chroot_execute "mount $synced_efi_partition_path" From bf0e16f74b74fa4a2f97523c0d3381656e85a140 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 22:58:37 +0200 Subject: [PATCH 26/90] Disable bpool name customization This is mandated by the updated procedure.
--- README.md | 1 - install-zfs.sh | 37 ++++++++++++------------------------- 2 files changed, 12 insertions(+), 26 deletions(-) diff --git a/README.md b/README.md index 8fb1bed..5a3a40a 100644 --- a/README.md +++ b/README.md @@ -100,7 +100,6 @@ The procedure can be entirely automated via environment variables: - ZFS_USE_PPAS : set to 1 to use packages from `ppa:jonathonf/zfs` (automatically set to true if the O/S version doesn't ship at least v0.8) - ZFS_SELECTED_DISKS : full path of the devices to create the pool on, comma-separated - ZFS_PASSPHRASE -- ZFS_BPOOL_NAME - ZFS_RPOOL_NAME - ZFS_BPOOL_TWEAKS : boot pool options to set on creation (defaults to `-o ashift=12`) - ZFS_RPOOL_TWEAKS : root pool options to set on creation (defaults to `-o ashift=12 -O acltype=posixacl -O compression=lz4 -O dnodesize=auto -O relatime=on -O xattr=sa -O normalization=formD`) diff --git a/install-zfs.sh b/install-zfs.sh index 88e20bf..ca6004a 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -21,7 +21,6 @@ set -o nounset # Note that `ZFS_PASSPHRASE` and `ZFS_POOLS_RAID_TYPE` consider the unset state (see help). v_boot_partition_size= # Integer number with `M` or `G` suffix -v_bpool_name= v_bpool_create_options= # array; see defaults below for format v_root_password= # Debian-only v_rpool_name= @@ -43,6 +42,7 @@ v_suitable_disks=() # (/dev/by-id/disk_id, ...); scope: find_suitable_d # Note that Linux Mint is "Linuxmint" from v20 onwards. This actually helps, since some operations are # specific to it. +c_bpool_name=bpool c_ppa=ppa:jonathonf/zfs c_efi_system_partition_size=512 # megabytes c_default_boot_partition_size=2048 # megabytes @@ -204,7 +204,6 @@ The procedure can be entirely automated via environment variables: - ZFS_BOOT_PARTITION_SIZE : integer number with `M` or `G` suffix (defaults to `'${c_default_boot_partition_size}M'`) - ZFS_PASSPHRASE : set non-blank to encrypt the pool, and blank not to. if unset, it will be asked. 
- ZFS_DEBIAN_ROOT_PASSWORD -- ZFS_BPOOL_NAME - ZFS_RPOOL_NAME - ZFS_BPOOL_CREATE_OPTIONS : boot pool options to set on creation (see defaults below) - ZFS_RPOOL_CREATE_OPTIONS : root pool options to set on creation (see defaults below) @@ -653,21 +652,9 @@ For detailed informations, see the wiki page: https://github.com/saveriomiroddi/ print_variables v_free_tail_space } -function ask_pool_names { +function ask_rpool_name { print_step_info_header - if [[ ${ZFS_BPOOL_NAME:-} != "" ]]; then - v_bpool_name=$ZFS_BPOOL_NAME - else - local bpool_name_invalid_message= - - while [[ ! $v_bpool_name =~ ^[a-z][a-zA-Z_:.-]+$ ]]; do - v_bpool_name=$(whiptail --inputbox "${bpool_name_invalid_message}Insert the name for the boot pool" 30 100 bpool 3>&1 1>&2 2>&3) - - bpool_name_invalid_message="Invalid pool name! " - done - fi - if [[ ${ZFS_RPOOL_NAME:-} != "" ]]; then v_rpool_name=$ZFS_RPOOL_NAME else @@ -680,7 +667,7 @@ function ask_pool_names { done fi - print_variables v_bpool_name v_rpool_name + print_variables v_rpool_name } function ask_pool_create_options { @@ -1057,7 +1044,7 @@ function create_pools { zpool create \ "${v_bpool_create_options[@]}" \ -O mountpoint=/boot -R "$c_zfs_mount_dir" -f \ - "$v_bpool_name" $v_pools_raid_type "${bpool_disks_partitions[@]}" + "$c_bpool_name" $v_pools_raid_type "${bpool_disks_partitions[@]}" } function create_swap_volume { @@ -1125,7 +1112,7 @@ function remove_temp_partition_and_expand_rpool { # For unencrypted pools, `-l` doesn't interfere. 
# zpool import -l -R "$c_zfs_mount_dir" "$v_rpool_name" < "$c_passphrase_named_pipe_2" - zpool import -l -R "$c_zfs_mount_dir" "$v_bpool_name" + zpool import -l -R "$c_zfs_mount_dir" "$c_bpool_name" for selected_disk in "${v_selected_disks[@]}"; do zpool online -e "$v_rpool_name" "$selected_disk-part3" @@ -1293,7 +1280,7 @@ function sync_efi_partitions { function configure_boot_pool_import { print_step_info_header - chroot_execute "cat > /etc/systemd/system/zfs-import-$v_bpool_name.service < /etc/systemd/system/zfs-import-$c_bpool_name.service <> /etc/fstab" + chroot_execute "zfs set mountpoint=legacy $c_bpool_name" + chroot_execute "echo $c_bpool_name /boot zfs nodev,relatime,x-systemd.requires=zfs-import-$c_bpool_name.service 0 0 >> /etc/fstab" } # This step is important in cases where the keyboard layout is not the standard one. @@ -1385,7 +1372,7 @@ ConditionVirtualization=!container [Service] Type=oneshot -ExecStart=/sbin/zpool trim $v_bpool_name +ExecStart=/sbin/zpool trim $c_bpool_name ExecStart=/sbin/zpool trim $v_rpool_name UNIT" @@ -1483,7 +1470,7 @@ ask_encryption ask_boot_partition_size ask_swap_size ask_free_tail_space -ask_pool_names +ask_rpool_name ask_pool_create_options distro_dependent_invoke "install_host_packages" From 3f2855c41132df4ae81c21ed94eba3cde7089a95 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 24 Apr 2021 23:04:49 +0200 Subject: [PATCH 27/90] Modern filesystem mount ordering --- install-zfs.sh | 67 ++++++++++++++++++++------------------------------ 1 file changed, 26 insertions(+), 41 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index ca6004a..ab9ad31 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -1042,6 +1042,7 @@ function create_pools { # shellcheck disable=SC2086 # TODO: See above zpool create \ + -o cachefile=/etc/zfs/zpool.cache \ "${v_bpool_create_options[@]}" \ -O mountpoint=/boot -R "$c_zfs_mount_dir" -f \ "$c_bpool_name" $v_pools_raid_type "${bpool_disks_partitions[@]}" @@ -1058,6 +1059,11 @@ 
function create_swap_volume { fi } +function copy_zpool_cache { + mkdir -p "$c_zfs_mount_dir/etc/zfs" + cp /etc/zfs/zpool.cache "$c_zfs_mount_dir/etc/zfs/" +} + function sync_os_temp_installation_dir_to_rpool { print_step_info_header @@ -1219,8 +1225,6 @@ function configure_and_update_grub { chroot_execute "perl -i -pe 's/GRUB_CMDLINE_LINUX_DEFAULT=\"\K/init_on_alloc=0 /' /etc/default/grub" - chroot_execute "perl -i -pe 's/(GRUB_CMDLINE_LINUX=\")/\${1}root=ZFS=$v_rpool_name /' /etc/default/grub" - # Silence warning during the grub probe (source: https://git.io/JenXF). # chroot_execute "echo 'GRUB_DISABLE_OS_PROBER=true' >> /etc/default/grub" @@ -1249,7 +1253,6 @@ function configure_and_update_grub_Debian { chroot_execute "perl -i -pe 's/GRUB_CMDLINE_LINUX_DEFAULT=\"\K/init_on_alloc=0 /' /etc/default/grub" - chroot_execute "perl -i -pe 's/(GRUB_CMDLINE_LINUX=\")/\${1}root=ZFS=$v_rpool_name /' /etc/default/grub" chroot_execute "perl -i -pe 's/(GRUB_CMDLINE_LINUX_DEFAULT=.*)quiet/\$1/' /etc/default/grub" chroot_execute "perl -i -pe 's/#(GRUB_TERMINAL=console)/\$1/' /etc/default/grub" @@ -1277,32 +1280,6 @@ function sync_efi_partitions { chroot_execute "umount /boot/efi" } -function configure_boot_pool_import { - print_step_info_header - - chroot_execute "cat > /etc/systemd/system/zfs-import-$c_bpool_name.service <> /etc/fstab" -} - # This step is important in cases where the keyboard layout is not the standard one. # See issue https://github.com/saveriomiroddi/zfs-installer/issues/110. 
# @@ -1312,47 +1289,54 @@ function update_initramfs { chroot_execute "update-initramfs -u" } -function update_zed_cache_Debian { +function fix_filesystem_mount_ordering { + print_step_info_header + chroot_execute "mkdir /etc/zfs/zfs-list.cache" - chroot_execute "touch /etc/zfs/zfs-list.cache/$v_rpool_name" + chroot_execute "touch /etc/zfs/zfs-list.cache/$c_bpool_name /etc/zfs/zfs-list.cache/$v_rpool_name" chroot_execute "ln -s /usr/lib/zfs-linux/zed.d/history_event-zfs-list-cacher.sh /etc/zfs/zed.d/" - # Assumed to be present by the zedlet above, but missing. + # Assumed to be present by the zedlet above on Debian, but missing. # Filed issue: https://github.com/zfsonlinux/zfs/issues/9945. # chroot_execute "mkdir /run/lock" + # It's not clear (based on the help) why it's explicitly run in foreground (`-F`), but backgrounded. + # chroot_execute "zed -F &" # We could pool the events via `zpool events -v`, but it's much simpler to just check on the file. # local success= - if [[ ! -s "$c_zfs_mount_dir/etc/zfs/zfs-list.cache/$v_rpool_name" ]]; then - # Takes around half second on a test VM. + if [[ ! -s $c_zfs_mount_dir/etc/zfs/zfs-list.cache/$c_bpool_name || ! -s $c_zfs_mount_dir/etc/zfs/zfs-list.cache/$v_rpool_name ]]; then + # For the rpool only, it takes around half second on a test VM. # - chroot_execute "zfs set canmount=noauto $v_rpool_name" + chroot_execute "zfs set canmount=on $c_bpool_name" + chroot_execute "zfs set canmount=on $v_rpool_name" SECONDS=0 while [[ $SECONDS -lt 5 ]]; do - if [[ -s "$c_zfs_mount_dir/etc/zfs/zfs-list.cache/$v_rpool_name" ]]; then + if [[ -s $c_zfs_mount_dir/etc/zfs/zfs-list.cache/$c_bpool_name && -s $c_zfs_mount_dir/etc/zfs/zfs-list.cache/$v_rpool_name ]]; then success=1 break else sleep 0.25 fi done + else + success=1 fi + chroot_execute "pkill zed" + if [[ $success -ne 1 ]]; then echo "Error: The ZFS cache hasn't been updated by ZED!"
exit 1 fi - chroot_execute "pkill zed" - - chroot_execute "sed -Ei 's|$c_installed_os_data_mount_dir/?|/|' /etc/zfs/zfs-list.cache/$v_rpool_name" + chroot_execute "sed -Ei 's|$c_zfs_mount_dir/?|/|' /etc/zfs/zfs-list.cache/*" } # We don't care about synchronizing with the `fstrim` service for two reasons: @@ -1482,11 +1466,13 @@ if [[ "${ZFS_OS_INSTALLATION_SCRIPT:-}" == "" ]]; then create_pools create_swap_volume + copy_zpool_cache sync_os_temp_installation_dir_to_rpool remove_temp_partition_and_expand_rpool else create_pools create_swap_volume + copy_zpool_cache remove_temp_partition_and_expand_rpool custom_install_operating_system @@ -1497,9 +1483,8 @@ distro_dependent_invoke "install_jail_zfs_packages" prepare_efi_partition distro_dependent_invoke "configure_and_update_grub" sync_efi_partitions -configure_boot_pool_import update_initramfs -distro_dependent_invoke "update_zed_cache" --noforce +fix_filesystem_mount_ordering configure_pools_trimming configure_remaining_settings From aaf07df79c65ff03995bd1acbe86930344ad70d9 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 25 Apr 2021 19:48:14 +0200 Subject: [PATCH 28/90] Cosmetic fixes --- install-zfs.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index ab9ad31..5d7bf49 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -342,7 +342,7 @@ function find_suitable_disks { candidate_disk_ids=$(find /dev/disk/by-id -regextype awk -regex '.+/(ata|nvme|scsi|mmc)-.+' -not -regex '.+-part[0-9]+$' | sort) mounted_devices="$(df | awk 'BEGIN {getline} {print $1}' | xargs -n 1 lsblk -no pkname 2> /dev/null | sort -u || true)" - while read -r disk_id || [[ -n "$disk_id" ]]; do + while read -r disk_id || [[ -n $disk_id ]]; do local device_info local block_device_basename @@ -370,7 +370,6 @@ function find_suitable_disks { $(udevadm info --query=property "$(readlink -f "$disk_id")") LOG - done < <(echo -n "$candidate_disk_ids") if [[ ${#v_suitable_disks[@]} -eq 0 ]]; 
then From cf57572a6bb21b3ed9edf30ba36657a7a6b2f395 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 25 Apr 2021 20:21:09 +0200 Subject: [PATCH 29/90] Sort disks in selection dialog --- install-zfs.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/install-zfs.sh b/install-zfs.sh index 5d7bf49..298fcdd 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -471,11 +471,16 @@ function select_disks { local disk_selection_status=OFF fi + # St00pid simple way of sorting by block device name. Relies on the tokens not including whitespace. + for disk_id in "${v_suitable_disks[@]}"; do block_device_basename="$(basename "$(readlink -f "$disk_id")")" - menu_entries_option+=("$disk_id" "($block_device_basename)" "$disk_selection_status") + menu_entries_option+=("$disk_id ($block_device_basename) $disk_selection_status") done + # shellcheck disable=2207 # cheating here, for simplicity (alternative: add tr and mapfile). + menu_entries_option=($(printf $'%s\n' "${menu_entries_option[@]}" | sort -k 2)) + local dialog_message="Select the ZFS devices. Devices with mounted partitions, cdroms, and removable devices are not displayed! From d3a73479caf2945f448fc4c0c94a8e1c38dbbfc1 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 25 Apr 2021 20:36:59 +0200 Subject: [PATCH 30/90] Update memory warning message and placement For simplicity, always display it (when the memory is not enough). Debian ended up being a real headscratcher, so it's best to be very clear.
--- install-zfs.sh | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 298fcdd..9d284fb 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -46,7 +46,7 @@ c_bpool_name=bpool c_ppa=ppa:jonathonf/zfs c_efi_system_partition_size=512 # megabytes c_default_boot_partition_size=2048 # megabytes -c_memory_warning_limit=2880 # megabytes; not set to 3072 because on some systems, some RAM is occupied/shared +c_memory_warning_limit=$((3584 - 128)) # megabytes; exclude some RAM, which can be occupied/shared c_default_bpool_create_options=( -o ashift=12 -o autotrim=on @@ -322,6 +322,24 @@ In order to stop the procedure, hit Esc twice during dialogs (excluding yes/no o fi } +function check_system_memory { + local system_memory + system_memory=$(free -m | perl -lane 'print @F[1] if $. == 2') + + if [[ $system_memory -lt $c_memory_warning_limit && -z ${ZFS_NO_INFO_MESSAGES:-} ]]; then + # A workaround for these cases is to use the swap generate, but this can potentially cause troubles + # (severe compilation slowdowns) if a user tries to compensate too little memory with a large swapfile. + # + local dialog_message='WARNING! In some cases, the ZFS modules require compilation. + +On systems with relatively little RAM, the procedure may crash during the compilation, for example with 3 GB on Debian 10.9. + +In such cases, the module building may fail abruptly, either without visible errors (leaving "process killed" messages in the syslog), or with package installation errors (leaving odd errors in the module'\''s `make.log`).' + + whiptail --msgbox "$dialog_message" 30 100 + fi +} + function find_suitable_disks { print_step_info_header @@ -411,19 +429,6 @@ function set_zfs_ppa_requirement { # if [[ ${ZFS_USE_PPA:-} == "1" ]] || dpkg --compare-versions "$zfs_package_version" lt 0.8; then v_use_ppa=1 - - local system_memory - system_memory=$(free -m | perl -lane 'print @F[1] if $. 
== 2') - - if [[ $system_memory -lt $c_memory_warning_limit && -z ${ZFS_NO_INFO_MESSAGES:-} ]]; then - local dialog_message='WARNING! The PPA is used, which requires compiling the ZFS module. - -On systems with relatively little RAM (less than around 3 GiB), the procedure may crash during the compilation. - -In case of crash due to low memory, no error message is displayed; the only traces are `Killed process` messages in the syslog.' - - whiptail --msgbox "$dialog_message" 30 100 - fi fi } @@ -1447,6 +1452,7 @@ distro_dependent_invoke "store_os_distro_information" store_running_processes check_prerequisites display_intro_banner +check_system_memory find_suitable_disks distro_dependent_invoke "set_zfs_ppa_requirement" create_passphrase_named_pipe From 9f3f4883fe3a8d640347a88b983a0a6aa29ed37c Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 25 Apr 2021 20:46:10 +0200 Subject: [PATCH 31/90] Clean pools raid type option, using a Bash array --- install-zfs.sh | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 9d284fb..20fd914 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -25,7 +25,7 @@ v_bpool_create_options= # array; see defaults below for format v_root_password= # Debian-only v_rpool_name= v_rpool_create_options= # array; see defaults below for format -v_pools_raid_type= +v_pools_raid_type=() declare -a v_selected_disks # (/dev/by-id/disk_id, ...) v_swap_size= # integer v_free_tail_space= # integer @@ -504,8 +504,10 @@ Devices with mounted partitions, cdroms, and removable devices are not displayed function select_pools_raid_type { print_step_info_header + local raw_pools_raid_type= + if [[ -v ZFS_POOLS_RAID_TYPE ]]; then - v_pools_raid_type=$ZFS_POOLS_RAID_TYPE + raw_pools_raid_type=$ZFS_POOLS_RAID_TYPE elif [[ ${#v_selected_disks[@]} -ge 2 ]]; then # Entries preparation. @@ -536,7 +538,11 @@ function select_pools_raid_type { fi local dialog_message="Select the pools RAID type." 
- v_pools_raid_type=$(whiptail --radiolist "$dialog_message" 30 100 $((${#menu_entries_option[@]} / 3)) "${menu_entries_option[@]}" 3>&1 1>&2 2>&3) + raw_pools_raid_type=$(whiptail --radiolist "$dialog_message" 30 100 $((${#menu_entries_option[@]} / 3)) "${menu_entries_option[@]}" 3>&1 1>&2 2>&3) + fi + + if [[ -n $raw_pools_raid_type ]]; then + v_pools_raid_type=("$raw_pools_raid_type") fi } @@ -1041,20 +1047,18 @@ function create_pools { # # Stdin is ignored if the encryption is not set (and set via prompt). # - # shellcheck disable=SC2086 # TODO: convert v_pools_raid_type to array, and quote zpool create \ "${encryption_options[@]}" \ "${v_rpool_create_options[@]}" \ -O mountpoint=/ -R "$c_zfs_mount_dir" -f \ - "$v_rpool_name" $v_pools_raid_type "${rpool_disks_partitions[@]}" \ + "$v_rpool_name" "${v_pools_raid_type[@]}" "${rpool_disks_partitions[@]}" \ < "$c_passphrase_named_pipe" - # shellcheck disable=SC2086 # TODO: See above zpool create \ -o cachefile=/etc/zfs/zpool.cache \ "${v_bpool_create_options[@]}" \ -O mountpoint=/boot -R "$c_zfs_mount_dir" -f \ - "$c_bpool_name" $v_pools_raid_type "${bpool_disks_partitions[@]}" + "$c_bpool_name" "${v_pools_raid_type[@]}" "${bpool_disks_partitions[@]}" } function create_swap_volume { From 27eeca1fed54097cfcd4d1a1e41c7ffd54b05d3b Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 25 Apr 2021 21:01:35 +0200 Subject: [PATCH 32/90] Cosmetic: Simplify GRUB editing expressions --- install-zfs.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 20fd914..f8feb17 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -1248,9 +1248,9 @@ function configure_and_update_grub { # chroot_execute "perl -i -pe 's/(GRUB_TIMEOUT_STYLE=hidden)/#\$1/' /etc/default/grub" chroot_execute "perl -i -pe 's/^(GRUB_HIDDEN_.*)/#\$1/' /etc/default/grub" - chroot_execute "perl -i -pe 's/(GRUB_TIMEOUT=)0/\${1}5/' /etc/default/grub" - chroot_execute "perl -i -pe 
's/(GRUB_CMDLINE_LINUX_DEFAULT=.*)quiet/\$1/' /etc/default/grub" - chroot_execute "perl -i -pe 's/(GRUB_CMDLINE_LINUX_DEFAULT=.*)splash/\$1/' /etc/default/grub" + chroot_execute "perl -i -pe 's/GRUB_TIMEOUT=\K0/5/' /etc/default/grub" + chroot_execute "perl -i -pe 's/GRUB_CMDLINE_LINUX_DEFAULT=.*\Kquiet//' /etc/default/grub" + chroot_execute "perl -i -pe 's/GRUB_CMDLINE_LINUX_DEFAULT=.*\Ksplash//' /etc/default/grub" chroot_execute "perl -i -pe 's/#(GRUB_TERMINAL=console)/\$1/' /etc/default/grub" chroot_execute 'echo "GRUB_RECORDFAIL_TIMEOUT=5" >> /etc/default/grub' @@ -1266,7 +1266,7 @@ function configure_and_update_grub_Debian { chroot_execute "perl -i -pe 's/GRUB_CMDLINE_LINUX_DEFAULT=\"\K/init_on_alloc=0 /' /etc/default/grub" - chroot_execute "perl -i -pe 's/(GRUB_CMDLINE_LINUX_DEFAULT=.*)quiet/\$1/' /etc/default/grub" + chroot_execute "perl -i -pe 's/GRUB_CMDLINE_LINUX_DEFAULT=.*\Kquiet//' /etc/default/grub" chroot_execute "perl -i -pe 's/#(GRUB_TERMINAL=console)/\$1/' /etc/default/grub" chroot_execute "update-grub" From 68dbdf7e99f358b182d0f3cc1a165feb64ce19e0 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 25 Apr 2021 21:20:08 +0200 Subject: [PATCH 33/90] Print internal variables as ZFS_* exports This makes debugging speedier. --- install-zfs.sh | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/install-zfs.sh b/install-zfs.sh index f8feb17..0144864 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -460,6 +460,31 @@ function create_passphrase_named_pipe { mkfifo "$c_passphrase_named_pipe" "$c_passphrase_named_pipe_2" } +function register_exit_hook { + function _exit_hook { + # Only the meaningful variable(s) are printed. + # In order to print the password, the store strategy should be changed, as the pipes may be empty. 
+ # + echo "\ +# Currently set exports, for performing an unattended (as possible) installation with the same configuration: +# +export ZFS_USE_PPA=$v_use_ppa +export ZFS_SELECTED_DISKS=$(IFS=,; echo -n "${v_selected_disks[*]}") +export ZFS_BOOT_PARTITION_SIZE=$v_boot_partition_size +export ZFS_PASSPHRASE=_currently_not_available_ +export ZFS_DEBIAN_ROOT_PASSWORD=$(printf %q "$v_root_password") +export ZFS_RPOOL_NAME=$v_rpool_name +export ZFS_BPOOL_CREATE_OPTIONS=\"${v_bpool_create_options[*]}\" +export ZFS_RPOOL_CREATE_OPTIONS=\"${v_bpool_create_options[*]}\" +export ZFS_POOLS_RAID_TYPE=${v_pools_raid_type[*]} +export ZFS_NO_INFO_MESSAGES=${ZFS_NO_INFO_MESSAGES:-} +export ZFS_SWAP_SIZE=$v_swap_size +export ZFS_FREE_TAIL_SPACE=$v_free_tail_space +" + } + trap _exit_hook EXIT +} + function select_disks { print_step_info_header @@ -1460,6 +1485,7 @@ check_system_memory find_suitable_disks distro_dependent_invoke "set_zfs_ppa_requirement" create_passphrase_named_pipe +register_exit_hook select_disks select_pools_raid_type From 39c78504bdc04207f310dfc9e31f871e931db036 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 25 Apr 2021 21:24:27 +0200 Subject: [PATCH 34/90] Remove named pipe files on exit Now that there's an exit hook, we can cleanup after ourselves :). --- install-zfs.sh | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 0144864..de3abc8 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -452,16 +452,14 @@ function set_zfs_ppa_requirement_Linuxmint { # By using a FIFO, we avoid having to hide statements like `echo $v_passphrase | zpoool create ...` # from the logs. # -# The FIFO file is left in the filesystem after the script exits. It's not worth taking care of -# removing it, since the environment is entirely ephemeral. 
-# function create_passphrase_named_pipe { - rm -f "$c_passphrase_named_pipe" "$c_passphrase_named_pipe_2" mkfifo "$c_passphrase_named_pipe" "$c_passphrase_named_pipe_2" } function register_exit_hook { function _exit_hook { + rm -f "$c_passphrase_named_pipe" "$c_passphrase_named_pipe_2" + # Only the meaningful variable(s) are printed. # In order to print the password, the store strategy should be changed, as the pipes may be empty. # @@ -1484,8 +1482,8 @@ display_intro_banner check_system_memory find_suitable_disks distro_dependent_invoke "set_zfs_ppa_requirement" -create_passphrase_named_pipe register_exit_hook +create_passphrase_named_pipe select_disks select_pools_raid_type From 7b17508e4697723c4f7c575643421e1acb183320 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 25 Apr 2021 21:33:46 +0200 Subject: [PATCH 35/90] Simplify passphrase internal handling --- install-zfs.sh | 40 +++++++++++++++++----------------------- 1 file changed, 17 insertions(+), 23 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index de3abc8..09699b7 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -22,6 +22,7 @@ set -o nounset v_boot_partition_size= # Integer number with `M` or `G` suffix v_bpool_create_options= # array; see defaults below for format +v_passphrase= v_root_password= # Debian-only v_rpool_name= v_rpool_create_options= # array; see defaults below for format @@ -85,7 +86,6 @@ c_installed_os_data_mount_dir=/target declare -A c_supported_linux_distributions=([Debian]=10 [Ubuntu]="18.04 20.04" [UbuntuServer]="18.04 20.04" [LinuxMint]="19.1 19.2 19.3" [Linuxmint]="20 20.1" [elementary]=5.1) c_temporary_volume_size=12 # gigabytes; large enough - Debian, for example, takes ~8 GiB. 
c_passphrase_named_pipe=$(dirname "$(mktemp)")/zfs-installer.pp.fifo -c_passphrase_named_pipe_2=$(dirname "$(mktemp)")/zfs-installer.pp.2.fifo c_log_dir=$(dirname "$(mktemp)")/zfs-installer c_install_log=$c_log_dir/install.log @@ -453,12 +453,12 @@ function set_zfs_ppa_requirement_Linuxmint { # from the logs. # function create_passphrase_named_pipe { - mkfifo "$c_passphrase_named_pipe" "$c_passphrase_named_pipe_2" + mkfifo "$c_passphrase_named_pipe" } function register_exit_hook { function _exit_hook { - rm -f "$c_passphrase_named_pipe" "$c_passphrase_named_pipe_2" + rm -f "$c_passphrase_named_pipe" # Only the meaningful variable(s) are printed. # In order to print the password, the store strategy should be changed, as the pipes may be empty. @@ -592,25 +592,23 @@ function ask_root_password_Debian { function ask_encryption { print_step_info_header - local passphrase= - set +x if [[ -v ZFS_PASSPHRASE ]]; then - passphrase=$ZFS_PASSPHRASE + v_passphrase=$ZFS_PASSPHRASE else local passphrase_repeat=_ local passphrase_invalid_message= - while [[ $passphrase != "$passphrase_repeat" || ${#passphrase} -lt 8 ]]; do + while [[ $v_passphrase != "$passphrase_repeat" || ${#v_passphrase} -lt 8 ]]; do local dialog_message="${passphrase_invalid_message}Please enter the passphrase (8 chars min.): Leave blank to keep encryption disabled. " - passphrase=$(whiptail --passwordbox "$dialog_message" 30 100 3>&1 1>&2 2>&3) + v_passphrase=$(whiptail --passwordbox "$dialog_message" 30 100 3>&1 1>&2 2>&3) - if [[ -z $passphrase ]]; then + if [[ -z $v_passphrase ]]; then break fi @@ -620,9 +618,6 @@ Leave blank to keep encryption disabled. 
done fi - echo -n "$passphrase" > "$c_passphrase_named_pipe" & - echo -n "$passphrase" > "$c_passphrase_named_pipe_2" & - set -x } @@ -1036,23 +1031,14 @@ function custom_install_operating_system { function create_pools { # POOL OPTIONS ####################### - local passphrase local encryption_options=() local rpool_disks_partitions=() local bpool_disks_partitions=() set +x - - passphrase=$(cat "$c_passphrase_named_pipe") - - if [[ -n $passphrase ]]; then + if [[ -n $v_passphrase ]]; then encryption_options=(-O "encryption=on" -O "keylocation=prompt" -O "keyformat=passphrase") fi - - # Push back for unlogged reuse. Minor inconvenience, but worth :-) - # - echo -n "$passphrase" > "$c_passphrase_named_pipe" & - set -x for selected_disk in "${v_selected_disks[@]}"; do @@ -1064,6 +1050,10 @@ function create_pools { # The root pool must be created first, since the boot pool mountpoint is inside it. + set +x + echo -n "$v_passphrase" > "$c_passphrase_named_pipe" & + set -x + # `-R` creates an "Alternate Root Point", which is lost on unmount; it's just a convenience for a temporary mountpoint; # `-f` force overwrite partitions is existing - in some cases, even after wipefs, a filesystem is mistakenly recognized # `-O` set filesystem properties on a pool (pools and filesystems are distincted entities, however, a pool includes an FS by default). @@ -1151,9 +1141,13 @@ function remove_temp_partition_and_expand_rpool { parted -s "$selected_disk" unit s resizepart 3 -- "$resize_reference" done + set +x + echo -n "$v_passphrase" > "$c_passphrase_named_pipe" & + set -x + # For unencrypted pools, `-l` doesn't interfere. 
# - zpool import -l -R "$c_zfs_mount_dir" "$v_rpool_name" < "$c_passphrase_named_pipe_2" + zpool import -l -R "$c_zfs_mount_dir" "$v_rpool_name" < "$c_passphrase_named_pipe" zpool import -l -R "$c_zfs_mount_dir" "$c_bpool_name" for selected_disk in "${v_selected_disks[@]}"; do From edb709e702616778d08d6d9348da29e4c3b16a99 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 25 Apr 2021 21:34:29 +0200 Subject: [PATCH 36/90] Improvements and fix to exports printing --- install-zfs.sh | 36 ++++++++++++++++++++++++++++-------- 1 file changed, 28 insertions(+), 8 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 09699b7..dbf736a 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -460,25 +460,45 @@ function register_exit_hook { function _exit_hook { rm -f "$c_passphrase_named_pipe" + set +x + # Only the meaningful variable(s) are printed. # In order to print the password, the store strategy should be changed, as the pipes may be empty. # - echo "\ -# Currently set exports, for performing an unattended (as possible) installation with the same configuration: -# + echo " +Currently set exports, for performing an unattended (as possible) installation with the same configuration: + export ZFS_USE_PPA=$v_use_ppa export ZFS_SELECTED_DISKS=$(IFS=,; echo -n "${v_selected_disks[*]}") export ZFS_BOOT_PARTITION_SIZE=$v_boot_partition_size -export ZFS_PASSPHRASE=_currently_not_available_ +export ZFS_PASSPHRASE=$(printf %q "$v_passphrase") export ZFS_DEBIAN_ROOT_PASSWORD=$(printf %q "$v_root_password") export ZFS_RPOOL_NAME=$v_rpool_name export ZFS_BPOOL_CREATE_OPTIONS=\"${v_bpool_create_options[*]}\" -export ZFS_RPOOL_CREATE_OPTIONS=\"${v_bpool_create_options[*]}\" +export ZFS_RPOOL_CREATE_OPTIONS=\"${v_rpool_create_options[*]}\" export ZFS_POOLS_RAID_TYPE=${v_pools_raid_type[*]} -export ZFS_NO_INFO_MESSAGES=${ZFS_NO_INFO_MESSAGES:-} +export ZFS_NO_INFO_MESSAGES=1 export ZFS_SWAP_SIZE=$v_swap_size -export ZFS_FREE_TAIL_SPACE=$v_free_tail_space -" +export 
ZFS_FREE_TAIL_SPACE=$v_free_tail_space" + + # Convenient ready exports (selecting the first two disks): + # + local ready=" +export ZFS_USE_PPA= +export ZFS_SELECTED_DISKS=$(ls -l /dev/disk/by-id/ | perl -ane 'print "/dev/disk/by-id/@F[8]," if ! /\d$/ && ($c += 1) <= 2' | head -c -1) +export ZFS_BOOT_PARTITION_SIZE=2048M +export ZFS_PASSPHRASE=aaaaaaaa +export ZFS_DEBIAN_ROOT_PASSWORD=a +export ZFS_RPOOL_NAME=rpool +export ZFS_BPOOL_CREATE_OPTIONS='-o ashift=12 -o autotrim=on -d -o feature@async_destroy=enabled -o feature@bookmarks=enabled -o feature@embedded_data=enabled -o feature@empty_bpobj=enabled -o feature@enabled_txg=enabled -o feature@extensible_dataset=enabled -o feature@filesystem_limits=enabled -o feature@hole_birth=enabled -o feature@large_blocks=enabled -o feature@lz4_compress=enabled -o feature@spacemap_histogram=enabled -O acltype=posixacl -O compression=lz4 -O devices=off -O normalization=formD -O relatime=on -O xattr=sa' +export ZFS_RPOOL_CREATE_OPTIONS='-o ashift=12 -o autotrim=on -O acltype=posixacl -O compression=lz4 -O dnodesize=auto -O normalization=formD -O relatime=on -O xattr=sa -O devices=off' +export ZFS_POOLS_RAID_TYPE= +export ZFS_NO_INFO_MESSAGES=1 +export ZFS_SWAP_SIZE=2 +export ZFS_FREE_TAIL_SPACE=12 + " + + set -x } trap _exit_hook EXIT } From 69b50e739ba8efa8cd72a9ca93902cdd596e9c79 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 25 Apr 2021 21:36:46 +0200 Subject: [PATCH 37/90] Add a few missing print_step_info_header() invocations --- install-zfs.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/install-zfs.sh b/install-zfs.sh index dbf736a..e4b8c72 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -453,10 +453,14 @@ function set_zfs_ppa_requirement_Linuxmint { # from the logs. 
# function create_passphrase_named_pipe { + print_step_info_header + mkfifo "$c_passphrase_named_pipe" } function register_exit_hook { + print_step_info_header + function _exit_hook { rm -f "$c_passphrase_named_pipe" @@ -1095,6 +1099,8 @@ function create_pools { } function create_swap_volume { + print_step_info_header + if [[ $v_swap_size -gt 0 ]]; then zfs create \ -V "${v_swap_size}G" -b "$(getconf PAGESIZE)" \ @@ -1106,6 +1112,8 @@ function create_swap_volume { } function copy_zpool_cache { + print_step_info_header + mkdir -p "$c_zfs_mount_dir/etc/zfs" cp /etc/zfs/zpool.cache "$c_zfs_mount_dir/etc/zfs/" } From 76eff7e703a5449fa6bd9cda7fb1bf13b18513b8 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 25 Apr 2021 22:21:09 +0200 Subject: [PATCH 38/90] Debian: Handle preexisting zed symlink In the latest test (on 10.9 w/ ZFS 2.0.3), the file already existed. --- install-zfs.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/install-zfs.sh b/install-zfs.sh index e4b8c72..4068124 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -1352,7 +1352,10 @@ function fix_filesystem_mount_ordering { chroot_execute "mkdir /etc/zfs/zfs-list.cache" chroot_execute "touch /etc/zfs/zfs-list.cache/$c_bpool_name /etc/zfs/zfs-list.cache/$v_rpool_name" - chroot_execute "ln -s /usr/lib/zfs-linux/zed.d/history_event-zfs-list-cacher.sh /etc/zfs/zed.d/" + + # On Debian, this file may exist already. + # + chroot_execute "[[ ! -f /etc/zfs/zed.d/history_event-zfs-list-cacher.sh ]] && ln -s /usr/lib/zfs-linux/zed.d/history_event-zfs-list-cacher.sh /etc/zfs/zed.d/" # Assumed to be present by the zedlet above on Debian, but missing. # Filed issue: https://github.com/zfsonlinux/zfs/issues/9945. 
From f2b5d4eadf3c59dea34811f523232ed9c6858e82 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 25 Apr 2021 22:31:25 +0200 Subject: [PATCH 39/90] Shellcheck cosmetix fixes --- install-zfs.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/install-zfs.sh b/install-zfs.sh index 4068124..dc67dec 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -487,7 +487,8 @@ export ZFS_FREE_TAIL_SPACE=$v_free_tail_space" # Convenient ready exports (selecting the first two disks): # - local ready=" + # shellcheck disable=SC2155,SC2012 + local _=" export ZFS_USE_PPA= export ZFS_SELECTED_DISKS=$(ls -l /dev/disk/by-id/ | perl -ane 'print "/dev/disk/by-id/@F[8]," if ! /\d$/ && ($c += 1) <= 2' | head -c -1) export ZFS_BOOT_PARTITION_SIZE=2048M From aba16fff81c03207dcd44ae0223d3f0f15a03799 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Mon, 26 Apr 2021 22:20:55 +0200 Subject: [PATCH 40/90] Merge Debian-specific GRUB configuration with regular one There isn't any significant difference. NOTE: The Debian installation may be currently broken. 
--- install-zfs.sh | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index dc67dec..84c09e8 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -1307,17 +1307,6 @@ function configure_and_update_grub { chroot_execute "update-grub" } -function configure_and_update_grub_Debian { - print_step_info_header - - chroot_execute "perl -i -pe 's/GRUB_CMDLINE_LINUX_DEFAULT=\"\K/init_on_alloc=0 /' /etc/default/grub" - - chroot_execute "perl -i -pe 's/GRUB_CMDLINE_LINUX_DEFAULT=.*\Kquiet//' /etc/default/grub" - chroot_execute "perl -i -pe 's/#(GRUB_TERMINAL=console)/\$1/' /etc/default/grub" - - chroot_execute "update-grub" -} - function sync_efi_partitions { print_step_info_header @@ -1545,7 +1534,7 @@ fi prepare_jail distro_dependent_invoke "install_jail_zfs_packages" prepare_efi_partition -distro_dependent_invoke "configure_and_update_grub" +configure_and_update_grub sync_efi_partitions update_initramfs fix_filesystem_mount_ordering From a3e90bcc03b731e2394fc32f65a0701217f4a784 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Mon, 26 Apr 2021 22:48:35 +0200 Subject: [PATCH 41/90] Update encryption to GCM (as per updated guide) --- install-zfs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install-zfs.sh b/install-zfs.sh index 84c09e8..7a19c6b 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -1062,7 +1062,7 @@ function create_pools { set +x if [[ -n $v_passphrase ]]; then - encryption_options=(-O "encryption=on" -O "keylocation=prompt" -O "keyformat=passphrase") + encryption_options=(-O "encryption=aes-256-gcm" -O "keylocation=prompt" -O "keyformat=passphrase") fi set -x From 61765360ae8037ea484afcba49c2d6f15da9c0cb Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Mon, 26 Apr 2021 22:57:43 +0200 Subject: [PATCH 42/90] Remove official support for Debian --- README.md | 5 ++--- install-zfs.sh | 4 ++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md 
index 5a3a40a..8f2a3c4 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ # zfs-installer -ZFS installer is a shell script program that fully prepares ZFS on a system, and allows an effortless installation of several Debian-based operating systems using their standard installer (or debootstrap, or any custom script). +ZFS installer is a shell script program that fully prepares ZFS on a system, and allows an effortless installation of several Ubuntu-based operating systems using their standard installer (or debootstrap, or any custom script). - [Requirements and functionality](#requirements-and-functionality) - [Comparison with Ubuntu built-in installer](#comparison-with-ubuntu-built-in-installer) @@ -22,7 +22,6 @@ The program currently supports: - Ubuntu Desktop 18.04.x/20.04 Live - Ubuntu Server 18.04.x/20.04 Live - Linux Mint 19.x, 20 -- Debian 10.x Live (desktop environment required) - ElementaryOS 5.1 The ZFS version installed is 0.8, which supports native encryption and trimming (among the other improvements over 0.7). The required repositories are automatically added to the destination system. @@ -31,7 +30,7 @@ EFI boot is required (any modern (2011+) system will do); legacy boot is current All the ZFS RAID types are supported, with any arbitrary number of disks. An EFI partition is created on each disk, for redundancy purposes. -It's fairly easy to extend the program to support other Debian-based operating systems (e.g. older/newer Ubuntu's, etc.) - the project is (very) open to feature requests. +It's fairly easy to extend the program to support other Ubuntu-based operating systems - the project is open to feature requests. ## Comparison with Ubuntu built-in installer diff --git a/install-zfs.sh b/install-zfs.sh index 7a19c6b..92e88b9 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -33,7 +33,7 @@ v_free_tail_space= # integer # Variables set during execution -v_linux_distribution= # Debian, Ubuntu, ... 
WATCH OUT: not necessarily from `lsb_release` (ie. UbuntuServer) +v_linux_distribution= # Ubuntu, LinuxMint, ... WATCH OUT: not necessarily from `lsb_release` (ie. UbuntuServer) v_use_ppa= # 1=true, false otherwise (applies only to Ubuntu-based). v_temp_volume_device= # /dev/zdN; scope: setup_partitions -> sync_os_temp_installation_dir_to_rpool v_suitable_disks=() # (/dev/by-id/disk_id, ...); scope: find_suitable_disks -> select_disk @@ -83,7 +83,7 @@ c_default_rpool_create_options=( ) c_zfs_mount_dir=/mnt c_installed_os_data_mount_dir=/target -declare -A c_supported_linux_distributions=([Debian]=10 [Ubuntu]="18.04 20.04" [UbuntuServer]="18.04 20.04" [LinuxMint]="19.1 19.2 19.3" [Linuxmint]="20 20.1" [elementary]=5.1) +declare -A c_supported_linux_distributions=([Ubuntu]="18.04 20.04" [UbuntuServer]="18.04 20.04" [LinuxMint]="19.1 19.2 19.3" [Linuxmint]="20 20.1" [elementary]=5.1) c_temporary_volume_size=12 # gigabytes; large enough - Debian, for example, takes ~8 GiB. c_passphrase_named_pipe=$(dirname "$(mktemp)")/zfs-installer.pp.fifo From 26f0057c4966880abac872d280943d03961bb0b8 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Mon, 26 Apr 2021 23:23:46 +0200 Subject: [PATCH 43/90] Include all disks in the disk log --- install-zfs.sh | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 92e88b9..b14248f 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -340,6 +340,25 @@ In such cases, the module building may fail abruptly, either without visible err fi } +function save_disks_log { + print_step_info_header + + # shellcheck disable=SC2012 # `ls` may clean the output, but in this case, it doesn't matter + ls -l /dev/disk/by-id | tail -n +2 | perl -lane 'print "@F[8..10]"' > "$c_disks_log" + + all_disk_ids=$(find /dev/disk/by-id -mindepth 1 -regextype awk -not -regex '.+-part[0-9]+$' | sort) + + while read -r disk_id || [[ -n $disk_id ]]; do + cat >> "$c_disks_log" << LOG + +## 
DEVICE: $disk_id ################################ + +$(udevadm info --query=property "$(readlink -f "$disk_id")") + +LOG + done < <(echo -n "$all_disk_ids") +} + function find_suitable_disks { print_step_info_header @@ -348,9 +367,6 @@ function find_suitable_disks { # udevadm trigger - # shellcheck disable=SC2012 # `ls` may clean the output, but in this case, it doesn't matter - ls -l /dev/disk/by-id | tail -n +2 | perl -lane 'print "@F[8..10]"' > "$c_disks_log" - local candidate_disk_ids local mounted_devices @@ -380,14 +396,6 @@ function find_suitable_disks { v_suitable_disks+=("$disk_id") fi fi - - cat >> "$c_disks_log" << LOG - -## DEVICE: $disk_id ################################ - -$(udevadm info --query=property "$(readlink -f "$disk_id")") - -LOG done < <(echo -n "$candidate_disk_ids") if [[ ${#v_suitable_disks[@]} -eq 0 ]]; then @@ -1495,6 +1503,7 @@ store_running_processes check_prerequisites display_intro_banner check_system_memory +save_disks_log find_suitable_disks distro_dependent_invoke "set_zfs_ppa_requirement" register_exit_hook From 22cc994d11247b04c74afc97bc37b09db5256621 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Tue, 27 Apr 2021 12:40:57 +0200 Subject: [PATCH 44/90] Cosmetic improvement to distro_dependent_invoke() --- install-zfs.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/install-zfs.sh b/install-zfs.sh index b14248f..c081407 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -129,11 +129,12 @@ c_udevadm_settle_timeout=10 # seconds # function distro_dependent_invoke { local distro_specific_fx_name="$1_$v_linux_distribution" + local invoke_option=${2:-} if declare -f "$distro_specific_fx_name" > /dev/null; then "$distro_specific_fx_name" else - if ! declare -f "$1" > /dev/null && [[ "${2:-}" == "--noforce" ]]; then + if ! 
declare -f "$1" > /dev/null && [[ $invoke_option == "--noforce" ]]; then : # do nothing else "$1" From c7c0f530f8f137797602578f7e14ddd991ac12b5 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Tue, 27 Apr 2021 12:48:05 +0200 Subject: [PATCH 45/90] distro_dependent_invoke(): Add option checking --- install-zfs.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/install-zfs.sh b/install-zfs.sh index c081407..a710bf5 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -131,6 +131,11 @@ function distro_dependent_invoke { local distro_specific_fx_name="$1_$v_linux_distribution" local invoke_option=${2:-} + if [[ ! $invoke_option =~ ^(|--noforce)$ ]]; then + >&2 echo "Invalid distro_dependent_invoke() option: $invoke_option" + exit 1 + fi + if declare -f "$distro_specific_fx_name" > /dev/null; then "$distro_specific_fx_name" else From ad7f09fbb4d0c6f648323eb69a1a937b9be3cc01 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Tue, 27 Apr 2021 13:10:03 +0200 Subject: [PATCH 46/90] New function invocation mechanism (README!) The new mechanism is clearly explained in the comment. The rationale is the next implementation of hot swapping. WATCH OUT! The semantics have changed. Previously, it was evident by looking at the invocation when a function had distro-based overrides; now, that needs to be looked into. Since this behavior is not very impactful, it's not been ported, for simplicity purposes. --- install-zfs.sh | 134 +++++++++++++++++++++++++------------------------ 1 file changed, 69 insertions(+), 65 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index a710bf5..f63f11c 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -107,43 +107,47 @@ c_udevadm_settle_timeout=10 # seconds # HELPER FUNCTIONS ############################################################# -# Chooses a function and invokes it depending on the O/S distribution. +# Invoke a function, with a primitive dynamic dispatch based on the distribution. 
# -# Example: +# Format: `invoke "function" [--optional]`. +# +# A target function must exist, otherwise a error is raised, unless `--optional` is specified. +# `--optional` is useful when a step is specific to a single distribution, e.g. Debian's root password. +# +# Examples: # # $ function install_jail_zfs_packages { :; } # $ function install_jail_zfs_packages_Debian { :; } # $ distro_dependent_invoke "install_jail_zfs_packages" # -# If the distribution is `Debian`, the second will be invoked, otherwise, the -# first. -# -# If the function is invoked with `--noforce` as second parameter, and there is -# no matching function: +# If the distribution is `Debian`, the second will be invoked, otherwise, the first. # # $ function update_zed_cache_Ubuntu { :; } -# $ distro_dependent_invoke "install_jail_zfs_packages" --noforce +# $ distro_dependent_invoke "update_zed_cache" --optional # -# then nothing happens. Without `--noforce`, this invocation will cause an -# error. +# If the distribution is `Debian`, nothing will happen. # -function distro_dependent_invoke { - local distro_specific_fx_name="$1_$v_linux_distribution" +# $ function update_zed_cache_Ubuntu { :; } +# $ distro_dependent_invoke "update_zed_cache" +# +# If the distribution is `Debian`, an error will be raised. +# +function invoke { + local base_fx_name=$1 + local distro_specific_fx_name=$1_$v_linux_distribution local invoke_option=${2:-} - if [[ ! $invoke_option =~ ^(|--noforce)$ ]]; then - >&2 echo "Invalid distro_dependent_invoke() option: $invoke_option" + if [[ ! $invoke_option =~ ^(|--optional)$ ]]; then + >&2 echo "Invalid invoke() option: $invoke_option" exit 1 fi + # Invoke it regardless when it's not optional. + if declare -f "$distro_specific_fx_name" > /dev/null; then "$distro_specific_fx_name" - else - if ! declare -f "$1" > /dev/null && [[ $invoke_option == "--noforce" ]]; then - : # do nothing - else - "$1" - fi + elif declare -f "$base_fx_name" > /dev/null || [[ ! 
$invoke_option == "--optional" ]]; then + "$base_fx_name" fi } @@ -1502,59 +1506,59 @@ if [[ $# -ne 0 ]]; then display_help_and_exit fi -activate_debug -set_distribution_data -distro_dependent_invoke "store_os_distro_information" -store_running_processes -check_prerequisites -display_intro_banner -check_system_memory -save_disks_log -find_suitable_disks -distro_dependent_invoke "set_zfs_ppa_requirement" -register_exit_hook -create_passphrase_named_pipe +invoke "activate_debug" +invoke "set_distribution_data" +invoke "store_os_distro_information" +invoke "store_running_processes" +invoke "check_prerequisites" +invoke "display_intro_banner" +invoke "check_system_memory" +invoke "save_disks_log" +invoke "find_suitable_disks" +invoke "set_zfs_ppa_requirement" +invoke "register_exit_hook" +invoke "create_passphrase_named_pipe" -select_disks -select_pools_raid_type -distro_dependent_invoke "ask_root_password" --noforce -ask_encryption -ask_boot_partition_size -ask_swap_size -ask_free_tail_space -ask_rpool_name -ask_pool_create_options +invoke "select_disks" +invoke "select_pools_raid_type" +invoke "ask_root_password" --optional +invoke "ask_encryption" +invoke "ask_boot_partition_size" +invoke "ask_swap_size" +invoke "ask_free_tail_space" +invoke "ask_rpool_name" +invoke "ask_pool_create_options" -distro_dependent_invoke "install_host_packages" -setup_partitions +invoke "install_host_packages" +invoke "setup_partitions" if [[ "${ZFS_OS_INSTALLATION_SCRIPT:-}" == "" ]]; then # Includes the O/S extra configuration, if necessary (network, root pwd, etc.) 
- distro_dependent_invoke "install_operating_system" + invoke "install_operating_system" - create_pools - create_swap_volume - copy_zpool_cache - sync_os_temp_installation_dir_to_rpool - remove_temp_partition_and_expand_rpool + invoke "create_pools" + invoke "create_swap_volume" + invoke "copy_zpool_cache" + invoke "sync_os_temp_installation_dir_to_rpool" + invoke "remove_temp_partition_and_expand_rpool" else - create_pools - create_swap_volume - copy_zpool_cache - remove_temp_partition_and_expand_rpool + invoke "create_pools" + invoke "create_swap_volume" + invoke "copy_zpool_cache" + invoke "remove_temp_partition_and_expand_rpool" - custom_install_operating_system + invoke "custom_install_operating_system" fi -prepare_jail -distro_dependent_invoke "install_jail_zfs_packages" -prepare_efi_partition -configure_and_update_grub -sync_efi_partitions -update_initramfs -fix_filesystem_mount_ordering -configure_pools_trimming -configure_remaining_settings +invoke "prepare_jail" +invoke "install_jail_zfs_packages" +invoke "prepare_efi_partition" +invoke "configure_and_update_grub" +invoke "sync_efi_partitions" +invoke "update_initramfs" +invoke "fix_filesystem_mount_ordering" +invoke "configure_pools_trimming" +invoke "configure_remaining_settings" -prepare_for_system_exit -display_exit_banner +invoke "prepare_for_system_exit" +invoke "display_exit_banner" From 3cbc9ff207d40b0d65dcf2a200b1ceb9e0f4255a Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Tue, 27 Apr 2021 13:17:18 +0200 Subject: [PATCH 47/90] Refactoring: The info header function is now invoked by invoke() --- install-zfs.sh | 107 ++++--------------------------------------------- 1 file changed, 7 insertions(+), 100 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index f63f11c..ace38fd 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -145,21 +145,22 @@ function invoke { # Invoke it regardless when it's not optional. 
if declare -f "$distro_specific_fx_name" > /dev/null; then + print_step_info_header "$distro_specific_fx_name" + "$distro_specific_fx_name" elif declare -f "$base_fx_name" > /dev/null || [[ ! $invoke_option == "--optional" ]]; then + print_step_info_header "$base_fx_name" + "$base_fx_name" fi } -# shellcheck disable=SC2120 # allow parameters passing even if no calls pass any function print_step_info_header { + local function_name=$1 + echo -n " ############################################################################### -# ${FUNCNAME[1]}" - - [[ "${1:-}" != "" ]] && echo -n " $1" || true - - echo " +# $function_name ############################################################################### " } @@ -241,8 +242,6 @@ Root pool default create options: '"${c_default_rpool_create_options[*]/#-/$'\n' } function activate_debug { - print_step_info_header - mkdir -p "$c_log_dir" exec 5> "$c_install_log" @@ -261,8 +260,6 @@ function set_distribution_data { } function store_os_distro_information { - print_step_info_header - lsb_release --all > "$c_os_information_log" # Madness, in order not to force the user to invoke "sudo -E". @@ -286,8 +283,6 @@ function store_running_processes { } function check_prerequisites { - print_step_info_header - local distro_version_regex=\\b${v_linux_version//./\\.}\\b if [[ ! -d /sys/firmware/efi ]]; then @@ -318,8 +313,6 @@ function check_prerequisites { } function display_intro_banner { - print_step_info_header - local dialog_message='Hello! This script will prepare the ZFS pools on the system, install Ubuntu, and configure the boot. 
@@ -351,8 +344,6 @@ In such cases, the module building may fail abruptly, either without visible err } function save_disks_log { - print_step_info_header - # shellcheck disable=SC2012 # `ls` may clean the output, but in this case, it doesn't matter ls -l /dev/disk/by-id | tail -n +2 | perl -lane 'print "@F[8..10]"' > "$c_disks_log" @@ -370,8 +361,6 @@ LOG } function find_suitable_disks { - print_step_info_header - # In some freaky cases, `/dev/disk/by-id` is not up to date, so we refresh. One case is after # starting a VirtualBox VM that is a full clone of a suspended VM with snapshots. # @@ -436,8 +425,6 @@ If you think this is a bug, please open an issue on https://github.com/saveriomi # install_host_packages() and install_host_packages_UbuntuServer(). # function set_zfs_ppa_requirement { - print_step_info_header - apt update local zfs_package_version @@ -460,8 +447,6 @@ function set_zfs_ppa_requirement_Debian { # to it being incorrectly setup). # function set_zfs_ppa_requirement_Linuxmint { - print_step_info_header - perl -i -pe 's/^(deb cdrom)/# $1/' /etc/apt/sources.list set_zfs_ppa_requirement @@ -471,14 +456,10 @@ function set_zfs_ppa_requirement_Linuxmint { # from the logs. 
# function create_passphrase_named_pipe { - print_step_info_header - mkfifo "$c_passphrase_named_pipe" } function register_exit_hook { - print_step_info_header - function _exit_hook { rm -f "$c_passphrase_named_pipe" @@ -527,8 +508,6 @@ export ZFS_FREE_TAIL_SPACE=12 } function select_disks { - print_step_info_header - if [[ "${ZFS_SELECTED_DISKS:-}" != "" ]]; then mapfile -d, -t v_selected_disks < <(echo -n "$ZFS_SELECTED_DISKS") else @@ -568,8 +547,6 @@ Devices with mounted partitions, cdroms, and removable devices are not displayed } function select_pools_raid_type { - print_step_info_header - local raw_pools_raid_type= if [[ -v ZFS_POOLS_RAID_TYPE ]]; then @@ -613,8 +590,6 @@ function select_pools_raid_type { } function ask_root_password_Debian { - print_step_info_header - set +x if [[ ${ZFS_DEBIAN_ROOT_PASSWORD:-} != "" ]]; then v_root_password="$ZFS_DEBIAN_ROOT_PASSWORD" @@ -633,8 +608,6 @@ function ask_root_password_Debian { } function ask_encryption { - print_step_info_header - set +x if [[ -v ZFS_PASSPHRASE ]]; then @@ -665,8 +638,6 @@ Leave blank to keep encryption disabled. 
} function ask_boot_partition_size { - print_step_info_header - if [[ ${ZFS_BOOT_PARTITION_SIZE:-} != "" ]]; then v_boot_partition_size=$ZFS_BOOT_PARTITION_SIZE else @@ -685,8 +656,6 @@ Supported formats: '512M', '3G'" 30 100 ${c_default_boot_partition_size}M 3>&1 1 } function ask_swap_size { - print_step_info_header - if [[ ${ZFS_SWAP_SIZE:-} != "" ]]; then v_swap_size=$ZFS_SWAP_SIZE else @@ -703,8 +672,6 @@ function ask_swap_size { } function ask_free_tail_space { - print_step_info_header - if [[ ${ZFS_FREE_TAIL_SPACE:-} != "" ]]; then v_free_tail_space=$ZFS_FREE_TAIL_SPACE else @@ -729,8 +696,6 @@ For detailed informations, see the wiki page: https://github.com/saveriomiroddi/ } function ask_rpool_name { - print_step_info_header - if [[ ${ZFS_RPOOL_NAME:-} != "" ]]; then v_rpool_name=$ZFS_RPOOL_NAME else @@ -747,8 +712,6 @@ function ask_rpool_name { } function ask_pool_create_options { - print_step_info_header - local bpool_create_options_message='Insert the create options for the boot pool The mount-related options are automatically added, and must not be specified.' @@ -769,8 +732,6 @@ The encryption/mount-related options are automatically added, and must not be sp } function install_host_packages { - print_step_info_header - if [[ $v_use_ppa == "1" ]]; then if [[ ${ZFS_SKIP_LIVE_ZFS_MODULE_INSTALL:-} != "1" ]]; then add-apt-repository --yes "$c_ppa" @@ -795,8 +756,6 @@ function install_host_packages { } function install_host_packages_Debian { - print_step_info_header - if [[ ${ZFS_SKIP_LIVE_ZFS_MODULE_INSTALL:-} != "1" ]]; then echo "zfs-dkms zfs-dkms/note-incompatible-licenses note true" | debconf-set-selections @@ -817,16 +776,12 @@ function install_host_packages_Debian { # Differently from Ubuntu, Mint doesn't have the package installed in the live version. 
# function install_host_packages_Linuxmint { - print_step_info_header - apt install --yes zfsutils-linux install_host_packages } function install_host_packages_elementary { - print_step_info_header - if [[ ${ZFS_SKIP_LIVE_ZFS_MODULE_INSTALL:-} != "1" ]]; then apt update apt install --yes software-properties-common @@ -836,8 +791,6 @@ function install_host_packages_elementary { } function install_host_packages_UbuntuServer { - print_step_info_header - if [[ $v_use_ppa != "1" ]]; then apt install --yes zfsutils-linux efibootmgr @@ -869,8 +822,6 @@ function install_host_packages_UbuntuServer { } function setup_partitions { - print_step_info_header - local required_tail_space=$((v_free_tail_space > c_temporary_volume_size ? v_free_tail_space : c_temporary_volume_size)) for selected_disk in "${v_selected_disks[@]}"; do @@ -926,8 +877,6 @@ function setup_partitions { } function install_operating_system { - print_step_info_header - local dialog_message='The Ubuntu GUI installer will now be launched. Proceed with the configuration as usual, then, at the partitioning stage: @@ -969,8 +918,6 @@ Proceed with the configuration as usual, then, at the partitioning stage: } function install_operating_system_Debian { - print_step_info_header - # The temporary volume size displayed is an approximation of the format used by the installer, # but it's acceptable - the complexity required is not worth (eg. converting hypothetical units, # etc.). 
@@ -1023,8 +970,6 @@ CONF } function install_operating_system_UbuntuServer { - print_step_info_header - # O/S Installation # # Subiquity is designed to prevent the user from opening a terminal, which is (to say the least) @@ -1066,8 +1011,6 @@ You can switch anytime to this terminal, and back, in order to read the instruct } function custom_install_operating_system { - print_step_info_header - sudo "$ZFS_OS_INSTALLATION_SCRIPT" } @@ -1118,8 +1061,6 @@ function create_pools { } function create_swap_volume { - print_step_info_header - if [[ $v_swap_size -gt 0 ]]; then zfs create \ -V "${v_swap_size}G" -b "$(getconf PAGESIZE)" \ @@ -1131,15 +1072,11 @@ function create_swap_volume { } function copy_zpool_cache { - print_step_info_header - mkdir -p "$c_zfs_mount_dir/etc/zfs" cp /etc/zfs/zpool.cache "$c_zfs_mount_dir/etc/zfs/" } function sync_os_temp_installation_dir_to_rpool { - print_step_info_header - # On Ubuntu Server, `/boot/efi` and `/cdrom` (!!!) are mounted, but they're not needed. # local mount_dir_submounts @@ -1172,8 +1109,6 @@ function sync_os_temp_installation_dir_to_rpool { } function remove_temp_partition_and_expand_rpool { - print_step_info_header - if (( v_free_tail_space < c_temporary_volume_size )); then if [[ $v_free_tail_space -eq 0 ]]; then local resize_reference=100% @@ -1208,8 +1143,6 @@ function remove_temp_partition_and_expand_rpool { } function prepare_jail { - print_step_info_header - for virtual_fs_dir in proc sys dev; do mount --rbind "/$virtual_fs_dir" "$c_zfs_mount_dir/$virtual_fs_dir" done @@ -1220,8 +1153,6 @@ function prepare_jail { # See install_host_packages() for some comments. 
# function install_jail_zfs_packages { - print_step_info_header - if [[ $v_use_ppa == "1" ]]; then chroot_execute "add-apt-repository --yes $c_ppa" @@ -1246,8 +1177,6 @@ function install_jail_zfs_packages { } function install_jail_zfs_packages_Debian { - print_step_info_header - chroot_execute 'echo "deb http://deb.debian.org/debian buster main contrib" >> /etc/apt/sources.list' chroot_execute 'echo "deb-src http://deb.debian.org/debian buster main contrib" >> /etc/apt/sources.list' @@ -1267,16 +1196,12 @@ APT' } function install_jail_zfs_packages_elementary { - print_step_info_header - chroot_execute "apt install --yes software-properties-common" install_jail_zfs_packages } function install_jail_zfs_packages_UbuntuServer { - print_step_info_header - if [[ $v_use_ppa != "1" ]]; then chroot_execute "apt install --yes zfsutils-linux zfs-initramfs grub-efi-amd64-signed shim-signed" else @@ -1285,8 +1210,6 @@ function install_jail_zfs_packages_UbuntuServer { } function prepare_efi_partition { - print_step_info_header - # The other mounts are configured/synced in the EFI partitions sync stage. # chroot_execute "echo /dev/disk/by-uuid/$(blkid -s UUID -o value "${v_selected_disks[0]}"-part1) /boot/efi vfat defaults 0 0 > /etc/fstab" @@ -1298,8 +1221,6 @@ function prepare_efi_partition { } function configure_and_update_grub { - print_step_info_header - chroot_execute "perl -i -pe 's/GRUB_CMDLINE_LINUX_DEFAULT=\"\K/init_on_alloc=0 /' /etc/default/grub" # Silence warning during the grub probe (source: https://git.io/JenXF). @@ -1326,8 +1247,6 @@ function configure_and_update_grub { } function sync_efi_partitions { - print_step_info_header - for ((i = 1; i < ${#v_selected_disks[@]}; i++)); do local synced_efi_partition_path="/boot/efi$((i + 1))" @@ -1350,14 +1269,10 @@ function sync_efi_partitions { # See issue https://github.com/saveriomiroddi/zfs-installer/issues/110. 
# function update_initramfs { - print_step_info_header - chroot_execute "update-initramfs -u" } function fix_filesystem_mount_ordering { - print_step_info_header - chroot_execute "mkdir /etc/zfs/zfs-list.cache" chroot_execute "touch /etc/zfs/zfs-list.cache/$c_bpool_name /etc/zfs/zfs-list.cache/$v_rpool_name" @@ -1416,8 +1331,6 @@ function fix_filesystem_mount_ordering { # The code is a straight copy of the `fstrim` service. # function configure_pools_trimming { - print_step_info_header - chroot_execute "cat > /lib/systemd/system/zfs-trim.service << UNIT [Unit] Description=Discard unused ZFS blocks @@ -1448,15 +1361,11 @@ TIMER" } function configure_remaining_settings { - print_step_info_header - [[ $v_swap_size -gt 0 ]] && chroot_execute "echo /dev/zvol/$v_rpool_name/swap none swap discard 0 0 >> /etc/fstab" || true chroot_execute "echo RESUME=none > /etc/initramfs-tools/conf.d/resume" } function prepare_for_system_exit { - print_step_info_header - for virtual_fs_dir in dev sys proc; do umount --recursive --force --lazy "$c_zfs_mount_dir/$virtual_fs_dir" done @@ -1489,8 +1398,6 @@ function prepare_for_system_exit { } function display_exit_banner { - print_step_info_header - local dialog_message="The system has been successfully prepared and installed. You now need to perform a hard reset, then enjoy your ZFS system :-)" From e176be6521eac9d799e597b80a363ac80bfeecce Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Tue, 27 Apr 2021 13:22:54 +0200 Subject: [PATCH 48/90] Add script hot swapping functionality --- install-zfs.sh | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/install-zfs.sh b/install-zfs.sh index ace38fd..3877090 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -43,6 +43,8 @@ v_suitable_disks=() # (/dev/by-id/disk_id, ...); scope: find_suitable_d # Note that Linux Mint is "Linuxmint" from v20 onwards. This actually helps, since some operations are # specific to it. 
+c_hotswap_file=$PWD/install-zfs.hotswap.sh # see hotswap() for an explanation. + c_bpool_name=bpool c_ppa=ppa:jonathonf/zfs c_efi_system_partition_size=512 # megabytes @@ -142,6 +144,8 @@ function invoke { exit 1 fi + hot_swap_script + # Invoke it regardless when it's not optional. if declare -f "$distro_specific_fx_name" > /dev/null; then @@ -155,6 +159,18 @@ function invoke { fi } +# Tee-hee-hee!! +# +# This is extremely useful for debugging long procedures. Since bash scripts can't be modified while +# running, this allows the dev to create a snapshot, and if the script fails after that, resume and +# add the hotswap script, so that the new code will be loaded automatically. +# +function hot_swap_script { + if [[ -f $c_hotswap_file ]]; then + source "$c_hotswap_file" + fi +} + function print_step_info_header { local function_name=$1 From 8363b37cccda29b99c111da402a31b83e2035d3d Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Tue, 27 Apr 2021 13:43:16 +0200 Subject: [PATCH 49/90] Make the distro-specific functions use invoke(), when calling the parent --- install-zfs.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 3877090..29bd464 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -465,7 +465,7 @@ function set_zfs_ppa_requirement_Debian { function set_zfs_ppa_requirement_Linuxmint { perl -i -pe 's/^(deb cdrom)/# $1/' /etc/apt/sources.list - set_zfs_ppa_requirement + invoke "set_zfs_ppa_requirement" } # By using a FIFO, we avoid having to hide statements like `echo $v_passphrase | zpoool create ...` @@ -794,7 +794,7 @@ function install_host_packages_Debian { function install_host_packages_Linuxmint { apt install --yes zfsutils-linux - install_host_packages + invoke "install_host_packages" } function install_host_packages_elementary { @@ -803,7 +803,7 @@ function install_host_packages_elementary { apt install --yes software-properties-common fi - install_host_packages + invoke 
"install_host_packages" } function install_host_packages_UbuntuServer { @@ -1214,14 +1214,14 @@ APT' function install_jail_zfs_packages_elementary { chroot_execute "apt install --yes software-properties-common" - install_jail_zfs_packages + invoke "install_jail_zfs_packages" } function install_jail_zfs_packages_UbuntuServer { if [[ $v_use_ppa != "1" ]]; then chroot_execute "apt install --yes zfsutils-linux zfs-initramfs grub-efi-amd64-signed shim-signed" else - install_jail_zfs_packages + invoke "install_jail_zfs_packages" fi } From 0134cfb37c5754845e52e58785021e0e038f50a5 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Tue, 27 Apr 2021 19:55:16 +0200 Subject: [PATCH 50/90] Remove trimming service (autotrim is now enabled) --- README.md | 3 +-- install-zfs.sh | 38 -------------------------------------- 2 files changed, 1 insertion(+), 40 deletions(-) diff --git a/README.md b/README.md index 8f2a3c4..434bd1d 100644 --- a/README.md +++ b/README.md @@ -41,11 +41,10 @@ The advantages of this project over the Ubuntu installer are: 1. it supports pools configuration; 1. it allows specifying the RAID type; 1. it allows customization of the disk partitions; -1. it supports additional features (e.g. encryption); +1. it supports additional features (e.g. encryption and trimming); 1. it supports new OpenZFS versions, via PPA `jonathonf/zfs`. 1. it supports many more operating systems; 1. it supports unattended installations, via custom scripts; -1. it installs a convenient trimming job for ZFS pools; 1. it's easy to extend. 
The disadvantages are: diff --git a/install-zfs.sh b/install-zfs.sh index 29bd464..60f14f3 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -1339,43 +1339,6 @@ function fix_filesystem_mount_ordering { chroot_execute "sed -Ei 's|$c_zfs_mount_dir/?|/|' /etc/zfs/zfs-list.cache/*" } -# We don't care about synchronizing with the `fstrim` service for two reasons: -# -# - we assume that there are no other (significantly) large filesystems; -# - trimming is fast (takes minutes on a 1 TB disk). -# -# The code is a straight copy of the `fstrim` service. -# -function configure_pools_trimming { - chroot_execute "cat > /lib/systemd/system/zfs-trim.service << UNIT -[Unit] -Description=Discard unused ZFS blocks -ConditionVirtualization=!container - -[Service] -Type=oneshot -ExecStart=/sbin/zpool trim $c_bpool_name -ExecStart=/sbin/zpool trim $v_rpool_name -UNIT" - - chroot_execute " cat > /lib/systemd/system/zfs-trim.timer << TIMER -[Unit] -Description=Discard unused ZFS blocks once a week -ConditionVirtualization=!container - -[Timer] -OnCalendar=weekly -AccuracySec=1h -Persistent=true - -[Install] -WantedBy=timers.target -TIMER" - - chroot_execute "systemctl daemon-reload" - chroot_execute "systemctl enable zfs-trim.timer" -} - function configure_remaining_settings { [[ $v_swap_size -gt 0 ]] && chroot_execute "echo /dev/zvol/$v_rpool_name/swap none swap discard 0 0 >> /etc/fstab" || true chroot_execute "echo RESUME=none > /etc/initramfs-tools/conf.d/resume" @@ -1480,7 +1443,6 @@ invoke "configure_and_update_grub" invoke "sync_efi_partitions" invoke "update_initramfs" invoke "fix_filesystem_mount_ordering" -invoke "configure_pools_trimming" invoke "configure_remaining_settings" invoke "prepare_for_system_exit" From d1e167a359971febb31a2fe8b85f83602edccb8f Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Tue, 27 Apr 2021 19:56:56 +0200 Subject: [PATCH 51/90] README: Mention that ZFS 2.x can be installed --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/README.md b/README.md index 434bd1d..97d29fb 100644 --- a/README.md +++ b/README.md @@ -24,7 +24,7 @@ The program currently supports: - Linux Mint 19.x, 20 - ElementaryOS 5.1 -The ZFS version installed is 0.8, which supports native encryption and trimming (among the other improvements over 0.7). The required repositories are automatically added to the destination system. +The ZFS version installed is 0.8 (optionally, 2.x), which supports native encryption and trimming (among the other improvements over 0.7). The required repositories are automatically added to the destination system. EFI boot is required (any modern (2011+) system will do); legacy boot is currently not supported. From d1900f1482a9cdd0703154f2b16a31bbea9b0565 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Tue, 27 Apr 2021 18:29:10 +0200 Subject: [PATCH 52/90] Fix Shellcheck complaint about not following the hotswap script --- install-zfs.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/install-zfs.sh b/install-zfs.sh index 60f14f3..96bfd68 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -167,6 +167,7 @@ function invoke { # function hot_swap_script { if [[ -f $c_hotswap_file ]]; then + # shellcheck disable=1090 # can't follow; the file might not exist anyway. source "$c_hotswap_file" fi } From e02bcf63744f4ecc6bf81d8613504ed73b9a96b7 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Tue, 27 Apr 2021 11:24:26 +0200 Subject: [PATCH 53/90] Cosmetic: Rename `c_installed_os_data_mount_dir` This is actually to make things a bit easier with the datasets definition. 
--- install-zfs.sh | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 96bfd68..edb652e 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -84,7 +84,7 @@ c_default_rpool_create_options=( -O devices=off ) c_zfs_mount_dir=/mnt -c_installed_os_data_mount_dir=/target +c_installed_os_mount_dir=/target declare -A c_supported_linux_distributions=([Ubuntu]="18.04 20.04" [UbuntuServer]="18.04 20.04" [LinuxMint]="19.1 19.2 19.3" [Linuxmint]="20 20.1" [elementary]=5.1) c_temporary_volume_size=12 # gigabytes; large enough - Debian, for example, takes ~8 GiB. c_passphrase_named_pipe=$(dirname "$(mktemp)")/zfs-installer.pp.fifo @@ -927,11 +927,11 @@ Proceed with the configuration as usual, then, at the partitioning stage: # # Note that we assume that the user created only one partition on the temp volume, as expected. # - if ! mountpoint -q "$c_installed_os_data_mount_dir"; then - mount "$v_temp_volume_device" "$c_installed_os_data_mount_dir" + if ! mountpoint -q "$c_installed_os_mount_dir"; then + mount "$v_temp_volume_device" "$c_installed_os_mount_dir" fi - rm -f "$c_installed_os_data_mount_dir/swapfile" + rm -f "$c_installed_os_mount_dir/swapfile" } function install_operating_system_Debian { @@ -962,24 +962,24 @@ Proceed with the configuration as usual, then, at the partitioning stage: DISPLAY=:0 calamares - mkdir -p "$c_installed_os_data_mount_dir" + mkdir -p "$c_installed_os_mount_dir" # Note how in Debian, for reasons currenly unclear, the mount fails if the partition is passed; # it requires the device to be passed. # - mount "${v_temp_volume_device}" "$c_installed_os_data_mount_dir" + mount "${v_temp_volume_device}" "$c_installed_os_mount_dir" # We don't use chroot()_execute here, as it works on $c_zfs_mount_dir (which is synced on a # later stage). 
# set +x - chroot "$c_installed_os_data_mount_dir" bash -c "echo root:$(printf "%q" "$v_root_password") | chpasswd" + chroot "$c_installed_os_mount_dir" bash -c "echo root:$(printf "%q" "$v_root_password") | chpasswd" set -x # The installer doesn't set the network interfaces, so, for convenience, we do it. # for interface in $(ip addr show | perl -lne '/^\d+: (?!lo:)(\w+)/ && print $1' ); do - cat > "$c_installed_os_data_mount_dir/etc/network/interfaces.d/$interface" < "$c_installed_os_mount_dir/etc/network/interfaces.d/$interface" < Date: Tue, 27 Apr 2021 18:48:31 +0200 Subject: [PATCH 54/90] Cosmetic: Remove unnecessary quotes, and make use of `-z`/`-n` in conditionals --- install-zfs.sh | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index edb652e..57565f1 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -308,7 +308,7 @@ function check_prerequisites { elif [[ $(id -u) -ne 0 ]]; then echo 'This script must be run with administrative privileges!' exit 1 - elif [[ "${ZFS_OS_INSTALLATION_SCRIPT:-}" != "" && ! -x "$ZFS_OS_INSTALLATION_SCRIPT" ]]; then + elif [[ -n ${ZFS_OS_INSTALLATION_SCRIPT:-} && ! -x $ZFS_OS_INSTALLATION_SCRIPT ]]; then echo "The custom O/S installation script provided doesn't exist or is not executable!" exit 1 elif [[ ! -v c_supported_linux_distributions["$v_linux_distribution"] ]]; then @@ -337,7 +337,7 @@ This script will prepare the ZFS pools on the system, install Ubuntu, and config In order to stop the procedure, hit Esc twice during dialogs (excluding yes/no ones), or Ctrl+C while any operation is running. 
' - if [[ ${ZFS_NO_INFO_MESSAGES:-} == "" ]]; then + if [[ -z ${ZFS_NO_INFO_MESSAGES:-} ]]; then whiptail --msgbox "$dialog_message" 30 100 fi } @@ -525,7 +525,7 @@ export ZFS_FREE_TAIL_SPACE=12 } function select_disks { - if [[ "${ZFS_SELECTED_DISKS:-}" != "" ]]; then + if [[ -n ${ZFS_SELECTED_DISKS:-} ]]; then mapfile -d, -t v_selected_disks < <(echo -n "$ZFS_SELECTED_DISKS") else while true; do @@ -608,13 +608,13 @@ function select_pools_raid_type { function ask_root_password_Debian { set +x - if [[ ${ZFS_DEBIAN_ROOT_PASSWORD:-} != "" ]]; then + if [[ -n ${ZFS_DEBIAN_ROOT_PASSWORD:-} ]]; then v_root_password="$ZFS_DEBIAN_ROOT_PASSWORD" else local password_invalid_message= local password_repeat=- - while [[ "$v_root_password" != "$password_repeat" || "$v_root_password" == "" ]]; do + while [[ $v_root_password != "$password_repeat" || -z $v_root_password ]]; do v_root_password=$(whiptail --passwordbox "${password_invalid_message}Please enter the root account password (can't be empty):" 30 100 3>&1 1>&2 2>&3) password_repeat=$(whiptail --passwordbox "Please repeat the password:" 30 100 3>&1 1>&2 2>&3) @@ -655,7 +655,7 @@ Leave blank to keep encryption disabled. 
} function ask_boot_partition_size { - if [[ ${ZFS_BOOT_PARTITION_SIZE:-} != "" ]]; then + if [[ -n ${ZFS_BOOT_PARTITION_SIZE:-} ]]; then v_boot_partition_size=$ZFS_BOOT_PARTITION_SIZE else local boot_partition_size_invalid_message= @@ -673,7 +673,7 @@ Supported formats: '512M', '3G'" 30 100 ${c_default_boot_partition_size}M 3>&1 1 } function ask_swap_size { - if [[ ${ZFS_SWAP_SIZE:-} != "" ]]; then + if [[ -n ${ZFS_SWAP_SIZE:-} ]]; then v_swap_size=$ZFS_SWAP_SIZE else local swap_size_invalid_message= @@ -689,7 +689,7 @@ function ask_swap_size { } function ask_free_tail_space { - if [[ ${ZFS_FREE_TAIL_SPACE:-} != "" ]]; then + if [[ -n ${ZFS_FREE_TAIL_SPACE:-} ]]; then v_free_tail_space=$ZFS_FREE_TAIL_SPACE else local tail_space_invalid_message= @@ -713,7 +713,7 @@ For detailed informations, see the wiki page: https://github.com/saveriomiroddi/ } function ask_rpool_name { - if [[ ${ZFS_RPOOL_NAME:-} != "" ]]; then + if [[ -n ${ZFS_RPOOL_NAME:-} ]]; then v_rpool_name=$ZFS_RPOOL_NAME else local rpool_name_invalid_message= @@ -907,7 +907,7 @@ Proceed with the configuration as usual, then, at the partitioning stage: - at the end, choose `Continue Testing` ' - if [[ ${ZFS_NO_INFO_MESSAGES:-} == "" ]]; then + if [[ -z ${ZFS_NO_INFO_MESSAGES:-} ]]; then whiptail --msgbox "$dialog_message" 30 100 fi @@ -952,7 +952,7 @@ Proceed with the configuration as usual, then, at the partitioning stage: - at the end, uncheck `Restart now`, and click `Done` ' - if [[ ${ZFS_NO_INFO_MESSAGES:-} == "" ]]; then + if [[ -z ${ZFS_NO_INFO_MESSAGES:-} ]]; then whiptail --msgbox "$dialog_message" 30 100 fi @@ -1382,7 +1382,7 @@ function display_exit_banner { You now need to perform a hard reset, then enjoy your ZFS system :-)" - if [[ ${ZFS_NO_INFO_MESSAGES:-} == "" ]]; then + if [[ -z ${ZFS_NO_INFO_MESSAGES:-} ]]; then whiptail --msgbox "$dialog_message" 30 100 fi } @@ -1419,7 +1419,7 @@ invoke "ask_pool_create_options" invoke "install_host_packages" invoke "setup_partitions" -if [[ 
"${ZFS_OS_INSTALLATION_SCRIPT:-}" == "" ]]; then +if [[ -z ${ZFS_OS_INSTALLATION_SCRIPT:-} ]]; then # Includes the O/S extra configuration, if necessary (network, root pwd, etc.) invoke "install_operating_system" From 8f75a078aaf80a4da0128d926b1209016b823d58 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Tue, 27 Apr 2021 11:12:40 +0200 Subject: [PATCH 55/90] Add support for datasets --- install-zfs.sh | 75 +++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 68 insertions(+), 7 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 57565f1..1017702 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -83,6 +83,34 @@ c_default_rpool_create_options=( -O xattr=sa -O devices=off ) +# Can't include double quotes, due to the templating logic. +# +c_default_dataset_create_options=' +ROOT mountpoint=/ com.ubuntu.zsys:bootfs=yes com.ubuntu.zsys:last-used=$(date +%s) +ROOT/srv com.ubuntu.zsys:bootfs=no +ROOT/usr canmount=off com.ubuntu.zsys:bootfs=no +ROOT/usr/local +ROOT/var canmount=off com.ubuntu.zsys:bootfs=no +ROOT/var/games +ROOT/var/lib +ROOT/var/lib/AccountsService +ROOT/var/lib/apt +ROOT/var/lib/dpkg +ROOT/var/lib/NetworkManager +ROOT/var/log +ROOT/var/mail +ROOT/var/snap +ROOT/var/spool +ROOT/var/www +ROOT/tmp com.ubuntu.zsys:bootfs=no + +USERDATA mountpoint=/ canmount=off +USERDATA/root mountpoint=/root canmount=on com.ubuntu.zsys:bootfs-datasets=$v_rpool_name/ROOT + +$(find $c_installed_os_mount_dir/home -mindepth 1 -maxdepth 1 -printf '\'' +USERDATA/%P mountpoint=/home/%P canmount=on com.ubuntu.zsys:bootfs-datasets=$v_rpool_name/%P +'\'') +' c_zfs_mount_dir=/mnt c_installed_os_mount_dir=/target declare -A c_supported_linux_distributions=([Ubuntu]="18.04 20.04" [UbuntuServer]="18.04 20.04" [LinuxMint]="19.1 19.2 19.3" [Linuxmint]="20 20.1" [elementary]=5.1) @@ -1031,7 +1059,7 @@ function custom_install_operating_system { sudo "$ZFS_OS_INSTALLATION_SCRIPT" } -function create_pools { +function create_pools_and_datasets { # POOL OPTIONS 
####################### local encryption_options=() @@ -1049,9 +1077,10 @@ function create_pools { bpool_disks_partitions+=("${selected_disk}-part2") done - # POOLS CREATION ##################### + # ROOT POOL CREATION ################# - # The root pool must be created first, since the boot pool mountpoint is inside it. + # In this script, the boot pool doesn't have a root dataset; since its creation will mount /boot, + # it needs to be done after the root pool root dataset is created. set +x echo -n "$v_passphrase" > "$c_passphrase_named_pipe" & @@ -1066,10 +1095,42 @@ function create_pools { zpool create \ "${encryption_options[@]}" \ "${v_rpool_create_options[@]}" \ - -O mountpoint=/ -R "$c_zfs_mount_dir" -f \ + -O mountpoint=/ -O canmount=off -R "$c_zfs_mount_dir" -f \ "$v_rpool_name" "${v_pools_raid_type[@]}" "${rpool_disks_partitions[@]}" \ < "$c_passphrase_named_pipe" + # DATASETS CREATION ################## + + local interpolated_dataset_create_options + interpolated_dataset_create_options=$(eval echo \""$c_default_dataset_create_options"\") + + echo "Interpolated dataset create options:" + echo "$interpolated_dataset_create_options" + echo + + while read -r dataset_metadata_line || [[ -n $dataset_metadata_line ]]; do + if [[ $dataset_metadata_line =~ [^[:space:]] ]]; then + local dataset_metadata_entries + # shellcheck disable=2206 # cheating for simplicity (alternative: sed and mapfile). + dataset_metadata_entries=($dataset_metadata_line) + + local dataset=$v_rpool_name/${dataset_metadata_entries[0]} + local options=("${dataset_metadata_entries[@]:1}") + + # Prepend the `-o`. + # + # shellcheck disable=2068 # cheating for simplicity (otherwise each `-o $option` will be a single + # string). + zfs create ${options[@]/#/-o } "$dataset" + fi + done < <(echo "$interpolated_dataset_create_options") + + chmod 700 /mnt/root + # This is fine independently of the user creating a dataset for /tmp or not. 
+ chmod 1777 /mnt/tmp + + # BOOT POOL CREATION ################# + zpool create \ -o cachefile=/etc/zfs/zpool.cache \ "${v_bpool_create_options[@]}" \ @@ -1314,7 +1375,7 @@ function fix_filesystem_mount_ordering { # For the rpool only, it takes around half second on a test VM. # chroot_execute "zfs set canmount=on $c_bpool_name" - chroot_execute "zfs set canmount=on $v_rpool_name" + chroot_execute "zfs set canmount=on $v_rpool_name/ROOT" SECONDS=0 @@ -1423,13 +1484,13 @@ if [[ -z ${ZFS_OS_INSTALLATION_SCRIPT:-} ]]; then # Includes the O/S extra configuration, if necessary (network, root pwd, etc.) invoke "install_operating_system" - invoke "create_pools" + invoke "create_pools_and_datasets" invoke "create_swap_volume" invoke "copy_zpool_cache" invoke "sync_os_temp_installation_dir_to_rpool" invoke "remove_temp_partition_and_expand_rpool" else - invoke "create_pools" + invoke "create_pools_and_datasets" invoke "create_swap_volume" invoke "copy_zpool_cache" invoke "remove_temp_partition_and_expand_rpool" From 198afb8762fabc4015204385212acfd9908e6698 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Tue, 27 Apr 2021 18:46:13 +0200 Subject: [PATCH 56/90] Allow dataset creation options customization via env variable --- install-zfs.sh | 52 +++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 51 insertions(+), 1 deletion(-) diff --git a/install-zfs.sh b/install-zfs.sh index 1017702..3975cf7 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -26,6 +26,7 @@ v_passphrase= v_root_password= # Debian-only v_rpool_name= v_rpool_create_options= # array; see defaults below for format +v_dataset_create_options= # string; see help for format v_pools_raid_type=() declare -a v_selected_disks # (/dev/by-id/disk_id, ...) v_swap_size= # integer @@ -83,6 +84,10 @@ c_default_rpool_create_options=( -O xattr=sa -O devices=off ) +c_dataset_options_help='# The defaults create a root pool similar to the Ubuntu default; see the script help for details. 
+# Double quotes are forbidden; lines starting with a hash (`#`) are ignored. +# Parameters and command substitutions are applied; useful variables are $c_zfs_mount_dir and $v_rpool_name. +' # Can't include double quotes, due to the templating logic. # c_default_dataset_create_options=' @@ -264,6 +269,7 @@ The procedure can be entirely automated via environment variables: - ZFS_BPOOL_CREATE_OPTIONS : boot pool options to set on creation (see defaults below) - ZFS_RPOOL_CREATE_OPTIONS : root pool options to set on creation (see defaults below) - ZFS_POOLS_RAID_TYPE : options: blank (striping), `mirror`, `raidz`, `raidz2`, `raidz3`; if unset, it will be asked. +- ZFS_DATASET_CREATE_OPTIONS : see explanation below - ZFS_NO_INFO_MESSAGES : set 1 to skip informational messages - ZFS_SWAP_SIZE : swap size (integer); set 0 for no swap - ZFS_FREE_TAIL_SPACE : leave free space at the end of each disk (integer), for example, for a swap partition @@ -279,6 +285,19 @@ When installing the O/S via $ZFS_OS_INSTALLATION_SCRIPT, the root pool is mounte Boot pool default create options: '"${c_default_bpool_create_options[*]/#-/$'\n' -}"' Root pool default create options: '"${c_default_rpool_create_options[*]/#-/$'\n' -}"' + +The root pool dataset creation options can be specified by passing a string of whom each line has: + +- the dataset name (without the pool) +- the options (without `-o`) + +The defaults, which create a root pool similar to the Ubuntu default, are: + +'"$(echo -n "$c_default_dataset_create_options" | sed 's/^/ /')"' + +Double quotes are forbidden. Parameters and command substitutions are applied; useful variables are $c_zfs_mount_dir and $v_rpool_name. + +Datasets are created after the operating system is installed; at that stage, it'\'' mounted in the directory specified by $c_zfs_mount_dir. ' echo "$help" @@ -552,6 +571,12 @@ export ZFS_FREE_TAIL_SPACE=12 trap _exit_hook EXIT } +# Whiptail's lack of multiline editing is quite painful. 
+# +function install_dialog_package { + apt install -y dialog +} + function select_disks { if [[ -n ${ZFS_SELECTED_DISKS:-} ]]; then mapfile -d, -t v_selected_disks < <(echo -n "$ZFS_SELECTED_DISKS") @@ -776,6 +801,29 @@ The encryption/mount-related options are automatically added, and must not be sp print_variables v_bpool_create_options v_rpool_create_options } +function ask_dataset_create_options { + if [[ -n ${ZFS_DATASET_CREATE_OPTIONS:-} ]]; then + v_dataset_create_options=$ZFS_DATASET_CREATE_OPTIONS + else + while true; do + local tempfile + tempfile=$(mktemp) + + echo "$c_dataset_options_help$c_default_dataset_create_options" > "$tempfile" + + local user_value + user_value=$(dialog --editbox "$tempfile" 30 120 3>&1 1>&2 2>&3) + + if [[ -n $user_value && $user_value != *\"* ]]; then + v_dataset_create_options=$(echo "$user_value" | perl -ne 'print unless /^\s*#/') + break + fi + done + fi + + print_variables v_dataset_create_options +} + function install_host_packages { if [[ $v_use_ppa == "1" ]]; then if [[ ${ZFS_SKIP_LIVE_ZFS_MODULE_INSTALL:-} != "1" ]]; then @@ -1102,7 +1150,7 @@ function create_pools_and_datasets { # DATASETS CREATION ################## local interpolated_dataset_create_options - interpolated_dataset_create_options=$(eval echo \""$c_default_dataset_create_options"\") + interpolated_dataset_create_options=$(eval echo \""$v_dataset_create_options"\") echo "Interpolated dataset create options:" echo "$interpolated_dataset_create_options" @@ -1466,6 +1514,7 @@ invoke "find_suitable_disks" invoke "set_zfs_ppa_requirement" invoke "register_exit_hook" invoke "create_passphrase_named_pipe" +invoke "install_dialog_package" invoke "select_disks" invoke "select_pools_raid_type" @@ -1476,6 +1525,7 @@ invoke "ask_swap_size" invoke "ask_free_tail_space" invoke "ask_rpool_name" invoke "ask_pool_create_options" +invoke "ask_dataset_create_options" invoke "install_host_packages" invoke "setup_partitions" From 355c76cc0f01d44a4ba91c7e4820a6776b475952 Mon 
Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Tue, 27 Apr 2021 19:54:09 +0200 Subject: [PATCH 57/90] Update README --- README.md | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 97d29fb..7c6c4c9 100644 --- a/README.md +++ b/README.md @@ -38,19 +38,14 @@ As of 20.04, Canonical makes available an experimental ZFS installer on Ubuntu D The advantages of this project over the Ubuntu installer are: -1. it supports pools configuration; -1. it allows specifying the RAID type; -1. it allows customization of the disk partitions; +1. it allows configuring pools, datasets and the RAID type; +1. it allows customizing the disk partitions; 1. it supports additional features (e.g. encryption and trimming); -1. it supports new OpenZFS versions, via PPA `jonathonf/zfs`. +1. it supports newer OpenZFS versions, via PPA `jonathonf/zfs`. 1. it supports many more operating systems; 1. it supports unattended installations, via custom scripts; 1. it's easy to extend. -The disadvantages are: - -1. the Ubuntu installer has a more sophisticated filesystem layout - it separates base directories into different ZFS filesystems (this is planned to be implemented in the ZFS installer as well). - ## Instructions Start the live CD of a supported Linux distribution, then open a terminal and execute: From 2bd5f16cc1dd9e277d5b568d19b053bbe9267acb Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Wed, 28 Apr 2021 15:38:59 +0200 Subject: [PATCH 58/90] Cosmetic: Move set_zfs_ppa_requirement forward In preparation for the next changes. 
--- install-zfs.sh | 82 +++++++++++++++++++++++++------------------------- 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 3975cf7..c466cb4 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -476,46 +476,6 @@ If you think this is a bug, please open an issue on https://github.com/saveriomi print_variables v_suitable_disks } -# REQUIREMENT: it must be ensured that, for any distro, `apt update` is invoked at this step, as -# subsequent steps rely on it. -# -# There are three parameters: -# -# 1. the tools are preinstalled (ie. Ubuntu Desktop based); -# 2. the default repository supports ZFS 0.8 (ie. Ubuntu 20.04+ based); -# 3. the distro provides the precompiled ZFS module (i.e. Ubuntu based, not Debian) -# -# Fortunately, with Debian-specific logic isolated, we need conditionals based only on #2 - see -# install_host_packages() and install_host_packages_UbuntuServer(). -# -function set_zfs_ppa_requirement { - apt update - - local zfs_package_version - zfs_package_version=$(apt show zfsutils-linux 2> /dev/null | perl -ne 'print /^Version: (\d+\.\d+)/') - - # Test returns true if $zfs_package_version is blank. - # - if [[ ${ZFS_USE_PPA:-} == "1" ]] || dpkg --compare-versions "$zfs_package_version" lt 0.8; then - v_use_ppa=1 - fi -} - -function set_zfs_ppa_requirement_Debian { - # Only update apt; in this case, ZFS packages are handled in a specific way. - - apt update -} - -# Mint 20 has the CDROM repository enabled, but apt fails when updating due to it (or possibly due -# to it being incorrectly setup). -# -function set_zfs_ppa_requirement_Linuxmint { - perl -i -pe 's/^(deb cdrom)/# $1/' /etc/apt/sources.list - - invoke "set_zfs_ppa_requirement" -} - # By using a FIFO, we avoid having to hide statements like `echo $v_passphrase | zpoool create ...` # from the logs. 
# @@ -571,6 +531,46 @@ export ZFS_FREE_TAIL_SPACE=12 trap _exit_hook EXIT } +# REQUIREMENT: it must be ensured that, for any distro, `apt update` is invoked at this step, as +# subsequent steps rely on it. +# +# There are three parameters: +# +# 1. the tools are preinstalled (ie. Ubuntu Desktop based); +# 2. the default repository supports ZFS 0.8 (ie. Ubuntu 20.04+ based); +# 3. the distro provides the precompiled ZFS module (i.e. Ubuntu based, not Debian) +# +# Fortunately, with Debian-specific logic isolated, we need conditionals based only on #2 - see +# install_host_packages() and install_host_packages_UbuntuServer(). +# +function set_zfs_ppa_requirement { + apt update + + local zfs_package_version + zfs_package_version=$(apt show zfsutils-linux 2> /dev/null | perl -ne 'print /^Version: (\d+\.\d+)/') + + # Test returns true if $zfs_package_version is blank. + # + if [[ ${ZFS_USE_PPA:-} == "1" ]] || dpkg --compare-versions "$zfs_package_version" lt 0.8; then + v_use_ppa=1 + fi +} + +function set_zfs_ppa_requirement_Debian { + # Only update apt; in this case, ZFS packages are handled in a specific way. + + apt update +} + +# Mint 20 has the CDROM repository enabled, but apt fails when updating due to it (or possibly due +# to it being incorrectly setup). +# +function set_zfs_ppa_requirement_Linuxmint { + perl -i -pe 's/^(deb cdrom)/# $1/' /etc/apt/sources.list + + invoke "set_zfs_ppa_requirement" +} + # Whiptail's lack of multiline editing is quite painful. 
# function install_dialog_package { @@ -1511,9 +1511,9 @@ invoke "display_intro_banner" invoke "check_system_memory" invoke "save_disks_log" invoke "find_suitable_disks" -invoke "set_zfs_ppa_requirement" invoke "register_exit_hook" invoke "create_passphrase_named_pipe" +invoke "set_zfs_ppa_requirement" invoke "install_dialog_package" invoke "select_disks" From cca6b2bef9286ffb4227ee36be2cdff76eed073e Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Wed, 28 Apr 2021 15:41:57 +0200 Subject: [PATCH 59/90] Refactoring: Split apt index update into separate step --- install-zfs.sh | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index c466cb4..0ef7d0c 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -531,6 +531,19 @@ export ZFS_FREE_TAIL_SPACE=12 trap _exit_hook EXIT } +function update_apt_index { + apt update +} + +# Mint 20 has the CDROM repository enabled, but apt fails when updating due to it (or possibly due +# to it being incorrectly setup). +# +function update_apt_index_Linuxmint { + perl -i -pe 's/^(deb cdrom)/# $1/' /etc/apt/sources.list + + invoke "update_apt_index" +} + # REQUIREMENT: it must be ensured that, for any distro, `apt update` is invoked at this step, as # subsequent steps rely on it. # @@ -544,8 +557,6 @@ export ZFS_FREE_TAIL_SPACE=12 # install_host_packages() and install_host_packages_UbuntuServer(). # function set_zfs_ppa_requirement { - apt update - local zfs_package_version zfs_package_version=$(apt show zfsutils-linux 2> /dev/null | perl -ne 'print /^Version: (\d+\.\d+)/') @@ -558,17 +569,7 @@ function set_zfs_ppa_requirement { function set_zfs_ppa_requirement_Debian { # Only update apt; in this case, ZFS packages are handled in a specific way. - - apt update -} - -# Mint 20 has the CDROM repository enabled, but apt fails when updating due to it (or possibly due -# to it being incorrectly setup). 
-# -function set_zfs_ppa_requirement_Linuxmint { - perl -i -pe 's/^(deb cdrom)/# $1/' /etc/apt/sources.list - - invoke "set_zfs_ppa_requirement" + : } # Whiptail's lack of multiline editing is quite painful. @@ -1513,6 +1514,7 @@ invoke "save_disks_log" invoke "find_suitable_disks" invoke "register_exit_hook" invoke "create_passphrase_named_pipe" +invoke "update_apt_index" invoke "set_zfs_ppa_requirement" invoke "install_dialog_package" From 84b3cbe9c0441e0fc2022256ce9bd0cf312e5aac Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Wed, 28 Apr 2021 15:55:11 +0200 Subject: [PATCH 60/90] Separate step "prepare_standard_repositories", and add universe repository --- install-zfs.sh | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 0ef7d0c..9a7df72 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -531,22 +531,33 @@ export ZFS_FREE_TAIL_SPACE=12 trap _exit_hook EXIT } -function update_apt_index { - apt update +function prepare_standard_repositories { + # Make sure it's enabled. Ubuntu MATE has it, while the standard Ubuntu doesn't. + # The program exits with success if the repository is already enabled. + # + add-apt-repository --yes --no-update universe } # Mint 20 has the CDROM repository enabled, but apt fails when updating due to it (or possibly due # to it being incorrectly setup). # -function update_apt_index_Linuxmint { +function prepare_standard_repositories_Linuxmint { perl -i -pe 's/^(deb cdrom)/# $1/' /etc/apt/sources.list - invoke "update_apt_index" + # The universe repository may be already enabled, but it's more solid to ensure it. + # + invoke "prepare_standard_repositories" +} + +function prepare_standard_repositories_Debian { + # Debian doesn't require universe (for dialog). + : +} + +function update_apt_index { + apt update } -# REQUIREMENT: it must be ensured that, for any distro, `apt update` is invoked at this step, as -# subsequent steps rely on it. 
-# # There are three parameters: # # 1. the tools are preinstalled (ie. Ubuntu Desktop based); @@ -1514,6 +1525,7 @@ invoke "save_disks_log" invoke "find_suitable_disks" invoke "register_exit_hook" invoke "create_passphrase_named_pipe" +invoke "prepare_standard_repositories" invoke "update_apt_index" invoke "set_zfs_ppa_requirement" invoke "install_dialog_package" From 9adce4e14147b49ffc221c1279f6525168f6e977 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Wed, 28 Apr 2021 15:56:14 +0200 Subject: [PATCH 61/90] Rename horrible function `set_zfs_ppa_requirement` --- install-zfs.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 9a7df72..1eb928f 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -567,7 +567,7 @@ function update_apt_index { # Fortunately, with Debian-specific logic isolated, we need conditionals based only on #2 - see # install_host_packages() and install_host_packages_UbuntuServer(). # -function set_zfs_ppa_requirement { +function set_use_zfs_ppa { local zfs_package_version zfs_package_version=$(apt show zfsutils-linux 2> /dev/null | perl -ne 'print /^Version: (\d+\.\d+)/') @@ -578,7 +578,7 @@ function set_zfs_ppa_requirement { fi } -function set_zfs_ppa_requirement_Debian { +function set_use_zfs_ppa_Debian { # Only update apt; in this case, ZFS packages are handled in a specific way. 
: } @@ -1527,7 +1527,7 @@ invoke "register_exit_hook" invoke "create_passphrase_named_pipe" invoke "prepare_standard_repositories" invoke "update_apt_index" -invoke "set_zfs_ppa_requirement" +invoke "set_use_zfs_ppa" invoke "install_dialog_package" invoke "select_disks" From 224e630b7f97b76310c2d8eca2f716c2a6499361 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Wed, 28 Apr 2021 23:41:06 +0200 Subject: [PATCH 62/90] Remove obsolete comment about setting use PPA logic --- install-zfs.sh | 9 --------- 1 file changed, 9 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 1eb928f..fb9db57 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -558,15 +558,6 @@ function update_apt_index { apt update } -# There are three parameters: -# -# 1. the tools are preinstalled (ie. Ubuntu Desktop based); -# 2. the default repository supports ZFS 0.8 (ie. Ubuntu 20.04+ based); -# 3. the distro provides the precompiled ZFS module (i.e. Ubuntu based, not Debian) -# -# Fortunately, with Debian-specific logic isolated, we need conditionals based only on #2 - see -# install_host_packages() and install_host_packages_UbuntuServer(). -# function set_use_zfs_ppa { local zfs_package_version zfs_package_version=$(apt show zfsutils-linux 2> /dev/null | perl -ne 'print /^Version: (\d+\.\d+)/') From f2be14369f70dd9d497abc5b87b3556ebe878907 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Wed, 28 Apr 2021 23:40:03 +0200 Subject: [PATCH 63/90] Refactored package installation functions KUbuntu doesn't ship with zfsutils-linux installed, however, this doesn't complete the support. --- install-zfs.sh | 64 ++++++++++++++++++++------------------------------ 1 file changed, 25 insertions(+), 39 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index fb9db57..47dfda5 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -576,8 +576,12 @@ function set_use_zfs_ppa_Debian { # Whiptail's lack of multiline editing is quite painful. 
# -function install_dialog_package { - apt install -y dialog +function install_host_base_packages { + # `efibootmgr` needs installation on all the systems. + # the other packages are each required by different distros, so for simplicity, they're all packed + # together. + # + apt install -y efibootmgr dialog software-properties-common } function select_disks { @@ -827,7 +831,7 @@ function ask_dataset_create_options { print_variables v_dataset_create_options } -function install_host_packages { +function install_host_zfs_packages { if [[ $v_use_ppa == "1" ]]; then if [[ ${ZFS_SKIP_LIVE_ZFS_MODULE_INSTALL:-} != "1" ]]; then add-apt-repository --yes "$c_ppa" @@ -846,12 +850,14 @@ function install_host_packages { fi fi - apt install --yes efibootmgr + # Required only by some distros. + # + apt install --yes zfsutils-linux zfs --version > "$c_zfs_module_version_log" 2>&1 } -function install_host_packages_Debian { +function install_host_zfs_packages_Debian { if [[ ${ZFS_SKIP_LIVE_ZFS_MODULE_INSTALL:-} != "1" ]]; then echo "zfs-dkms zfs-dkms/note-incompatible-licenses note true" | debconf-set-selections @@ -864,29 +870,10 @@ function install_host_packages_Debian { modprobe zfs fi - apt install --yes efibootmgr - zfs --version > "$c_zfs_module_version_log" 2>&1 } -# Differently from Ubuntu, Mint doesn't have the package installed in the live version. 
-# -function install_host_packages_Linuxmint { - apt install --yes zfsutils-linux - - invoke "install_host_packages" -} - -function install_host_packages_elementary { - if [[ ${ZFS_SKIP_LIVE_ZFS_MODULE_INSTALL:-} != "1" ]]; then - apt update - apt install --yes software-properties-common - fi - - invoke "install_host_packages" -} - -function install_host_packages_UbuntuServer { +function install_host_zfs_packages_UbuntuServer { if [[ $v_use_ppa != "1" ]]; then apt install --yes zfsutils-linux efibootmgr @@ -911,7 +898,7 @@ function install_host_packages_UbuntuServer { apt update apt install --yes "linux-headers-$(uname -r)" - install_host_packages + install_host_zfs_packages else apt install --yes efibootmgr fi @@ -1279,7 +1266,13 @@ function prepare_jail { chroot_execute 'echo "nameserver 8.8.8.8" >> /etc/resolv.conf' } -# See install_host_packages() for some comments. +# Same principle as install_host_base_packages(). +# +function install_jail_base_packages { + chroot_execute "apt install --yes rsync grub-efi-amd64-signed shim-signed software-properties-common" +} + +# See install_host_zfs_packages() for some comments. 
# function install_jail_zfs_packages { if [[ $v_use_ppa == "1" ]]; then @@ -1301,8 +1294,6 @@ function install_jail_zfs_packages { # chroot_execute "apt install --yes libzfs2linux zfs-initramfs zfs-zed zfsutils-linux" fi - - chroot_execute "apt install --yes grub-efi-amd64-signed shim-signed" } function install_jail_zfs_packages_Debian { @@ -1321,18 +1312,12 @@ APT' chroot_execute "apt update" chroot_execute 'echo "zfs-dkms zfs-dkms/note-incompatible-licenses note true" | debconf-set-selections' - chroot_execute "apt install --yes rsync zfs-initramfs zfs-dkms grub-efi-amd64-signed shim-signed" -} - -function install_jail_zfs_packages_elementary { - chroot_execute "apt install --yes software-properties-common" - - invoke "install_jail_zfs_packages" + chroot_execute "apt install --yes zfs-initramfs zfs-dkms" } function install_jail_zfs_packages_UbuntuServer { if [[ $v_use_ppa != "1" ]]; then - chroot_execute "apt install --yes zfsutils-linux zfs-initramfs grub-efi-amd64-signed shim-signed" + chroot_execute "apt install --yes zfsutils-linux zfs-initramfs" else invoke "install_jail_zfs_packages" fi @@ -1519,7 +1504,7 @@ invoke "create_passphrase_named_pipe" invoke "prepare_standard_repositories" invoke "update_apt_index" invoke "set_use_zfs_ppa" -invoke "install_dialog_package" +invoke "install_host_base_packages" invoke "select_disks" invoke "select_pools_raid_type" @@ -1532,7 +1517,7 @@ invoke "ask_rpool_name" invoke "ask_pool_create_options" invoke "ask_dataset_create_options" -invoke "install_host_packages" +invoke "install_host_zfs_packages" invoke "setup_partitions" if [[ -z ${ZFS_OS_INSTALLATION_SCRIPT:-} ]]; then @@ -1554,6 +1539,7 @@ else fi invoke "prepare_jail" +invoke "install_jail_base_packages" invoke "install_jail_zfs_packages" invoke "prepare_efi_partition" invoke "configure_and_update_grub" From 584946c3d06b0234573f14a9904b6ddcd469993a Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Thu, 29 Apr 2021 00:00:26 +0200 Subject: [PATCH 64/90] Add 
`--no-update` to all `add-apt-repository` invocations --- install-zfs.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 47dfda5..280bbb9 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -834,7 +834,7 @@ function ask_dataset_create_options { function install_host_zfs_packages { if [[ $v_use_ppa == "1" ]]; then if [[ ${ZFS_SKIP_LIVE_ZFS_MODULE_INSTALL:-} != "1" ]]; then - add-apt-repository --yes "$c_ppa" + add-apt-repository --yes --no-update "$c_ppa" apt update # Libelf-dev allows `CONFIG_STACK_VALIDATION` to be set - it's optional, but good to have. @@ -1276,7 +1276,7 @@ function install_jail_base_packages { # function install_jail_zfs_packages { if [[ $v_use_ppa == "1" ]]; then - chroot_execute "add-apt-repository --yes $c_ppa" + chroot_execute "add-apt-repository --yes --no-update $c_ppa" chroot_execute "apt update" From 4fd8ee1c8a0b26e649ab2aad172c77f7dc96d5d0 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Thu, 29 Apr 2021 00:12:11 +0200 Subject: [PATCH 65/90] Complete KUbuntu support --- install-zfs.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/install-zfs.sh b/install-zfs.sh index 280bbb9..03ebe0e 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -89,6 +89,7 @@ c_dataset_options_help='# The defaults create a root pool similar to the Ubuntu # Parameters and command substitutions are applied; useful variables are $c_zfs_mount_dir and $v_rpool_name. ' # Can't include double quotes, due to the templating logic. +# KUbuntu has a /home/.directory, which must not be a separate dataset (it's a symlink). 
# c_default_dataset_create_options=' ROOT mountpoint=/ com.ubuntu.zsys:bootfs=yes com.ubuntu.zsys:last-used=$(date +%s) @@ -112,7 +113,7 @@ ROOT/tmp com.ubuntu.zsys:bootfs=no USERDATA mountpoint=/ canmount=off USERDATA/root mountpoint=/root canmount=on com.ubuntu.zsys:bootfs-datasets=$v_rpool_name/ROOT -$(find $c_installed_os_mount_dir/home -mindepth 1 -maxdepth 1 -printf '\'' +$(find $c_installed_os_mount_dir/home -mindepth 1 -maxdepth 1 -not -name '\''.*'\'' -printf '\'' USERDATA/%P mountpoint=/home/%P canmount=on com.ubuntu.zsys:bootfs-datasets=$v_rpool_name/%P '\'') ' From 8773baa45d19e32b5462f27d8da87da1d35e903e Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Fri, 30 Apr 2021 19:43:15 +0200 Subject: [PATCH 66/90] Fix prepare_standard_repositories_Linuxmint() endless loop --- install-zfs.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 03ebe0e..185bac1 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -547,7 +547,7 @@ function prepare_standard_repositories_Linuxmint { # The universe repository may be already enabled, but it's more solid to ensure it. 
# - invoke "prepare_standard_repositories" + prepare_standard_repositories } function prepare_standard_repositories_Debian { @@ -1320,7 +1320,7 @@ function install_jail_zfs_packages_UbuntuServer { if [[ $v_use_ppa != "1" ]]; then chroot_execute "apt install --yes zfsutils-linux zfs-initramfs" else - invoke "install_jail_zfs_packages" + install_jail_zfs_packages fi } From cf2f9267205e4c83dd484f5cc3c2b8571919ed2f Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Fri, 30 Apr 2021 19:44:29 +0200 Subject: [PATCH 67/90] Add comment to invoke() about potential mistake --- install-zfs.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/install-zfs.sh b/install-zfs.sh index 185bac1..b821d9d 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -150,6 +150,9 @@ c_udevadm_settle_timeout=10 # seconds # A target function must exist, otherwise a error is raised, unless `--optional` is specified. # `--optional` is useful when a step is specific to a single distribution, e.g. Debian's root password. # +# WATCH OUT! Don't forget *not* to call this from an ovverridden function, otherwise, it will call itself +# endlessly! +# # Examples: # # $ function install_jail_zfs_packages { :; } From 62facba704917852112297c5528fa0b9ea4f9332 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Fri, 30 Apr 2021 19:45:50 +0200 Subject: [PATCH 68/90] Fix invoke() example --- install-zfs.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index b821d9d..861ad93 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -157,17 +157,17 @@ c_udevadm_settle_timeout=10 # seconds # # $ function install_jail_zfs_packages { :; } # $ function install_jail_zfs_packages_Debian { :; } -# $ distro_dependent_invoke "install_jail_zfs_packages" +# $ invoke "install_jail_zfs_packages" # # If the distribution is `Debian`, the second will be invoked, otherwise, the first. 
# # $ function update_zed_cache_Ubuntu { :; } -# $ distro_dependent_invoke "update_zed_cache" --optional +# $ invoke "update_zed_cache" --optional # # If the distribution is `Debian`, nothing will happen. # # $ function update_zed_cache_Ubuntu { :; } -# $ distro_dependent_invoke "update_zed_cache" +# $ invoke "update_zed_cache" # # If the distribution is `Debian`, an error will be raised. # From 59feedb9c9b2e6fbe549c4fcb94312662f6278eb Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Fri, 30 Apr 2021 21:43:21 +0200 Subject: [PATCH 69/90] Fix differences between distros in add-apt-repository options Damn Mint! --- install-zfs.sh | 33 ++++++++++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 861ad93..c640e2e 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -247,6 +247,33 @@ function print_variables { echo } +# Very annoyingly, some distros (e.g. Linux Mint) have a "reduced" version of add-apt-repository +# +function checked_add_apt_repository { + local repository=$1 + local option=${2:-} # optional: `--chroot` + + local add_repo_command=(add-apt-repository --yes "$repository") + + if add-apt-repository --help | grep -q "\--no-update"; then + # Assume that when this option isn't available, no update is performed. The fragmentation is a PITA. + # + add_repo_command+=(--no-update) + fi + + case $option in + '') + "${add_repo_command[@]}" + ;; + --chroot) + chroot_execute "${add_repo_command[*]}" + ;; + *) + >&2 echo "Unexpected checked_add_apt_repository option: $2" + exit 1 + esac +} + function chroot_execute { chroot $c_zfs_mount_dir bash -c "$1" } @@ -539,7 +566,7 @@ function prepare_standard_repositories { # Make sure it's enabled. Ubuntu MATE has it, while the standard Ubuntu doesn't. # The program exits with success if the repository is already enabled. 
# - add-apt-repository --yes --no-update universe + checked_add_apt_repository universe } # Mint 20 has the CDROM repository enabled, but apt fails when updating due to it (or possibly due @@ -838,7 +865,7 @@ function ask_dataset_create_options { function install_host_zfs_packages { if [[ $v_use_ppa == "1" ]]; then if [[ ${ZFS_SKIP_LIVE_ZFS_MODULE_INSTALL:-} != "1" ]]; then - add-apt-repository --yes --no-update "$c_ppa" + checked_add_apt_repository "$c_ppa" apt update # Libelf-dev allows `CONFIG_STACK_VALIDATION` to be set - it's optional, but good to have. @@ -1280,7 +1307,7 @@ function install_jail_base_packages { # function install_jail_zfs_packages { if [[ $v_use_ppa == "1" ]]; then - chroot_execute "add-apt-repository --yes --no-update $c_ppa" + checked_add_apt_repository "$c_ppa" --chroot chroot_execute "apt update" From 84b4e17eddfc549400903a5d373dad81d5c01fa6 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 5 Jun 2021 22:25:08 +0200 Subject: [PATCH 70/90] Ubuntu Server: fix the udev* systemd units (re)start Using a glob requires `--all` in order to work as intended, otherwise, it doesn't start units that are not loaded already. --- install-zfs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install-zfs.sh b/install-zfs.sh index c640e2e..00c0f49 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -920,7 +920,7 @@ function install_host_zfs_packages_UbuntuServer { umount /lib/modules rm -r /lib/modules ln -s /tmp/modules /lib - systemctl start 'systemd-udevd*' + systemctl start --all 'systemd-udevd*' # Additionally, the linux packages for the running kernel are not installed, at least when # the standard installation is performed. Didn't test on the HWE option; if it's not required, From be98f385f1447cb5875d198d1ac1ae91818568b6 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 5 Jun 2021 22:44:56 +0200 Subject: [PATCH 71/90] Ubuntu Server: Create `/target` if missing The directory used to be created before [20.04.2]... 
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARGH!!!!!!!!!! --- install-zfs.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/install-zfs.sh b/install-zfs.sh index 00c0f49..53c0fdb 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -1118,6 +1118,12 @@ You can switch anytime to this terminal, and back, in order to read the instruct # Server, but it's better not to take risks. # if ! mountpoint -q "$c_installed_os_mount_dir"; then + # There must be a conspiracy 🙄 `/target` used to be created before [20.04.2]. + # + if [[ ! -d $c_installed_os_mount_dir ]]; then + mkdir "$c_installed_os_mount_dir" + fi + mount "${v_temp_volume_device}p2" "$c_installed_os_mount_dir" fi From 4c5a16920c41c2458436414dac2cb0301fb4a99d Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 5 Jun 2021 23:12:21 +0200 Subject: [PATCH 72/90] Improve message about module compilation requirements The O/S is related up to a certain extent; hardware threads have instead a central role. --- install-zfs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install-zfs.sh b/install-zfs.sh index 53c0fdb..f99ab34 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -430,7 +430,7 @@ function check_system_memory { # local dialog_message='WARNING! In some cases, the ZFS modules require compilation. -On systems with relatively little RAM, the procedure may crash during the compilation, for example with 3 GB on Debian 10.9. +On systems with relatively little RAM and many hardware threads, the procedure may crash during the compilation (e.g. 3 GB/16 threads). In such cases, the module building may fail abruptly, either without visible errors (leaving "process killed" messages in the syslog), or with package installation errors (leaving odd errors in the module'\''s `make.log`).' From 0a1a80fa138dbc4bfa06209e9563afcef99bf0ea Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 6 Jun 2021 01:39:19 +0200 Subject: [PATCH 73/90] Disable PPA on Ubuntu Server See #200. 
--- install-zfs.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/install-zfs.sh b/install-zfs.sh index f99ab34..24ca08e 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -395,6 +395,11 @@ function check_prerequisites { elif [[ ! ${c_supported_linux_distributions["$v_linux_distribution"]} =~ $distro_version_regex ]]; then echo "This Linux distribution version ($v_linux_version) is not supported; supported versions: ${c_supported_linux_distributions["$v_linux_distribution"]}" exit 1 + elif [[ ${ZFS_USE_PPA:-} == "1" && $v_linux_distribution == "UbuntuServer" ]]; then + # As of Jun/2021, it breaks the installation. + # + echo "The PPA is not (currently) supported on Ubuntu Server!" + exit 1 fi set +x From 9b634dde32c0aacfa83d0f44b1a7ccf10d6efd83 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 6 Jun 2021 11:57:57 +0200 Subject: [PATCH 74/90] Generalize fix filesystem mount ordering Previously, it wasn't taken into account that the root FS may not be `$v_rpool_name/ROOT`. --- install-zfs.sh | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 24ca08e..ee42c54 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -1450,10 +1450,15 @@ function fix_filesystem_mount_ordering { local success= if [[ ! -s $c_zfs_mount_dir/etc/zfs/zfs-list.cache/$c_bpool_name || ! -s $c_zfs_mount_dir/etc/zfs/zfs-list.cache/$v_rpool_name ]]; then + local zfs_root_fs zfs_boot_fs + + zfs_root_fs=$(chroot_execute 'zfs list / | awk "NR==2 {print \$1}"') + zfs_boot_fs=$(chroot_execute 'zfs list /boot | awk "NR==2 {print \$1}"') + # For the rpool only, it takes around half second on a test VM. 
# - chroot_execute "zfs set canmount=on $c_bpool_name" - chroot_execute "zfs set canmount=on $v_rpool_name/ROOT" + chroot_execute "zfs set canmount=on $zfs_boot_fs" + chroot_execute "zfs set canmount=on $zfs_root_fs" SECONDS=0 From 4d18dc14a6b63fdcd9870e33deac8ddfe230ef3d Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 6 Jun 2021 14:14:50 +0200 Subject: [PATCH 75/90] Disable Ubuntu Server on mainline, and update README accordingly The new procedure doesn't directly support Ubuntu Server. Closes #199. --- README.md | 4 ++-- install-zfs.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 7c6c4c9..5a9c3ab 100644 --- a/README.md +++ b/README.md @@ -58,10 +58,10 @@ then follow the instructions; halfway through the procedure, the GUI installer o ### Ubuntu Server -Ubuntu Server requires a slightly different execution procedure: +Ubuntu Server requires a slightly different execution procedure, and the version 0.3.x of the script (the [new procedure](https://openzfs.github.io/openzfs-docs/Getting%20Started/Ubuntu/Ubuntu%2020.04%20Root%20on%20ZFS.html) doesn't directly support Ubuntu Server): - when the installer welcome screen shows up, tap `Ctrl+Alt+F2`, -- then type `curl -L https://git.io/JelI5 | sudo bash`. +- then type `curl -L https://git.io/JGjA6 | sudo bash`. then follow the instructions. 
diff --git a/install-zfs.sh b/install-zfs.sh index ee42c54..53bc0c1 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -119,7 +119,7 @@ USERDATA/%P mountpoint=/home/%P canmount=on com.ubuntu.zsys:b ' c_zfs_mount_dir=/mnt c_installed_os_mount_dir=/target -declare -A c_supported_linux_distributions=([Ubuntu]="18.04 20.04" [UbuntuServer]="18.04 20.04" [LinuxMint]="19.1 19.2 19.3" [Linuxmint]="20 20.1" [elementary]=5.1) +declare -A c_supported_linux_distributions=([Ubuntu]="18.04 20.04" [LinuxMint]="19.1 19.2 19.3" [Linuxmint]="20 20.1" [elementary]=5.1) c_temporary_volume_size=12 # gigabytes; large enough - Debian, for example, takes ~8 GiB. c_passphrase_named_pipe=$(dirname "$(mktemp)")/zfs-installer.pp.fifo From d7518ad12b716999c1b05a910a95807a7aea5768 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 13 Jun 2021 10:36:35 +0200 Subject: [PATCH 76/90] Don't set the permissions on rpool directories See the comment. This was also a bug, if the user was not creating `/root` or `/tmp` filesystems. --- install-zfs.sh | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 53bc0c1..a32752c 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -1205,9 +1205,10 @@ function create_pools_and_datasets { fi done < <(echo "$interpolated_dataset_create_options") - chmod 700 /mnt/root - # This is fine independently of the user creating a dataset for /tmp or not. - chmod 1777 /mnt/tmp + # Here, the original procedure sets the permissions for /root (700) and /tmp (1777), however, in this + # script we don't need to do it, since we sync the O/S installer result, which is already configured. + # In case of changes, don't forget that the destination layout may be empty, at this point, due to + # the user's ZFS filesystems configuration! 
# BOOT POOL CREATION ################# From 4a03a0b3603ba4bf030ba2c62cdd6719d19935ff Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 13 Jun 2021 11:49:30 +0200 Subject: [PATCH 77/90] Handle Ubiquity now emptying `/run` See comment for detailed description. --- install-zfs.sh | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index a32752c..3a59fcc 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -1256,13 +1256,17 @@ function sync_os_temp_installation_dir_to_rpool { perl -lane 'BEGIN { $/ = "\r"; $|++ } $F[1] =~ /(\d+)%$/ && print $1' | whiptail --gauge "Syncing the installed O/S to the root pool FS..." 30 100 0 - mkdir "$c_zfs_mount_dir/run" - - # Required destination of symlink `/etc/resolv.conf`, present in Ubuntu systems (not Debian). + # Ubiquity used to leave `/target/run`, which included the file symlinked by `/etc/resolv.conf`. At + # some point, it started cleaning it after installation, leaving the symlink broken, which caused + # the jail preparation to fail. For this reason, we now create the dir/file manually. + # As of Jun/2021, it's not clear if there is any O/S leaving the dir/file, but for simplicity, we + # always create them if not existing. # - if [[ -d $c_installed_os_mount_dir/run/systemd/resolve ]]; then - rsync -av --relative "$c_installed_os_mount_dir/run/./systemd/resolve" "$c_zfs_mount_dir/run" - fi + mkdir -p "$c_installed_os_mount_dir/run/systemd/resolve" + touch "$c_installed_os_mount_dir/run/systemd/resolve/stub-resolv.conf" + + mkdir "$c_zfs_mount_dir/run" + rsync -av --relative "$c_installed_os_mount_dir/run/./systemd/resolve" "$c_zfs_mount_dir/run" umount "$c_installed_os_mount_dir" } From 5f1f8e8ce7154f61ecafc1ac0320e6772830fd6b Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 13 Jun 2021 16:05:15 +0200 Subject: [PATCH 78/90] Remove global Shellcheck directives That issues have been fixed. 
--- install-zfs.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/install-zfs.sh b/install-zfs.sh index 3a59fcc..dc69de0 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -1,5 +1,4 @@ #!/bin/bash -# shellcheck disable=SC2015,SC2016 # Shellcheck issue descriptions: # From ddb4e5e63cde5f361b5e0e101a643b3726c2bb87 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 13 Jun 2021 15:54:16 +0200 Subject: [PATCH 79/90] Officially close project --- README.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/README.md b/README.md index 5a9c3ab..efd2d03 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,7 @@ ZFS installer is a shell script program that fully prepares ZFS on a system, and allows an effortless installation of several Ubuntu-based operating systems using their standard installer (or debootstrap, or any custom script). +- [Status](#status) - [Requirements and functionality](#requirements-and-functionality) - [Comparison with Ubuntu built-in installer](#comparison-with-ubuntu-built-in-installer) - [Instructions](#instructions) @@ -15,6 +16,21 @@ ZFS installer is a shell script program that fully prepares ZFS on a system, and - [Help](#help) - [Credits](#credits) +## Status + +**WARNING: THIS PROJECT HAS BEEN PUT ON HOLD, AND IT'S NOT STABLE ANYMORE** + +Working on this type of program is extremely time consuming, for a few reasons: + +1. installers are sometimes updated (even within the same Ubuntu patch version), breaking the program in unpredictable ways; +2. the problem above is compounded by different distros having minor differences in configuration, which again, break the program; +3. it's not possible to automate the testing without a considerably sophisticated tool (which should, in theory, perform the operations on the Ubiquity GUI; this is also impossible for Ubuntu Server); while debootstrap installations can be programmatically tested, the bulk of the bugs are related to the GUI installer +4. 
I'm the only developer actively working on the project; the ZFS are collaborating with Canonical, and, while the outcome is highly desirable for ZFS, it makes manual execution the only option for even minor customization + +Therefore, I'm closing this project. I may keep developing in order to support my home installation, but I can't give any guarantee. + +I'm open to PR, though. + ## Requirements and functionality The program currently supports: From 9f8c1b85abe4bcee526637dac91e820cafac3305 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Mon, 14 Jun 2021 09:02:40 +0200 Subject: [PATCH 80/90] README: Fix ZFS_USE_PPA typo --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index efd2d03..edb79d2 100644 --- a/README.md +++ b/README.md @@ -106,7 +106,7 @@ This script needs to be run with admin permissions, from a Live CD. The procedure can be entirely automated via environment variables: - ZFS_OS_INSTALLATION_SCRIPT : path of a script to execute instead of Ubiquity (see dedicated section below) -- ZFS_USE_PPAS : set to 1 to use packages from `ppa:jonathonf/zfs` (automatically set to true if the O/S version doesn't ship at least v0.8) +- ZFS_USE_PPA : set to 1 to use packages from `ppa:jonathonf/zfs` (automatically set to true if the O/S version doesn't ship at least v0.8) - ZFS_SELECTED_DISKS : full path of the devices to create the pool on, comma-separated - ZFS_PASSPHRASE - ZFS_RPOOL_NAME From 86b244274dbc62c0cfaabcbd5c8a6e2ba6e038ed Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Mon, 14 Jun 2021 10:11:45 +0200 Subject: [PATCH 81/90] Create bpool dataset(s), in order to avoid GRUB warning --- install-zfs.sh | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index dc69de0..22f3609 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -1178,7 +1178,7 @@ function create_pools_and_datasets { "$v_rpool_name" "${v_pools_raid_type[@]}" 
"${rpool_disks_partitions[@]}" \ < "$c_passphrase_named_pipe" - # DATASETS CREATION ################## + # RPOOL DATASETS CREATION ############ local interpolated_dataset_create_options interpolated_dataset_create_options=$(eval echo \""$v_dataset_create_options"\") @@ -1209,13 +1209,19 @@ function create_pools_and_datasets { # In case of changes, don't forget that the destination layout may be empty, at this point, due to # the user's ZFS filesystems configuration! - # BOOT POOL CREATION ################# + # BOOT POOL/DATASETS CREATION ######## + + # Creating the datasets is not necessary, however, it avoids the annoying GRUB warning when updating + # (`cannot open 'bpool/BOOT/ROOT': dataset does not exist`). zpool create \ -o cachefile=/etc/zfs/zpool.cache \ "${v_bpool_create_options[@]}" \ - -O mountpoint=/boot -R "$c_zfs_mount_dir" -f \ + -O mountpoint=/boot -O canmount=off -R "$c_zfs_mount_dir" -f \ "$c_bpool_name" "${v_pools_raid_type[@]}" "${bpool_disks_partitions[@]}" + + zfs create -o canmount=off "$c_bpool_name/BOOT" + zfs create -o mountpoint=/boot "$c_bpool_name/BOOT/ROOT" } function create_swap_volume { From 5ebd17ac0cc8c12dde39b1c577e91b34e979c1a5 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Mon, 14 Jun 2021 13:26:42 +0200 Subject: [PATCH 82/90] Fix EFI mounts (from 1 onwards) filesystem UUID --- install-zfs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install-zfs.sh b/install-zfs.sh index 22f3609..049c4bf 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -1416,7 +1416,7 @@ function sync_efi_partitions { for ((i = 1; i < ${#v_selected_disks[@]}; i++)); do local synced_efi_partition_path="/boot/efi$((i + 1))" - chroot_execute "echo /dev/disk/by-uuid/$(blkid -s UUID -o value "${v_selected_disks[0]}"-part1) $synced_efi_partition_path vfat defaults 0 0 >> /etc/fstab" + chroot_execute "echo /dev/disk/by-uuid/$(blkid -s UUID -o value "${v_selected_disks[i]}"-part1) $synced_efi_partition_path vfat defaults 0 0 >> 
/etc/fstab" chroot_execute "mkdir -p $synced_efi_partition_path" chroot_execute "mount $synced_efi_partition_path" From 1cdf975610e463d2f1a501a1aa58a1468acfe840 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Mon, 14 Jun 2021 13:25:30 +0200 Subject: [PATCH 83/90] 795a45d Refactoring: Move fstab preparation into separate step --- install-zfs.sh | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 049c4bf..ba6fabe 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -1375,11 +1375,25 @@ function install_jail_zfs_packages_UbuntuServer { fi } -function prepare_efi_partition { - # The other mounts are configured/synced in the EFI partitions sync stage. - # - chroot_execute "echo /dev/disk/by-uuid/$(blkid -s UUID -o value "${v_selected_disks[0]}"-part1) /boot/efi vfat defaults 0 0 > /etc/fstab" +function prepare_fstab { + chroot_execute "true > /etc/fstab" + for ((i = 0; i < ${#v_selected_disks[@]}; i++)); do + if (( i == 0 )); then + local mountpoint=/boot/efi + else + local mountpoint=/boot/efi$((i + 1)) + fi + + chroot_execute "echo /dev/disk/by-uuid/$(blkid -s UUID -o value "${v_selected_disks[i]}"-part1) $mountpoint vfat defaults 0 0 >> /etc/fstab" + done + + if (( v_swap_size > 0 )); then + chroot_execute "echo /dev/zvol/$v_rpool_name/swap none swap discard 0 0 >> /etc/fstab" + fi +} + +function prepare_efi_partition { chroot_execute "mkdir -p /boot/efi" chroot_execute "mount /boot/efi" @@ -1416,8 +1430,6 @@ function sync_efi_partitions { for ((i = 1; i < ${#v_selected_disks[@]}; i++)); do local synced_efi_partition_path="/boot/efi$((i + 1))" - chroot_execute "echo /dev/disk/by-uuid/$(blkid -s UUID -o value "${v_selected_disks[i]}"-part1) $synced_efi_partition_path vfat defaults 0 0 >> /etc/fstab" - chroot_execute "mkdir -p $synced_efi_partition_path" chroot_execute "mount $synced_efi_partition_path" @@ -1495,7 +1507,8 @@ function fix_filesystem_mount_ordering { } function 
configure_remaining_settings { - [[ $v_swap_size -gt 0 ]] && chroot_execute "echo /dev/zvol/$v_rpool_name/swap none swap discard 0 0 >> /etc/fstab" || true + # The swap volume, if specified, is set in the prepare_fstab() step. + chroot_execute "echo RESUME=none > /etc/initramfs-tools/conf.d/resume" } @@ -1598,6 +1611,7 @@ fi invoke "prepare_jail" invoke "install_jail_base_packages" invoke "install_jail_zfs_packages" +invoke "prepare_fstab" invoke "prepare_efi_partition" invoke "configure_and_update_grub" invoke "sync_efi_partitions" From 2455c932b3ee705cbb8513980d54f4ec3cf17212 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Mon, 14 Jun 2021 13:49:32 +0200 Subject: [PATCH 84/90] Refactoring: De-inline conditional command in fix_filesystem_mount_ordering() --- install-zfs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install-zfs.sh b/install-zfs.sh index ba6fabe..5a7bb75 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -1456,7 +1456,7 @@ function fix_filesystem_mount_ordering { # On Debian, this file may exist already. # - chroot_execute "[[ ! -f /etc/zfs/zed.d/history_event-zfs-list-cacher.sh ]] && ln -s /usr/lib/zfs-linux/zed.d/history_event-zfs-list-cacher.sh /etc/zfs/zed.d/" + chroot_execute "if [[ ! -f /etc/zfs/zed.d/history_event-zfs-list-cacher.sh ]]; then ln -s /usr/lib/zfs-linux/zed.d/history_event-zfs-list-cacher.sh /etc/zfs/zed.d/; fi" # Assumed to be present by the zedlet above on Debian, but missing. # Filed issue: https://github.com/zfsonlinux/zfs/issues/9945. From 9411d3d13af0acc6fba5bdc26ac364f551e060a7 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Mon, 14 Jun 2021 16:23:41 +0200 Subject: [PATCH 85/90] fstab: Make EFI partitions `nofail` This was changed by the new procedure, however, currently, the port is not complete, and in the current form, without nofail, if one disk fails, the entire boot will fail. 
--- install-zfs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install-zfs.sh b/install-zfs.sh index 5a7bb75..136a251 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -1385,7 +1385,7 @@ function prepare_fstab { local mountpoint=/boot/efi$((i + 1)) fi - chroot_execute "echo /dev/disk/by-uuid/$(blkid -s UUID -o value "${v_selected_disks[i]}"-part1) $mountpoint vfat defaults 0 0 >> /etc/fstab" + chroot_execute "echo /dev/disk/by-uuid/$(blkid -s UUID -o value "${v_selected_disks[i]}"-part1) $mountpoint vfat nofail 0 0 >> /etc/fstab" done if (( v_swap_size > 0 )); then From ca843f1b1572036acbbe1accc4c77e6cc3dc95ca Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Mon, 14 Jun 2021 11:34:33 +0200 Subject: [PATCH 86/90] Fix /boot mounts mess (part 1: timeout) --- install-zfs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install-zfs.sh b/install-zfs.sh index 136a251..27ba7e7 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -1385,7 +1385,7 @@ function prepare_fstab { local mountpoint=/boot/efi$((i + 1)) fi - chroot_execute "echo /dev/disk/by-uuid/$(blkid -s UUID -o value "${v_selected_disks[i]}"-part1) $mountpoint vfat nofail 0 0 >> /etc/fstab" + chroot_execute "echo /dev/disk/by-uuid/$(blkid -s UUID -o value "${v_selected_disks[i]}"-part1) $mountpoint vfat nofail,x-systemd.device-timeout=10 0 0 >> /etc/fstab" done if (( v_swap_size > 0 )); then From e93521b330f72f16f1d4abaa4248400b3ac467cb Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Mon, 14 Jun 2021 12:25:11 +0200 Subject: [PATCH 87/90] Fix /boot mounts mess (part 2: /boot dependency) --- install-zfs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install-zfs.sh b/install-zfs.sh index 27ba7e7..0f2ebad 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -1385,7 +1385,7 @@ function prepare_fstab { local mountpoint=/boot/efi$((i + 1)) fi - chroot_execute "echo /dev/disk/by-uuid/$(blkid -s UUID -o value "${v_selected_disks[i]}"-part1) 
$mountpoint vfat nofail,x-systemd.device-timeout=10 0 0 >> /etc/fstab" + chroot_execute "echo /dev/disk/by-uuid/$(blkid -s UUID -o value "${v_selected_disks[i]}"-part1) $mountpoint vfat nofail,x-systemd.requires=zfs-mount.service,x-systemd.device-timeout=10 0 0 >> /etc/fstab" done if (( v_swap_size > 0 )); then From 373d9cfc1b72d7308ace7adb300f5e8f01878ff7 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Mon, 14 Jun 2021 17:17:23 +0200 Subject: [PATCH 88/90] Revert "Disable Ubuntu Server on mainline, and update README accordingly" This reverts commit 4d18dc14a6b63fdcd9870e33deac8ddfe230ef3d. --- README.md | 4 ++-- install-zfs.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index edb79d2..b4e297d 100644 --- a/README.md +++ b/README.md @@ -74,10 +74,10 @@ then follow the instructions; halfway through the procedure, the GUI installer o ### Ubuntu Server -Ubuntu Server requires a slightly different execution procedure, and the version 0.3.x of the script (the [new procedure](https://openzfs.github.io/openzfs-docs/Getting%20Started/Ubuntu/Ubuntu%2020.04%20Root%20on%20ZFS.html) doesn't directly support Ubuntu Server): +Ubuntu Server requires a slightly different execution procedure: - when the installer welcome screen shows up, tap `Ctrl+Alt+F2`, -- then type `curl -L https://git.io/JGjA6 | sudo bash`. +- then type `curl -L https://git.io/JelI5 | sudo bash`. then follow the instructions. 
diff --git a/install-zfs.sh b/install-zfs.sh index 0f2ebad..475583e 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -118,7 +118,7 @@ USERDATA/%P mountpoint=/home/%P canmount=on com.ubuntu.zsys:b ' c_zfs_mount_dir=/mnt c_installed_os_mount_dir=/target -declare -A c_supported_linux_distributions=([Ubuntu]="18.04 20.04" [LinuxMint]="19.1 19.2 19.3" [Linuxmint]="20 20.1" [elementary]=5.1) +declare -A c_supported_linux_distributions=([Ubuntu]="18.04 20.04" [UbuntuServer]="18.04 20.04" [LinuxMint]="19.1 19.2 19.3" [Linuxmint]="20 20.1" [elementary]=5.1) c_temporary_volume_size=12 # gigabytes; large enough - Debian, for example, takes ~8 GiB. c_passphrase_named_pipe=$(dirname "$(mktemp)")/zfs-installer.pp.fifo From fa80ae5ad5eea13092d72841c8d3d40df7dc193d Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Mon, 14 Jun 2021 17:20:49 +0200 Subject: [PATCH 89/90] Revert "Officially close project" This reverts commit ddb4e5e63cde5f361b5e0e101a643b3726c2bb87. --- README.md | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/README.md b/README.md index b4e297d..719a74c 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,6 @@ ZFS installer is a shell script program that fully prepares ZFS on a system, and allows an effortless installation of several Ubuntu-based operating systems using their standard installer (or debootstrap, or any custom script). -- [Status](#status) - [Requirements and functionality](#requirements-and-functionality) - [Comparison with Ubuntu built-in installer](#comparison-with-ubuntu-built-in-installer) - [Instructions](#instructions) @@ -16,21 +15,6 @@ ZFS installer is a shell script program that fully prepares ZFS on a system, and - [Help](#help) - [Credits](#credits) -## Status - -**WARNING: THIS PROJECT HAS BEEN PUT ON HOLD, AND IT'S NOT STABLE ANYMORE** - -Working on this type of program is extremely time consuming, for a few reasons: - -1. 
installers are sometimes updated (even within the same Ubuntu patch version), breaking the program in unpredictable ways; -2. the problem above is compounded by different distros having minor differences in configuration, which again, break the program; -3. it's not possible to automate the testing without a considerably sophisticated tool (which should, in theory, perform the operations on the Ubiquity GUI; this is also impossible for Ubuntu Server); while debootstrap installations can be programmatically tested, the bulk of the bugs are related to the GUI installer -4. I'm the only developer actively working on the project; the ZFS are collaborating with Canonical, and, while the outcome is highly desirable for ZFS, it makes manual execution the only option for even minor customization - -Therefore, I'm closing this project. I may keep developing in order to support my home installation, but I can't give any guarantee. - -I'm open to PR, though. - ## Requirements and functionality The program currently supports: From 884d696442581801bc217b22d50c4d0d4a701642 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Mon, 14 Jun 2021 17:26:38 +0200 Subject: [PATCH 90/90] README: Add section about stability --- README.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/README.md b/README.md index 719a74c..aa51525 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,7 @@ ZFS installer is a shell script program that fully prepares ZFS on a system, and - [Comparison with Ubuntu built-in installer](#comparison-with-ubuntu-built-in-installer) - [Instructions](#instructions) - [Ubuntu Server](#ubuntu-server) +- [Stability](#stability) - [Demo](#demo) - [Unsupported systems/Issues](#unsupported-systemsissues) - [Unattended installations](#unattended-installations) @@ -65,6 +66,16 @@ Ubuntu Server requires a slightly different execution procedure: then follow the instructions. 
+## Stability
+
+The project is carefully developed, however, it's practically impossible to guarantee continuous stability, for a few reasons:
+
+1. Linux distributions frequently apply small changes to their installers, even on the same distribution version,
+1. automated testing is not feasible; although debootstrap installations could be automated, the bulk of the work is related to the installers, which can't be automated without sophisticated GUI automation,
+1. testing is time consuming, so it can be performed on a limited number of distros at a time.
+
+Errors due to the installer will cause the script to terminate, so, generally speaking, if the script completes, the system has been successfully set up.
+
 ## Demo
 
 ![Demo](/demo/demo.gif?raw=true)