From 29fad3e9b75636239e41d421dd2ef4c97e563523 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Fri, 24 Apr 2020 22:29:23 +0200 Subject: [PATCH 01/34] Rename variable `c_lsb_release_log` to more generic `c_os_information_log` --- install-zfs.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 26fa1bb..70b005d 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -46,7 +46,7 @@ c_temporary_volume_size=12G # large enough; Debian, for example, takes ~8 GiB. c_log_dir=$(dirname "$(mktemp)")/zfs-installer c_install_log=$c_log_dir/install.log -c_lsb_release_log=$c_log_dir/lsb_release.log +c_os_information_log=$c_log_dir/os_information.log c_disks_log=$c_log_dir/disks.log c_zfs_module_version_log=$c_log_dir/updated_module_versions.log @@ -194,7 +194,7 @@ function activate_debug { function store_os_distro_information { print_step_info_header - lsb_release --all > "$c_lsb_release_log" + lsb_release --all > "$c_os_information_log" } function set_distribution_data { From cc161c3e92fe9b3306e050c821ed72d05e603bf4 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Fri, 24 Apr 2020 22:34:50 +0200 Subject: [PATCH 02/34] Log: Add flavor to O/S information --- install-zfs.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/install-zfs.sh b/install-zfs.sh index 70b005d..d960055 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -195,6 +195,11 @@ function store_os_distro_information { print_step_info_header lsb_release --all > "$c_os_information_log" + + # Madness, in order not to force the user to invoke "sudo -E". + # Assumes that the user runs exactly `sudo bash`; it's not a (current) concern if the user runs off specification. + # + perl -lne 'BEGIN { $/ = "\0" } print if /^XDG_CURRENT_DESKTOP=/' /proc/"$PPID"/environ >> "$c_os_information_log" } function set_distribution_data { From 60b83f4a0c7cc45b82e9334375884d0e5e719784 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Fri, 24 Apr 2020 22:40:29 +0200 Subject: [PATCH 03/34] Make store_os_distro_information() distro-dependent --- install-zfs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install-zfs.sh b/install-zfs.sh index d960055..36350b5 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -1254,7 +1254,7 @@ if [[ $# -ne 0 ]]; then fi activate_debug -store_os_distro_information +distro_dependent_invoke "store_os_distro_information" set_distribution_data check_prerequisites display_intro_banner From 420a2163b1cb4a16e31b365010528d9902e5a4f2 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Fri, 24 Apr 2020 22:42:20 +0200 Subject: [PATCH 04/34] Add Debian specific O/S information (full version, including minor part) --- install-zfs.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/install-zfs.sh b/install-zfs.sh index 36350b5..ea68ad3 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -202,6 +202,12 @@ function store_os_distro_information { perl -lne 'BEGIN { $/ = "\0" } print if /^XDG_CURRENT_DESKTOP=/' /proc/"$PPID"/environ >> "$c_os_information_log" } +function store_os_distro_information_Debian { + store_os_distro_information + + echo "DEBIAN_VERSION=$(cat /etc/debian_version)" >> "$c_os_information_log" +} + function set_distribution_data { v_linux_distribution="$(lsb_release --id --short)" From 4d5e62dd84c258a907a944d530cd8829ff559c78 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Fri, 24 Apr 2020 23:18:37 +0200 Subject: [PATCH 05/34] Set distribution data immediately after activating the debug Required, in order to allow 
store_os_distro_information() to be distro-dependent!

Note that this commit is out of order. It should have been near the top of the PR.
---
 install-zfs.sh | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/install-zfs.sh b/install-zfs.sh
index ea68ad3..cd3d342 100755
--- a/install-zfs.sh
+++ b/install-zfs.sh
@@ -191,6 +191,16 @@ function activate_debug {
   set -x
 }
 
+function set_distribution_data {
+  v_linux_distribution="$(lsb_release --id --short)"
+
+  if [[ "$v_linux_distribution" == "Ubuntu" ]] && grep -q '^Status: install ok installed$' < <(dpkg -s ubuntu-server 2> /dev/null); then
+    v_linux_distribution="UbuntuServer"
+  fi
+
+  v_linux_version="$(lsb_release --release --short)"
+}
+
 function store_os_distro_information {
   print_step_info_header
 
@@ -208,16 +218,6 @@ function store_os_distro_information_Debian {
   echo "DEBIAN_VERSION=$(cat /etc/debian_version)" >> "$c_os_information_log"
 }
 
-function set_distribution_data {
-  v_linux_distribution="$(lsb_release --id --short)"
-
-  if [[ "$v_linux_distribution" == "Ubuntu" ]] && grep -q '^Status: install ok installed$' < <(dpkg -s ubuntu-server 2> /dev/null); then
-    v_linux_distribution="UbuntuServer"
-  fi
-
-  v_linux_version="$(lsb_release --release --short)"
-}
-
 function check_prerequisites {
   print_step_info_header
 
@@ -1260,8 +1260,8 @@ if [[ $# -ne 0 ]]; then
 fi
 
 activate_debug
-distro_dependent_invoke "store_os_distro_information"
 set_distribution_data
+distro_dependent_invoke "store_os_distro_information"
 check_prerequisites
 display_intro_banner

From e481a2aa4cc3b3af0a8b02f68a3ed2b4e20f3d57 Mon Sep 17 00:00:00 2001
From: Saverio Miroddi
Date: Fri, 24 Apr 2020 23:37:24 +0200
Subject: [PATCH 06/34] Fix shellcheck warnings
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Quoting warnings related to pool tweaks. Previously, they were actually
explicitly ignored, which shouldn't have been done 😬.
---
 install-zfs.sh | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/install-zfs.sh b/install-zfs.sh
index cd3d342..0cff898 100755
--- a/install-zfs.sh
+++ b/install-zfs.sh
@@ -16,14 +16,14 @@ set -o nounset
 # Variables set (indirectly) by the user
 
 v_bpool_name=
-v_bpool_tweaks= # see defaults below for format
+v_bpool_tweaks= # array; see defaults below for format
 v_linux_distribution= # Debian, Ubuntu, ... WATCH OUT: not necessarily from `lsb_release` (ie. UbuntuServer)
 v_linux_distribution_version=
 v_encrypt_rpool= # 0=false, 1=true
 v_passphrase=
 v_root_password= # Debian-only
 v_rpool_name=
-v_rpool_tweaks= # see defaults below for format
+v_rpool_tweaks= # array; see defaults below for format
 declare -a v_selected_disks # (/dev/by-id/disk_id, ...)
v_swap_size= # integer v_free_tail_space= # integer @@ -477,15 +477,15 @@ function ask_pool_tweaks { print_step_info_header if [[ ${ZFS_BPOOL_TWEAKS:-} != "" ]]; then - v_bpool_tweaks=$ZFS_BPOOL_TWEAKS + mapfile -d' ' -t v_bpool_tweaks < <(echo -n "$ZFS_BPOOL_TWEAKS") else - v_bpool_tweaks=$(whiptail --inputbox "Insert the tweaks for the boot pool" 30 100 -- "$c_default_bpool_tweaks" 3>&1 1>&2 2>&3) + mapfile -t v_bpool_tweaks < <(whiptail --inputbox "Insert the tweaks for the boot pool" 30 100 -- "$c_default_bpool_tweaks" 3>&1 1>&2 2>&3) fi if [[ ${ZFS_RPOOL_TWEAKS:-} != "" ]]; then - v_rpool_tweaks=$ZFS_RPOOL_TWEAKS + mapfile -d' ' -t v_rpool_tweaks < <(echo -n "$ZFS_RPOOL_TWEAKS") else - v_rpool_tweaks=$(whiptail --inputbox "Insert the tweaks for the root pool" 30 100 -- "$c_default_rpool_tweaks" 3>&1 1>&2 2>&3) + mapfile -t v_rpool_tweaks < <(whiptail --inputbox "Insert the tweaks for the root pool" 30 100 -- "$c_default_rpool_tweaks" 3>&1 1>&2 2>&3) fi print_variables v_bpool_tweaks v_rpool_tweaks @@ -656,20 +656,18 @@ function prepare_disks { # # Stdin is ignored if the encryption is not set (and set via prompt). # - # shellcheck disable=SC2086 # unquoted tweaks variable (splitting is expected) set +x echo -n "$v_passphrase" | zpool create \ "${encryption_options[@]}" \ - $v_rpool_tweaks \ + "${v_rpool_tweaks[@]}" \ -O devices=off -O mountpoint=/ -R "$c_zfs_mount_dir" -f \ "$v_rpool_name" $pools_mirror_option "${rpool_disks_partitions[@]}" set -x # `-d` disable all the pool features (not used here); # - # shellcheck disable=SC2086 # see previous command zpool create \ - $v_bpool_tweaks \ + "${v_bpool_tweaks[@]}" \ -O devices=off -O mountpoint=/boot -R "$c_zfs_mount_dir" -f \ "$v_bpool_name" $pools_mirror_option "${bpool_disks_partitions[@]}" From 309c95d3d9553ee625b00694ab7d7802bcecbb61 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 25 Apr 2020 15:23:05 +0200 Subject: [PATCH 07/34] Add `temp` to .gitignore --- .gitignore | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 9467c65..d3c621c 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ -wiki +/temp/ +/wiki/ From cce1c8e3c57dbddc6794bcb1df35ce5224471aa8 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 25 Apr 2020 16:31:32 +0200 Subject: [PATCH 08/34] Minor refactoring/cleaning of the variable declarations --- install-zfs.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 0cff898..3c8fb04 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -13,12 +13,14 @@ set -o nounset # VARIABLES/CONSTANTS ########################################################## +# Variables set by the script + +v_linux_distribution= # Debian, Ubuntu, ... WATCH OUT: not necessarily from `lsb_release` (ie. UbuntuServer) + # Variables set (indirectly) by the user v_bpool_name= v_bpool_tweaks= # array; see defaults below for format -v_linux_distribution= # Debian, Ubuntu, ... WATCH OUT: not necessarily from `lsb_release` (ie. 
UbuntuServer)
-v_linux_distribution_version=
 v_encrypt_rpool= # 0=false, 1=true
 v_passphrase=
 v_root_password= # Debian-only

From 59bb35851b8eac3b03e599a74358c6812d8aa716 Mon Sep 17 00:00:00 2001
From: Saverio Miroddi
Date: Sat, 25 Apr 2020 18:08:58 +0200
Subject: [PATCH 09/34] Fix pool tweaks definition bugs introduced with the Shellcheck fixes
---
 install-zfs.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/install-zfs.sh b/install-zfs.sh
index 3c8fb04..370ef48 100755
--- a/install-zfs.sh
+++ b/install-zfs.sh
@@ -481,13 +481,13 @@
   if [[ ${ZFS_BPOOL_TWEAKS:-} != "" ]]; then
     mapfile -d' ' -t v_bpool_tweaks < <(echo -n "$ZFS_BPOOL_TWEAKS")
   else
-    mapfile -t v_bpool_tweaks < <(whiptail --inputbox "Insert the tweaks for the boot pool" 30 100 -- "$c_default_bpool_tweaks" 3>&1 1>&2 2>&3)
+    mapfile -d' ' -t v_bpool_tweaks < <(whiptail --inputbox "Insert the tweaks for the boot pool" 30 100 -- "$c_default_bpool_tweaks" 3>&1 1>&2 2>&3)
   fi
 
   if [[ ${ZFS_RPOOL_TWEAKS:-} != "" ]]; then
     mapfile -d' ' -t v_rpool_tweaks < <(echo -n "$ZFS_RPOOL_TWEAKS")
   else
-    mapfile -t v_rpool_tweaks < <(whiptail --inputbox "Insert the tweaks for the root pool" 30 100 -- "$c_default_rpool_tweaks" 3>&1 1>&2 2>&3)
+    mapfile -d' ' -t v_rpool_tweaks < <(whiptail --inputbox "Insert the tweaks for the root pool" 30 100 -- "$c_default_rpool_tweaks" 3>&1 1>&2 2>&3)
   fi
 
   print_variables v_bpool_tweaks v_rpool_tweaks

From b3cfe0ae47480745e0437a60b218322a01f862fa Mon Sep 17 00:00:00 2001
From: Saverio Miroddi
Date: Sat, 25 Apr 2020 18:16:09 +0200
Subject: [PATCH 10/34] Minor refactoring in ask_pool_tweaks()
---
 install-zfs.sh | 16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)

diff --git a/install-zfs.sh b/install-zfs.sh
index 370ef48..203e739 100755
--- a/install-zfs.sh
+++ b/install-zfs.sh
@@ -478,17 +478,13 @@ function ask_pool_names {
 function ask_pool_tweaks {
   print_step_info_header
 
-  if [[ ${ZFS_BPOOL_TWEAKS:-} != "" ]]; then
-    mapfile -d' ' -t v_bpool_tweaks < <(echo -n "$ZFS_BPOOL_TWEAKS")
-  else
-    mapfile -d' ' -t v_bpool_tweaks < <(whiptail --inputbox "Insert the tweaks for the boot pool" 30 100 -- "$c_default_bpool_tweaks" 3>&1 1>&2 2>&3)
-  fi
+  local raw_bpool_tweaks=${ZFS_BPOOL_TWEAKS:-$(whiptail --inputbox "Insert the tweaks for the boot pool" 30 100 -- "$c_default_bpool_tweaks" 3>&1 1>&2 2>&3)}
 
-  if [[ ${ZFS_RPOOL_TWEAKS:-} != "" ]]; then
-    mapfile -d' ' -t v_rpool_tweaks < <(echo -n "$ZFS_RPOOL_TWEAKS")
-  else
-    mapfile -d' ' -t v_rpool_tweaks < <(whiptail --inputbox "Insert the tweaks for the root pool" 30 100 -- "$c_default_rpool_tweaks" 3>&1 1>&2 2>&3)
-  fi
+  mapfile -d' ' -t v_bpool_tweaks < <(echo -n "$raw_bpool_tweaks")
+
+  local raw_rpool_tweaks=${ZFS_RPOOL_TWEAKS:-$(whiptail --inputbox "Insert the tweaks for the root pool" 30 100 -- "$c_default_rpool_tweaks" 3>&1 1>&2 2>&3)}
+
+  mapfile -d' ' -t v_rpool_tweaks < <(echo -n "$raw_rpool_tweaks")
 
   print_variables v_bpool_tweaks v_rpool_tweaks

From 0a3dc8b3486541f638c15ac36d7c346d4e59e9b4 Mon Sep 17 00:00:00 2001
From: Saverio Miroddi
Date: Sat, 25 Apr 2020 16:29:50 +0200
Subject: [PATCH 11/34] Make ZFS packages handling based on the availability of v0.8 on the main repository

This is required work for supporting Ubuntu 20.04.
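For reference, the detection introduced by this commit can be exercised standalone on a
live session; this is a minimal sketch of the logic of find_zfs_package_requirements(),
using the same `apt show`/Perl parsing as the function added in the diff below:

    # Probe the default repository for the ZFS userland version; a preceding
    # `apt update` is assumed (the real function performs it itself).
    zfs_package_version=$(apt show zfsutils-linux 2> /dev/null | perl -ne 'print $1 if /^Version: (\d+\.\d+)\./')

    # A 0.x version with minor >= 8 means ZFS 0.8+ ships in the main repository,
    # so no PPA is needed (non-0.x versions are rejected earlier in the script).
    if [[ $zfs_package_version =~ ^0\. ]] && (( $(echo "$zfs_package_version" | cut -d. -f2) >= 8 )); then
      v_zfs_08_in_repository=1
    fi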
--- install-zfs.sh | 70 ++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 53 insertions(+), 17 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 203e739..7128e0f 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -16,6 +16,7 @@ set -o nounset # Variables set by the script v_linux_distribution= # Debian, Ubuntu, ... WATCH OUT: not necessarily from `lsb_release` (ie. UbuntuServer) +v_zfs_08_in_repository= # 1=true, false otherwise (applies only to Ubuntu-based) # Variables set (indirectly) by the user @@ -324,6 +325,40 @@ If you think this is a bug, please open an issue on https://github.com/saveriomi print_variables v_suitable_disks } +# There are three parameters: +# +# 1. the tools are preinstalled (ie. Ubuntu Desktop based); +# 2. the default repository supports ZFS 0.8 (ie. Ubuntu 20.04+ based); +# 3. the distro provides the precompiled ZFS module (i.e. Ubuntu based, not Debian) +# +# Fortunately, with Debian-specific logic isolated, we need conditionals based only on #2 - see +# install_host_packages() and install_host_packages_UbuntuServer(). +# +function find_zfs_package_requirements { + print_step_info_header + + # WATCH OUT. This is assumed by code in later functions. + # + apt update + + local zfs_package_version + zfs_package_version=$(apt show zfsutils-linux 2> /dev/null | perl -ne 'print $1 if /^Version: (\d+\.\d+)\./') + + if [[ -n $zfs_package_version ]]; then + if [[ ! $zfs_package_version =~ ^0\. ]]; then + >&2 echo "Unsupported ZFS version!: $zfs_package_version" + exit 1 + elif (( $(echo "$zfs_package_version" | cut -d. -f2) >= 8 )); then + v_zfs_08_in_repository=1 + fi + fi +} + +function find_zfs_package_requirements_Debian { + # Do nothing - ZFS packages are handled in a specific way. + : +} + function select_disks { print_step_info_header @@ -493,25 +528,21 @@ function install_host_packages { print_step_info_header if [[ ${ZFS_SKIP_LIVE_ZFS_MODULE_INSTALL:-} != "1" ]]; then - echo "zfs-dkms zfs-dkms/note-incompatible-licenses note true" | debconf-set-selections + if [[ $v_zfs_08_in_repository != "1" ]]; then + add-apt-repository --yes ppa:jonathonf/zfs + apt update - add-apt-repository --yes ppa:jonathonf/zfs + # Libelf-dev allows `CONFIG_STACK_VALIDATION` to be set - it's optional, but good to have. + # Module compilation log: `/var/lib/dkms/zfs/0.8.2/build/make.log` (adjust according to version). + # + echo "zfs-dkms zfs-dkms/note-incompatible-licenses note true" | debconf-set-selections + apt install --yes libelf-dev zfs-dkms - # Required only on LinuxMint, which doesn't update the apt data when invoking `add-apt-repository`. - # With the current design, it's arguably preferrable to introduce a redundant operation (for - # Ubuntu), rather than adding an almost entirely duplicated function. - # - apt update - - # Libelf-dev allows `CONFIG_STACK_VALIDATION` to be set - it's optional, but good to have. - # Module compilation log: `/var/lib/dkms/zfs/0.8.2/build/make.log` (adjust according to version). 
- # - apt install --yes libelf-dev zfs-dkms - - systemctl stop zfs-zed - modprobe -r zfs - modprobe zfs - systemctl start zfs-zed + systemctl stop zfs-zed + modprobe -r zfs + modprobe zfs + systemctl start zfs-zed + fi fi zfs --version > "$c_zfs_module_version_log" 2>&1 @@ -565,6 +596,10 @@ function install_host_packages_UbuntuServer { # apt update apt install -y "linux-headers-$(uname -r)" efibootmgr + + if [[ $v_zfs_08_in_repository == "1" ]]; then + apt install --yes zfsutils-linux zfs-modules + fi fi install_host_packages @@ -1261,6 +1296,7 @@ distro_dependent_invoke "store_os_distro_information" check_prerequisites display_intro_banner find_suitable_disks +find_zfs_package_requirements select_disks distro_dependent_invoke "ask_root_password" --noforce From 2e4196708597742b0db81d4c728c737f9acbe4ce Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 25 Apr 2020 19:03:36 +0200 Subject: [PATCH 12/34] Update the README with the 20.04 support information --- README.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/README.md b/README.md index 6449afc..ef0f829 100644 --- a/README.md +++ b/README.md @@ -28,6 +28,20 @@ RAID-1 (mirroring) is supported, with any arbitrary number of disks; the boot an It's fairly easy to extend the program to support other Debian-based operating systems (e.g. older/newer Ubuntu's, etc.) - the project is (very) open to feature requests. +### Ubuntu (Desktop) 20.04 support + +The script internally supports Ubuntu 20.04 installations, however, Ubiquity (the Ubuntu Desktop installer) [has an issue](https://bugs.launchpad.net/ubuntu/+source/ubiquity/+bug/1875045) that makes it impossible to prepare ZFS pools ahead of time, which is a requirement for the ZFS installer to work. + +Canonical has shown a lack of consideration for power users/developers, in the (S)Ubiquity department: + +- Subiquity removed support for virtual devices (Ubiquity has it): see [bug](https://bugs.launchpad.net/subiquity/+bug/1811037); +- the Ubuntu Server live CD prevents the user from manually launching the installer, and the installer doesn't allow the user to open a terminal; +- the mentioned issue prevents users from managing their own ZFS partitions when installing the O/S. + +As a consequence, it's hard to predict future developments; the best possible strategy is currently to join to the [20.04 bug in the bug tracker](https://bugs.launchpad.net/ubuntu/+source/ubiquity/+bug/1875045). + +With this in mind, power users can still use the ZFS installer in all the cases, by providing a custom script (via `$ZFS_OS_INSTALLATION_SCRIPT`) that uses `debootstrap` to perform the installation. + ## Advantages over the Ubuntu 19.10 built-in installer Canonical released Ubuntu 19.10, with an experimental ZFS installer. The advantages of this project over the 19.10 installer are: From dd0cc776d966f58ea71634d597d49f2843063ed5 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 25 Apr 2020 19:05:00 +0200 Subject: [PATCH 13/34] Remove "19.10" from the Ubuntu ZFS built-in installer references --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index ef0f829..860ce63 100644 --- a/README.md +++ b/README.md @@ -42,9 +42,9 @@ As a consequence, it's hard to predict future developments; the best possible st With this in mind, power users can still use the ZFS installer in all the cases, by providing a custom script (via `$ZFS_OS_INSTALLATION_SCRIPT`) that uses `debootstrap` to perform the installation. 
-## Advantages over the Ubuntu 19.10 built-in installer
+## Advantages over the Ubuntu built-in installer
 
-Canonical released Ubuntu 19.10, with an experimental ZFS installer. The advantages of this project over the 19.10 installer are:
+With Ubuntu 19.10, Canonical released an experimental ZFS installer. The advantages of this project over this installer are:
 
 1. on production systems, it's undesirable to use a non-LTS version;
 2. the experimental Ubuntu installer is unconfigurable; it generates a fixed setup.

From 6b9380ca50c2f17e2b8320b5a2df076c095bd7fe Mon Sep 17 00:00:00 2001
From: Saverio Miroddi
Date: Sun, 26 Apr 2020 00:21:22 +0200
Subject: [PATCH 14/34] README: Add "Coming soon" Ubuntu 20.04 section
---
 README.md | 12 +-----------
 1 file changed, 1 insertion(+), 11 deletions(-)

diff --git a/README.md b/README.md
index 860ce63..87b788d 100644
--- a/README.md
+++ b/README.md
@@ -30,17 +30,7 @@ It's fairly easy to extend the program to support other Debian-based operating s
 
 ### Ubuntu (Desktop) 20.04 support
 
-The script internally supports Ubuntu 20.04 installations, however, Ubiquity (the Ubuntu Desktop installer) [has an issue](https://bugs.launchpad.net/ubuntu/+source/ubiquity/+bug/1875045) that makes it impossible to prepare ZFS pools ahead of time, which is a requirement for the ZFS installer to work.
-
-Canonical has shown a lack of consideration for power users/developers, in the (S)Ubiquity department:
-
-- Subiquity removed support for virtual devices (Ubiquity has it): see [bug](https://bugs.launchpad.net/subiquity/+bug/1811037);
-- the Ubuntu Server live CD prevents the user from manually launching the installer, and the installer doesn't allow the user to open a terminal;
-- the mentioned issue prevents users from managing their own ZFS partitions when installing the O/S.
-
-As a consequence, it's hard to predict future developments; the best possible strategy is currently to join to the [20.04 bug in the bug tracker](https://bugs.launchpad.net/ubuntu/+source/ubiquity/+bug/1875045).
-
-With this in mind, power users can still use the ZFS installer in all the cases, by providing a custom script (via `$ZFS_OS_INSTALLATION_SCRIPT`) that uses `debootstrap` to perform the installation.
+The support is coming soon, due to [an Ubiquity issue](https://bugs.launchpad.net/ubuntu/+source/ubiquity/+bug/1875045).

From d959f9b1bdd419e4dcd1a847557e42861e0f7c10 Mon Sep 17 00:00:00 2001
From: Saverio Miroddi
Date: Sun, 26 Apr 2020 10:00:16 +0200
Subject: [PATCH 15/34] Improve encryption passphrase UX (and simplify logic)

The UX has been simplified by implying that the encryption is disabled if the
user inputs an empty password (so that there's no need for a screen asking for
enablement).

The code follows the same logic, relying on the `ZFS_PASSPHRASE` variable being
set or not. This is a different behavior from the other variables, for which
blank and unset are the same.
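The set-versus-unset distinction relies on Bash's `-v` test; a minimal sketch of
the resulting ZFS_PASSPHRASE semantics (variable names as in the script, whiptail
call abridged):

    # ZFS_PASSPHRASE: unset            -> ask the user interactively
    #                 set but blank    -> encryption disabled, no prompt
    #                 set and nonblank -> used as the passphrase
    if [[ -v ZFS_PASSPHRASE ]]; then
      v_passphrase=$ZFS_PASSPHRASE
    else
      v_passphrase=$(whiptail --passwordbox "Passphrase (blank = no encryption):" 30 100 3>&1 1>&2 2>&3)
    fi

    if [[ -n $v_passphrase ]]; then
      echo "encryption will be enabled"
    fi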
--- install-zfs.sh | 52 ++++++++++++++++++++++++++++---------------------- 1 file changed, 29 insertions(+), 23 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 7128e0f..5ed4b03 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -22,8 +22,7 @@ v_zfs_08_in_repository= # 1=true, false otherwise (applies only to Ubuntu-b v_bpool_name= v_bpool_tweaks= # array; see defaults below for format -v_encrypt_rpool= # 0=false, 1=true -v_passphrase= +v_passphrase= # the corresponding var (ZFS_PASSPHRASE) has special behavior (see below) v_root_password= # Debian-only v_rpool_name= v_rpool_tweaks= # array; see defaults below for format @@ -160,7 +159,7 @@ The procedure can be entirely automated via environment variables: - ZFS_OS_INSTALLATION_SCRIPT : path of a script to execute instead of Ubiquity (see dedicated section below) - ZFS_SELECTED_DISKS : full path of the devices to create the pool on, comma-separated - ZFS_ENCRYPT_RPOOL : set 1 to encrypt the pool -- ZFS_PASSPHRASE +- ZFS_PASSPHRASE : set non-blank to encrypt the pool, and blank not to. if unset, it will be asked. - ZFS_DEBIAN_ROOT_PASSWORD - ZFS_BPOOL_NAME - ZFS_RPOOL_NAME @@ -418,29 +417,32 @@ function ask_root_password_Debian { function ask_encryption { print_step_info_header - if [[ "${ZFS_ENCRYPT_RPOOL:-}" == "" ]]; then - if whiptail --yesno 'Do you want to encrypt the root pool?' 30 100; then - v_encrypt_rpool=1 - fi - elif [[ "${ZFS_ENCRYPT_RPOOL:-}" != "0" ]]; then - v_encrypt_rpool=1 - fi set +x - if [[ $v_encrypt_rpool == "1" ]]; then - if [[ ${ZFS_PASSPHRASE:-} != "" ]]; then - v_passphrase="$ZFS_PASSPHRASE" - else - local passphrase_invalid_message= - local passphrase_repeat=- - while [[ "$v_passphrase" != "$passphrase_repeat" || ${#v_passphrase} -lt 8 ]]; do - v_passphrase=$(whiptail --passwordbox "${passphrase_invalid_message}Please enter the passphrase (8 chars min.):" 30 100 3>&1 1>&2 2>&3) - passphrase_repeat=$(whiptail --passwordbox "Please repeat the passphrase:" 30 100 3>&1 1>&2 2>&3) + if [[ -v ZFS_PASSPHRASE ]]; then + v_passphrase=$ZFS_PASSPHRASE + else + local passphrase_repeat=_ + local passphrase_invalid_message= - passphrase_invalid_message="Passphrase too short, or not matching! " - done - fi + while [[ $v_passphrase != "$passphrase_repeat" || ${#v_passphrase} -lt 8 ]]; do + local dialog_message="${passphrase_invalid_message}Please enter the passphrase (8 chars min.): + +Leave blank to keep encryption disabled. +" + + v_passphrase=$(whiptail --passwordbox "$dialog_message" 30 100 3>&1 1>&2 2>&3) + + if [[ -z $v_passphrase ]]; then + break + fi + + passphrase_repeat=$(whiptail --passwordbox "Please repeat the passphrase:" 30 100 3>&1 1>&2 2>&3) + + passphrase_invalid_message="Passphrase too short, or not matching! 
" + done fi + set -x } @@ -664,10 +666,14 @@ function prepare_disks { local rpool_disks_partitions=() local bpool_disks_partitions=() - if [[ $v_encrypt_rpool == "1" ]]; then + set +x + + if [[ -n $v_passphrase ]]; then encryption_options=(-O "encryption=on" -O "keylocation=prompt" -O "keyformat=passphrase") fi + set -x + for selected_disk in "${v_selected_disks[@]}"; do rpool_disks_partitions+=("${selected_disk}-part3") bpool_disks_partitions+=("${selected_disk}-part2") From 1f4fd9274d3e0ac6ba6b61a6e51c655ce3a80422 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 26 Apr 2020 10:10:53 +0200 Subject: [PATCH 16/34] Exit if `ZFS_PASSPHRASE` is provided, and too small --- install-zfs.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/install-zfs.sh b/install-zfs.sh index 5ed4b03..e718dd8 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -234,6 +234,9 @@ function check_prerequisites { elif [[ "${ZFS_OS_INSTALLATION_SCRIPT:-}" != "" && ! -x "$ZFS_OS_INSTALLATION_SCRIPT" ]]; then echo "The custom O/S installation script provided doesn't exist or is not executable!" exit 1 + elif [[ -v ZFS_PASSPHRASE && ${#ZFS_PASSPHRASE} -lt 8 ]]; then + echo "The passphase provided is too short; at least 8 chars required." + exit 1 elif [[ ! -v c_supported_linux_distributions["$v_linux_distribution"] ]]; then echo "This Linux distribution ($v_linux_distribution) is not supported!" exit 1 From 1ff0b98b33166dbb5d7180740a700d9d8fede4b5 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 26 Apr 2020 14:24:32 +0200 Subject: [PATCH 17/34] Check $ZFS_PASSPHRASE only if defined and not blank --- install-zfs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install-zfs.sh b/install-zfs.sh index e718dd8..1620972 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -234,7 +234,7 @@ function check_prerequisites { elif [[ "${ZFS_OS_INSTALLATION_SCRIPT:-}" != "" && ! -x "$ZFS_OS_INSTALLATION_SCRIPT" ]]; then echo "The custom O/S installation script provided doesn't exist or is not executable!" exit 1 - elif [[ -v ZFS_PASSPHRASE && ${#ZFS_PASSPHRASE} -lt 8 ]]; then + elif [[ -v ZFS_PASSPHRASE && -n $ZFS_PASSPHRASE && ${#ZFS_PASSPHRASE} -lt 8 ]]; then echo "The passphase provided is too short; at least 8 chars required." exit 1 elif [[ ! -v c_supported_linux_distributions["$v_linux_distribution"] ]]; then From e951c338ecf74d39620edf3b3936952f6aea474d Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 26 Apr 2020 14:25:23 +0200 Subject: [PATCH 18/34] Disable logging when checking $ZFS_PASSPHRASE --- install-zfs.sh | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 1620972..3f28e97 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -234,9 +234,6 @@ function check_prerequisites { elif [[ "${ZFS_OS_INSTALLATION_SCRIPT:-}" != "" && ! -x "$ZFS_OS_INSTALLATION_SCRIPT" ]]; then echo "The custom O/S installation script provided doesn't exist or is not executable!" exit 1 - elif [[ -v ZFS_PASSPHRASE && -n $ZFS_PASSPHRASE && ${#ZFS_PASSPHRASE} -lt 8 ]]; then - echo "The passphase provided is too short; at least 8 chars required." - exit 1 elif [[ ! -v c_supported_linux_distributions["$v_linux_distribution"] ]]; then echo "This Linux distribution ($v_linux_distribution) is not supported!" 
exit 1
@@ -241,6 +241,15 @@
     echo "This Linux distribution version ($v_linux_version) is not supported; version supported: ${c_supported_linux_distributions["$v_linux_distribution"]}"
     exit 1
   fi
+
+  set +x
+
+  if [[ -v ZFS_PASSPHRASE && -n $ZFS_PASSPHRASE && ${#ZFS_PASSPHRASE} -lt 8 ]]; then
+    echo "The passphrase provided is too short; at least 8 chars required."
+    exit 1
+  fi
+
+  set -x
 }
 
 function display_intro_banner {

From 5ed809a0604e083b0464148cac00bdfc374e913d Mon Sep 17 00:00:00 2001
From: Saverio Miroddi
Date: Sun, 26 Apr 2020 16:09:11 +0200
Subject: [PATCH 19/34] Make ZFS_SKIP_LIVE_ZFS_MODULE_INSTALL usage consistent

Some comparisons were different.
---
 install-zfs.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/install-zfs.sh b/install-zfs.sh
index 3f28e97..f605a58 100755
--- a/install-zfs.sh
+++ b/install-zfs.sh
@@ -580,7 +580,7 @@ function install_host_packages_Debian {
 function install_host_packages_elementary {
   print_step_info_header
 
-  if [[ ${ZFS_SKIP_LIVE_ZFS_MODULE_INSTALL:-} == "" ]]; then
+  if [[ ${ZFS_SKIP_LIVE_ZFS_MODULE_INSTALL:-} != "1" ]]; then
     apt update
     apt install -y software-properties-common
   fi
@@ -591,7 +591,7 @@ function install_host_packages_elementary {
 function install_host_packages_UbuntuServer {
   print_step_info_header
 
-  if [[ ${ZFS_SKIP_LIVE_ZFS_MODULE_INSTALL:-} == "" ]]; then
+  if [[ ${ZFS_SKIP_LIVE_ZFS_MODULE_INSTALL:-} != "1" ]]; then
     # On Ubuntu Server, `/lib/modules` is a SquashFS mount, which is read-only.
     #
     cp -R /lib/modules /tmp/

From dcc41f8ff05462cb3f31e5c9bef91a4062632342 Mon Sep 17 00:00:00 2001
From: Saverio Miroddi
Date: Sun, 26 Apr 2020 09:03:34 +0200
Subject: [PATCH 20/34] Base ZFS packages handling on the availability of v0.8 on the main repository (2nd part)

The same logic needed to be applied to the jail packages.
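chroot_execute() is used throughout the jail functions but its definition doesn't
appear in this series; a plausible minimal definition, assumed here for context
(a later comment in this series notes that the real helper works on
$c_zfs_mount_dir):

    # Assumed helper (not part of these hunks): run a command inside the new
    # O/S root (the "jail"), rather than in the live session.
    function chroot_execute {
      chroot "$c_zfs_mount_dir" bash -c "$1"
    }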
--- install-zfs.sh | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index f605a58..34d2d4e 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -1027,13 +1027,17 @@ function custom_install_operating_system { function install_jail_zfs_packages { print_step_info_header - chroot_execute "add-apt-repository --yes ppa:jonathonf/zfs" + if [[ $v_zfs_08_in_repository != "1" ]]; then + chroot_execute "add-apt-repository --yes ppa:jonathonf/zfs" - chroot_execute "apt update" + chroot_execute "apt update" - chroot_execute 'echo "zfs-dkms zfs-dkms/note-incompatible-licenses note true" | debconf-set-selections' + chroot_execute 'echo "zfs-dkms zfs-dkms/note-incompatible-licenses note true" | debconf-set-selections' - chroot_execute "apt install --yes libelf-dev zfs-initramfs zfs-dkms grub-efi-amd64-signed shim-signed" + chroot_execute "apt install --yes libelf-dev zfs-initramfs zfs-dkms" + fi + + chroot_execute "apt install --yes grub-efi-amd64-signed shim-signed" } function install_jail_zfs_packages_Debian { @@ -1065,6 +1069,16 @@ function install_jail_zfs_packages_elementary { install_jail_zfs_packages } +function install_jail_zfs_packages_UbuntuServer { + print_step_info_header + + if [[ $v_zfs_08_in_repository == "1" ]]; then + chroot_execute "apt install --yes zfsutils-linux zfs-modules grub-efi-amd64-signed shim-signed" + else + install_jail_zfs_packages + fi +} + function install_and_configure_bootloader { print_step_info_header From ca1d61fd04d97e83c43c3644e5d6c3e93ecad81e Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 26 Apr 2020 12:30:58 +0200 Subject: [PATCH 21/34] Refactoring: Split prepare_disks() into more specific functions --- install-zfs.sh | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 34d2d4e..1a1fadd 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -616,11 +616,9 @@ function install_host_packages_UbuntuServer { install_host_packages } -function prepare_disks { +function setup_partitions { print_step_info_header - # PARTITIONS ######################### - if [[ $v_free_tail_space -eq 0 ]]; then local tail_space_parameter=0 else @@ -668,7 +666,9 @@ function prepare_disks { for selected_disk in "${v_selected_disks[@]}"; do mkfs.fat -F 32 -n EFI "${selected_disk}-part1" done +} +function create_pools { # POOL OPTIONS ####################### local encryption_options=() @@ -718,9 +718,9 @@ function prepare_disks { "${v_bpool_tweaks[@]}" \ -O devices=off -O mountpoint=/boot -R "$c_zfs_mount_dir" -f \ "$v_bpool_name" $pools_mirror_option "${bpool_disks_partitions[@]}" +} - # SWAP ############################### - +function create_swap_volume { if [[ $v_swap_size -gt 0 ]]; then zfs create \ -V "${v_swap_size}G" -b "$(getconf PAGESIZE)" \ @@ -1336,7 +1336,9 @@ ask_pool_names ask_pool_tweaks distro_dependent_invoke "install_host_packages" -prepare_disks +setup_partitions +create_pools +create_swap_volume if [[ "${ZFS_OS_INSTALLATION_SCRIPT:-}" == "" ]]; then distro_dependent_invoke "create_temp_volume" From a9cb1600539b2546f1400c4c5267b807fd9b151b Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 26 Apr 2020 12:51:19 +0200 Subject: [PATCH 22/34] Cosmetic: Rename a variable in setup_partitions() --- install-zfs.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 1a1fadd..9c5cf71 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -620,9 +620,9 @@ function setup_partitions 
{ print_step_info_header if [[ $v_free_tail_space -eq 0 ]]; then - local tail_space_parameter=0 + local tail_space_start=0 else - local tail_space_parameter="-${v_free_tail_space}G" + local tail_space_start="-${v_free_tail_space}G" fi for selected_disk in "${v_selected_disks[@]}"; do @@ -632,7 +632,7 @@ function setup_partitions { sgdisk -n1:1M:+"$c_boot_partition_size" -t1:EF00 "$selected_disk" # EFI boot sgdisk -n2:0:+"$c_boot_partition_size" -t2:BF01 "$selected_disk" # Boot pool - sgdisk -n3:0:"$tail_space_parameter" -t3:BF01 "$selected_disk" # Root pool + sgdisk -n3:0:"$tail_space_start" -t3:BF01 "$selected_disk" # Root pool done # The partition symlinks are not immediately created, so we wait. From f39effa6ed76ac5069386b8590c43494283b5320 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 26 Apr 2020 13:58:09 +0200 Subject: [PATCH 23/34] Move the swapfile removal responsibility from the sync stage to the O/S install stage --- install-zfs.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/install-zfs.sh b/install-zfs.sh index 9c5cf71..e5c30e0 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -810,6 +810,8 @@ Proceed with the configuration as usual, then, at the partitioning stage: if ! mountpoint -q "$c_installed_os_data_mount_dir"; then mount "${v_temp_volume_device}p1" "$c_installed_os_data_mount_dir" fi + + rm -f "$c_installed_os_data_mount_dir/swapfile" } function install_operating_system_Debian { @@ -986,7 +988,7 @@ function sync_os_temp_installation_dir_to_rpool { # error. Without checking, it's not clear why this happens, since Subiquity supposedly finished, # but it's not a necessary file. # - rsync -avX --exclude=/swapfile --exclude=/run/motd.dynamic.new --info=progress2 --no-inc-recursive --human-readable "$c_installed_os_data_mount_dir/" "$c_zfs_mount_dir" | + rsync -avX --exclude=/run/motd.dynamic.new --info=progress2 --no-inc-recursive --human-readable "$c_installed_os_data_mount_dir/" "$c_zfs_mount_dir" | perl -lane 'BEGIN { $/ = "\r"; $|++ } $F[1] =~ /(\d+)%$/ && print $1' | whiptail --gauge "Syncing the installed O/S to the root pool FS..." 30 100 0 From 8bd6e2c3baf121298f03e323d8331fd07f204482 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 26 Apr 2020 17:34:06 +0200 Subject: [PATCH 24/34] Add other entires to rsync exclude list, for the Ubuntu Server installation --- install-zfs.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index e5c30e0..d515bfc 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -984,11 +984,11 @@ function sync_os_temp_installation_dir_to_rpool { # There isn't an exact way to filter out filenames in the rsync output, so we just use a good enough heuristic. # ❤️ Perl ❤️ # - # The motd file needs to be excluded because it vanishes during the rsync execution, causing an - # error. Without checking, it's not clear why this happens, since Subiquity supposedly finished, - # but it's not a necessary file. + # The `/run` files excluded happen in the Ubuntu Server installation. Possibly, the entire directory + # could be ignored, as it's intended to included ephemeral data. + # The `/cdrom` mount is present at least in the Ubuntu Server installation. 
# - rsync -avX --exclude=/run/motd.dynamic.new --info=progress2 --no-inc-recursive --human-readable "$c_installed_os_data_mount_dir/" "$c_zfs_mount_dir" | + rsync -avX --exclude=/cdrom --exclude=/run/motd.dynamic.new --exclude=/run/udev/queue --info=progress2 --no-inc-recursive --human-readable "$c_installed_os_data_mount_dir/" "$c_zfs_mount_dir" | perl -lane 'BEGIN { $/ = "\r"; $|++ } $F[1] =~ /(\d+)%$/ && print $1' | whiptail --gauge "Syncing the installed O/S to the root pool FS..." 30 100 0 From 2a978e3545825a52629e2282efa0ca028a8239a6 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 26 Apr 2020 17:53:45 +0200 Subject: [PATCH 25/34] README: Remove Debian issue, as it worked fine in the latest tests --- README.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/README.md b/README.md index 87b788d..96ed2ba 100644 --- a/README.md +++ b/README.md @@ -62,9 +62,6 @@ the rest is the same as the generic procedure. ### Issues/unsupported systems -As of Feb/2020, Debian 10.x does not install stably on Virtualbox 6.x (but works fine on VMWare 15.5). -For unclear reasons, the EFI partition is not recognized unless the live CD is left in the virtual reader when rebooting after the installation (!). - The Ubuntu Server alternate (non-live) version is not supported, as it's based on the Busybox environment, which lacks several tools used in the installer (apt, rsync...). ### Unattended installations From 35afd2e3b218d5207a3fa6b17a04f2fd2b46370f Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 26 Apr 2020 13:05:34 +0200 Subject: [PATCH 26/34] New installation partition workflow: create/sync the pools after installation This is a significant change, in particular, it: - solves the Ubuntu Server installation, replacing the current hacky solution; - solves the Ubuntu Desktop 20.04 ZFS shenanigans. There is still a pending issue - the zpool expansion doesn't work, so, it leaves 12 GiB unused, but considering the target hardware, it's a minor problem. --- README.md | 2 + install-zfs.sh | 434 +++++++++++++++++++------------------------------ 2 files changed, 169 insertions(+), 267 deletions(-) diff --git a/README.md b/README.md index 96ed2ba..bf045e5 100644 --- a/README.md +++ b/README.md @@ -62,6 +62,8 @@ the rest is the same as the generic procedure. ### Issues/unsupported systems +Due to a current problem with the zpool expansion, 12 GiB of empty space are left at the end of each disk. + The Ubuntu Server alternate (non-live) version is not supported, as it's based on the Busybox environment, which lacks several tools used in the installer (apt, rsync...). 
### Unattended installations diff --git a/install-zfs.sh b/install-zfs.sh index d515bfc..65dba23 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -32,7 +32,7 @@ v_free_tail_space= # integer # Variables set during execution -v_temp_volume_device= # /dev/zdN; scope: create_temp_volume -> install_operating_system +v_temp_volume_device= # /dev/zdN; scope: setup_partitions -> sync_os_temp_installation_dir_to_rpool v_suitable_disks=() # (/dev/by-id/disk_id, ...); scope: find_suitable_disks -> select_disk # Constants @@ -41,7 +41,6 @@ c_default_bpool_tweaks="-o ashift=12" c_default_rpool_tweaks="-o ashift=12 -O acltype=posixacl -O compression=lz4 -O dnodesize=auto -O relatime=on -O xattr=sa -O normalization=formD" c_zfs_mount_dir=/mnt c_installed_os_data_mount_dir=/target -c_unpacked_subiquity_dir=/tmp/ubiquity_snap_files declare -A c_supported_linux_distributions=([Debian]=10 [Ubuntu]=18.04 [UbuntuServer]=18.04 [LinuxMint]=19 [elementary]=5.1) c_boot_partition_size=768M # while 512M are enough for a few kernels, the Ubuntu updater complains after a couple c_temporary_volume_size=12G # large enough; Debian, for example, takes ~8 GiB. @@ -619,6 +618,8 @@ function install_host_packages_UbuntuServer { function setup_partitions { print_step_info_header + local temporary_partition_start=-$((${c_temporary_volume_size:0:-1} + v_free_tail_space))G + if [[ $v_free_tail_space -eq 0 ]]; then local tail_space_start=0 else @@ -630,9 +631,10 @@ function setup_partitions { # wipefs --all "$selected_disk" - sgdisk -n1:1M:+"$c_boot_partition_size" -t1:EF00 "$selected_disk" # EFI boot - sgdisk -n2:0:+"$c_boot_partition_size" -t2:BF01 "$selected_disk" # Boot pool - sgdisk -n3:0:"$tail_space_start" -t3:BF01 "$selected_disk" # Root pool + sgdisk -n1:1M:+"$c_boot_partition_size" -t1:EF00 "$selected_disk" # EFI boot + sgdisk -n2:0:+"$c_boot_partition_size" -t2:BF01 "$selected_disk" # Boot pool + sgdisk -n3:0:"$temporary_partition_start" -t3:BF01 "$selected_disk" # Root pool + sgdisk -n4:0:"$tail_space_start" -t4:8300 "$selected_disk" # Temporary partition done # The partition symlinks are not immediately created, so we wait. @@ -666,6 +668,154 @@ function setup_partitions { for selected_disk in "${v_selected_disks[@]}"; do mkfs.fat -F 32 -n EFI "${selected_disk}-part1" done + + v_temp_volume_device=$(readlink -f "${v_selected_disks[0]}-part4") +} + +function install_operating_system { + print_step_info_header + + local dialog_message='The Ubuntu GUI installer will now be launched. + +Proceed with the configuration as usual, then, at the partitioning stage: + +- check `Something Else` -> `Continue` +- select `'"$v_temp_volume_device"'` -> `Change` + - set `Use as:` to `Ext4` + - check `Format the partition:` + - set `Mount point` to `/` -> `OK` -> `Continue` +- `Install Now` -> `Continue` +- at the end, choose `Continue Testing` +' + + if [[ ${ZFS_NO_INFO_MESSAGES:-} == "" ]]; then + whiptail --msgbox "$dialog_message" 30 100 + fi + + # The display is restricted only to the owner (`user`), so we need to allow any user to access + # it. + # + sudo -u "$SUDO_USER" env DISPLAY=:0 xhost + + + DISPLAY=:0 ubiquity --no-bootloader + + swapoff -a + + # /target is not always unmounted; the reason is unclear. A possibility is that if there is an + # active swapfile under `/target` and ubiquity fails to unmount /target, it fails silently, + # leaving `/target` mounted. + # For this reason, if it's not mounted, we remount it. + # + # Note that we assume that the user created only one partition on the temp volume, as expected. 
+ # + if ! mountpoint -q "$c_installed_os_data_mount_dir"; then + mount "$v_temp_volume_device" "$c_installed_os_data_mount_dir" + fi + + rm -f "$c_installed_os_data_mount_dir/swapfile" +} + +function install_operating_system_Debian { + print_step_info_header + + # The temporary volume size displayed is an approximation of the format used by the installer, + # but it's acceptable - the complexity required is not worth (eg. converting hypothetical units, + # etc.). + # + local dialog_message='The Debian GUI installer will now be launched. + +Proceed with the configuration as usual, then, at the partitioning stage: + +- check `Manual partitioning` -> `Next` +- click on `'"${v_temp_volume_device}"'` in the filesystems panel -> `Edit` + - click on `Format` + - set `Mount Point` to `/` -> `OK` +- `Next` +- follow through the installation (ignore the EFI partition warning) +- at the end, uncheck `Restart now`, and click `Done` +' + + if [[ ${ZFS_NO_INFO_MESSAGES:-} == "" ]]; then + whiptail --msgbox "$dialog_message" 30 100 + fi + + # See install_operating_system(). + # + sudo -u "$SUDO_USER" env DISPLAY=:0 xhost + + + DISPLAY=:0 calamares + + mkdir -p "$c_installed_os_data_mount_dir" + + # Note how in Debian, for reasons currenly unclear, the mount fails if the partition is passed; + # it requires the device to be passed. + # + mount "${v_temp_volume_device}" "$c_installed_os_data_mount_dir" + + # We don't use chroot()_execute here, as it works on $c_zfs_mount_dir (which is synced on a + # later stage). + # + set +x + chroot "$c_installed_os_data_mount_dir" bash -c "echo root:$(printf "%q" "$v_root_password") | chpasswd" + set -x + + # The installer doesn't set the network interfaces, so, for convenience, we do it. + # + for interface in $(ip addr show | perl -lne '/^\d+: (?!lo:)(\w+)/ && print $1' ); do + cat > "$c_installed_os_data_mount_dir/etc/network/interfaces.d/$interface" < `Done` +- select `'"$v_temp_volume_device"'` -> `Edit` + - set `Format:` to `ext4` (mountpoint will be automatically selected) + - click `Save` +- click `Done` -> `Continue` (ignore warning) +- follow through the installation, until the end (after the updates are applied) +- switch back to this terminal (Ctrl+Alt+F2), and continue (tap Enter) + +Do not continue in this terminal (tap Enter) now! + +You can switch anytime to this terminal, and back, in order to read the instructions. +' + + whiptail --msgbox "$dialog_message" 30 100 + + swapoff -a + + # See note in install_operating_system(). It's not clear whether this is required on Ubuntu + # Server, but it's better not to take risks. + # + if ! mountpoint -q "$c_installed_os_data_mount_dir"; then + mount "${v_temp_volume_device}p2" "$c_installed_os_data_mount_dir" + fi + + rm -f "$c_installed_os_data_mount_dir"/swap.img +} + +function custom_install_operating_system { + print_step_info_header + + sudo "$ZFS_OS_INSTALLATION_SCRIPT" } function create_pools { @@ -731,252 +881,6 @@ function create_swap_volume { fi } -function create_temp_volume { - print_step_info_header - - zfs create -V "$c_temporary_volume_size" "$v_rpool_name/os-install-temp" - - # The volume may not be immediately available; for reference, "/dev/zvol/.../os-install-temp" - # is a standard file, which turns into symlink once the volume is available. See #8. 
- # - udevadm settle --timeout "$c_udevadm_settle_timeout" || true - - v_temp_volume_device=$(readlink -f "/dev/zvol/$v_rpool_name/os-install-temp") - - sgdisk -n1:0:0 -t1:8300 "$v_temp_volume_device" - - udevadm settle --timeout "$c_udevadm_settle_timeout" || true -} - -# Differently from Ubuntu, the installer (Calamares) requires a filesystem to be ready. -# -function create_temp_volume_Debian { - print_step_info_header - - create_temp_volume - - mkfs.ext4 -F "$v_temp_volume_device" -} - -# Let Subiquity take care of the partitions/FSs; the current patch allow the installer to handle -# only virtual block devices, not partitions belonging to them. -# -function create_temp_volume_UbuntuServer { - print_step_info_header - - zfs create -V "$c_temporary_volume_size" "$v_rpool_name/os-install-temp" - - udevadm settle --timeout "$c_udevadm_settle_timeout" || true - - v_temp_volume_device=$(readlink -f "/dev/zvol/$v_rpool_name/os-install-temp") -} - -function install_operating_system { - print_step_info_header - - local dialog_message='The Ubuntu GUI installer will now be launched. - -Proceed with the configuration as usual, then, at the partitioning stage: - -- check `Something Else` -> `Continue` -- select `'"$v_temp_volume_device"p1'` -> `Change` - - set `Use as:` to `Ext4` - - check `Format the partition:` - - set `Mount point` to `/` -> `OK` -- `Install Now` -> `Continue` -- at the end, choose `Continue Testing` -' - - if [[ ${ZFS_NO_INFO_MESSAGES:-} == "" ]]; then - whiptail --msgbox "$dialog_message" 30 100 - fi - - # The display is restricted only to the owner (`user`), so we need to allow any user to access - # it. - # - sudo -u "$SUDO_USER" env DISPLAY=:0 xhost + - - DISPLAY=:0 ubiquity --no-bootloader - - swapoff -a - - # /target is not always unmounted; the reason is unclear. A possibility is that if there is an - # active swapfile under `/target` and ubiquity fails to unmount /target, it fails silently, - # leaving `/target` mounted. - # For this reason, if it's not mounted, we remount it. - # - # Note that we assume that the user created only one partition on the temp volume, as expected. - # - if ! mountpoint -q "$c_installed_os_data_mount_dir"; then - mount "${v_temp_volume_device}p1" "$c_installed_os_data_mount_dir" - fi - - rm -f "$c_installed_os_data_mount_dir/swapfile" -} - -function install_operating_system_Debian { - print_step_info_header - - # The temporary volume size displayed is an approximation of the format used by the installer, - # but it's acceptable - the complexity required is not worth (eg. converting hypothetical units, - # etc.). - # - local dialog_message='The Debian GUI installer will now be launched. - -Proceed with the configuration as usual, then, at the partitioning stage: - -- check `Manual partitioning` -> `Next` -- set `Storage device` to `Unknown - '"${c_temporary_volume_size}"' '"${v_temp_volume_device}"'` -- click on `'"${v_temp_volume_device}"'` in the filesystems panel -> `Edit` - - set `Mount Point` to `/` -> `OK` -- `Next` -- follow through the installation (ignore the EFI partition warning) -- at the end, uncheck `Restart now`, and click `Done` -' - - if [[ ${ZFS_NO_INFO_MESSAGES:-} == "" ]]; then - whiptail --msgbox "$dialog_message" 30 100 - fi - - # See install_operating_system(). - # - sudo -u "$SUDO_USER" env DISPLAY=:0 xhost + - - DISPLAY=:0 calamares - - mkdir -p "$c_installed_os_data_mount_dir" - - # Note how in Debian, for reasons currenly unclear, the mount fails if the partition is passed; - # it requires the device to be passed. 
- # - mount "${v_temp_volume_device}" "$c_installed_os_data_mount_dir" - - # We don't use chroot()_execute here, as it works on $c_zfs_mount_dir (which is synced on a - # later stage). - # - set +x - chroot "$c_installed_os_data_mount_dir" bash -c "echo root:$(printf "%q" "$v_root_password") | chpasswd" - set -x - - # The installer doesn't set the network interfaces, so, for convenience, we do it. - # - for interface in $(ip addr show | perl -lne '/^\d+: (?!lo:)(\w+)/ && print $1' ); do - cat > "$c_installed_os_data_mount_dir/etc/network/interfaces.d/$interface" < if re.match('^/devices/virtual(?!/block/$zfs_volume_name)', data.get('DEVPATH', '')): -DIFF - - patch -p1 "$c_unpacked_subiquity_dir/lib/python3.6/site-packages/probert/storage.py" << DIFF -18a19 -> import re -85c86 -< return self.devpath.startswith('/devices/virtual/') ---- -> return re.match('^/devices/virtual/(?!block/$zfs_volume_name)', self.devpath) -DIFF - - patch -p1 "$c_unpacked_subiquity_dir/lib/python3.6/site-packages/curtin/block/__init__.py" << 'DIFF' -116c116 -< for dev_type in ['bcache', 'nvme', 'mmcblk', 'cciss', 'mpath', 'md']: ---- -> for dev_type in ['bcache', 'nvme', 'mmcblk', 'cciss', 'mpath', 'md', 'zd']: -DIFF - - patch -p1 "$c_unpacked_subiquity_dir/lib/python3.6/site-packages/subiquity/ui/views/installprogress.py" << 'DIFF' -diff lib/python3.6/site-packages/subiquity/ui/views/installprogress.py{.bak,} -47a48,49 -> self.exit_btn = cancel_btn( -> _("Exit To Shell"), on_press=self.quit) -121c123 -< btns = [self.view_log_btn, self.reboot_btn] ---- -> btns = [self.view_log_btn, self.exit_btn, self.reboot_btn] -133a136,138 -> def quit(self, btn): -> self.controller.quit() -> -DIFF - - snap stop subiquity - umount "/snap/subiquity/$subiquity_id" - - # Possibly, we could even just symlink, however, since we're running everything in memory, 200+ - # MB of savings are meaningful. - # - mksquashfs "$c_unpacked_subiquity_dir" "/var/lib/snapd/snaps/subiquity_$subiquity_id.snap" -noappend -always-use-fragments - rm -rf "$c_unpacked_subiquity_dir" - - # O/S Installation - # - # Subiquity is designed to prevent the user from opening a terminal, which is (to say the least) - # incongruent with the audience. - - local dialog_message='The Ubuntu Server installer (Subiquity) will now be launched. - -Proceed with the configuration as usual, then, at the partitioning stage: - -- select `Use an entire disk` -- select `'"$v_temp_volume_device"'` -- `Done` -> `Continue` (ignore the warning) -- follow through the installation -- after the security updates are installed, exit to the shell, and follow up with the ZFS installer -' - - if [[ ${ZFS_NO_INFO_MESSAGES:-} == "" ]]; then - whiptail --msgbox "$dialog_message" 30 100 - fi - - # When not running via `snap start` (which we can't, otherwise it runs in the other terminal), - # the binaries are not found, so we manually add them to the path. - # - # Running with `--bootloader=none` currently crashes Subiquity, possibly due to a bug (missing - # `lszdev` binary) - see https://bugs.launchpad.net/subiquity/+bug/1857556. - # - mount "/var/lib/snapd/snaps/subiquity_$subiquity_id.snap" "/snap/subiquity/$subiquity_id" - PATH="/snap/subiquity/$subiquity_id/bin:/snap/subiquity/$subiquity_id/usr/bin:$PATH" snap run subiquity - - swapoff -a - - # See note in install_operating_system(). It's not clear whether this is required on Ubuntu - # Server, but it's better not to take risks. - # - if ! 
mountpoint -q "$c_installed_os_data_mount_dir"; then - mount "${v_temp_volume_device}p2" "$c_installed_os_data_mount_dir" - fi - - rm -f "$c_installed_os_data_mount_dir"/swap.img -} - function sync_os_temp_installation_dir_to_rpool { print_step_info_header @@ -1002,10 +906,14 @@ function sync_os_temp_installation_dir_to_rpool { umount "$c_installed_os_data_mount_dir" } -function destroy_temp_volume { +function remove_temp_partition_and_expand_rpool { print_step_info_header - zfs destroy "$v_rpool_name/os-install-temp" + parted -s "${v_selected_disks[0]}" rm 4 + + for selected_disk in "${v_selected_disks[@]}"; do + zpool online -e "$v_rpool_name" "$selected_disk-part3" + done } function prepare_jail { @@ -1018,12 +926,6 @@ function prepare_jail { chroot_execute 'echo "nameserver 8.8.8.8" >> /etc/resolv.conf' } -function custom_install_operating_system { - print_step_info_header - - sudo "$ZFS_OS_INSTALLATION_SCRIPT" -} - # See install_host_packages() for some comments. # function install_jail_zfs_packages { @@ -1339,22 +1241,20 @@ ask_pool_tweaks distro_dependent_invoke "install_host_packages" setup_partitions -create_pools -create_swap_volume if [[ "${ZFS_OS_INSTALLATION_SCRIPT:-}" == "" ]]; then - distro_dependent_invoke "create_temp_volume" - # Includes the O/S extra configuration, if necessary (network, root pwd, etc.) distro_dependent_invoke "install_operating_system" - - sync_os_temp_installation_dir_to_rpool - destroy_temp_volume - prepare_jail else custom_install_operating_system fi +create_pools +create_swap_volume +sync_os_temp_installation_dir_to_rpool +remove_temp_partition_and_expand_rpool + +prepare_jail distro_dependent_invoke "install_jail_zfs_packages" distro_dependent_invoke "install_and_configure_bootloader" sync_efi_partitions From 54545282499e8bbd6bcec17713a8dcc3bb42705c Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 26 Apr 2020 18:09:56 +0200 Subject: [PATCH 27/34] Clarify a concept in the sync stage --- install-zfs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install-zfs.sh b/install-zfs.sh index 65dba23..8fa42f5 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -890,7 +890,7 @@ function sync_os_temp_installation_dir_to_rpool { # # The `/run` files excluded happen in the Ubuntu Server installation. Possibly, the entire directory # could be ignored, as it's intended to included ephemeral data. - # The `/cdrom` mount is present at least in the Ubuntu Server installation. + # The `/cdrom` mount is present in the Ubuntu Server installation, but not in the Ubuntu Desktop. # rsync -avX --exclude=/cdrom --exclude=/run/motd.dynamic.new --exclude=/run/udev/queue --info=progress2 --no-inc-recursive --human-readable "$c_installed_os_data_mount_dir/" "$c_zfs_mount_dir" | perl -lane 'BEGIN { $/ = "\r"; $|++ } $F[1] =~ /(\d+)%$/ && print $1' | From 9aa3c09162a00f66ea6e290bd521f63996490e82 Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sat, 25 Apr 2020 23:09:10 +0200 Subject: [PATCH 28/34] Allow multiple supported versions of a given O/S --- install-zfs.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/install-zfs.sh b/install-zfs.sh index 8fa42f5..9edf503 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -222,6 +222,8 @@ function store_os_distro_information_Debian { function check_prerequisites { print_step_info_header + local distro_version_regex=\\b${v_linux_version//./\\.}\\b + # shellcheck disable=SC2116 # `=~ $(echo ...)` causes a warning; see https://git.io/Je2QP. # if [[ ! 
-d /sys/firmware/efi ]]; then @@ -236,8 +238,8 @@ function check_prerequisites { elif [[ ! -v c_supported_linux_distributions["$v_linux_distribution"] ]]; then echo "This Linux distribution ($v_linux_distribution) is not supported!" exit 1 - elif [[ ! $v_linux_version =~ $(echo "^${c_supported_linux_distributions["$v_linux_distribution"]}\\b") ]]; then - echo "This Linux distribution version ($v_linux_version) is not supported; version supported: ${c_supported_linux_distributions["$v_linux_distribution"]}" + elif [[ ! ${c_supported_linux_distributions["$v_linux_distribution"]} =~ $distro_version_regex ]]; then + echo "This Linux distribution version ($v_linux_version) is not supported; supported versions: ${c_supported_linux_distributions["$v_linux_distribution"]}" exit 1 fi From abda370a6ab60e09f028192e1f600eeb86d434df Mon Sep 17 00:00:00 2001 From: Saverio Miroddi Date: Sun, 26 Apr 2020 17:47:26 +0200 Subject: [PATCH 29/34] Add support for Ubuntu 20.04 Only minor (but a PITA nonetheless) package-related issues. --- README.md | 6 +----- install-zfs.sh | 11 ++++++++++- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index bf045e5..bdaafcf 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ I'll keep using it on my systems, so I'll update the code if required by my use The program currently supports: -- Ubuntu Desktop 18.04.x Live +- Ubuntu Desktop 18.04.x/20.04 Live - Ubuntu Server 18.04.4 Live - Linux Mint 19.x - Debian 10.x Live (desktop environment required) @@ -28,10 +28,6 @@ RAID-1 (mirroring) is supported, with any arbitrary number of disks; the boot an It's fairly easy to extend the program to support other Debian-based operating systems (e.g. older/newer Ubuntu's, etc.) - the project is (very) open to feature requests. -### Ubuntu (Desktop) 20.04 support - -The support is coming soon, due to [an Ubiquity issue](https://bugs.launchpad.net/ubuntu/+source/ubiquity/+bug/1875045). - ## Advantages over the Ubuntu built-in installer With Ubuntu 19.10, Canonical released an experimental ZFS installer. The advantages of this project over this installer are: diff --git a/install-zfs.sh b/install-zfs.sh index 9edf503..995720c 100755 --- a/install-zfs.sh +++ b/install-zfs.sh @@ -41,7 +41,7 @@ c_default_bpool_tweaks="-o ashift=12" c_default_rpool_tweaks="-o ashift=12 -O acltype=posixacl -O compression=lz4 -O dnodesize=auto -O relatime=on -O xattr=sa -O normalization=formD" c_zfs_mount_dir=/mnt c_installed_os_data_mount_dir=/target -declare -A c_supported_linux_distributions=([Debian]=10 [Ubuntu]=18.04 [UbuntuServer]=18.04 [LinuxMint]=19 [elementary]=5.1) +declare -A c_supported_linux_distributions=([Debian]=10 [Ubuntu]="18.04 20.04" [UbuntuServer]=18.04 [LinuxMint]=19 [elementary]=5.1) c_boot_partition_size=768M # while 512M are enough for a few kernels, the Ubuntu updater complains after a couple c_temporary_volume_size=12G # large enough; Debian, for example, takes ~8 GiB. @@ -941,6 +941,15 @@ function install_jail_zfs_packages { chroot_execute 'echo "zfs-dkms zfs-dkms/note-incompatible-licenses note true" | debconf-set-selections' chroot_execute "apt install --yes libelf-dev zfs-initramfs zfs-dkms" + else + # Oddly, on a 20.04 live session, the zfs tools are installed, but they are not associated to a package: + # + # - `dpkg -S $(which zpool)` -> nothing + # - `aptitude search ~izfs | awk '{print $2}' | xargs echo` -> libzfs2linux zfs-initramfs zfs-zed zfsutils-linux + # + # The pacakges are not installed by default, so we install them. 
+    #
+    chroot_execute "apt install --yes libzfs2linux zfs-initramfs zfs-zed zfsutils-linux"
   fi
 
   chroot_execute "apt install --yes grub-efi-amd64-signed shim-signed"

From 309003832671389f5801c9ea03f5fa9a100969dd Mon Sep 17 00:00:00 2001
From: Saverio Miroddi
Date: Sun, 26 Apr 2020 19:00:24 +0200
Subject: [PATCH 30/34] Unmount `/target` submounts before `rsync`ing, and
 also skip `/run`

See comments for the details.
---
 install-zfs.sh | 31 ++++++++++++++++++++-----------
 1 file changed, 20 insertions(+), 11 deletions(-)

diff --git a/install-zfs.sh b/install-zfs.sh
index 995720c..cb8428a 100755
--- a/install-zfs.sh
+++ b/install-zfs.sh
@@ -886,18 +886,8 @@ function create_swap_volume {
 function sync_os_temp_installation_dir_to_rpool {
   print_step_info_header
 
-  # Extended attributes are not used on a standard Ubuntu installation, however, this needs to be generic.
-  # There isn't an exact way to filter out filenames in the rsync output, so we just use a good enough heuristic.
-  # ❤️ Perl ❤️
+  # On Ubuntu Server, `/boot/efi` and `/cdrom` (!!!) are mounted, but they're not needed.
   #
-  # The `/run` files excluded happen in the Ubuntu Server installation. Possibly, the entire directory
-  # could be ignored, as it's intended to included ephemeral data.
-  # The `/cdrom` mount is present in the Ubuntu Server installation, but not in the Ubuntu Desktop.
-  #
-  rsync -avX --exclude=/cdrom --exclude=/run/motd.dynamic.new --exclude=/run/udev/queue --info=progress2 --no-inc-recursive --human-readable "$c_installed_os_data_mount_dir/" "$c_zfs_mount_dir" |
-    perl -lane 'BEGIN { $/ = "\r"; $|++ } $F[1] =~ /(\d+)%$/ && print $1' |
-    whiptail --gauge "Syncing the installed O/S to the root pool FS..." 30 100 0
-
   local mount_dir_submounts
   mount_dir_submounts=$(mount | MOUNT_DIR="${c_installed_os_data_mount_dir%/}" perl -lane 'print $F[2] if $F[2] =~ /$ENV{MOUNT_DIR}\//')
 
@@ -905,6 +895,25 @@ function sync_os_temp_installation_dir_to_rpool {
   for mount_dir in $mount_dir_submounts; do
     umount "$mount_dir"
   done
 
+  # Extended attributes are not used on a standard Ubuntu installation, however, this needs to be generic.
+  # There isn't an exact way to filter out filenames in the rsync output, so we just use a good enough heuristic.
+  # ❤️ Perl ❤️
+  #
+  # `/run` is not needed (with an exception), and in Ubuntu Server it's actually a nuisance, since
+  # some files vanish while syncing. Debian is well-behaved, and `/run` is empty.
+  #
+  rsync -avX --exclude=/run --info=progress2 --no-inc-recursive --human-readable "$c_installed_os_data_mount_dir/" "$c_zfs_mount_dir" |
+    perl -lane 'BEGIN { $/ = "\r"; $|++ } $F[1] =~ /(\d+)%$/ && print $1' |
+    whiptail --gauge "Syncing the installed O/S to the root pool FS..." 30 100 0
+
+  mkdir "$c_zfs_mount_dir/run"
+
+  # Required destination of symlink `/etc/resolv.conf`, present in Ubuntu systems (not Debian).
+  #
+  if [[ -d $c_installed_os_data_mount_dir/run/systemd/resolve ]]; then
+    rsync -av --relative "$c_installed_os_data_mount_dir/run/./systemd/resolve" "$c_zfs_mount_dir/run"
+  fi
+
   umount "$c_installed_os_data_mount_dir"
 }

From a511d755a6aaf8409b325e9e38b857320019cc38 Mon Sep 17 00:00:00 2001
From: Saverio Miroddi
Date: Sun, 26 Apr 2020 20:46:13 +0200
Subject: [PATCH 31/34] README: Correct supported Ubuntu Server version

With the new zpools workflow, all the Ubuntu Server versions are (ahem
likely) supported.
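
For reference, what makes listing multiple versions per distribution work is
the check reworked in [PATCH 28/34]. A minimal standalone sketch of that
logic follows; the variable names mirror install-zfs.sh, while the sample
values are illustrative only:

  #!/bin/bash
  # Supported versions are stored as a space-separated list per distribution.
  declare -A c_supported_linux_distributions=([Ubuntu]="18.04 20.04" [Debian]=10)

  v_linux_distribution=Ubuntu   # sample value; normally from `lsb_release --id --short`
  v_linux_version=20.04         # sample value; normally from `lsb_release --release --short`

  # Escape the dots and add word boundaries, so that e.g. `8.04` doesn't
  # accidentally match inside `18.04`.
  distro_version_regex=\\b${v_linux_version//./\\.}\\b

  if [[ ${c_supported_linux_distributions["$v_linux_distribution"]} =~ $distro_version_regex ]]; then
    echo "supported"
  else
    echo "not supported"
  fi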
---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index bdaafcf..14d6d50 100644
--- a/README.md
+++ b/README.md
@@ -15,7 +15,7 @@ I'll keep using it on my systems, so I'll update the code if required by my use
 The program currently supports:
 
 - Ubuntu Desktop 18.04.x/20.04 Live
-- Ubuntu Server 18.04.4 Live
+- Ubuntu Server 18.04.x Live
 - Linux Mint 19.x
 - Debian 10.x Live (desktop environment required)
 - ElementaryOS 5.1

From 690ea136cc1c8b155c2d084d671b31d1df830efd Mon Sep 17 00:00:00 2001
From: Saverio Miroddi
Date: Sun, 26 Apr 2020 21:23:30 +0200
Subject: [PATCH 32/34] Improve/fix a comment in install_jail_zfs_packages()

---
 install-zfs.sh | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/install-zfs.sh b/install-zfs.sh
index cb8428a..7f0dcc1 100755
--- a/install-zfs.sh
+++ b/install-zfs.sh
@@ -951,12 +951,13 @@ function install_jail_zfs_packages {
     chroot_execute "apt install --yes libelf-dev zfs-initramfs zfs-dkms"
   else
-    # Oddly, on a 20.04 live session, the zfs tools are installed, but they are not associated to a package:
+    # Oddly, on a 20.04 Ubuntu Desktop live session, the zfs tools are installed, but they are not
+    # associated to a package:
     #
     # - `dpkg -S $(which zpool)` -> nothing
     # - `aptitude search ~izfs | awk '{print $2}' | xargs echo` -> libzfs2linux zfs-initramfs zfs-zed zfsutils-linux
     #
-    # The pacakges are not installed by default, so we install them.
+    # The packages are not installed by default, so we install them.
     #
     chroot_execute "apt install --yes libzfs2linux zfs-initramfs zfs-zed zfsutils-linux"
   fi

From 1e709c77654ca91393bdcc147c6a2c382060f57c Mon Sep 17 00:00:00 2001
From: Saverio Miroddi
Date: Sun, 26 Apr 2020 21:12:00 +0200
Subject: [PATCH 33/34] Semantic change: invert 0.8 module condition in
 install_host_packages

The 0.8 availability test is the more general, and most significant, condition.
---
 install-zfs.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/install-zfs.sh b/install-zfs.sh
index 7f0dcc1..be76022 100755
--- a/install-zfs.sh
+++ b/install-zfs.sh
@@ -539,8 +539,8 @@ function ask_pool_tweaks {
 function install_host_packages {
   print_step_info_header
 
-  if [[ ${ZFS_SKIP_LIVE_ZFS_MODULE_INSTALL:-} != "1" ]]; then
-    if [[ $v_zfs_08_in_repository != "1" ]]; then
+  if [[ $v_zfs_08_in_repository != "1" ]]; then
+    if [[ ${ZFS_SKIP_LIVE_ZFS_MODULE_INSTALL:-} != "1" ]]; then
       add-apt-repository --yes ppa:jonathonf/zfs
 
       apt update

From 141c11f0b035aa275e87638b4f46d63d63a0155f Mon Sep 17 00:00:00 2001
From: Saverio Miroddi
Date: Sun, 26 Apr 2020 20:46:49 +0200
Subject: [PATCH 34/34] Add support for Ubuntu 20.04 Server

Minimal change(s) required, but a PITA as always.
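
For context, the branches below hinge on `v_zfs_08_in_repository`. How the
flag is computed is not part of this patch; one plausible detection, sketched
here purely as an illustration (this is an assumption, not the project's
actual code), is to inspect the candidate package version:

  #!/bin/bash
  # Hypothetical sketch: flag whether the configured APT repositories offer
  # ZFS 0.8+ (the version line shipped with Ubuntu 20.04).
  v_zfs_08_in_repository=

  # `apt-cache policy` prints the candidate version, e.g. `0.8.3-1ubuntu12`.
  candidate_version=$(apt-cache policy zfsutils-linux | awk '/Candidate:/ { print $2 }')

  if [[ $candidate_version == 0.8* ]]; then
    v_zfs_08_in_repository=1
  fi

  echo "v_zfs_08_in_repository=${v_zfs_08_in_repository:-}"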
---
 README.md      |  2 +-
 install-zfs.sh | 19 +++++++++++--------
 2 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/README.md b/README.md
index 14d6d50..f8706c0 100644
--- a/README.md
+++ b/README.md
@@ -15,7 +15,7 @@ I'll keep using it on my systems, so I'll update the code if required by my use
 The program currently supports:
 
 - Ubuntu Desktop 18.04.x/20.04 Live
-- Ubuntu Server 18.04.x Live
+- Ubuntu Server 18.04.x/20.04 Live
 - Linux Mint 19.x
 - Debian 10.x Live (desktop environment required)
 - ElementaryOS 5.1
diff --git a/install-zfs.sh b/install-zfs.sh
index be76022..18a3e9c 100755
--- a/install-zfs.sh
+++ b/install-zfs.sh
@@ -41,7 +41,7 @@ c_default_bpool_tweaks="-o ashift=12"
 c_default_rpool_tweaks="-o ashift=12 -O acltype=posixacl -O compression=lz4 -O dnodesize=auto -O relatime=on -O xattr=sa -O normalization=formD"
 c_zfs_mount_dir=/mnt
 c_installed_os_data_mount_dir=/target
-declare -A c_supported_linux_distributions=([Debian]=10 [Ubuntu]="18.04 20.04" [UbuntuServer]=18.04 [LinuxMint]=19 [elementary]=5.1)
+declare -A c_supported_linux_distributions=([Debian]=10 [Ubuntu]="18.04 20.04" [UbuntuServer]="18.04 20.04" [LinuxMint]=19 [elementary]=5.1)
 c_boot_partition_size=768M  # while 512M are enough for a few kernels, the Ubuntu updater complains after a couple
 c_temporary_volume_size=12G # large enough; Debian, for example, takes ~8 GiB.
 
@@ -592,7 +592,14 @@ function install_host_packages_elementary {
 function install_host_packages_UbuntuServer {
   print_step_info_header
 
-  if [[ ${ZFS_SKIP_LIVE_ZFS_MODULE_INSTALL:-} != "1" ]]; then
+  if [[ $v_zfs_08_in_repository == "1" ]]; then
+    apt install --yes zfsutils-linux
+
+    zfs --version > "$c_zfs_module_version_log" 2>&1
+  elif [[ ${ZFS_SKIP_LIVE_ZFS_MODULE_INSTALL:-} != "1" ]]; then
+    # This is not needed on UBS 20.04, which has the modules built-in - incidentally, if attempted,
+    # it will cause /dev/disk/by-id changes not to be recognized.
+    #
     # On Ubuntu Server, `/lib/modules` is a SquashFS mount, which is read-only.
     #
     cp -R /lib/modules /tmp/
@@ -609,12 +616,8 @@ function install_host_packages_UbuntuServer {
     apt update
     apt install -y "linux-headers-$(uname -r)" efibootmgr
 
-    if [[ $v_zfs_08_in_repository == "1" ]]; then
-      apt install --yes zfsutils-linux zfs-modules
-    fi
+    install_host_packages
   fi
-
-  install_host_packages
 }
 
 function setup_partitions {
@@ -998,7 +1001,7 @@ function install_jail_zfs_packages_UbuntuServer {
   print_step_info_header
 
   if [[ $v_zfs_08_in_repository == "1" ]]; then
-    chroot_execute "apt install --yes zfsutils-linux zfs-modules grub-efi-amd64-signed shim-signed"
+    chroot_execute "apt install --yes zfsutils-linux zfs-initramfs grub-efi-amd64-signed shim-signed"
   else
     install_jail_zfs_packages
   fi
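
A closing note on the sync stage reworked in [PATCH 30/34]: the
rsync/Perl/whiptail pipeline is dense enough to deserve a standalone
illustration. The following is a minimal sketch of the same technique,
runnable against throwaway directories (the paths are samples, not the
script's real mountpoints):

  #!/bin/bash
  # Sample directories; the installer uses $c_installed_os_data_mount_dir
  # and $c_zfs_mount_dir instead.
  source_dir=$(mktemp -d)
  dest_dir=$(mktemp -d)
  dd if=/dev/zero of="$source_dir/ballast" bs=1M count=64 status=none

  # `--info=progress2` prints whole-transfer progress, with records separated
  # by `\r`. The Perl filter reads `\r`-terminated records ($/ = "\r"),
  # autosplits them (-a), and prints the integer part of the percentage field
  # unbuffered ($|++) - exactly the stdin stream `whiptail --gauge` expects.
  rsync -a --info=progress2 --no-inc-recursive --human-readable "$source_dir/" "$dest_dir" |
    perl -lane 'BEGIN { $/ = "\r"; $|++ } $F[1] =~ /(\d+)%$/ && print $1' |
    whiptail --gauge "Syncing..." 30 100 0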