Mirror of https://github.com/nchevsky/systemrescue-zfs.git (synced 2025-12-06 07:12:01 +01:00)
Add build-zfs-srm, a script to build ZFS support for SystemRescue
parent 53b50c42e1
commit dbc09046b4
airootfs/usr/share/sysrescue/bin/build-zfs-srm | 248 (new executable file)
@@ -0,0 +1,248 @@
#! /usr/bin/env bash
#
# build-zfs-srm - build ZFS support for SystemRescue
#
# This script builds a SystemRescue Module (SRM) file that adds support
# for the ZFS file system to a running instance of SystemRescue.
#
# Author: Daniel Richard G.
# SPDX-License-Identifier: GPL-3.0-or-later
#
# SystemRescue cannot be distributed with ZFS support due to licensing
# issues, but users are free to build ZFS for SystemRescue themselves.
# As the build process is non-trivial, it has been codified in this
# script as a service to the SystemRescue community.
#
# Usage:
#     ./build-zfs-srm [--latest]
#
# By default, this script builds the version of ZFS that was current
# (in the AUR repo) at the time of the host system's release. If the
# --latest option is given, it will instead build the newest version
# available at the time the script is run.
#
# Note that the build is performed reproducibly; i.e. re-running the
# build in the same environment should yield exactly the same output
# file. Thus, builds performed with --latest will produce different
# output as new ZFS versions become available, whereas builds performed
# without it should (in theory) remain unchanged in perpetuity.
#
# For more information, refer to
# https://www.system-rescue.org/scripts/build-zfs-srm/
#

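# Example (illustrative): a typical invocation, followed by a quick check
# that a re-run reproduces the same output file:
#
#     ./build-zfs-srm
#     sha512sum /tmp/*-zfs.srm   # compare against the hash of a previous run
#
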
use_latest_zfs=no

while [ -n "$1" ]
do
    case "$1" in
        --latest)  use_latest_zfs=yes ;;
        -h|--help) echo "$0: see comment header of script for usage info"; exit 0 ;;
        -*)        echo "$0: error: unrecognized option \"$1\""; exit 1 ;;
        *)         echo "$0: error: unrecognized argument \"$1\""; exit 1 ;;
    esac
    shift
done

# Sanity checks
if [ ! -f /etc/arch-release ] || ! /usr/bin/pacman --version >/dev/null 2>&1
then
    echo "$0: error: this script must be run on SystemRescue / Arch Linux"
    exit 1
fi
if [ $(id -u) -ne 0 ]
then
    echo "$0: error: this script must be run as root"
    exit 1
fi
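# Note: the available-RAM check below is skipped when make(1) is already
# installed, which presumably indicates a re-run in an already-prepared
# build environment.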
if [ $(awk '$1=="MemAvailable:"{print $2}' /proc/meminfo) -lt 3500000 ] && \
   ! make --version >/dev/null 2>&1
then
    echo "$0: error: not enough RAM available for build, ~4 GB needed"
    exit 1
fi

set -e -u

# Use this timestamp for reproducibility
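# (The mtime of /etc/os-release serves as a stable stand-in for the host
# system's release date.)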
t_epoch=$(stat -c '%Y' /etc/os-release)
t_date=$(date -d @${t_epoch} -R)

# Are we in an actual live running instance of SystemRescue?
# (The expected alternative is an "archlinux" Docker container)
in_live_system=$(test "_$(findmnt -n -o SOURCE /)" = _airootfs && echo true || echo false)

srm_file=/tmp/$(. /etc/os-release && echo ${ID}-${VERSION_ID}-zfs.srm)
srm_info=${srm_file%.srm}.txt

cat <<END
==== Building ${srm_file} ====
Reproducible build timestamp: ${t_epoch} (${t_date})
Running in SystemRescue live system: ${in_live_system}

END

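# Echo a command to stderr, then execute it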
run_command()
{
    echo "+ $*" >&2
    env "$@"
}

# Install some prerequisites
run_command pacman -Sy --needed --noconfirm base-devel git

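# Reinstall package $1 if its representative file $2 is not present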
reinstall_if_missing()
{
    local pkg="$1"
    local file="$2"
    test -f ${file} || run_command pacman -S --noconfirm ${pkg}
}

# Many packages in the SystemRescue ISO have had files removed to save on
# space, but we will need to restore some of them to build ZFS. Reinstall
# the following packages as needed:
reinstall_if_missing curl              /usr/include/curl/curl.h
reinstall_if_missing glibc             /usr/lib/libc_nonshared.a
reinstall_if_missing libtirpc          /usr/include/tirpc/rpc/xdr.h
reinstall_if_missing linux-api-headers /usr/include/linux/limits.h
reinstall_if_missing openssl           /usr/include/openssl/evp.h
reinstall_if_missing pam               /usr/include/security/pam_modules.h
reinstall_if_missing systemd-libs      /usr/include/libudev.h
reinstall_if_missing util-linux-libs   /usr/include/uuid/uuid.h
reinstall_if_missing zlib              /usr/include/zlib.h

if ${in_live_system}
then
    # More disk space is available in a tmpfs
    work_dir=/tmp/zfsbuild
else
    work_dir=/home/zfsbuild
fi
if [ ! -d ${work_dir} ]
then
    run_command useradd --create-home --home-dir ${work_dir} zfsbuild
fi

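# Run a shell command as the unprivileged zfsbuild user, with a reset
# environment and no ability to gain new privileges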
run_as_user()
{
    local cmd="$1"
    setpriv \
        --reuid=zfsbuild \
        --regid=zfsbuild \
        --init-groups \
        --no-new-privs \
        --reset-env \
        bash -e -x -c "${cmd}"
}

cd ${work_dir}

# Build these two essential ZFS packages from the AUR
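# (The *.stamp files below make each step idempotent: if the script is
# interrupted and re-run, steps that have already completed are skipped.)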
for aur_pkg in \
    zfs-dkms \
    zfs-utils
do
    if [ ! -d ${aur_pkg} ]
    then
        run_as_user "git clone https://aur.archlinux.org/${aur_pkg}.git"
    fi
    if [ ! -f ${aur_pkg}.repro.stamp -a ${use_latest_zfs} = no ]
    then
        run_as_user "cd ${aur_pkg} && rev=\$(git rev-list -n 1 --first-parent --before=@${t_epoch} HEAD) && git checkout \${rev}"
        touch ${aur_pkg}.repro.stamp
    fi
    if [ ! -f ${aur_pkg}.pgp.stamp ]
    then
        run_as_user ". ${aur_pkg}/PKGBUILD && gpg --recv-keys \${validpgpkeys[*]}"
        touch ${aur_pkg}.pgp.stamp
    fi
    if [ ! -f ${aur_pkg}.build.stamp ]
    then
        run_as_user "cd ${aur_pkg} && makepkg --clean"
        run_as_user "cd ${aur_pkg} && git rev-parse HEAD" > ${aur_pkg}.commit.txt
        touch ${aur_pkg}.build.stamp
    fi
done

# Install more prerequisites
run_command pacman -S --needed --noconfirm \
    dkms linux-lts linux-lts-headers squashfs-tools

# Tweak DKMS configuration
x=/etc/dkms/framework.conf
if ! grep -q '^dkms_tree=' $x && ${in_live_system}
then
    # More disk space is available in a tmpfs
    mkdir -p /tmp/dkms
    (echo; echo 'dkms_tree="/tmp/dkms"') >> $x
fi
if ! grep -q '^sign_file=' $x
then
    # Do not sign the kernel modules as this breaks reproducibility
    (echo; echo 'sign_file="/nope"') >> $x
fi

if ! ${in_live_system}
then
    # The Docker container does not extract/install man pages from
    # packages, but we want the SRM to include those
    perl -pi -e '/^NoExtract\s*=/ && m! usr/share/man/! and s/^/#/' \
        /etc/pacman.conf
fi

# Kernel module builds need the kernel file in the standard location
x=/run/archiso/bootmnt/sysresccd/boot/x86_64/vmlinuz
y=/boot/vmlinuz-linux-lts
if [ -f $x -a ! -f $y ]
then
    run_command ln -s $x $y
fi

# Do not pass the -j option to pahole(1) as it breaks reproducibility
# https://github.com/acmel/dwarves/issues/42
perl -pi -e 's/^(\s*)(extra_paholeopt=".* -j")/${1}true $2/' \
    /lib/modules/*-lts/build/scripts/pahole-flags.sh

# Install the newly-built ZFS packages, and let DKMS do its thing
run_command pacman -U --needed --noconfirm \
    zfs-utils/zfs-utils-*.pkg.tar.zst \
    zfs-dkms/zfs-dkms-*.pkg.tar.zst

# Generate list of files in the zfs-utils package
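# (pacman -Ql prints lines of the form "zfs-utils /path/to/file"; strip the
# package name and leading slash, and drop directories, to produce a list of
# paths relative to / for tar's -T option)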
pacman -Ql zfs-utils \
    | sed 's!^zfs-utils /!!' \
    | grep -v '/$' \
    > zfs-utils.files.txt

# mksquashfs(1) uses this
export SOURCE_DATE_EPOCH=${t_epoch}

# Create the SRM file
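# (The SRM contains the DKMS-built ZFS kernel modules, the regenerated
# modules.* indexes, and every file belonging to the zfs-utils package.)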
rm -f ${srm_file}
(cd / && tar cf - --sort=name \
    usr/lib/modules/*-lts/updates/dkms/* \
    usr/lib/modules/*-lts/modules.* \
    -T ${work_dir}/zfs-utils.files.txt) \
    | run_command mksquashfs - ${srm_file} -comp xz -tar -exports

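# (Optional) The contents of the generated SRM can be listed with, e.g.:
#     unsquashfs -l ${srm_file}
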
tee ${srm_info} <<END

==== SystemRescue Module (SRM) build successful ====

Build host       : $(. /etc/os-release && echo ${NAME} ${VERSION_ID})
Repro. timestamp : @${t_epoch} (${t_date})
zfs-dkms version : $(. zfs-dkms/PKGBUILD && echo ${pkgver}-${pkgrel})
 * AUR Git commit: $(cat zfs-dkms.commit.txt 2>/dev/null || echo unknown)
zfs-utils version: $(. zfs-utils/PKGBUILD && echo ${pkgver}-${pkgrel})
 * AUR Git commit: $(cat zfs-utils.commit.txt 2>/dev/null || echo unknown)
SRM file location: ${srm_file}
SRM file size    : $(stat -c '%s' ${srm_file}) bytes
SRM file SHA-1   : $(sha1sum < ${srm_file} | awk '{print $1}')
SRM file SHA-512 : $(sha512sum < ${srm_file} | awk '{print $1}')

Please visit https://www.system-rescue.org/Modules/ for instructions on
loading the SRM file into a running instance of SystemRescue.

END

# EOF