aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rwxr-xr-xbuild.sh164
-rwxr-xr-xcustom/install-archzfs999
-rwxr-xr-xscripts/test-vm.sh102
3 files changed, 773 insertions, 492 deletions
diff --git a/build.sh b/build.sh
index 012455e..7fdbee7 100755
--- a/build.sh
+++ b/build.sh
@@ -1,6 +1,9 @@
#!/bin/bash
# build.sh - Build the custom Arch ZFS installation ISO
# Must be run as root
+#
+# Uses linux-lts kernel with zfs-dkms from archzfs.com repository.
+# DKMS builds ZFS from source, ensuring it always matches the kernel version.
set -e
@@ -9,7 +12,9 @@ PROFILE_DIR="$SCRIPT_DIR/profile"
WORK_DIR="$SCRIPT_DIR/work"
OUT_DIR="$SCRIPT_DIR/out"
CUSTOM_DIR="$SCRIPT_DIR/custom"
-ZFS_PKG_DIR="$SCRIPT_DIR/zfs-packages"
+
+# Live ISO root password (for SSH access during testing/emergencies)
+LIVE_ROOT_PASSWORD="archzfs"
# Colors for output
RED='\033[0;31m'
@@ -30,40 +35,6 @@ command -v mkarchiso >/dev/null 2>&1 || {
pacman -Sy --noconfirm archiso
}
-# Get current kernel version
-KERNEL_VER=$(pacman -Si linux | grep Version | awk '{print $3}')
-info "Current Arch kernel version: $KERNEL_VER"
-
-# Download ZFS packages from GitHub releases
-info "Downloading ZFS packages for kernel $KERNEL_VER..."
-mkdir -p "$ZFS_PKG_DIR"
-
-# Find matching ZFS packages from experimental release
-ZFS_LINUX_URL=$(curl -s https://api.github.com/repos/archzfs/archzfs/releases/tags/experimental | \
- jq -r ".assets[] | select(.name | contains(\"zfs-linux-\") and contains(\"${KERNEL_VER}\") and (contains(\"-headers\") | not) and contains(\".pkg.tar.zst\") and (contains(\".sig\") | not)) | .browser_download_url" | head -1)
-
-ZFS_UTILS_URL=$(curl -s https://api.github.com/repos/archzfs/archzfs/releases/tags/experimental | \
- jq -r '.assets[] | select(.name | contains("zfs-utils-") and contains(".pkg.tar.zst") and (contains(".sig") | not) and (contains("debug") | not)) | .browser_download_url' | head -1)
-
-if [[ -z "$ZFS_LINUX_URL" ]]; then
- warn "No ZFS package found for kernel $KERNEL_VER in experimental"
- warn "Checking other releases..."
-
- # Try to find any recent zfs-linux package
- ZFS_LINUX_URL=$(curl -s https://api.github.com/repos/archzfs/archzfs/releases | \
- jq -r ".[].assets[] | select(.name | contains(\"zfs-linux-\") and contains(\"6.18\") and (contains(\"-headers\") | not) and contains(\".pkg.tar.zst\") and (contains(\".sig\") | not)) | .browser_download_url" | head -1)
-fi
-
-if [[ -z "$ZFS_LINUX_URL" || -z "$ZFS_UTILS_URL" ]]; then
- error "Could not find matching ZFS packages. The archzfs repo may not have packages for kernel $KERNEL_VER yet."
-fi
-
-info "Downloading: $(basename "$ZFS_LINUX_URL")"
-wget -q -N -P "$ZFS_PKG_DIR" "$ZFS_LINUX_URL" || error "Failed to download zfs-linux"
-
-info "Downloading: $(basename "$ZFS_UTILS_URL")"
-wget -q -N -P "$ZFS_PKG_DIR" "$ZFS_UTILS_URL" || error "Failed to download zfs-utils"
-
# Clean previous builds
if [[ -d "$WORK_DIR" ]]; then
warn "Removing previous work directory..."
@@ -75,12 +46,72 @@ info "Copying base releng profile..."
rm -rf "$PROFILE_DIR"
cp -r /usr/share/archiso/configs/releng "$PROFILE_DIR"
-# Add our custom packages (NOT zfs - we'll install that separately)
-info "Adding custom packages..."
+# Switch from linux to linux-lts
+info "Switching to linux-lts kernel..."
+sed -i 's/^linux$/linux-lts/' "$PROFILE_DIR/packages.x86_64"
+sed -i 's/^linux-headers$/linux-lts-headers/' "$PROFILE_DIR/packages.x86_64"
+# broadcom-wl depends on linux, use DKMS version instead
+sed -i 's/^broadcom-wl$/broadcom-wl-dkms/' "$PROFILE_DIR/packages.x86_64"
+
+# Update bootloader configs to use linux-lts kernel
+info "Updating bootloader configurations for linux-lts..."
+
+# UEFI systemd-boot entries
+for entry in "$PROFILE_DIR"/efiboot/loader/entries/*.conf; do
+ if [[ -f "$entry" ]]; then
+ sed -i 's/vmlinuz-linux/vmlinuz-linux-lts/g' "$entry"
+ sed -i 's/initramfs-linux\.img/initramfs-linux-lts.img/g' "$entry"
+ fi
+done
+
+# BIOS syslinux entries
+for cfg in "$PROFILE_DIR"/syslinux/*.cfg; do
+ if [[ -f "$cfg" ]]; then
+ sed -i 's/vmlinuz-linux/vmlinuz-linux-lts/g' "$cfg"
+ sed -i 's/initramfs-linux\.img/initramfs-linux-lts.img/g' "$cfg"
+ fi
+done
+
+# GRUB config
+if [[ -f "$PROFILE_DIR/grub/grub.cfg" ]]; then
+ sed -i 's/vmlinuz-linux/vmlinuz-linux-lts/g' "$PROFILE_DIR/grub/grub.cfg"
+ sed -i 's/initramfs-linux\.img/initramfs-linux-lts.img/g' "$PROFILE_DIR/grub/grub.cfg"
+fi
+
+# Update mkinitcpio preset for linux-lts (archiso uses custom preset)
+if [[ -f "$PROFILE_DIR/airootfs/etc/mkinitcpio.d/linux.preset" ]]; then
+ # Rename to linux-lts.preset and update paths
+ mv "$PROFILE_DIR/airootfs/etc/mkinitcpio.d/linux.preset" \
+ "$PROFILE_DIR/airootfs/etc/mkinitcpio.d/linux-lts.preset"
+ sed -i 's/vmlinuz-linux/vmlinuz-linux-lts/g' \
+ "$PROFILE_DIR/airootfs/etc/mkinitcpio.d/linux-lts.preset"
+ sed -i 's/initramfs-linux/initramfs-linux-lts/g' \
+ "$PROFILE_DIR/airootfs/etc/mkinitcpio.d/linux-lts.preset"
+ sed -i "s/'linux' package/'linux-lts' package/g" \
+ "$PROFILE_DIR/airootfs/etc/mkinitcpio.d/linux-lts.preset"
+fi
+
+# Add archzfs repository to pacman.conf
+info "Adding archzfs repository..."
+cat >> "$PROFILE_DIR/pacman.conf" << 'EOF'
+
+[archzfs]
+Server = https://archzfs.com/$repo/$arch
+SigLevel = Optional TrustAll
+EOF
+
+# Add ZFS and our custom packages
+info "Adding ZFS and custom packages..."
cat >> "$PROFILE_DIR/packages.x86_64" << 'EOF'
+# ZFS support (DKMS builds from source - always matches kernel)
+zfs-dkms
+zfs-utils
+linux-lts-headers
+
# Additional networking
wget
+networkmanager
# Development tools for Claude Code
nodejs
@@ -106,11 +137,30 @@ sed -i 's/^iso_name=.*/iso_name="archzfs-claude"/' "$PROFILE_DIR/profiledef.sh"
# Create airootfs directories
mkdir -p "$PROFILE_DIR/airootfs/usr/local/bin"
mkdir -p "$PROFILE_DIR/airootfs/code"
-mkdir -p "$PROFILE_DIR/airootfs/var/cache/zfs-packages"
+mkdir -p "$PROFILE_DIR/airootfs/etc/systemd/system/multi-user.target.wants"
+
+# Enable SSH on live ISO
+info "Enabling SSH on live ISO..."
+ln -sf /usr/lib/systemd/system/sshd.service \
+ "$PROFILE_DIR/airootfs/etc/systemd/system/multi-user.target.wants/sshd.service"
+
+# Set root password for live ISO
+info "Setting root password for live ISO..."
+mkdir -p "$PROFILE_DIR/airootfs/etc"
+# Generate password hash
+PASS_HASH=$(openssl passwd -6 "$LIVE_ROOT_PASSWORD")
+# Create shadow file entry (will be merged with existing)
+cat > "$PROFILE_DIR/airootfs/etc/shadow" << EOF
+root:${PASS_HASH}:19000:0:99999:7:::
+EOF
+chmod 400 "$PROFILE_DIR/airootfs/etc/shadow"
-# Copy ZFS packages to airootfs for installation during boot
-info "Copying ZFS packages to ISO..."
-cp "$ZFS_PKG_DIR"/*.pkg.tar.zst "$PROFILE_DIR/airootfs/var/cache/zfs-packages/"
+# Allow root SSH login with password (for testing)
+mkdir -p "$PROFILE_DIR/airootfs/etc/ssh/sshd_config.d"
+cat > "$PROFILE_DIR/airootfs/etc/ssh/sshd_config.d/allow-root.conf" << 'EOF'
+PermitRootLogin yes
+PasswordAuthentication yes
+EOF
# Copy our custom scripts
info "Copying custom scripts..."
@@ -118,26 +168,6 @@ cp "$CUSTOM_DIR/install-archzfs" "$PROFILE_DIR/airootfs/usr/local/bin/"
cp "$CUSTOM_DIR/install-claude" "$PROFILE_DIR/airootfs/usr/local/bin/"
cp "$CUSTOM_DIR/archsetup-zfs" "$PROFILE_DIR/airootfs/usr/local/bin/"
-# Create ZFS setup script that runs on boot
-cat > "$PROFILE_DIR/airootfs/usr/local/bin/zfs-setup" << 'ZFSSETUP'
-#!/bin/bash
-# Install ZFS packages and load module
-# Run this first after booting the ISO
-
-set -e
-
-echo "Installing ZFS packages..."
-pacman -U --noconfirm /var/cache/zfs-packages/*.pkg.tar.zst
-
-echo "Loading ZFS module..."
-modprobe zfs
-
-echo ""
-echo "ZFS is ready! You can now run:"
-echo " install-archzfs"
-echo ""
-ZFSSETUP
-
# Set permissions in profiledef.sh
info "Setting file permissions..."
if grep -q "file_permissions=" "$PROFILE_DIR/profiledef.sh"; then
@@ -151,7 +181,7 @@ if grep -q "file_permissions=" "$PROFILE_DIR/profiledef.sh"; then
/)/ i\ ["/usr/local/bin/archsetup-zfs"]="0:0:755"
}' "$PROFILE_DIR/profiledef.sh"
sed -i '/^file_permissions=(/,/)/ {
- /)/ i\ ["/usr/local/bin/zfs-setup"]="0:0:755"
+ /)/ i\ ["/etc/shadow"]="0:0:400"
}' "$PROFILE_DIR/profiledef.sh"
fi
@@ -180,9 +210,13 @@ if [[ -f "$ISO_FILE" ]]; then
echo ""
info "To test: ./scripts/test-vm.sh"
echo ""
- info "After booting, run:"
- echo " zfs-setup # Install ZFS and load module"
- echo " install-archzfs # Run the installer"
+ info "After booting:"
+ echo " - ZFS is pre-loaded (no setup needed)"
+ echo " - SSH is enabled (root password: $LIVE_ROOT_PASSWORD)"
+ echo " - Run 'install-archzfs' to start installation"
+ echo ""
+ info "SSH access (from host):"
+ echo " ssh -p 2222 root@localhost"
else
error "Build failed - no ISO file found"
fi
diff --git a/custom/install-archzfs b/custom/install-archzfs
index 2afc9b6..733660b 100755
--- a/custom/install-archzfs
+++ b/custom/install-archzfs
@@ -4,75 +4,107 @@
#
# Installs Arch Linux on ZFS root with native encryption.
# Designed to be run from the custom archzfs ISO.
+#
+# Features:
+# - All questions asked upfront, then unattended installation
+# - Optional WiFi configuration with connection test
+# - ZFS native encryption (passphrase required at boot)
+# - Pre-pacman ZFS snapshots for safe upgrades
set -e
+#############################
+# Configuration
+#############################
+
# These will be set interactively
HOSTNAME=""
-USERNAME=""
TIMEZONE=""
LOCALE="en_US.UTF-8"
KEYMAP="us"
+ROOT_PASSWORD=""
+ZFS_PASSPHRASE=""
+WIFI_SSID=""
+WIFI_PASSWORD=""
# ZFS Configuration
POOL_NAME="zroot"
COMPRESSION="zstd"
ASHIFT="12" # 4K sectors (use 13 for 8K)
-# Colors
-RED='\033[0;31m'
-GREEN='\033[0;32m'
-YELLOW='\033[1;33m'
-BLUE='\033[0;34m'
-CYAN='\033[0;36m'
-BOLD='\033[1m'
-NC='\033[0m'
+# Multi-disk RAID support
+SELECTED_DISKS=() # Array of selected disk paths (/dev/sda, /dev/sdb, ...)
+ZFS_PARTS=() # Array of ZFS partition paths
+EFI_PARTS=() # Array of EFI partition paths
+RAID_LEVEL="" # "", "mirror", "raidz1", "raidz2", "raidz3"
+ENABLE_SSH="yes" # Enable SSH with root login (default yes for headless)
# Logging
LOGFILE="/tmp/install-archzfs.log"
exec > >(tee -a "$LOGFILE") 2>&1
-info() { echo -e "${GREEN}[INFO]${NC} $1"; }
-warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
-error() { echo -e "${RED}[ERROR]${NC} $1"; exit 1; }
-step() { echo -e "\n${BLUE}==>${NC} ${CYAN}$1${NC}"; }
-prompt() { echo -e "${BOLD}$1${NC}"; }
+info() { echo "[INFO] $1"; }
+warn() { echo "[WARN] $1"; }
+error() { echo "[ERROR] $1"; exit 1; }
+step() { echo ""; echo "==> $1"; }
+prompt() { echo "$1"; }
-# Check root
-[[ $EUID -ne 0 ]] && error "This script must be run as root"
+#############################
+# Pre-flight Checks
+#############################
-# Check ZFS module
-if ! lsmod | grep -q zfs; then
- info "Loading ZFS module..."
- modprobe zfs || error "Failed to load ZFS module"
-fi
# Verify we are running as root and that the ZFS kernel module is
# available, loading it if necessary. Aborts via error() on failure.
preflight_checks() {
  # Must be root: we partition disks, create pools, and chroot.
  [[ $EUID -ne 0 ]] && error "This script must be run as root"

  # Anchor the match: a bare 'grep -q zfs' would also match other
  # modules whose names merely contain "zfs".
  if ! lsmod | grep -q '^zfs '; then
    info "Loading ZFS module..."
    # The ISO ships zfs-dkms, so the module is built against the
    # running linux-lts kernel at image build time.
    modprobe zfs || error "Failed to load ZFS module. Was the zfs-dkms module built for this kernel?"
  fi

  info "ZFS module loaded successfully."
}
+
+#############################
+# Phase 1: Gather All Input
+#############################
-### Interactive Configuration ###
-configure_install() {
- step "Installation Configuration"
# Phase 1 driver: print the welcome banner, then collect every answer
# up front so the rest of the installation can run unattended.
gather_input() {
  echo ""
  echo "╔═══════════════════════════════════════════════════════════════╗"
  echo "║ Arch Linux ZFS Root ║"
  echo "║ Configuration and Installation ║"
  echo "╚═══════════════════════════════════════════════════════════════╝"
  echo ""
  info "Answer all questions now. Installation will run unattended afterward."
  echo ""

  # Each phase fills in its corresponding global(s). Order matters:
  # RAID options depend on the disks chosen, and the summary reads
  # everything gathered before it.
  local phase
  for phase in \
      get_hostname get_timezone get_locale get_keymap \
      get_disks get_raid_level get_wifi \
      get_zfs_passphrase get_root_password get_ssh_config \
      show_summary; do
    "$phase"
  done
}
+
# Ask for the system hostname and keep asking until it satisfies the
# RFC-952-style pattern: alphanumeric, with optional interior hyphens.
get_hostname() {
  step "Hostname"
  prompt "Enter hostname for this system:"
  while true; do
    read -p "> " HOSTNAME
    [[ -n "$HOSTNAME" && "$HOSTNAME" =~ ^[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?$ ]] && break
    warn "Invalid hostname. Use letters, numbers, and hyphens (no spaces)."
  done
}
- echo ""
-
- # Username
- prompt "Enter primary username:"
- read -p "> " USERNAME
- while [[ -z "$USERNAME" || ! "$USERNAME" =~ ^[a-z_][a-z0-9_-]*$ ]]; do
- warn "Invalid username. Use lowercase letters, numbers, underscore, hyphen."
- read -p "> " USERNAME
- done
-
- echo ""
-
- # Timezone selection
+get_timezone() {
+ step "Timezone"
prompt "Select timezone region:"
PS3="Region: "
select region in "America" "Europe" "Asia" "Australia" "Pacific" "Other"; do
@@ -87,7 +119,6 @@ configure_install() {
else
echo ""
prompt "Select city:"
- # List cities for selected region
mapfile -t cities < <(find /usr/share/zoneinfo/"$region" -maxdepth 1 -type f -printf '%f\n' | sort)
PS3="City: "
select city in "${cities[@]}"; do
@@ -97,10 +128,10 @@ configure_install() {
fi
done
fi
+}
- echo ""
-
- # Locale selection
+get_locale() {
+ step "Locale"
prompt "Select locale:"
PS3="Locale: "
select loc in "en_US.UTF-8" "en_GB.UTF-8" "de_DE.UTF-8" "fr_FR.UTF-8" "es_ES.UTF-8" "Other"; do
@@ -114,10 +145,10 @@ configure_install() {
break
fi
done
+}
- echo ""
-
- # Keymap selection
+get_keymap() {
+ step "Keyboard Layout"
prompt "Select keyboard layout:"
PS3="Keymap: "
select km in "us" "uk" "de" "fr" "es" "dvorak" "Other"; do
@@ -131,126 +162,325 @@ configure_install() {
break
fi
done
+}
- # Confirm settings
# Let the user pick one or more target disks via a dialog checklist.
# Populates SELECTED_DISKS with /dev/<name> paths and requires an
# explicit 'yes' confirmation before continuing (data is destroyed later).
get_disks() {
  step "Disk Selection"
  echo ""
  echo "Available disks:"
  echo "----------------"
  lsblk -d -o NAME,SIZE,MODEL,TYPE | grep disk
  echo ""

  # Get list of available disks
  mapfile -t AVAILABLE_DISKS < <(lsblk -d -n -o NAME,TYPE | awk '$2=="disk"{print $1}')

  if [[ ${#AVAILABLE_DISKS[@]} -eq 0 ]]; then
    error "No disks found!"
  fi

  # Fail early with a clear message instead of a cryptic crash below.
  command -v dialog >/dev/null 2>&1 || error "'dialog' is required for disk selection but is not installed."

  # Build dialog checklist items
  local dialog_items=()
  local disk size model
  for disk in "${AVAILABLE_DISKS[@]}"; do
    # Declaration split from assignment so a failing lsblk is not masked.
    size=$(lsblk -d -n -o SIZE "/dev/$disk" | tr -d ' ')
    model=$(lsblk -d -n -o MODEL "/dev/$disk" | tr -d ' ' | head -c 20)
    dialog_items+=("$disk" "$size $model" "off")
  done

  # Use dialog for multi-select
  local result
  result=$(dialog --stdout --checklist "Select disks for installation (SPACE to select, ENTER to confirm):" \
    20 70 10 "${dialog_items[@]}") || error "Disk selection cancelled"

  if [[ -z "$result" ]]; then
    error "No disks selected!"
  fi

  # Parse selected disks. Word-splitting is intentional here: dialog
  # returns space-separated disk names, which never contain whitespace.
  SELECTED_DISKS=()
  for disk in $result; do
    SELECTED_DISKS+=("/dev/$disk")
  done

  clear
  echo ""
  warn "Selected ${#SELECTED_DISKS[@]} disk(s):"
  for disk in "${SELECTED_DISKS[@]}"; do
    echo " - $disk"
    lsblk "$disk" | sed 's/^/ /'
  done
  echo ""

  read -p "This will DESTROY all data on these disks. Type 'yes' to continue: " confirm
  if [[ "$confirm" != "yes" ]]; then
    error "Aborted by user"
  fi
}
-### Disk Selection ###
-select_disk() {
- step "Disk Selection"
# Choose a pool topology based on how many disks were selected.
# One disk: no RAID. Two or more: mirror/stripe, plus raidz1/2/3 as
# the disk count allows. Sets the RAID_LEVEL global.
get_raid_level() {
  local disk_count=${#SELECTED_DISKS[@]}

  if [[ $disk_count -eq 1 ]]; then
    RAID_LEVEL=""
    info "Single disk selected - no RAID"
    return
  fi

  step "RAID Configuration"
  echo ""
  echo "You have selected $disk_count disks."
  echo ""

  # The two arrays stay index-aligned: options holds the value stored
  # in RAID_LEVEL, descriptions the line shown to the user.
  local options=("mirror" "stripe")
  local descriptions=(
    "mirror - All disks mirror each other (max redundancy, ${disk_count}x durability)"
    "stripe - Combine disks for max capacity (NO redundancy, ${disk_count}x space)"
  )

  # raidzN only becomes selectable once enough disks are present.
  if (( disk_count >= 3 )); then
    options+=("raidz1")
    descriptions+=("raidz1 - Single parity (can lose 1 disk)")
  fi
  if (( disk_count >= 4 )); then
    options+=("raidz2")
    descriptions+=("raidz2 - Double parity (can lose 2 disks)")
  fi
  if (( disk_count >= 5 )); then
    options+=("raidz3")
    descriptions+=("raidz3 - Triple parity (can lose 3 disks)")
  fi

  echo "Available RAID levels:"
  local idx
  for idx in "${!descriptions[@]}"; do
    echo " $((idx+1))) ${descriptions[$idx]}"
  done
  echo ""

  PS3="Select RAID level: "
  local level
  select level in "${options[@]}"; do
    if [[ -n "$level" ]]; then
      RAID_LEVEL="$level"
      break
    fi
  done

  info "RAID level: $RAID_LEVEL"
}
+
# Optionally connect to WiFi now. A successful connection is later
# carried over to the installed system (see configure_wifi). On a
# failed test the credentials are cleared so nothing stale is copied.
get_wifi() {
  step "WiFi Configuration (Optional)"
  echo ""
  prompt "Do you want to configure WiFi? [y/N]:"
  read -p "> " configure_wifi

  if [[ ! "$configure_wifi" =~ ^[Yy]$ ]]; then
    info "Skipping WiFi configuration."
    return
  fi

  # Ensure NetworkManager is running
  systemctl start NetworkManager 2>/dev/null || true
  sleep 2

  echo ""
  info "Scanning for networks..."
  nmcli device wifi rescan 2>/dev/null || true
  sleep 2
  echo ""
  echo "Available networks:"
  nmcli device wifi list
  echo ""

  prompt "Enter WiFi SSID:"
  read -p "> " WIFI_SSID

  prompt "Enter WiFi password:"
  read -s -p "> " WIFI_PASSWORD
  echo ""

  # Test the connection. NOTE(review): the password appears in nmcli's
  # argv and is briefly visible in 'ps' — acceptable on a live ISO.
  info "Testing WiFi connection..."
  if nmcli device wifi connect "$WIFI_SSID" password "$WIFI_PASSWORD" 2>/dev/null; then
    info "WiFi connection successful!"
  else
    warn "WiFi connection failed. You can configure it manually after installation."
    WIFI_SSID=""
    WIFI_PASSWORD=""
  fi
}
+
# Collect the ZFS native-encryption passphrase (asked twice, minimum
# 8 characters). Stored in ZFS_PASSPHRASE and piped to 'zpool create'
# later. Aborts instead of looping forever if stdin is closed.
get_zfs_passphrase() {
  step "ZFS Encryption Passphrase"
  echo ""
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo "This passphrase will be required at EVERY boot."
  echo ""
  echo "Requirements:"
  echo " - Use a strong, memorable passphrase"
  echo " - If forgotten, your data is UNRECOVERABLE"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo ""

  local confirm_pass
  while true; do
    prompt "Enter ZFS encryption passphrase:"
    # read returns non-zero on EOF; without this guard the loop would
    # spin forever when run non-interactively.
    read -s -p "> " ZFS_PASSPHRASE || error "No input available (stdin closed)"
    echo ""

    prompt "Confirm passphrase:"
    read -s -p "> " confirm_pass || error "No input available (stdin closed)"
    echo ""

    if [[ "$ZFS_PASSPHRASE" != "$confirm_pass" ]]; then
      warn "Passphrases do not match. Try again."
      continue
    fi
    if [[ ${#ZFS_PASSPHRASE} -lt 8 ]]; then
      warn "Passphrase should be at least 8 characters."
      continue
    fi
    break
  done
}
+
# Collect the root password for the installed system (asked twice).
# An empty password is rejected: SSH root login may be enabled later,
# and 'chpasswd' would happily set a blank password. Aborts instead of
# looping forever if stdin is closed.
get_root_password() {
  step "Root Password"
  echo ""
  local confirm_pass
  while true; do
    prompt "Enter root password:"
    read -s -p "> " ROOT_PASSWORD || error "No input available (stdin closed)"
    echo ""

    if [[ -z "$ROOT_PASSWORD" ]]; then
      warn "Password cannot be empty."
      continue
    fi

    prompt "Confirm root password:"
    read -s -p "> " confirm_pass || error "No input available (stdin closed)"
    echo ""

    if [[ "$ROOT_PASSWORD" == "$confirm_pass" ]]; then
      break
    else
      warn "Passwords do not match. Try again."
    fi
  done
}
-### Partitioning ###
-partition_disk() {
- step "Partitioning $DISK"
-
- # Wipe existing signatures
- info "Wiping existing signatures..."
- wipefs -af "$DISK"
- sgdisk --zap-all "$DISK"
-
- # Create partitions
- # 1: EFI System Partition (1GB)
- # 2: ZFS partition (rest)
- info "Creating partitions..."
- sgdisk -n 1:0:+1G -t 1:ef00 -c 1:"EFI" "$DISK"
- sgdisk -n 2:0:0 -t 2:bf00 -c 2:"ZFS" "$DISK"
-
- # Determine partition names (handle nvme vs sda naming)
- if [[ "$DISK" == *"nvme"* ]] || [[ "$DISK" == *"mmcblk"* ]]; then
- EFI_PART="${DISK}p1"
- ZFS_PART="${DISK}p2"
# Ask whether to enable SSH (root password login) on the installed
# system. Default is yes — this installer targets headless machines.
# Sets the ENABLE_SSH global to "yes" or "no".
get_ssh_config() {
  step "SSH Configuration"
  echo ""
  info "SSH enables remote access after installation."
  info "Recommended for headless servers. Harden with archsetup later."
  echo ""
  prompt "Enable SSH with root login? [Y/n]:"
  read -p "> " ssh_choice

  case "$ssh_choice" in
    [Nn])
      ENABLE_SSH="no"
      info "SSH will not be enabled."
      ;;
    *)
      ENABLE_SSH="yes"
      info "SSH will be enabled with root password login."
      warn "Remember to harden SSH (key auth, fail2ban) with archsetup!"
      ;;
  esac
}
+
# Final review of everything gathered before any destructive step runs.
# The only confirmation is Enter; Ctrl+C aborts cleanly.
show_summary() {
  local rule="━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo ""
  echo "$rule"
  echo "Configuration Summary:"
  echo "$rule"
  echo " Hostname: $HOSTNAME"
  echo " Timezone: $TIMEZONE"
  echo " Locale: $LOCALE"
  echo " Keymap: $KEYMAP"
  echo " Disks: ${#SELECTED_DISKS[@]} disk(s)"
  local dev dev_size
  for dev in "${SELECTED_DISKS[@]}"; do
    dev_size=$(lsblk -d -n -o SIZE "$dev" | tr -d ' ')
    echo " - $dev ($dev_size)"
  done
  echo " RAID Level: ${RAID_LEVEL:-single (no RAID)}"
  echo " WiFi: ${WIFI_SSID:-Not configured}"
  echo " SSH: ${ENABLE_SSH:-yes} (root login)"
  echo " ZFS Pool: $POOL_NAME (encrypted)"
  echo " Boot: EFI on all disks (redundant)"
  echo "$rule"
  echo ""

  read -p "Press Enter to begin installation, or Ctrl+C to abort..."
}
+
+#############################
+# Phase 2: Installation
+#############################
+
# Create a 1G EFI partition plus a ZFS partition on every selected
# disk. Populates EFI_PARTS/ZFS_PARTS (parallel to SELECTED_DISKS) and
# formats each EFI partition. DESTRUCTIVE: wipes all selected disks.
partition_disks() {
  step "Partitioning ${#SELECTED_DISKS[@]} disk(s)"

  EFI_PARTS=()
  ZFS_PARTS=()

  local disk part_sep
  for disk in "${SELECTED_DISKS[@]}"; do
    info "Partitioning $disk..."

    # Wipe existing signatures
    wipefs -af "$disk"
    sgdisk --zap-all "$disk"

    # Create partitions: 1G EFI + rest for ZFS
    sgdisk -n 1:0:+1G -t 1:ef00 -c 1:"EFI" "$disk"
    sgdisk -n 2:0:0 -t 2:bf00 -c 2:"ZFS" "$disk"

    # nvme/mmcblk devices insert a 'p' between disk name and partition number
    part_sep=""
    if [[ "$disk" == *"nvme"* || "$disk" == *"mmcblk"* ]]; then
      part_sep="p"
    fi
    EFI_PARTS+=("${disk}${part_sep}1")
    ZFS_PARTS+=("${disk}${part_sep}2")

    sleep 1
    partprobe "$disk"
  done

  # Give udev a moment to create the new partition device nodes.
  sleep 2

  # Format all EFI partitions (labels EFI0, EFI1, ... per disk)
  local i
  for i in "${!EFI_PARTS[@]}"; do
    info "Formatting EFI partition ${EFI_PARTS[$i]}..."
    mkfs.fat -F32 -n "EFI$i" "${EFI_PARTS[$i]}"
  done

  info "Partitioning complete. Created ${#EFI_PARTS[@]} EFI and ${#ZFS_PARTS[@]} ZFS partitions."
}
-### ZFS Pool Creation ###
create_zfs_pool() {
step "Creating ZFS Pool with Native Encryption"
- # Check if pool already exists
if zpool list "$POOL_NAME" &>/dev/null; then
warn "Pool $POOL_NAME already exists. Destroying..."
zpool destroy -f "$POOL_NAME"
fi
- echo ""
- echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
- echo -e "${BOLD}ZFS Encryption Passphrase${NC}"
- echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
- echo ""
- echo "You will now create an encryption passphrase."
- echo "This passphrase will be required at EVERY boot."
- echo ""
- echo "Requirements:"
- echo " - Use a strong, memorable passphrase"
- echo " - If forgotten, your data is UNRECOVERABLE"
- echo ""
- echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
- echo ""
+ # Build pool configuration based on RAID level
+ local pool_config
+ if [[ "$RAID_LEVEL" == "stripe" ]]; then
+ # Stripe: just list devices without a vdev type (RAID0 equivalent)
+ pool_config="${ZFS_PARTS[*]}"
+ info "Creating striped pool with ${#ZFS_PARTS[@]} disks (NO redundancy)..."
+ warn "Data loss will occur if ANY disk fails!"
+ elif [[ -n "$RAID_LEVEL" ]]; then
+ pool_config="$RAID_LEVEL ${ZFS_PARTS[*]}"
+ info "Creating $RAID_LEVEL pool with ${#ZFS_PARTS[@]} disks..."
+ else
+ pool_config="${ZFS_PARTS[0]}"
+ info "Creating single-disk pool..."
+ fi
- # Create encrypted pool
- zpool create -f \
+ # Create encrypted pool using passphrase from variable
+ echo "$ZFS_PASSPHRASE" | zpool create -f \
-o ashift="$ASHIFT" \
-o autotrim=on \
-O acltype=posixacl \
@@ -266,33 +496,37 @@ create_zfs_pool() {
-O keylocation=prompt \
-O mountpoint=none \
-R /mnt \
- "$POOL_NAME" "$ZFS_PART"
+ "$POOL_NAME" $pool_config
info "ZFS pool created successfully."
+ zpool status "$POOL_NAME"
}
-### Dataset Creation ###
create_datasets() {
step "Creating ZFS Datasets"
# Root dataset container
zfs create -o mountpoint=none -o canmount=off "$POOL_NAME/ROOT"
- # Main root filesystem with reservation for safety
- zfs create -o mountpoint=/ -o canmount=noauto -o reservation=50G "$POOL_NAME/ROOT/default"
+ # Main root filesystem
+ # Reserve 20% of pool or 20G max to prevent pool from filling completely
+ local pool_size_bytes=$(zpool get -Hp size "$POOL_NAME" | awk '{print $3}')
+ local pool_size_gb=$((pool_size_bytes / 1024 / 1024 / 1024))
+ local reserve_gb=$((pool_size_gb / 5)) # 20%
+ [[ $reserve_gb -gt 20 ]] && reserve_gb=20
+ [[ $reserve_gb -lt 5 ]] && reserve_gb=5
- # Mount root first
+ zfs create -o mountpoint=/ -o canmount=noauto -o reservation=${reserve_gb}G "$POOL_NAME/ROOT/default"
zfs mount "$POOL_NAME/ROOT/default"
- # Home datasets
+ # Home (archsetup will create user subdataset)
zfs create -o mountpoint=/home "$POOL_NAME/home"
zfs create -o mountpoint=/root "$POOL_NAME/home/root"
- zfs create -o mountpoint="/home/$USERNAME" "$POOL_NAME/home/$USERNAME"
- # Media dataset - compression off for already-compressed files
+ # Media - compression off for already-compressed files
zfs create -o mountpoint=/media -o compression=off "$POOL_NAME/media"
- # VMs dataset - larger recordsize for VM disk images
+ # VMs - 64K recordsize for VM disk images
zfs create -o mountpoint=/vms -o recordsize=64K "$POOL_NAME/vms"
# Var datasets
@@ -303,46 +537,56 @@ create_datasets() {
zfs create -o mountpoint=/var/lib/pacman "$POOL_NAME/var/lib/pacman"
zfs create -o mountpoint=/var/lib/docker "$POOL_NAME/var/lib/docker"
- # Exclude temp directories from snapshots
+ # Temp directories - excluded from snapshots
zfs create -o mountpoint=/var/tmp -o com.sun:auto-snapshot=false "$POOL_NAME/var/tmp"
zfs create -o mountpoint=/tmp -o com.sun:auto-snapshot=false "$POOL_NAME/tmp"
chmod 1777 /mnt/tmp /mnt/var/tmp
info "Datasets created:"
- echo ""
- zfs list -r "$POOL_NAME" -o name,mountpoint,compression,reservation
+ zfs list -r "$POOL_NAME" -o name,mountpoint,compression
}
-### Mount EFI ###
# Mount the first selected disk's EFI partition at /mnt/boot.
# The remaining EFI partitions (if any) are written one at a time by
# configure_bootloader for boot redundancy.
mount_efi() {
  step "Mounting EFI Partition"
  mkdir -p /mnt/boot
  # Mount primary (first) EFI partition
  mount "${EFI_PARTS[0]}" /mnt/boot
  info "Primary EFI partition ${EFI_PARTS[0]} mounted at /mnt/boot"
}
-### Install Base System ###
install_base() {
step "Installing Base System"
info "Updating pacman keys..."
pacman-key --init
pacman-key --populate archlinux
+
+ # Add archzfs key
pacman-key -r DDF7DB817396A49B2A2723F7403BD972F75D9D76 2>/dev/null || true
pacman-key --lsign-key DDF7DB817396A49B2A2723F7403BD972F75D9D76 2>/dev/null || true
+ # Add archzfs repo to pacman.conf for pacstrap
+ if ! grep -q "\[archzfs\]" /etc/pacman.conf; then
+ cat >> /etc/pacman.conf << 'EOF'
+
+[archzfs]
+Server = https://archzfs.com/$repo/$arch
+SigLevel = Optional TrustAll
+EOF
+ fi
+
info "Installing base packages (this takes a while)..."
+ info "ZFS will be built from source via DKMS - this ensures kernel compatibility."
pacstrap -K /mnt \
base \
base-devel \
- linux \
- linux-headers \
+ linux-lts \
+ linux-lts-headers \
linux-firmware \
- zfs-linux \
+ zfs-dkms \
zfs-utils \
grub \
+ freetype2 \
efibootmgr \
networkmanager \
openssh \
@@ -352,16 +596,15 @@ install_base() {
zsh \
nodejs \
npm \
- sanoid
+ ttf-dejavu
info "Base system installed."
}
-### Configure System ###
configure_system() {
step "Configuring System"
- # Generate fstab (only for EFI, ZFS handles the rest)
+ # fstab (only for EFI)
info "Generating fstab..."
echo "# /boot - EFI System Partition" > /mnt/etc/fstab
echo "UUID=$(blkid -s UUID -o value "$EFI_PART") /boot vfat defaults,noatime 0 2" >> /mnt/etc/fstab
@@ -389,7 +632,7 @@ configure_system() {
127.0.1.1 $HOSTNAME.localdomain $HOSTNAME
EOF
- # Add archzfs repo to installed system
+ # Add archzfs repo
info "Adding archzfs repository..."
cat >> /mnt/etc/pacman.conf << 'EOF'
@@ -398,27 +641,60 @@ Server = https://archzfs.com/$repo/$arch
SigLevel = Optional TrustAll
EOF
- # Import archzfs key in chroot
+ # Import archzfs key
arch-chroot /mnt pacman-key -r DDF7DB817396A49B2A2723F7403BD972F75D9D76 2>/dev/null || true
arch-chroot /mnt pacman-key --lsign-key DDF7DB817396A49B2A2723F7403BD972F75D9D76 2>/dev/null || true
+
+ # Set root password
+ info "Setting root password..."
+ echo "root:$ROOT_PASSWORD" | arch-chroot /mnt chpasswd
+}
+
# Carry the live environment's NetworkManager profiles (created by
# get_wifi) over to the installed system so WiFi works on first boot.
# No-op when WiFi was not configured.
configure_wifi() {
  [[ -n "$WIFI_SSID" ]] || return 0

  step "Configuring WiFi"

  # Copy NetworkManager connection from live environment
  local src=/etc/NetworkManager/system-connections
  local dst=/mnt/etc/NetworkManager/system-connections
  if [[ -d "$src" ]]; then
    mkdir -p "$dst"
    # Best-effort: the glob may be empty if no connection was saved.
    cp "$src"/* "$dst"/ 2>/dev/null || true
    # Profiles contain the WiFi PSK — must not be world-readable.
    chmod 600 "$dst"/* 2>/dev/null || true
  fi

  info "WiFi configuration copied to installed system."
}
+
# Enable password-based root SSH on the installed system when the user
# opted in during gather_input; archsetup is expected to harden this
# configuration later.
configure_ssh() {
  if [[ "$ENABLE_SSH" != "yes" ]]; then
    info "SSH not enabled. Enable manually if needed."
    return 0
  fi

  step "Configuring SSH"

  # Cover both the commented default and an explicit existing setting.
  sed -i 's/^#PermitRootLogin.*/PermitRootLogin yes/' /mnt/etc/ssh/sshd_config
  sed -i 's/^PermitRootLogin.*/PermitRootLogin yes/' /mnt/etc/ssh/sshd_config

  # Enable sshd service
  arch-chroot /mnt systemctl enable sshd

  info "SSH enabled with root password login."
  warn "Run archsetup to harden SSH (key auth, fail2ban)."
}
-### Configure mkinitcpio ###
# Rewrite the mkinitcpio HOOKS line so the initramfs can unlock and
# mount the ZFS root, then rebuild all presets. Hook order matters:
# keyboard/keymap precede zfs so the passphrase can be typed at boot.
configure_initramfs() {
  step "Configuring Initramfs for ZFS"

  # Keep a pristine copy for debugging failed boots.
  cp /mnt/etc/mkinitcpio.conf /mnt/etc/mkinitcpio.conf.bak

  # Configure hooks for ZFS
  sed -i 's/^HOOKS=.*/HOOKS=(base udev autodetect microcode modconf kms keyboard keymap consolefont block zfs filesystems fsck)/' /mnt/etc/mkinitcpio.conf

  info "Regenerating initramfs..."
  # -P rebuilds every preset (linux-lts) inside the chroot.
  arch-chroot /mnt mkinitcpio -P
}
-### Configure Bootloader ###
configure_bootloader() {
step "Configuring GRUB Bootloader"
@@ -434,20 +710,49 @@ GRUB_TERMINAL_OUTPUT="console"
GRUB_DISABLE_OS_PROBER=true
GRUB_GFXMODE=auto
GRUB_GFXPAYLOAD_LINUX=keep
+GRUB_FONT=/boot/grub/fonts/DejaVuSansMono32.pf2
EOF
- info "Installing GRUB..."
- arch-chroot /mnt grub-install --target=x86_64-efi --efi-directory=/boot --bootloader-id=GRUB
+ # Install GRUB to each EFI partition for boot redundancy
+ info "Installing GRUB to ${#EFI_PARTS[@]} EFI partition(s)..."
+
+ for i in "${!EFI_PARTS[@]}"; do
+ local efi_part="${EFI_PARTS[$i]}"
+ local bootloader_id="GRUB"
+ if [[ ${#EFI_PARTS[@]} -gt 1 ]]; then
+ bootloader_id="GRUB-disk$((i+1))"
+ fi
+
+ # Unmount current boot if mounted, mount this EFI partition
+ umount /mnt/boot 2>/dev/null || true
+ mount "$efi_part" /mnt/boot
+
+ # Create directories and font
+ mkdir -p /mnt/boot/grub/fonts
+ arch-chroot /mnt grub-mkfont -s 32 -o /boot/grub/fonts/DejaVuSansMono32.pf2 \
+ /usr/share/fonts/TTF/DejaVuSansMono.ttf 2>/dev/null || true
- info "Generating GRUB configuration..."
- arch-chroot /mnt grub-mkconfig -o /boot/grub/grub.cfg
+ # Install GRUB
+ info "Installing GRUB to $efi_part (bootloader-id: $bootloader_id)..."
+ arch-chroot /mnt grub-install --target=x86_64-efi --efi-directory=/boot \
+ --bootloader-id="$bootloader_id" --recheck
+
+ # Generate configuration
+ arch-chroot /mnt grub-mkconfig -o /boot/grub/grub.cfg
+ done
+
+ # Remount primary EFI for rest of installation
+ umount /mnt/boot 2>/dev/null || true
+ mount "${EFI_PARTS[0]}" /mnt/boot
+
+ if [[ ${#EFI_PARTS[@]} -gt 1 ]]; then
+ info "GRUB installed to all ${#EFI_PARTS[@]} disks for boot redundancy."
+ fi
}
-### Configure ZFS Services ###
configure_zfs_services() {
step "Configuring ZFS Services"
- # Enable ZFS services
arch-chroot /mnt systemctl enable zfs.target
arch-chroot /mnt systemctl enable zfs-import-cache
arch-chroot /mnt systemctl enable zfs-mount
@@ -458,7 +763,7 @@ configure_zfs_services() {
zpool set cachefile=/etc/zfs/zpool.cache "$POOL_NAME"
cp /etc/zfs/zpool.cache /mnt/etc/zfs/
- # Set bootfs property
+ # Set bootfs
zpool set bootfs="$POOL_NAME/ROOT/default" "$POOL_NAME"
# Enable other services
@@ -468,93 +773,8 @@ configure_zfs_services() {
info "ZFS services configured."
}
-### Configure Sanoid (Snapshot Management) ###
-configure_sanoid() {
- step "Configuring Sanoid Snapshot Management"
-
- mkdir -p /mnt/etc/sanoid
-
- cat > /mnt/etc/sanoid/sanoid.conf << EOF
-# Sanoid configuration for ZFS snapshots
-# https://github.com/jimsalterjrs/sanoid
-
-#############################
-# Templates
-#############################
-
-[template_production]
- # Frequent snapshots for active data
- hourly = 24
- daily = 7
- weekly = 4
- monthly = 12
- yearly = 0
- autosnap = yes
- autoprune = yes
-
-[template_backup]
- # Less frequent for large/static data
- hourly = 0
- daily = 7
- weekly = 4
- monthly = 6
- yearly = 0
- autosnap = yes
- autoprune = yes
-
-[template_none]
- # No automatic snapshots (for tmp, cache)
- autosnap = no
- autoprune = yes
-
-#############################
-# Datasets
-#############################
-
-# Root filesystem
-[$POOL_NAME/ROOT/default]
- use_template = production
-
-# Home directories
-[$POOL_NAME/home]
- use_template = production
- recursive = yes
-
-# Media (large files, less frequent snapshots)
-[$POOL_NAME/media]
- use_template = backup
-
-# VMs (snapshot before changes manually, or less frequently)
-[$POOL_NAME/vms]
- use_template = backup
-
-# Var data
-[$POOL_NAME/var/log]
- use_template = production
-
-[$POOL_NAME/var/lib/pacman]
- use_template = production
-
-# No snapshots for cache/tmp (handled by dataset property, but explicit here)
-[$POOL_NAME/var/cache]
- use_template = none
-
-[$POOL_NAME/var/tmp]
- use_template = none
-
-[$POOL_NAME/tmp]
- use_template = none
-EOF
-
- # Enable sanoid timer
- arch-chroot /mnt systemctl enable sanoid.timer
-
- info "Sanoid configured. Snapshots will run automatically."
-}
-
-### Configure Pacman ZFS Snapshot Hook ###
configure_pacman_hook() {
- step "Configuring Pacman Pre-Upgrade Snapshot Hook"
+ step "Configuring Pacman Snapshot Hook"
mkdir -p /mnt/etc/pacman.d/hooks
@@ -574,15 +794,11 @@ EOF
cat > /mnt/usr/local/bin/zfs-pre-snapshot << 'EOF'
#!/bin/bash
-# Create a ZFS snapshot before pacman transactions
-# This allows easy rollback if an upgrade breaks something
-
POOL="zroot"
DATASET="$POOL/ROOT/default"
TIMESTAMP=$(date +%Y-%m-%d_%H-%M-%S)
SNAPSHOT_NAME="pre-pacman_$TIMESTAMP"
-# Create the snapshot
if zfs snapshot "$DATASET@$SNAPSHOT_NAME"; then
echo "Created snapshot: $DATASET@$SNAPSHOT_NAME"
else
@@ -592,152 +808,121 @@ EOF
chmod +x /mnt/usr/local/bin/zfs-pre-snapshot
- info "Pacman hook configured. Snapshots will be created before each transaction."
+ info "Pacman hook configured."
}
-### Create User ###
-create_user() {
- step "Creating User: $USERNAME"
+copy_archsetup() {
+ step "Installing archsetup Launcher"
- arch-chroot /mnt useradd -m -G wheel -s /bin/zsh "$USERNAME" 2>/dev/null || \
- warn "User $USERNAME may already exist"
+ cat > /mnt/usr/local/bin/archsetup << 'EOF'
+#!/bin/bash
+curl -fsSL https://cjennings.net/archsetup | bash
+EOF
+ chmod +x /mnt/usr/local/bin/archsetup
+ info "archsetup launcher installed to /usr/local/bin/archsetup"
+}
- # Set ownership of home dataset
- arch-chroot /mnt chown -R "$USERNAME:$USERNAME" "/home/$USERNAME"
+sync_efi_partitions() {
+    # Nothing to sync when there is only a single EFI partition
+ if [[ ${#EFI_PARTS[@]} -le 1 ]]; then
+ return
+ fi
- # Configure sudo
- echo "%wheel ALL=(ALL:ALL) ALL" > /mnt/etc/sudoers.d/wheel
- chmod 440 /mnt/etc/sudoers.d/wheel
+ step "Syncing EFI Partitions for Redundancy"
- echo ""
- echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
- echo -e "${BOLD}Set User Password${NC}"
- echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
- info "Set password for $USERNAME:"
- arch-chroot /mnt passwd "$USERNAME"
+ local primary_efi="${EFI_PARTS[0]}"
+ local temp_mount="/mnt/efi_sync"
- echo ""
- info "Set password for root:"
- arch-chroot /mnt passwd
-}
+ for i in "${!EFI_PARTS[@]}"; do
+ if [[ $i -eq 0 ]]; then
+ continue # Skip primary
+ fi
-### Copy archsetup ###
-copy_archsetup() {
- step "Copying archsetup to New System"
+ local efi_part="${EFI_PARTS[$i]}"
+ info "Syncing to EFI partition $((i+1)): $efi_part"
- if [[ -d /code/archsetup ]]; then
- mkdir -p "/mnt/home/$USERNAME/code"
- cp -r /code/archsetup "/mnt/home/$USERNAME/code/"
- arch-chroot /mnt chown -R "$USERNAME:$USERNAME" "/home/$USERNAME/code"
- info "archsetup copied to /home/$USERNAME/code/archsetup"
- else
- warn "archsetup not found in ISO, skipping..."
- fi
+ mkdir -p "$temp_mount"
+ mount "$efi_part" "$temp_mount"
+
+ # Sync all content from primary EFI (mounted at /mnt/boot) to secondary
+ rsync -a --delete /mnt/boot/ "$temp_mount/"
+
+ umount "$temp_mount"
+ done
+
+ rmdir "$temp_mount" 2>/dev/null || true
+ info "All EFI partitions synchronized."
}
-### Create Syncoid Script for TrueNAS ###
-create_syncoid_script() {
- step "Creating Syncoid Replication Script"
+create_genesis_snapshot() {
+ step "Creating Genesis Snapshot"
- cat > /mnt/usr/local/bin/zfs-replicate << 'SCRIPT'
+ # Create recursive snapshot of entire pool
+ info "Creating snapshot ${POOL_NAME}@genesis..."
+ zfs snapshot -r "${POOL_NAME}@genesis"
+
+ # Create rollback script in /root
+ info "Installing rollback-to-genesis script..."
+ cat > /mnt/root/rollback-to-genesis << 'ROLLBACK_EOF'
#!/bin/bash
-# zfs-replicate - Replicate ZFS datasets to TrueNAS
-# Usage: zfs-replicate [dataset] [target]
+# rollback-to-genesis - Roll back all datasets to the genesis snapshot
#
-# Examples:
-# zfs-replicate # Replicate all configured datasets
-# zfs-replicate zroot/home user@truenas:/tank/backup/laptop
+# This script rolls back the entire ZFS pool to its pristine post-install state.
+# WARNING: This will destroy all changes made since installation!
set -e
-# Configuration - edit these for your TrueNAS setup
-TRUENAS_HOST="truenas" # TrueNAS hostname or IP
-TRUENAS_USER="root" # User with ZFS permissions
-TRUENAS_POOL="tank" # Destination pool
-BACKUP_PATH="backup/laptop" # Path under the pool
-
-# Datasets to replicate (space-separated)
-DATASETS="zroot/ROOT/default zroot/home zroot/media zroot/vms"
-
-# Colors
-GREEN='\033[0;32m'
-YELLOW='\033[1;33m'
-RED='\033[0;31m'
-NC='\033[0m'
-
-info() { echo -e "${GREEN}[INFO]${NC} $1"; }
-warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
-error() { echo -e "${RED}[ERROR]${NC} $1"; exit 1; }
-
-# Check if syncoid is installed
-command -v syncoid >/dev/null 2>&1 || error "syncoid not found. Install sanoid package."
-
-# Single dataset mode
-if [[ -n "$1" ]] && [[ -n "$2" ]]; then
- info "Replicating $1 to $2"
- syncoid --recursive "$1" "$2"
- exit 0
-fi
+POOL_NAME="zroot"
-# Full replication mode
-info "Starting ZFS replication to $TRUENAS_HOST"
+echo "╔═══════════════════════════════════════════════════════════════╗"
+echo "║ WARNING: Full System Rollback ║"
+echo "╚═══════════════════════════════════════════════════════════════╝"
+echo ""
+echo "This will roll back ALL datasets to the genesis snapshot!"
+echo "All changes since installation will be permanently lost."
echo ""
-for dataset in $DATASETS; do
- dest="$TRUENAS_USER@$TRUENAS_HOST:$TRUENAS_POOL/$BACKUP_PATH/${dataset#zroot/}"
- info "Replicating $dataset -> $dest"
-
- if syncoid --recursive "$dataset" "$dest"; then
- info " Success"
- else
- warn " Failed (will retry next run)"
- fi
- echo ""
+# Show what will be rolled back
+echo "Datasets to roll back:"
+zfs list -r -t snapshot -o name "${POOL_NAME}" 2>/dev/null | grep "@genesis" | while read snap; do
+ dataset="${snap%@genesis}"
+ echo " - $dataset"
done
+echo ""
-info "Replication complete."
-SCRIPT
-
- chmod +x /mnt/usr/local/bin/zfs-replicate
-
- # Create systemd service and timer for automatic replication
- cat > /mnt/etc/systemd/system/zfs-replicate.service << 'EOF'
-[Unit]
-Description=ZFS Replication to TrueNAS
-After=network-online.target
-Wants=network-online.target
-
-[Service]
-Type=oneshot
-ExecStart=/usr/local/bin/zfs-replicate
-User=root
-
-[Install]
-WantedBy=multi-user.target
-EOF
+read -p "Type 'ROLLBACK' to confirm: " confirm
+if [[ "$confirm" != "ROLLBACK" ]]; then
+ echo "Aborted."
+ exit 1
+fi
- cat > /mnt/etc/systemd/system/zfs-replicate.timer << 'EOF'
-[Unit]
-Description=Run ZFS replication nightly
+echo ""
+echo "Rolling back to genesis..."
-[Timer]
-OnCalendar=*-*-* 02:00:00
-RandomizedDelaySec=1800
-Persistent=true
+# Roll back each dataset; 'zfs list' prints parents before children, so
+zfs list -r -H -o name "${POOL_NAME}" | tac | while read dataset; do
+ if zfs list -t snapshot "${dataset}@genesis" &>/dev/null; then
+ echo " Rolling back: $dataset"
+ zfs rollback -r "${dataset}@genesis"
+ fi
+done
-[Install]
-WantedBy=timers.target
-EOF
+echo ""
+echo "Rollback complete!"
+echo "Reboot to complete the process: reboot"
+ROLLBACK_EOF
- info "Syncoid replication script created."
- info "Edit /usr/local/bin/zfs-replicate to configure your TrueNAS connection."
- info "Enable with: systemctl enable --now zfs-replicate.timer"
+ chmod +x /mnt/root/rollback-to-genesis
+ info "Genesis snapshot created. Rollback script: /root/rollback-to-genesis"
}
-### Unmount and Export ###
cleanup() {
step "Cleaning Up"
+ # Clear sensitive variables
+ ROOT_PASSWORD=""
+ ZFS_PASSPHRASE=""
+
info "Unmounting filesystems..."
umount /mnt/boot 2>/dev/null || true
@@ -747,85 +932,73 @@ cleanup() {
info "Cleanup complete."
}
-### Print Summary ###
print_summary() {
echo ""
- echo -e "${GREEN}╔═══════════════════════════════════════════════════════════════╗${NC}"
- echo -e "${GREEN}║ Installation Complete! ║${NC}"
- echo -e "${GREEN}╚═══════════════════════════════════════════════════════════════╝${NC}"
+ echo "╔═══════════════════════════════════════════════════════════════╗"
+ echo "║ Installation Complete! ║"
+ echo "╚═══════════════════════════════════════════════════════════════╝"
echo ""
- echo -e "${BOLD}System Configuration:${NC}"
+ echo "System Configuration:"
echo " Hostname: $HOSTNAME"
- echo " Username: $USERNAME"
echo " Timezone: $TIMEZONE"
echo " ZFS Pool: $POOL_NAME (encrypted)"
echo ""
- echo -e "${BOLD}ZFS Features Configured:${NC}"
- echo " - Automatic snapshots via sanoid (hourly/daily/weekly/monthly)"
+ echo "ZFS Features:"
+ echo " - Genesis snapshot: pristine post-install state"
echo " - Pre-pacman snapshots for safe upgrades"
- echo " - Replication script ready for TrueNAS"
- echo ""
- echo -e "${BOLD}Next Steps:${NC}"
- echo " 1. Reboot: ${CYAN}reboot${NC}"
- echo " 2. Enter your ZFS encryption passphrase at boot"
- echo " 3. Log in as $USERNAME"
- echo " 4. Run archsetup: ${CYAN}cd ~/code/archsetup && sudo ./archsetup${NC}"
+ echo " - Sanoid/syncoid configured by archsetup"
echo ""
- echo -e "${BOLD}Configure TrueNAS Replication:${NC}"
- echo " 1. Set up SSH key auth to TrueNAS"
- echo " 2. Edit: ${CYAN}/usr/local/bin/zfs-replicate${NC}"
- echo " 3. Enable: ${CYAN}sudo systemctl enable --now zfs-replicate.timer${NC}"
+ echo "Next Steps:"
+ echo " 1. Reboot: reboot"
+ echo " 2. Enter ZFS encryption passphrase at boot"
+ echo " 3. Log in as root"
+ echo " 4. Run archsetup: archsetup"
echo ""
- echo -e "${BOLD}Useful ZFS Commands:${NC}"
- echo " List snapshots: ${CYAN}zfs list -t snapshot${NC}"
- echo " Manual snapshot: ${CYAN}sudo zfs snapshot zroot/home@my-snapshot${NC}"
- echo " Rollback: ${CYAN}sudo zfs rollback zroot/home@my-snapshot${NC}"
- echo " Check pool status: ${CYAN}zpool status${NC}"
+ echo "Useful Commands:"
+ echo " List snapshots: zfs list -t snapshot"
+ echo " Manual snapshot: zfs snapshot zroot/home@my-backup"
+ echo " Rollback: zfs rollback zroot/home@my-backup"
+ echo " Factory reset: /root/rollback-to-genesis"
+ echo " Pool status: zpool status"
echo ""
- echo -e "${BOLD}If Something Goes Wrong:${NC}"
- echo " Boot from this ISO, then:"
- echo " ${CYAN}zpool import -R /mnt zroot${NC}"
- echo " ${CYAN}zfs load-key zroot${NC}"
- echo " ${CYAN}zfs mount zroot/ROOT/default${NC}"
- echo ""
- info "Installation log saved to: $LOGFILE"
+ info "Installation log: $LOGFILE"
echo ""
}
-### Main Installation Flow ###
+#############################
+# Main
+#############################
+
main() {
- echo ""
- echo -e "${CYAN}╔═══════════════════════════════════════════════════════════════╗${NC}"
- echo -e "${CYAN}║ Arch Linux ZFS Root Installation ║${NC}"
- echo -e "${CYAN}║ with Native Encryption ║${NC}"
- echo -e "${CYAN}╚═══════════════════════════════════════════════════════════════╝${NC}"
- echo ""
+ preflight_checks
+ gather_input
- info "Installation log: $LOGFILE"
+ # Unattended installation begins
+ echo ""
+ echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
+ echo "Beginning unattended installation..."
+ echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
- configure_install
- select_disk
- partition_disk
+ partition_disks
create_zfs_pool
create_datasets
mount_efi
install_base
configure_system
+ configure_wifi
+ configure_ssh
configure_initramfs
configure_bootloader
configure_zfs_services
- configure_sanoid
configure_pacman_hook
- create_user
copy_archsetup
- create_syncoid_script
+ sync_efi_partitions
+ create_genesis_snapshot
cleanup
print_summary
}
-# Handle interrupts
trap 'error "Installation interrupted!"' INT TERM
-# Run main
main "$@"
diff --git a/scripts/test-vm.sh b/scripts/test-vm.sh
index 581fa6c..c5ff5e8 100755
--- a/scripts/test-vm.sh
+++ b/scripts/test-vm.sh
@@ -2,9 +2,10 @@
# test-vm.sh - Test the archzfs ISO in a QEMU virtual machine
#
# Usage:
-# ./test-vm.sh # Create new VM and boot ISO
-# ./test-vm.sh --boot-disk # Boot from existing virtual disk (after install)
-# ./test-vm.sh --clean # Remove VM disk and start fresh
+# ./test-vm.sh # Create new VM and boot ISO (single disk)
+# ./test-vm.sh --multi-disk # Create VM with multiple disks for RAID testing
+# ./test-vm.sh --boot-disk # Boot from existing virtual disk (after install)
+# ./test-vm.sh --clean # Remove VM disks and start fresh
set -e
@@ -15,9 +16,13 @@ PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
VM_NAME="archzfs-test"
VM_DIR="$PROJECT_DIR/vm"
VM_DISK="$VM_DIR/$VM_NAME.qcow2"
+VM_DISK2="$VM_DIR/$VM_NAME-disk2.qcow2"
+VM_DISK3="$VM_DIR/$VM_NAME-disk3.qcow2"
VM_DISK_SIZE="50G"
VM_RAM="4096"
VM_CPUS="4"
+MULTI_DISK=false
+NUM_DISKS=1
# UEFI firmware (adjust path for your system)
OVMF_CODE="/usr/share/edk2/x64/OVMF_CODE.4m.fd"
@@ -63,7 +68,7 @@ check_deps() {
fi
}
-# Create VM directory and disk
+# Create VM directory and disk(s)
setup_vm() {
mkdir -p "$VM_DIR"
@@ -74,6 +79,23 @@ setup_vm() {
info "Using existing disk: $VM_DISK"
fi
+ # Create additional disks for multi-disk mode
+ if [[ "$MULTI_DISK" == true ]]; then
+ if [[ ! -f "$VM_DISK2" ]]; then
+ info "Creating virtual disk 2: $VM_DISK2 ($VM_DISK_SIZE)"
+ qemu-img create -f qcow2 "$VM_DISK2" "$VM_DISK_SIZE"
+ else
+ info "Using existing disk 2: $VM_DISK2"
+ fi
+
+ if [[ $NUM_DISKS -ge 3 && ! -f "$VM_DISK3" ]]; then
+ info "Creating virtual disk 3: $VM_DISK3 ($VM_DISK_SIZE)"
+ qemu-img create -f qcow2 "$VM_DISK3" "$VM_DISK_SIZE"
+ elif [[ $NUM_DISKS -ge 3 ]]; then
+ info "Using existing disk 3: $VM_DISK3"
+ fi
+ fi
+
# Copy OVMF vars if needed
if [[ ! -f "$OVMF_VARS" ]]; then
info "Setting up UEFI variables"
@@ -85,6 +107,8 @@ setup_vm() {
clean_vm() {
warn "Removing VM files..."
rm -f "$VM_DISK"
+ rm -f "$VM_DISK2"
+ rm -f "$VM_DISK3"
rm -f "$OVMF_VARS"
info "VM files removed. Ready for fresh install."
}
@@ -94,21 +118,37 @@ boot_iso() {
find_iso
setup_vm
+ local disk_info="$VM_DISK_SIZE"
+ if [[ "$MULTI_DISK" == true ]]; then
+ disk_info="$NUM_DISKS x $VM_DISK_SIZE (RAID testing)"
+ fi
+
info "Starting VM with ISO..."
echo ""
echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo " VM: $VM_NAME"
echo " RAM: ${VM_RAM}MB | CPUs: $VM_CPUS"
- echo " Disk: $VM_DISK_SIZE"
+ echo " Disks: $disk_info"
echo " ISO: $(basename "$ISO_FILE")"
echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo ""
echo "Tips:"
echo " - Press Ctrl+Alt+G to release mouse grab"
echo " - Press Ctrl+Alt+F to toggle fullscreen"
+ echo " - Serial console output appears in this terminal"
+ echo " - SSH: ssh -p 2222 root@localhost (password: archzfs)"
echo " - Run 'install-archzfs' to start installation"
echo ""
+ # Build disk arguments
+ local disk_args=(-drive "file=$VM_DISK,format=qcow2,if=virtio")
+ if [[ "$MULTI_DISK" == true ]]; then
+ disk_args+=(-drive "file=$VM_DISK2,format=qcow2,if=virtio")
+ if [[ $NUM_DISKS -ge 3 ]]; then
+ disk_args+=(-drive "file=$VM_DISK3,format=qcow2,if=virtio")
+ fi
+ fi
+
qemu-system-x86_64 \
-name "$VM_NAME" \
-machine q35,accel=kvm \
@@ -117,13 +157,14 @@ boot_iso() {
-m "$VM_RAM" \
-drive if=pflash,format=raw,readonly=on,file="$OVMF_CODE" \
-drive if=pflash,format=raw,file="$OVMF_VARS" \
- -drive file="$VM_DISK",format=qcow2,if=virtio \
+ "${disk_args[@]}" \
-cdrom "$ISO_FILE" \
-boot d \
-netdev user,id=net0,hostfwd=tcp::2222-:22 \
-device virtio-net-pci,netdev=net0 \
-device virtio-vga-gl \
-display gtk,gl=on \
+ -serial mon:stdio \
-audiodev pipewire,id=audio0 \
-device ich9-intel-hda \
-device hda-duplex,audiodev=audio0 \
@@ -139,11 +180,31 @@ boot_disk() {
error "No disk found. Run without --boot-disk first to install."
fi
+ # Auto-detect multi-disk setup
+ if [[ -f "$VM_DISK2" ]]; then
+ MULTI_DISK=true
+ if [[ -f "$VM_DISK3" ]]; then
+ NUM_DISKS=3
+ else
+ NUM_DISKS=2
+ fi
+ fi
+
info "Booting from installed disk..."
echo ""
- echo "SSH access: ssh -p 2222 localhost"
+ echo "SSH access: ssh -p 2222 root@localhost"
+ echo "Serial console output appears in this terminal"
echo ""
+ # Build disk arguments
+ local disk_args=(-drive "file=$VM_DISK,format=qcow2,if=virtio")
+ if [[ "$MULTI_DISK" == true ]]; then
+ disk_args+=(-drive "file=$VM_DISK2,format=qcow2,if=virtio")
+ if [[ $NUM_DISKS -ge 3 ]]; then
+ disk_args+=(-drive "file=$VM_DISK3,format=qcow2,if=virtio")
+ fi
+ fi
+
qemu-system-x86_64 \
-name "$VM_NAME" \
-machine q35,accel=kvm \
@@ -152,12 +213,13 @@ boot_disk() {
-m "$VM_RAM" \
-drive if=pflash,format=raw,readonly=on,file="$OVMF_CODE" \
-drive if=pflash,format=raw,file="$OVMF_VARS" \
- -drive file="$VM_DISK",format=qcow2,if=virtio \
+ "${disk_args[@]}" \
-boot c \
-netdev user,id=net0,hostfwd=tcp::2222-:22 \
-device virtio-net-pci,netdev=net0 \
-device virtio-vga-gl \
-display gtk,gl=on \
+ -serial mon:stdio \
-audiodev pipewire,id=audio0 \
-device ich9-intel-hda \
-device hda-duplex,audiodev=audio0 \
@@ -170,24 +232,36 @@ show_help() {
echo "Usage: $0 [OPTIONS]"
echo ""
echo "Options:"
- echo " (none) Create VM and boot from ISO for installation"
- echo " --boot-disk Boot from existing virtual disk (after install)"
- echo " --clean Remove VM disk and start fresh"
- echo " --help Show this help message"
+ echo " (none) Create VM with single disk and boot from ISO"
+ echo " --multi-disk Create VM with 2 disks for RAID mirror testing"
+ echo " --multi-disk=3 Create VM with 3 disks for RAIDZ testing"
+ echo " --boot-disk Boot from existing virtual disk (after install)"
+ echo " --clean Remove VM disks and start fresh"
+ echo " --help Show this help message"
echo ""
echo "VM Configuration (edit this script to change):"
- echo " Disk size: $VM_DISK_SIZE"
+ echo " Disk size: $VM_DISK_SIZE (per disk)"
echo " RAM: ${VM_RAM}MB"
echo " CPUs: $VM_CPUS"
echo ""
echo "SSH into running VM:"
- echo " ssh -p 2222 localhost"
+ echo " ssh -p 2222 root@localhost (password: archzfs)"
}
# Main
check_deps
case "${1:-}" in
+ --multi-disk)
+ MULTI_DISK=true
+ NUM_DISKS=2
+ boot_iso
+ ;;
+ --multi-disk=3)
+ MULTI_DISK=true
+ NUM_DISKS=3
+ boot_iso
+ ;;
--boot-disk)
boot_disk
;;