Commit 63b5e6c

Merge pull request #1795 from dustymabe/dusty-OCPBUGS-54594
[release-4.18] OCPBUGS-54594: update bootloader on aarch64 systems
2 parents 9d8f56f + 7401561 commit 63b5e6c

3 files changed (+138, -0 lines)

overlay.d/05rhcos/usr/lib/systemd/system-preset/43-manifest-rhcos.preset (+5)
@@ -22,3 +22,8 @@ enable nmstate.service
 # This unit is not activated on OSTree systems, but it still pulls in
 # `network-online.target`. Explicitly disable it.
 disable dnf-makecache.timer
+
+# Enable the unit to update the bootloader on aarch64
+# machines so they can boot 9.6+ kernels.
+# https://issues.redhat.com/browse/OCPBUGS-54594
+enable coreos-bootupctl-update-aarch64.service
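
As a quick sanity check (not part of this commit), the effect of the preset can be verified on a running aarch64 node with standard systemctl invocations; the unit name is taken from the line above:

    # Confirm the preset left the unit enabled and see whether/when it ran.
    systemctl is-enabled coreos-bootupctl-update-aarch64.service
    systemctl status coreos-bootupctl-update-aarch64.service --no-pager
    # Show the installed unit file (and any drop-ins).
    systemctl cat coreos-bootupctl-update-aarch64.service
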
coreos-bootupctl-update-aarch64.service (new file, +21; the unit enabled by the preset above)
@@ -0,0 +1,21 @@
+[Unit]
+Description=Update Bootloader for aarch64 systems
+Documentation=https://issues.redhat.com/browse/OCPBUGS-54594
+ConditionArchitecture=arm64
+ConditionFirmware=uefi
+ConditionKernelCommandLine=!ignition.firstboot
+ConditionPathExists=!/run/ostree-live
+ConditionPathExists=!/var/lib/coreos-update-bootloader-aarch64-OCPBUGS-54594.stamp
+RequiresMountsFor=/boot
+
+[Service]
+Type=oneshot
+# Only run once regardless of success or failure, so we touch
+# our stamp file here.
+ExecStartPre=touch /var/lib/coreos-update-bootloader-aarch64-OCPBUGS-54594.stamp
+ExecStart=/usr/libexec/coreos-update-bootloader
+RemainAfterExit=yes
+MountFlags=slave
+
+[Install]
+WantedBy=multi-user.target
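
The Condition*= lines above are what keep the unit from firing on live systems, first boots, non-UEFI or non-arm64 machines, and nodes that already carry the stamp file. As a rough sketch (not part of this commit, and assuming systemd-analyze >= 245, with ConditionFirmware= evaluation available from 249+), the same expressions can be checked ad hoc on a node to see which condition would stop the unit:

    # Evaluate each condition against the current machine; systemd-analyze
    # reports whether every expression succeeds or fails.
    systemd-analyze condition \
        'ConditionArchitecture=arm64' \
        'ConditionFirmware=uefi' \
        'ConditionKernelCommandLine=!ignition.firstboot' \
        'ConditionPathExists=!/run/ostree-live' \
        'ConditionPathExists=!/var/lib/coreos-update-bootloader-aarch64-OCPBUGS-54594.stamp'
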
/usr/libexec/coreos-update-bootloader (new file, +112; the script run by the unit's ExecStart= above)
@@ -0,0 +1,112 @@
+#!/bin/bash
+set -euo pipefail
+
+# https://issues.redhat.com/browse/OCPBUGS-54594
+
+# This script updates the bootloader using bootupd when it's safe
+# and manually otherwise. Right now bootupd doesn't support RAID-1
+# setups, and it's unclear how it behaves when multiple EFI-SYSTEM
+# labeled filesystems are attached.
+
+# Function that actually does the manual copy
+copy_to_esp_device() {
+    local device=$1
+    mount $device /boot/efi
+    echo "[Before Update: ${device}]"
+    find /boot/efi/ -type f | xargs sha256sum
+    cp -rp /usr/lib/bootupd/updates/EFI /boot/efi
+    echo "[After Update: ${device}]"
+    find /boot/efi/ -type f | xargs sha256sum
+    umount /boot/efi
+}
+
+# Handle the RAID case manually since bootupd doesn't support it.
+# https://github.com/coreos/bootupd/issues/132
+update_raid_esp() {
+    local boot_raid_device=$1
+    local devices_json="${2}"
+    echo "Detected boot raid device is: $boot_raid_device"
+    # Next we'll find all the devices that are part of that
+    # RAID array and have an ESP (i.e. a vfat-formatted partition
+    # with a label that starts with "esp-", like "esp-1", "esp-2"),
+    # and we'll capture the device name of each partition.
+    esp_partitions=$(
+        jq --arg raid_device "${boot_raid_device}" -r '
+            .blockdevices[]
+            | select(.children[]?.children[]?.name == $raid_device)
+            | .children[]
+            | select(
+                (.fstype == "vfat") and
+                (.parttype == "c12a7328-f81f-11d2-ba4b-00a0c93ec93b")
+              )
+            | .name' <<< "${devices_json}")
+    for part in $esp_partitions; do
+        echo "Found ESP replica in ${part}; updating"
+        copy_to_esp_device $part
+    done
+}
+
+main() {
+    # Grab the info about the system's disks from `lsblk`.
+    block_devices_json=$(lsblk --paths --output-all --json)
+
+    # Find the device the boot filesystem is mounted from
+    # (e.g. /dev/md126 or /dev/sda3).
+    boot_fs_device=$(findmnt -n -o SOURCE --target /boot)
+
+    # Grab the JSON for the boot partition (e.g. /dev/sda3). This partition
+    # could hold the filesystem directly or it could be a linux_raid_member,
+    # in which case the $boot_fs_device will be in the "children" of this
+    # device. Choose .[0] here since we only need to look at the first device
+    # (only RAID will have more than 1 anyway).
+    boot_fs_partition_json=$(
+        jq --arg boot_fs_device "${boot_fs_device}" -r '
+            [
+                .blockdevices[].children[]?
+                | select(
+                    .name == $boot_fs_device or
+                    .children[]?.name == $boot_fs_device
+                  )
+            ] | .[0]' <<< "${block_devices_json}")
+    if [ "${boot_fs_partition_json}" == "null" ]; then
+        echo "Couldn't gather information about ${boot_fs_device}" >&2
+        exit 1
+    fi
+
+    # Grab the partition fstype (useful to determine if it's RAID).
+    boot_fs_partition_fstype=$(jq -r '.fstype' <<< "${boot_fs_partition_json}")
+
+    # Determine how many devices are attached with ESP filesystems.
+    num_efi_system_devices=$(
+        jq -r '
+            [
+                .blockdevices[]
+                | select(.children[]?.parttype == "c12a7328-f81f-11d2-ba4b-00a0c93ec93b")
+            ] | length' <<< "${block_devices_json}")
+
+    # Now do the updates based on what situation we are in.
+    if [ "${boot_fs_partition_fstype}" == 'linux_raid_member' ]; then
+        # If it's RAID we'll update manually.
+        update_raid_esp $boot_fs_device "${block_devices_json}"
+    elif [ "${num_efi_system_devices}" -gt 1 ]; then
+        echo "Detected more than one ESP device in a non-RAID setup"
+        echo "Falling back to manual copy"
+        # If there is more than one ESP device in a non-RAID setup
+        # then we'll need to do the copy manually to make sure we
+        # copy only to the device we're booted from.
+        esp_device=$(
+            jq --arg boot_fs_device "$boot_fs_device" -r '
+                .blockdevices[]
+                | select(.children[]?.name == $boot_fs_device)
+                | .children[]
+                | select(.parttype == "c12a7328-f81f-11d2-ba4b-00a0c93ec93b")
+                | .name' <<< "${block_devices_json}")
+        copy_to_esp_device $esp_device
+    else
+        echo "Found ESP; calling 'bootupctl update'"
+        bootupctl update
+    fi
+    sync # write data out to backing devices
+}
+
+main
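
To make the RAID branch easier to follow: below is an illustrative, hypothetical lsblk --paths --output-all --json layout for a two-disk RAID-1 install (the device names and the trimmed JSON fields are invented for this sketch), run through the same jq filter update_raid_esp() uses to find the ESP replicas:

    # Hypothetical, trimmed lsblk JSON: two disks whose third partitions
    # form /dev/md126 and whose second partitions are the ESP replicas.
    devices_json='{
      "blockdevices": [
        {"name": "/dev/sda", "children": [
          {"name": "/dev/sda2", "fstype": "vfat",
           "parttype": "c12a7328-f81f-11d2-ba4b-00a0c93ec93b"},
          {"name": "/dev/sda3", "fstype": "linux_raid_member",
           "children": [{"name": "/dev/md126"}]}]},
        {"name": "/dev/sdb", "children": [
          {"name": "/dev/sdb2", "fstype": "vfat",
           "parttype": "c12a7328-f81f-11d2-ba4b-00a0c93ec93b"},
          {"name": "/dev/sdb3", "fstype": "linux_raid_member",
           "children": [{"name": "/dev/md126"}]}]}
      ]
    }'
    # The filter from update_raid_esp(): keep disks that carry a partition
    # backing $raid_device, then keep only their vfat partitions with the
    # EFI System Partition type GUID.
    jq --arg raid_device /dev/md126 -r '
        .blockdevices[]
        | select(.children[]?.children[]?.name == $raid_device)
        | .children[]
        | select(
            (.fstype == "vfat") and
            (.parttype == "c12a7328-f81f-11d2-ba4b-00a0c93ec93b")
          )
        | .name' <<< "${devices_json}"
    # Prints /dev/sda2 and /dev/sdb2, which copy_to_esp_device() then
    # mounts and updates one at a time.
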
