Initial Debian fork of bootupd for immutable Debian proof-of-concept
Commit ec689d58ee
78 changed files with 8951 additions and 0 deletions
bootupd/.cargo/config.toml (new executable file, 2 lines)
@@ -0,0 +1,2 @@
[alias]
xtask = "run --manifest-path ./xtask/Cargo.toml --"

bootupd/.cci.jenkinsfile (new executable file, 81 lines)
@@ -0,0 +1,81 @@
// Documentation: https://github.com/coreos/coreos-ci/blob/main/README-upstream-ci.md

properties([
    // abort previous runs when a PR is updated to save resources
    disableConcurrentBuilds(abortPrevious: true)
])

stage("Build") {
    parallel build: {
        def n = 5
        buildPod(runAsUser: 0, memory: "2Gi", cpu: "${n}") {
            checkout scm
            stage("Core build") {
                shwrap("""
                    make -j ${n}
                """)
            }
            stage("Unit tests") {
                shwrap("""
                    dnf install -y grub2-tools-minimal
                    cargo test
                """)
            }
            shwrap("""
                make install-all DESTDIR=\$(pwd)/insttree/
                tar -c -C insttree/ -zvf insttree.tar.gz .
            """)
            stash includes: 'insttree.tar.gz', name: 'build'
        }
    },
    codestyle: {
        buildPod {
            checkout scm
            shwrap("cargo fmt -- --check")
        }
    }
}

// Build FCOS and do a kola basic run
// FIXME update to main branch once https://github.com/coreos/fedora-coreos-config/pull/595 merges
// The FCOS build process is memory-intensive; 6GiB is needed to prevent OOM errors.
cosaPod(runAsUser: 0, memory: "6144Mi", cpu: "4") {
    stage("Build FCOS") {
        checkout scm
        unstash 'build'
        // Note that like {rpm-,}ostree we want to install to both / and overrides/rootfs
        // because bootupd is used both during the `rpm-ostree compose tree` as well as
        // inside the target operating system.
        shwrap("""
            mkdir insttree
            tar -C insttree -xzvf insttree.tar.gz
            rsync -rlv insttree/ /
            coreos-assembler init --force https://github.com/coreos/fedora-coreos-config
            mkdir -p overrides/rootfs
            mv insttree/* overrides/rootfs/
            rmdir insttree
            cosa fetch
            cosa build
            cosa osbuild metal4k
        """)
    }
    // The e2e-adopt test will use the ostree commit we just generated above
    // but a static qemu base image.
    try {
        // Now a test that upgrades using bootupd
        stage("e2e upgrade test") {
            shwrap("""
                git config --global --add safe.directory "\$(pwd)"
                env COSA_DIR=${env.WORKSPACE} ./tests/e2e-update/e2e-update.sh
            """)
        }
        stage("Kola testing") {
            // The previous e2e leaves things only having built an ostree update
            shwrap("cosa build")
            // bootupd really can't break upgrades for the OS
            kola(cosaDir: "${env.WORKSPACE}", extraArgs: "ext.*bootupd*", skipUpgrade: true, skipBasicScenarios: true)
        }
    } finally {
        archiveArtifacts allowEmptyArchive: true, artifacts: 'tmp/console.txt'
    }
}

bootupd/.copr/Makefile (new executable file, 7 lines)
@@ -0,0 +1,7 @@
srpm:
	dnf -y install cargo git openssl-devel
	# similar to https://github.com/actions/checkout/issues/760, but for COPR
	git config --global --add safe.directory '*'
	cargo install cargo-vendor-filterer
	cargo xtask package-srpm
	mv target/*.src.rpm $$outdir

bootupd/.dockerignore (new executable file, 2 lines)
@@ -0,0 +1,2 @@
target
.cosa

bootupd/.gemini/config.yaml (new executable file, 12 lines)
@@ -0,0 +1,12 @@
# This config mainly overrides `summary: false` by default
# as it's really noisy.
have_fun: true
code_review:
  disable: false
  comment_severity_threshold: MEDIUM
  max_review_comments: -1
  pull_request_opened:
    help: false
    summary: false # turned off by default
    code_review: true
ignore_patterns: []

bootupd/.github/ISSUE_TEMPLATE/release-checklist.md (vendored, new executable file, 111 lines)
@@ -0,0 +1,111 @@
# Release process

The release process follows the usual PR-and-review flow, allowing an external reviewer to have a final check before publishing.

In order to ease downstream packaging of Rust binaries, an archive of vendored dependencies is also provided (only relevant for offline builds).

## Requirements

This guide requires:

* A web browser (and network connectivity)
* `git`
* [GPG setup][GPG setup] and personal key for signing
* [git-evtag](https://github.com/cgwalters/git-evtag/)
* `cargo` (suggested: latest stable toolchain from [rustup][rustup])
* `cargo-release` (suggested: `cargo install -f cargo-release`)
* `cargo vendor-filterer` (suggested: `cargo install -f cargo-vendor-filterer`)
* A verified account on crates.io
* Write access to this GitHub project
* Upload access to this project on GitHub, crates.io
* Membership in the [Fedora CoreOS Crates Owners group](https://github.com/orgs/coreos/teams/fedora-coreos-crates-owners/members)

## Release checklist

- Prepare local branch+commit
  - [ ] `git checkout -b release`
  - [ ] Bump the version number in `Cargo.toml`. Usually you just want to bump the patch.
  - [ ] Run `cargo build` to ensure `Cargo.lock` would be updated
  - [ ] Commit changes `git commit -a -m 'Release x.y.z'`; include some useful brief changelog.

- Prepare the release
  - [ ] Run `./ci/prepare-release.sh`

- Validate that `origin` points to the canonical upstream repository and not your fork: `git remote show origin` should not be `github.com/$yourusername/$project` but should be under the organization ownership. The remote `yourname` should be for your fork.

- open and merge a PR for this release:
  - [ ] `git push --set-upstream origin release`
  - [ ] open a web browser and create a PR for the branch above
  - [ ] make sure the resulting PR contains the commit
  - [ ] in the PR body, write a short changelog with relevant changes since last release
  - [ ] get the PR reviewed, approved and merged

- publish the artifacts (tag and crate):
  - [ ] `git fetch origin && git checkout ${RELEASE_COMMIT}`
  - [ ] verify `Cargo.toml` has the expected version
  - [ ] `git-evtag sign v${RELEASE_VER}`
  - [ ] `git push --tags origin v${RELEASE_VER}`
  - [ ] `cargo publish`

- publish this release on GitHub:
  - [ ] find the new tag in the [GitHub tag list](https://github.com/coreos/bootupd/tags), click the triple dots menu, and create a release for it
  - [ ] write a short changelog with `git shortlog $last_tag..` (i.e. re-use the PR content). See previous releases for format, for example [`v0.2.25`](https://hackmd.io/@hhei/SkYe0AtMye)
  - [ ] upload `target/${PROJECT}-${RELEASE_VER}-vendor.tar.gz`
  - [ ] record digests of local artifacts:
    - `sha256sum target/package/${PROJECT}-${RELEASE_VER}.crate`
    - `sha256sum target/${PROJECT}-${RELEASE_VER}-vendor.tar.gz`
  - [ ] publish release

- clean up:
  - [ ] `git push origin :release`
  - [ ] `cargo clean`
  - [ ] `git checkout main`

- Fedora packaging:
  - [ ] update the `rust-bootupd` spec file in [Fedora](https://src.fedoraproject.org/rpms/rust-bootupd)
    - bump the `Version`
    - remove any patches obsoleted by the new release
  - [ ] run `spectool -g -S rust-bootupd.spec`
  - [ ] run `kinit your_fas_account@FEDORAPROJECT.ORG`
  - [ ] run `fedpkg new-sources <crate-name> <vendor-tarball-name>`
  - [ ] PR the changes in [Fedora](https://src.fedoraproject.org/rpms/rust-bootupd)
  - [ ] once the PR merges to rawhide, merge rawhide into the other relevant branches (e.g. f35) then push those, for example:
    ```bash
    git checkout rawhide
    git pull --ff-only
    git checkout f35
    git merge --ff-only rawhide
    git push origin f35
    ```
  - [ ] on each of those branches run `fedpkg build`
  - [ ] once the builds have finished, submit them to [bodhi](https://bodhi.fedoraproject.org/updates/new), filling in:
    - `rust-bootupd` for `Packages`
    - selecting the build(s) that just completed, except for the rawhide one (which gets submitted automatically)
    - writing brief release notes like "New upstream release; see release notes at `link to GitHub release`"
    - leave `Update name` blank
    - `Type`, `Severity` and `Suggestion` can be left as `unspecified` unless it is a security release. In that case select `security` with the appropriate severity.
    - `Stable karma` and `Unstable karma` can be set to `2` and `-1`, respectively.
  - [ ] [submit a fast-track](https://github.com/coreos/fedora-coreos-config/actions/workflows/add-override.yml) for FCOS testing-devel
  - [ ] [submit a fast-track](https://github.com/coreos/fedora-coreos-config/actions/workflows/add-override.yml) for FCOS next-devel if it is [open](https://github.com/coreos/fedora-coreos-pipeline/blob/main/next-devel/README.md)

- RHCOS packaging:
  - [ ] update the `rust-bootupd` spec file
    - bump the `Version`
    - switch the `Release` back to `1%{?dist}`
    - remove any patches obsoleted by the new release
    - update changelog
  - [ ] run `spectool -g -S rust-bootupd.spec`
  - [ ] run `kinit your_account@REDHAT.COM`
  - [ ] run `rhpkg new-sources <crate-name> <vendor-tarball-name>`
  - [ ] PR the changes
  - [ ] get the PR reviewed and merge it
  - [ ] update your local repo and run `rhpkg build`

- CentOS Stream 9 packaging:
  - [ ] to be written

[rustup]: https://rustup.rs/
[crates-io]: https://crates.io/
[GPG setup]: https://docs.github.com/en/github/authenticating-to-github/managing-commit-signature-verification

bootupd/.github/dependabot.yml (vendored, new executable file, 26 lines)
@@ -0,0 +1,26 @@
# Maintained in https://github.com/coreos/repo-templates
# Do not edit downstream.

version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
    labels: ["skip-notes"]
    open-pull-requests-limit: 3
  - package-ecosystem: cargo
    directory: /
    schedule:
      interval: weekly
    open-pull-requests-limit: 10
    labels:
      - area/dependencies

    # Group all updates together in a single PR. We can remove some
    # updates from a combined update PR via comments to dependabot:
    # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/managing-pull-requests-for-dependency-updates#managing-dependabot-pull-requests-for-grouped-updates-with-comment-commands
    groups:
      build:
        patterns:
          - "*"

bootupd/.github/workflows/ci.yml (vendored, new executable file, 101 lines)
@@ -0,0 +1,101 @@
name: CI

permissions:
  actions: read

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
  workflow_dispatch: {}

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

env:
  CARGO_TERM_COLOR: always

jobs:
  c9s-bootc-e2e:
    strategy:
      matrix:
        runner:
          - ubuntu-24.04
          - ubuntu-24.04-arm

    runs-on: [ "${{ matrix.runner }}" ]

    steps:
      - name: Get a newer podman for heredoc support (from debian testing)
        run: |
          set -eux
          echo 'deb [trusted=yes] https://ftp.debian.org/debian/ testing main' | sudo tee /etc/apt/sources.list.d/testing.list
          sudo apt update
          sudo apt install -y crun/testing podman/testing skopeo/testing

      - uses: actions/checkout@v4

      - name: Install podman
        if: ( matrix.runner == 'ubuntu-24.04-arm' )
        run: |
          sudo apt update -y
          sudo apt install -y podman

      - name: build
        run: sudo podman build -t localhost/bootupd:latest -f Dockerfile .

      - name: bootupctl status in container
        run: |
          set -xeuo pipefail
          sudo podman run --rm -v $PWD:/run/src -w /run/src --privileged localhost/bootupd:latest tests/tests/bootupctl-status-in-bootc.sh

      - name: bootc install to disk
        run: |
          set -xeuo pipefail
          sudo truncate -s 10G myimage.raw
          sudo podman run --rm --privileged -v .:/target --pid=host --security-opt label=disable \
            -v /var/lib/containers:/var/lib/containers \
            -v /dev:/dev \
            localhost/bootupd:latest bootc install to-disk --skip-fetch-check \
            --disable-selinux --generic-image --via-loopback /target/myimage.raw
          # Verify we installed grub.cfg and shim on the disk
          sudo losetup -P -f myimage.raw
          device=$(losetup -a myimage.raw --output NAME -n)
          esp_part=$(sudo sfdisk -l -J "${device}" | jq -r '.partitiontable.partitions[] | select(.type == "C12A7328-F81F-11D2-BA4B-00A0C93EC93B").node')
          sudo mount "${esp_part}" /mnt/
          arch="$(uname --machine)"
          if [[ "${arch}" == "x86_64" ]]; then
            shim="shimx64.efi"
          else
            # Assume aarch64 for now
            shim="shimaa64.efi"
          fi
          sudo ls /mnt/EFI/centos/{grub.cfg,${shim}}
          sudo umount /mnt
          # check /boot/grub2/grub.cfg permission
          root_part=$(sudo sfdisk -l -J "${device}" | jq -r '.partitiontable.partitions[] | select(.name == "root").node')
          sudo mount "${root_part}" /mnt/
          sudo ls /mnt/boot/grub2/grub.cfg
          [ $(sudo stat -c "%a" /mnt/boot/grub2/grub.cfg) == "600" ]
          sudo umount /mnt
          sudo losetup -D "${device}"
          sudo rm -f myimage.raw

      - name: bootc install to filesystem
        run: |
          set -xeuo pipefail
          sudo podman run --rm -ti --privileged -v /:/target --pid=host --security-opt label=disable \
            -v /dev:/dev -v /var/lib/containers:/var/lib/containers \
            localhost/bootupd:latest bootc install to-filesystem --skip-fetch-check \
            --acknowledge-destructive \
            --disable-selinux --replace=alongside /target
          # Verify we injected static configs
          jq -re '.["static-configs"].version' /boot/bootupd-state.json
          [ $(sudo stat -c "%a" /boot/grub2/grub.cfg) == "600" ]

      - name: bootupctl generate-update-metadata
        run: |
          set -xeuo pipefail
          sudo podman run --rm -v $PWD:/run/src -w /run/src --privileged localhost/bootupd:latest tests/tests/move-content-to-usr.sh

bootupd/.github/workflows/cross.yml (vendored, new executable file, 43 lines)
@@ -0,0 +1,43 @@
name: Cross build

on: [push, pull_request]

permissions:
  actions: read

jobs:
  crossarch-check:
    runs-on: ubuntu-22.04
    name: Build on ${{ matrix.arch }}

    strategy:
      matrix:
        include:
          - arch: s390x
            distro: ubuntu_latest
          - arch: ppc64le
            distro: ubuntu_latest
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: true
          set-safe-directory: true

      - uses: uraimo/run-on-arch-action@v3.0.0
        name: Build
        id: build
        with:
          arch: ${{ matrix.arch }}
          distro: ${{ matrix.distro }}

          githubToken: ${{ github.token }}

          run: |
            set -xeu
            apt update -y
            apt install -y gcc make curl libssl-dev pkg-config
            # Install Rust 1.84.1
            curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain 1.84.1
            source $HOME/.cargo/env
            rustc --version
            cargo check

bootupd/.github/workflows/rust.yml (vendored, new executable file, 119 lines)
@@ -0,0 +1,119 @@
# Maintained in https://github.com/coreos/repo-templates
# Do not edit downstream.

name: Rust
on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
permissions:
  contents: read

# don't waste job slots on superseded code
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

env:
  CARGO_TERM_COLOR: always
  # Pinned toolchain for linting
  ACTIONS_LINTS_TOOLCHAIN: 1.84.1

jobs:
  tests-stable:
    name: Tests, stable toolchain
    runs-on: ubuntu-latest
    container: quay.io/coreos-assembler/fcos-buildroot:testing-devel
    steps:
      - name: Check out repository
        uses: actions/checkout@v4
      - name: Install toolchain
        uses: dtolnay/rust-toolchain@v1
        with:
          toolchain: stable
      - name: Cache build artifacts
        uses: Swatinem/rust-cache@v2
      - name: cargo build
        run: cargo build --all-targets
      - name: cargo test
        run: cargo test --all-targets
  tests-release-stable:
    name: Tests (release), stable toolchain
    runs-on: ubuntu-latest
    container: quay.io/coreos-assembler/fcos-buildroot:testing-devel
    steps:
      - name: Check out repository
        uses: actions/checkout@v4
      - name: Install toolchain
        uses: dtolnay/rust-toolchain@v1
        with:
          toolchain: stable
      - name: Cache build artifacts
        uses: Swatinem/rust-cache@v2
      - name: cargo build (release)
        run: cargo build --all-targets --release
      - name: cargo test (release)
        run: cargo test --all-targets --release
  tests-release-msrv:
    name: Tests (release), minimum supported toolchain
    runs-on: ubuntu-latest
    container: quay.io/coreos-assembler/fcos-buildroot:testing-devel
    steps:
      - name: Check out repository
        uses: actions/checkout@v4
      - name: Detect crate MSRV
        run: |
          msrv=$(cargo metadata --format-version 1 --no-deps | \
              jq -r '.packages[0].rust_version')
          echo "Crate MSRV: $msrv"
          echo "MSRV=$msrv" >> $GITHUB_ENV
      - name: Install toolchain
        uses: dtolnay/rust-toolchain@v1
        with:
          toolchain: ${{ env.MSRV }}
      - name: Cache build artifacts
        uses: Swatinem/rust-cache@v2
      - name: cargo build (release)
        run: cargo build --all-targets --release
      - name: cargo test (release)
        run: cargo test --all-targets --release
  linting:
    name: Lints, pinned toolchain
    runs-on: ubuntu-latest
    container: quay.io/coreos-assembler/fcos-buildroot:testing-devel
    steps:
      - name: Check out repository
        uses: actions/checkout@v4
      - name: Install toolchain
        uses: dtolnay/rust-toolchain@v1
        with:
          toolchain: ${{ env.ACTIONS_LINTS_TOOLCHAIN }}
          components: rustfmt, clippy
      - name: Cache build artifacts
        uses: Swatinem/rust-cache@v2
      - name: cargo fmt (check)
        run: cargo fmt -- --check -l
      - name: cargo clippy (warnings)
        run: cargo clippy --all-targets -- -D warnings
  tests-other-channels:
    name: Tests, unstable toolchain
    runs-on: ubuntu-latest
    container: quay.io/coreos-assembler/fcos-buildroot:testing-devel
    continue-on-error: true
    strategy:
      matrix:
        channel: [beta, nightly]
    steps:
      - name: Check out repository
        uses: actions/checkout@v4
      - name: Install toolchain
        uses: dtolnay/rust-toolchain@v1
        with:
          toolchain: ${{ matrix.channel }}
      - name: Cache build artifacts
        uses: Swatinem/rust-cache@v2
      - name: cargo build
        run: cargo build --all-targets
      - name: cargo test
        run: cargo test --all-targets

bootupd/.gitignore (vendored, new executable file, 4 lines)
@@ -0,0 +1,4 @@
/target
fastbuild*.qcow2
_kola_temp
.cosa

bootupd/COPYRIGHT (new executable file, 7 lines)
@@ -0,0 +1,7 @@
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: bootupd
Source: https://www.github.com/coreos/bootupd

Files: *
Copyright: 2020 Red Hat, Inc.
License: Apache-2.0

bootupd/Cargo.lock (generated, new executable file, 1631 lines)
File diff suppressed because it is too large.

bootupd/Cargo.toml (new executable file, 63 lines)
@@ -0,0 +1,63 @@
[package]
name = "bootupd"
description = "Bootloader updater"
license = "Apache-2.0"
version = "0.2.28"
authors = ["Colin Walters <walters@verbum.org>"]
edition = "2021"
rust-version = "1.84.1"
homepage = "https://github.com/coreos/bootupd"

include = ["src", "LICENSE", "Makefile", "systemd"]

# See https://github.com/coreos/cargo-vendor-filterer
[package.metadata.vendor-filter]
platforms = ["*-unknown-linux-gnu"]
tier = "2"

[[bin]]
name = "bootupd"
path = "src/main.rs"

[dependencies]
anyhow = "1.0"
bincode = "1.3.2"
bootc-internal-blockdev = "0.0.0"
bootc-internal-utils = "0.0.0"
cap-std-ext = "4.0.6"
camino = "1.1.10"
chrono = { version = "0.4.41", features = ["serde"] }
clap = { version = "4.5", default-features = false, features = ["cargo", "derive", "std", "help", "usage", "suggestions"] }
env_logger = "0.11"
fail = { version = "0.5", features = ["failpoints"] }
fn-error-context = "0.2.1"
fs2 = "0.4.3"
hex = "0.4.3"
libc = "^0.2"
libsystemd = ">= 0.3, < 0.8"
log = "^0.4"
openat = "0.1.20"
openat-ext = ">= 0.2.2, < 0.3.0"
openssl = "^0.10"
os-release = "0.1.0"
regex = "1.11.1"
rustix = { version = "1.0.8", features = ["process", "fs"] }
serde = { version = "^1.0", features = ["derive"] }
serde_json = "^1.0"
tempfile = "^3.20"
widestring = "1.2.0"
walkdir = "2.3.2"
signal-hook-registry = "1.4.5"

[profile.release]
# We assume we're being delivered via e.g. RPM which supports split debuginfo
debug = true

[package.metadata.release]
disable-publish = true
disable-push = true
post-release-commit-message = "cargo: development version bump"
pre-release-commit-message = "cargo: bootupd release {{version}}"
sign-commit = true
sign-tag = true
tag-message = "bootupd {{version}}"

bootupd/Dockerfile (new executable file, 28 lines)
@@ -0,0 +1,28 @@
# Build from the current git into a c9s-bootc container image.
# Use e.g. --build-arg=base=quay.io/fedora/fedora-bootc:41 to target
# Fedora or another base image instead.
#
ARG base=quay.io/centos-bootc/centos-bootc:stream9

FROM $base as build
# This installs our package dependencies, and we want to cache it independently of the rest.
# Basically we don't want changing a .rs file to blow out the cache of packages.
RUN <<EORUN
set -xeuo pipefail
dnf -y install cargo git openssl-devel
EORUN
# Now copy the source
COPY . /build
WORKDIR /build
# See https://www.reddit.com/r/rust/comments/126xeyx/exploring_the_problem_of_faster_cargo_docker/
# We aren't using the full recommendations there, just the simple bits.
RUN --mount=type=cache,target=/build/target --mount=type=cache,target=/var/roothome \
    make && make install-all DESTDIR=/out

FROM $base
# Clean out the default to ensure we're using our updated content
RUN rpm -e bootupd
COPY --from=build /out/ /
# Sanity check this too
RUN bootc container lint --fatal-warnings

202
bootupd/LICENSE
Executable file
202
bootupd/LICENSE
Executable file
|
|
@ -0,0 +1,202 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
bootupd/Makefile (new executable file, 42 lines)
@@ -0,0 +1,42 @@
DESTDIR ?=
PREFIX ?= /usr
LIBEXECDIR ?= ${PREFIX}/libexec
RELEASE ?= 1
CONTAINER_RUNTIME ?= podman
IMAGE_PREFIX ?=
IMAGE_NAME ?= bootupd-build

ifeq ($(RELEASE),1)
	PROFILE ?= release
	CARGO_ARGS = --release
else
	PROFILE ?= debug
	CARGO_ARGS =
endif

ifeq ($(CONTAINER_RUNTIME), podman)
	IMAGE_PREFIX = localhost/
endif

.PHONY: all
all:
	cargo build ${CARGO_ARGS}
	ln -f target/${PROFILE}/bootupd target/${PROFILE}/bootupctl

.PHONY: install
install:
	mkdir -p "${DESTDIR}$(PREFIX)/bin" "${DESTDIR}$(LIBEXECDIR)"
	install -D -t "${DESTDIR}$(LIBEXECDIR)" target/${PROFILE}/bootupd
	ln -f ${DESTDIR}$(LIBEXECDIR)/bootupd ${DESTDIR}$(PREFIX)/bin/bootupctl

.PHONY: install-grub-static
install-grub-static:
	install -m 644 -D -t ${DESTDIR}$(PREFIX)/lib/bootupd/grub2-static src/grub2/*.cfg
	install -m 644 -D -t ${DESTDIR}$(PREFIX)/lib/bootupd/grub2-static/configs.d src/grub2/configs.d/*.cfg

.PHONY: install-systemd-unit
install-systemd-unit:
	install -m 644 -D -t "${DESTDIR}$(PREFIX)/lib/systemd/system/" systemd/bootloader-update.service

.PHONY: install-all
install-all: install install-grub-static install-systemd-unit

bootupd/README-design.md (new executable file, 36 lines)
@@ -0,0 +1,36 @@
# Overall design

The initial focus here is updating the [ESP](https://en.wikipedia.org/wiki/EFI_system_partition), but the overall design of bootupd contains a lot of abstraction to support different "components".

## Ideal case

In the ideal case, an OS builder uses `bootupd install` to install all bootloader data, and thereafter it is fully (exclusively) managed by bootupd. It would then be a bug/error for an administrator to manually invoke e.g. `grub2-install` again.

In other words, an end user system would simply invoke `bootupd update` as desired.
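
To make the split concrete, here is a minimal sketch of that ideal flow, assuming the `bootupctl` CLI verbs described elsewhere in this repository (paths are purely illustrative):

```bash
# At image/disk build time: the OS builder lays down all bootloader data once.
# --src-root points at the OS content (e.g. an ostree deployment root); the
# second argument is the mounted physical root of the new disk.
bootupctl backend install --src-root /path/to/deploy/root /sysroot

# Later, on the running end-user system: apply any pending bootloader update.
bootupctl update
```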

However, we're not in that ideal case. Thus bootupd has the concept of "adoption", where we start tracking the installed state as we find it.

## Handling adoption

For Fedora CoreOS, currently the `EFI/fedora/grub.cfg` file is created outside of the ostree, inside `create_disk.sh`. So we aren't including any updates for it in the OSTree.

This type of problem is exactly what bootupd should be solving.

However, we need to be very cautious in handling this because we basically can't assume we own all of the state. We shouldn't touch any files that we don't know about.

## Upgrade edges

We don't necessarily want to update the bootloader data, even if a new update happens to be provided. For example, Fedora does "mass rebuilds" usually once a release, but it's not strictly necessary to update users' bootloaders then.

A common policy in fact might be "only update the bootloader for a security issue or if it's strictly necessary".

A "strictly necessary" upgrade would be one like the GRUB BLS parsing support.

There is not yet any support for upgrade edges in the code apart from a stub structure.

bootupd/README-devel.md (new executable file, 55 lines)
@@ -0,0 +1,55 @@
# Developing bootupd

Currently the focus is Fedora CoreOS.

You can use the normal Rust tools to build and run the unit tests:

`cargo build` and `cargo test`

For real e2e testing, use e.g.
```
export COSA_DIR=/path/to/fcos
cosa build-fast
kola run -E $(pwd) --qemu-image fastbuild-fedora-coreos-bootupd-qemu.qcow2 --qemu-firmware uefi ext.bootupd.*
```

See also [the coreos-assembler docs](https://coreos.github.io/coreos-assembler/working/#using-overrides).

## Building With Containers

There's a reference [Dockerfile](Dockerfile) that builds on [CentOS Stream bootc](https://docs.fedoraproject.org/en-US/bootc/).
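
For example, the same build-and-smoke-test loop that the CI workflow in `.github/workflows/ci.yml` runs can be reproduced locally (the image tag is arbitrary):

```bash
# Build the container image from the current checkout
sudo podman build -t localhost/bootupd:latest -f Dockerfile .

# Run the same smoke test CI uses: `bootupctl status` inside the image
sudo podman run --rm -v $PWD:/run/src -w /run/src --privileged \
    localhost/bootupd:latest tests/tests/bootupctl-status-in-bootc.sh
```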

## Integrating bootupd into a distribution/OS

Today, bootupd only really works on systems that use RPMs and ostree (which usually, though not necessarily, means rpm-ostree).

Many bootupd developers (and current CI flows) target Fedora CoreOS and derivatives, so it can be used as a "reference" for integration.

There are two parts to integration:

### Generating an update payload

bootupd's concept of an "update payload" needs to be generated as part of an OS image (e.g. an ostree commit). A good reference for this is https://github.com/coreos/fedora-coreos-config/blob/88af117d1d2c5e828e5e039adfa03c7cc66fc733/manifests/bootupd.yaml#L12

Specifically, you'll need to invoke `bootupctl backend generate-update-metadata /` as part of update payload generation. This scrapes metadata (e.g. RPM versions) about shim/grub and puts it, along with their component files, in `/usr/lib/bootupd/updates/`.
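
As a rough illustration, in a container-image or treecompose build this is just a late step run against the image root (the exact packaging steps before it are up to the distribution):

```bash
# ... earlier build steps install the shim/grub packages and the bootupd binaries ...

# Collect bootloader content and version metadata into the update payload
bootupctl backend generate-update-metadata /

# The payload now lives under /usr/lib/bootupd/updates/ and ships with the OS image
ls /usr/lib/bootupd/updates/
```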

### Installing to generated disk images

In order to correctly manage updates, bootupd also needs to be responsible for laying out files in initial disk images. A good reference for this is https://github.com/coreos/coreos-assembler/blob/93efb63dcbd63dc04a782e2c6c617ae0cd4a51c8/src/create_disk.sh#L401

Specifically, you'll need to invoke `/usr/bin/bootupctl backend install --src-root /path/to/ostree/deploy /sysroot` where the first path is an ostree deployment root, and the second is the physical root partition.

This will e.g. inject the initial files into the mounted EFI system partition.
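
A hedged sketch of how a disk-image build might wire this up (the device and mount paths are illustrative; coreos-assembler's `create_disk.sh` linked above is the authoritative example):

```bash
# Assume the new disk image is partitioned and mounted under /sysroot, with the
# ESP mounted at /sysroot/boot/efi and the OS content already deployed.
deploy_root=/sysroot/ostree/deploy/mydistro/deploy/<checksum>.0

# Copy the bootloader payload from the OS content into the mounted ESP
# (and BIOS MBR where applicable), and record the installed state.
/usr/bin/bootupctl backend install --src-root "${deploy_root}" /sysroot
```
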
bootupd/README.md (new executable file, 154 lines)
@@ -0,0 +1,154 @@
# bootupd: Distribution-independent updates for bootloaders

Today many Linux systems handle updates for bootloader data in an inconsistent and ad-hoc way. For example, on Fedora and Debian, a package manager update will update UEFI binaries in `/boot/efi`, but not the BIOS MBR data.

Transactional/"image" update systems like [OSTree](https://github.com/ostreedev/ostree/) and dual-partition systems like the Container Linux update system are more consistent: they normally cover kernel/userspace but not anything related to bootloaders.

The reason for this is straightforward: performing bootloader updates in an "A/B" fashion requires completely separate, nontrivial logic from managing the kernel and root filesystem. Today OSTree e.g. makes the choice that it does not update `/boot/efi` (and also doesn't update the BIOS MBR).

The goal of this project is to be a cross-distribution, OS-update-system-agnostic tool to manage updates for things like:

- `/boot/efi`
- x86 BIOS MBR
- Other architecture bootloaders

This project originated in [this Fedora CoreOS github issue](https://github.com/coreos/fedora-coreos-tracker/issues/510).

The scope is otherwise limited; for example, bootupd will not manage anything related to the kernel such as kernel arguments; that's for tools like `grubby` and `ostree`.

## Status

bootupd supports updating GRUB and shim for UEFI firmware on x86_64, aarch64, and riscv64, and GRUB for BIOS firmware on x86_64 and ppc64le.

The project is used in Bootable Containers and ostree/rpm-ostree based systems:
- [`bootc install`](https://github.com/containers/bootc/#using-bootc-install)
- [Fedora CoreOS](https://docs.fedoraproject.org/en-US/fedora-coreos/bootloader-updates/)
- Fedora Atomic Desktops

On systems booted using UEFI firmware, bootloader updates performed by bootupd are now considered safe, even in case of power failures (see [issue#454](https://github.com/coreos/bootupd/issues/454)).

On other systems (BIOS, etc.), bootloader updates performed by bootupd are not safe against a power failure at the wrong time.

Note that bootupd does not yet perform updates in a way that is safe against a buggy bootloader update that fails to boot the system. This is tracked in [issue#440](https://github.com/coreos/bootupd/issues/440).

Bootloader updates are enabled by default on Fedora Atomic Desktops, and will soon be on all Bootable Containers systems. See [fedora-coreos-tracker#1468](https://github.com/coreos/fedora-coreos-tracker/issues/1468).

The bootupd CLI should be considered stable.
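
Typical day-to-day usage on a host looks roughly like this (a short sketch; see `bootupctl --help` on your system for the authoritative set of subcommands):

```bash
# Report the installed and available versions of each managed component
bootupctl status

# Apply any pending bootloader updates
bootupctl update
```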

## Relationship to other projects

### dbxtool

[dbxtool](https://github.com/rhboot/dbxtool) manages updates to the Secure Boot database; `bootupd` will likely need to perform any updates to the `shimx64.efi` binary *before* `dbxtool.service` starts. But otherwise they are independent.

### fwupd

bootupd could be compared to [fwupd](https://github.com/fwupd/fwupd/), which is a project that exists today to update hardware device firmware - things not managed by e.g. `apt/zypper/yum/rpm-ostree update` today.

fwupd comes as a UEFI binary today, so bootupd *could* take care of updating `fwupd`, but today fwupd handles that itself. So it's likely that bootupd would only take care of GRUB and shim. See discussion in [this issue](https://github.com/coreos/bootupd/issues/1).

### systemd bootctl

[systemd bootctl](https://man7.org/linux/man-pages/man1/bootctl.1.html) can update itself; this project would probably just proxy that if we detect systemd-boot is in use.

## Other goals

One idea is that bootupd could help support [redundant bootable disks](https://github.com/coreos/fedora-coreos-tracker/issues/581). For various reasons it doesn't really work to try to use RAID1 for an entire disk; the ESP must be handled specially. `bootupd` could learn how to synchronize multiple EFI system partitions from a primary.

## More details on rationale and integration

A notable problem today for [rpm-ostree](https://github.com/coreos/rpm-ostree/) based systems is that `rpm -q shim-x64` is misleading, because the package is not actually updated in place.

Particularly [this commit][1] makes things clear - the data from the RPM goes into `/usr` (part of the OSTree), so it doesn't touch `/boot/efi`. But that commit didn't change how the RPM database works (and more generally it would be technically complex for rpm-ostree to change how the RPM database works today).

What we ultimately want is that `rpm -q shim-x64` returns "not installed" - because it's not managed by RPM or by ostree. Instead one would purely use `bootupctl` to manage it. However, it might still be *built* as an RPM, just not installed that way. The RPM version numbers would be used for the bootupd version associated with the payload, and ultimately we'd teach `rpm-ostree compose tree` how to separately download bootloaders and pass them to `bootupctl backend`.

[1]: https://github.com/coreos/rpm-ostree/pull/969/commits/dc0e8db5bd92e1f478a0763d1a02b48e57022b59

## Questions and answers

- Why is bootupd not part of ostree?

  A key advertised feature of ostree is that updates are truly transactional. There's even [a test case](https://blog.verbum.org/2020/12/01/committed-to-the-integrity-of-your-root-filesystem/) that validates forcibly pulling the power during OS updates. A simple way to look at this is that on an ostree-based system there is no need to have a "please don't power off your computer" screen. This in turn helps administrators to confidently enable automatic updates.

  Doing that for the bootloader (i.e. bootupd's domain) is an *entirely* separate problem. There have been some ideas around how we could make the bootloaders use an A/B type scheme (or at least be more resilient), and perhaps in the future bootupd will use some of those.

  These updates hence carry different levels of risk. In many cases it's actually OK if the bootloader lags behind; we don't need to update every time.

  But out of conservatism, currently for e.g. Fedora CoreOS, bootupd is disabled by default. On the other hand, if your OS update mechanism isn't transactional, then you may want to enable bootupd by default.

- Is bootupd a daemon?

  It was never a daemon. The name was intended to be "bootloader-upDater", not "bootloader-updater-Daemon". The choice of a "d" suffix is in retrospect probably too confusing.

  bootupd used to have internally-facing `bootupd.service` and `bootupd.socket` systemd units that acted as a locking mechanism. The service would *very quickly* auto exit. There was nothing long-running, so it was not really a daemon.

  bootupd now uses `systemd-run` instead to guarantee the following:

  - It provides a robust natural "locking" mechanism.
  - It ensures that critical logging metadata always consistently ends up in the systemd journal, not e.g. a transient client SSH connection.
  - It benefits from the sandboxing options available for systemd units, and while bootupd is obviously privileged we can still make use of some of this.
  - If we want a non-CLI API (whether that's DBus or Cap'n Proto or varlink or something else), we will create an independent daemon with a stable API for this specific need.

bootupd/ci/build-test.sh (new executable file, 6 lines)
@@ -0,0 +1,6 @@
#!/bin/bash
set -xeuo pipefail
test -n "${COSA_DIR:-}"
make
cosa build-fast
kola run -E $(pwd) --qemu-image fastbuild-*-qemu.qcow2 --qemu-firmware uefi ext.bootupd.'*'

bootupd/ci/prepare-release.sh (new executable file, 14 lines)
@@ -0,0 +1,14 @@
#!/bin/bash
# Prepare a release
set -euo pipefail
cargo publish --dry-run
name=$(cargo read-manifest | jq -r .name)
version=$(cargo read-manifest | jq -r .version)
commit=$(git rev-parse HEAD)

# Generate a vendor tarball of sources to attach to a release
# in order to support offline builds.
vendor_dest=target/${name}-${version}-vendor.tar.zstd
cargo vendor-filterer --prefix=vendor --format=tar.zstd "${vendor_dest}"

echo "Prepared ${version} at commit ${commit}"

bootupd/ci/prow/Dockerfile (new executable file, 21 lines)
@@ -0,0 +1,21 @@
FROM quay.io/coreos-assembler/fcos-buildroot:testing-devel as builder
WORKDIR /src
COPY . .
RUN make && make install DESTDIR=/cosa/component-install
RUN make -C tests/kolainst install DESTDIR=/cosa/component-tests
# Uncomment this to fake a build to test the code below
# RUN mkdir -p /cosa/component-install/usr/bin && echo foo > /cosa/component-install/usr/bin/foo

FROM quay.io/coreos-assembler/coreos-assembler:latest
WORKDIR /srv
# Install our built binaries as overrides for the target build
COPY --from=builder /cosa/component-install/ /srv/overrides/rootfs/
# Copy and install tests too
COPY --from=builder /cosa/component-tests /srv/tmp/component-tests
# And fix permissions
RUN sudo chown -R builder: /srv/*
# Install tests
USER root
RUN rsync -rlv /srv/tmp/component-tests/ / && rm -rf /srv/tmp/component-tests
USER builder
COPY --from=builder /src/ci/prow/fcos-e2e.sh /usr/bin/fcos-e2e

bootupd/ci/prow/fcos-e2e.sh (new executable file, 9 lines)
@@ -0,0 +1,9 @@
#!/bin/bash
set -xeuo pipefail

# Prow jobs don't support adding emptydir today
export COSA_SKIP_OVERLAY=1
cosa init --force https://github.com/coreos/fedora-coreos-config/
cosa fetch
cosa build
cosa kola run --qemu-firmware uefi 'ext.bootupd.*'

bootupd/code-of-conduct.md (new executable file, 61 lines)
@@ -0,0 +1,61 @@
## CoreOS Community Code of Conduct

### Contributor Code of Conduct

As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities.

We are committed to making participation in this project a harassment-free experience for everyone, regardless of level of experience, gender, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, or nationality.

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing others' private information, such as physical or electronic addresses, without explicit permission
* Other unethical or unprofessional conduct.

Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers commit themselves to fairly and consistently applying these principles to every aspect of managing this project. Project maintainers who do not follow or enforce the Code of Conduct may be permanently removed from the project team.

This code of conduct applies both within project spaces and in public spaces when an individual is representing the project or its community.

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting a project maintainer, Brandon Philips <brandon.philips@coreos.com>, and/or Rithu John <rithu.john@coreos.com>.

This Code of Conduct is adapted from the Contributor Covenant (http://contributor-covenant.org), version 1.2.0, available at http://contributor-covenant.org/version/1/2/0/

### CoreOS Events Code of Conduct

CoreOS events are working conferences intended for professional networking and collaboration in the CoreOS community. Attendees are expected to behave according to professional standards and in accordance with their employer's policies on appropriate workplace behavior.

While at CoreOS events or related social networking opportunities, attendees should not engage in discriminatory or offensive speech or actions including but not limited to gender, sexuality, race, age, disability, or religion. Speakers should be especially aware of these concerns.

CoreOS does not condone any statements by speakers contrary to these standards. CoreOS reserves the right to deny entrance and/or eject from an event (without refund) any individual found to be engaging in discriminatory or offensive speech or actions.

Please bring any concerns to the immediate attention of designated on-site staff, Brandon Philips <brandon.philips@coreos.com>, and/or Rithu John <rithu.john@coreos.com>.

82
bootupd/contrib/packaging/bootupd.spec
Executable file
@ -0,0 +1,82 @@
%bcond_without check

%global crate bootupd

Name: rust-%{crate}
Version: 0.2.9
Release: 1%{?dist}
Summary: Bootloader updater

License: Apache-2.0
URL: https://github.com/coreos/bootupd
Source0: %{url}/releases/download/v%{version}/bootupd-%{version}.tar.zstd
Source1: %{url}/releases/download/v%{version}/bootupd-%{version}-vendor.tar.zstd
%if 0%{?fedora} || 0%{?rhel} >= 10
ExcludeArch: %{ix86}
%endif

BuildRequires: git-core
# For now, see upstream
BuildRequires: make
BuildRequires: openssl-devel
%if 0%{?rhel}
BuildRequires: rust-toolset
%else
BuildRequires: cargo-rpm-macros >= 25
%endif
BuildRequires: systemd

%global _description %{expand:
Bootloader updater}
%description %{_description}

%package -n %{crate}
Summary: %{summary}
# Apache-2.0
# Apache-2.0 OR BSL-1.0
# Apache-2.0 OR MIT
# Apache-2.0 WITH LLVM-exception
# Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT
# BSD-3-Clause
# MIT
# MIT OR Apache-2.0
# Unlicense OR MIT
License: Apache-2.0 AND (Apache-2.0 WITH LLVM-exception) AND BSD-3-Clause AND MIT AND (Apache-2.0 OR BSL-1.0) AND (Apache-2.0 OR MIT) AND (Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT) AND (Unlicense OR MIT)
%{?systemd_requires}

%description -n %{crate} %{_description}

%files -n %{crate}
%license LICENSE
%license LICENSE.dependencies
%license cargo-vendor.txt
%doc README.md
%{_bindir}/bootupctl
%{_libexecdir}/bootupd
%{_prefix}/lib/bootupd/grub2-static/
%{_unitdir}/bootloader-update.service

%prep
%autosetup -n %{crate}-%{version} -p1 -Sgit -a1
# Default -v vendor config doesn't support non-crates.io deps (i.e. git)
cp .cargo/vendor-config.toml .
%cargo_prep -N
cat vendor-config.toml >> .cargo/config.toml
rm vendor-config.toml

%build
%cargo_build
%cargo_vendor_manifest
# https://pagure.io/fedora-rust/rust-packaging/issue/33
sed -i -e '/https:\/\//d' cargo-vendor.txt
%cargo_license_summary
%{cargo_license} > LICENSE.dependencies

%install
%make_install INSTALL="install -p -c"
%{__make} install-grub-static DESTDIR=%{?buildroot} INSTALL="%{__install} -p"
%{__make} install-systemd-unit DESTDIR=%{?buildroot} INSTALL="%{__install} -p"

%changelog
* Tue Oct 18 2022 Colin Walters <walters@verbum.org> - 0.2.8-3
- Dummy changelog

37
bootupd/doc/dependency_decisions.yml
Executable file
@ -0,0 +1,37 @@
---
- - :permit
  - MIT OR Apache-2.0
  - :who:
    :why:
    :versions: []
    :when: 2021-02-03 19:31:28.263225624 Z
- - :permit
  - Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT
  - :who:
    :why:
    :versions: []
    :when: 2021-02-03 19:31:42.436851761 Z
- - :permit
  - MIT
  - :who:
    :why:
    :versions: []
    :when: 2021-02-03 19:31:54.278056841 Z
- - :permit
  - Apache 2.0
  - :who:
    :why:
    :versions: []
    :when: 2021-02-03 19:32:08.538863728 Z
- - :permit
  - Apache-2.0 OR BSL-1.0
  - :who:
    :why:
    :versions: []
    :when: 2021-02-03 19:32:17.034417362 Z
- - :permit
  - New BSD
  - :who:
    :why:
    :versions: []
    :when: 2021-02-03 19:33:02.120977990 Z

3
bootupd/src/backend/mod.rs
Executable file
@ -0,0 +1,3 @@
//! Internal logic for bootloader and system state manipulation.

mod statefile;

112
bootupd/src/backend/statefile.rs
Executable file
@ -0,0 +1,112 @@
//! On-disk saved state.

use crate::model::SavedState;
use crate::util::SignalTerminationGuard;
use anyhow::{bail, Context, Result};
use fn_error_context::context;
use fs2::FileExt;
use openat_ext::OpenatDirExt;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;

impl SavedState {
    /// System-wide bootupd write lock (relative to sysroot).
    const WRITE_LOCK_PATH: &'static str = "run/bootupd-lock";
    /// Top-level directory for statefile (relative to sysroot).
    pub(crate) const STATEFILE_DIR: &'static str = "boot";
    /// On-disk bootloader statefile, akin to a tiny rpm/dpkg database, stored in `/boot`.
    pub(crate) const STATEFILE_NAME: &'static str = "bootupd-state.json";

    /// Try to acquire a system-wide lock to ensure non-conflicting state updates.
    ///
    /// While ordinarily the daemon runs as a systemd unit (which implicitly
    /// ensures a single instance) this is a double check against other
    /// execution paths.
    pub(crate) fn acquire_write_lock(sysroot: openat::Dir) -> Result<StateLockGuard> {
        let lockfile = sysroot.write_file(Self::WRITE_LOCK_PATH, 0o644)?;
        lockfile.lock_exclusive()?;
        let guard = StateLockGuard {
            sysroot,
            termguard: Some(SignalTerminationGuard::new()?),
            lockfile: Some(lockfile),
        };
        Ok(guard)
    }

    /// Use this for cases when the target root isn't booted, such as
    /// offline installs.
    pub(crate) fn unlocked(sysroot: openat::Dir) -> Result<StateLockGuard> {
        Ok(StateLockGuard {
            sysroot,
            termguard: None,
            lockfile: None,
        })
    }

    /// Load the JSON file containing on-disk state.
    #[context("Loading saved state")]
    pub(crate) fn load_from_disk(root_path: impl AsRef<Path>) -> Result<Option<SavedState>> {
        let root_path = root_path.as_ref();
        let sysroot = openat::Dir::open(root_path)
            .with_context(|| format!("opening sysroot '{}'", root_path.display()))?;

        let statefile_path = Path::new(Self::STATEFILE_DIR).join(Self::STATEFILE_NAME);
        let saved_state = if let Some(statusf) = sysroot.open_file_optional(&statefile_path)? {
            let mut bufr = std::io::BufReader::new(statusf);
            let mut s = String::new();
            bufr.read_to_string(&mut s)?;
            let state: serde_json::Result<SavedState> = serde_json::from_str(s.as_str());
            let r = match state {
                Ok(s) => s,
                Err(orig_err) => {
                    let state: serde_json::Result<crate::model_legacy::SavedState01> =
                        serde_json::from_str(s.as_str());
                    match state {
                        Ok(s) => s.upconvert(),
                        Err(_) => {
                            return Err(orig_err.into());
                        }
                    }
                }
            };
            Some(r)
        } else {
            None
        };
        Ok(saved_state)
    }

    /// Error out if the statefile already exists.
    pub(crate) fn ensure_not_present(root_path: impl AsRef<Path>) -> Result<()> {
        let statepath = Path::new(root_path.as_ref())
            .join(Self::STATEFILE_DIR)
            .join(Self::STATEFILE_NAME);
        if statepath.exists() {
            bail!("{} already exists", statepath.display());
        }
        Ok(())
    }
}

/// Write-lock guard for statefile, protecting against concurrent state updates.
#[derive(Debug)]
pub(crate) struct StateLockGuard {
    pub(crate) sysroot: openat::Dir,
    #[allow(dead_code)]
    termguard: Option<SignalTerminationGuard>,
    #[allow(dead_code)]
    lockfile: Option<File>,
}

impl StateLockGuard {
    /// Atomically replace the on-disk state with a new version.
    pub(crate) fn update_state(&mut self, state: &SavedState) -> Result<()> {
        let subdir = self.sysroot.sub_dir(SavedState::STATEFILE_DIR)?;
        subdir.write_file_with_sync(SavedState::STATEFILE_NAME, 0o644, |w| -> Result<()> {
            serde_json::to_writer(w, state)?;
            Ok(())
        })?;
        Ok(())
    }
}

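For orientation (not part of the committed file above), here is a minimal sketch of how callers elsewhere in the crate are expected to combine the lock and statefile API; the helper name is hypothetical.

// Hypothetical in-crate helper, sketching the intended call sequence:
// take the exclusive run/bootupd-lock, then atomically rewrite the statefile.
use anyhow::{Context, Result};

fn persist_state_example(state: &crate::model::SavedState) -> Result<()> {
    let sysroot = openat::Dir::open("/").context("opening sysroot")?;
    // Blocks concurrent bootupd invocations until the guard is dropped.
    let mut guard = crate::model::SavedState::acquire_write_lock(sysroot)?;
    // Rewrites boot/bootupd-state.json via write_file_with_sync.
    guard.update_state(state)
}
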
271
bootupd/src/bios.rs
Executable file
@ -0,0 +1,271 @@
use anyhow::{bail, Context, Result};
use camino::Utf8PathBuf;
use openat_ext::OpenatDirExt;
#[cfg(target_arch = "powerpc64")]
use std::borrow::Cow;
use std::io::prelude::*;
use std::path::Path;
use std::process::Command;

use crate::blockdev;
use crate::bootupd::RootContext;
use crate::component::*;
use crate::freezethaw::fsfreeze_thaw_cycle;
use crate::grubconfigs;
use crate::model::*;
use crate::packagesystem;

// grub2-install file path
pub(crate) const GRUB_BIN: &str = "usr/sbin/grub2-install";

#[cfg(target_arch = "powerpc64")]
fn target_device(device: &str) -> Result<Cow<str>> {
    const PREPBOOT_GUID: &str = "9E1A2D38-C612-4316-AA26-8B49521E5A8B";
    /// We make a best-effort to support MBR partitioning too.
    const PREPBOOT_MBR_TYPE: &str = "41";

    // Here we use lsblk to see if the device has any partitions at all
    let dev = bootc_internal_blockdev::list_dev(device.into())?;
    if dev.children.is_none() {
        return Ok(device.into());
    };
    // If it does, directly call `sfdisk` and bypass lsblk because inside a container
    // we may not have all the cached udev state (that I think is in /run).
    let device = bootc_internal_blockdev::partitions_of(device.into())?;
    let prepdev = device
        .partitions
        .iter()
        .find(|p| matches!(p.parttype.as_str(), PREPBOOT_GUID | PREPBOOT_MBR_TYPE))
        .ok_or_else(|| {
            anyhow::anyhow!("Failed to find PReP partition with GUID {PREPBOOT_GUID}")
        })?;
    Ok(prepdev.path().as_str().to_owned().into())
}

#[derive(Default)]
pub(crate) struct Bios {}

impl Bios {
    // Return `true` if the grub2 modules are installed
    fn check_grub_modules(&self) -> Result<bool> {
        let usr_path = Path::new("/usr/lib/grub");
        #[cfg(target_arch = "x86_64")]
        {
            usr_path.join("i386-pc").try_exists().map_err(Into::into)
        }
        #[cfg(target_arch = "powerpc64")]
        {
            usr_path
                .join("powerpc-ieee1275")
                .try_exists()
                .map_err(Into::into)
        }
    }

    // Run grub2-install
    fn run_grub_install(&self, dest_root: &str, device: &str) -> Result<()> {
        if !self.check_grub_modules()? {
            bail!("Failed to find grub2-modules");
        }
        let grub_install = Path::new("/").join(GRUB_BIN);
        if !grub_install.exists() {
            bail!("Failed to find {:?}", grub_install);
        }

        let mut cmd = Command::new(grub_install);
        let boot_dir = Path::new(dest_root).join("boot");
        // We forcibly inject mdraid1x because it's needed by CoreOS's default of "install raw disk image"
        // We also add part_gpt because in some cases probing of the partition map can fail such
        // as in a container, but we always use GPT.
        #[cfg(target_arch = "x86_64")]
        cmd.args(["--target", "i386-pc"])
            .args(["--boot-directory", boot_dir.to_str().unwrap()])
            .args(["--modules", "mdraid1x part_gpt"])
            .arg(device);

        #[cfg(target_arch = "powerpc64")]
        {
            let device = target_device(device)?;
            cmd.args(&["--target", "powerpc-ieee1275"])
                .args(&["--boot-directory", boot_dir.to_str().unwrap()])
                .arg("--no-nvram")
                .arg(&*device);
        }

        let cmdout = cmd.output()?;
        if !cmdout.status.success() {
            std::io::stderr().write_all(&cmdout.stderr)?;
            bail!("Failed to run {:?}", cmd);
        }
        Ok(())
    }
}

impl Component for Bios {
    fn name(&self) -> &'static str {
        "BIOS"
    }

    fn install(
        &self,
        src_root: &openat::Dir,
        dest_root: &str,
        device: &str,
        _update_firmware: bool,
    ) -> Result<InstalledContent> {
        let Some(meta) = get_component_update(src_root, self)? else {
            anyhow::bail!("No update metadata for component {} found", self.name());
        };

        self.run_grub_install(dest_root, device)?;
        Ok(InstalledContent {
            meta,
            filetree: None,
            adopted_from: None,
        })
    }

    fn generate_update_metadata(&self, sysroot_path: &str) -> Result<ContentMetadata> {
        let grub_install = Path::new(sysroot_path).join(GRUB_BIN);
        if !grub_install.exists() {
            bail!("Failed to find {:?}", grub_install);
        }

        // Query the rpm database and list the package and build times for /usr/sbin/grub2-install
        let meta = packagesystem::query_files(sysroot_path, [&grub_install])?;
        write_update_metadata(sysroot_path, self, &meta)?;
        Ok(meta)
    }

    fn query_adopt(&self, devices: &Option<Vec<String>>) -> Result<Option<Adoptable>> {
        #[cfg(target_arch = "x86_64")]
        if crate::efi::is_efi_booted()? && devices.is_none() {
            log::debug!("Skip BIOS adopt");
            return Ok(None);
        }
        crate::component::query_adopt_state()
    }

    // Backup the current grub.cfg and replace with new static config
    // - Backup "/boot/loader/grub.cfg" to "/boot/grub2/grub.cfg.bak"
    // - Remove symlink "/boot/grub2/grub.cfg"
    // - Replace "/boot/grub2/grub.cfg" symlink with new static "grub.cfg"
    fn migrate_static_grub_config(&self, sysroot_path: &str, destdir: &openat::Dir) -> Result<()> {
        let grub = "boot/grub2";
        // sysroot_path is /, destdir is Dir of /
        let grub_config_path = Utf8PathBuf::from(sysroot_path).join(grub);
        let grub_config_dir = destdir.sub_dir(grub).context("Opening boot/grub2")?;

        let grub_config = grub_config_path.join(grubconfigs::GRUBCONFIG);

        if !grub_config.exists() {
            anyhow::bail!("Could not find '{}'", grub_config);
        }

        let mut current_config;
        // If /boot/grub2/grub.cfg is not a symlink, we need to keep going
        if !grub_config.is_symlink() {
            println!("'{}' is not a symlink", grub_config);
            current_config = grub_config.clone();
        } else {
            // If /boot/grub2/grub.cfg is a symlink to /boot/loader/grub.cfg,
            // back it up to /boot/grub2/grub.cfg.bak
            // Get the real file behind the symlink /boot/grub2/grub.cfg
            let real_config = grub_config_dir.read_link(grubconfigs::GRUBCONFIG)?;
            let real_config =
                Utf8PathBuf::from_path_buf(real_config).expect("Path should be valid UTF-8");
            // Resolve symlink location
            current_config = grub_config_path.clone();
            current_config.push(real_config);
        }

        let backup_config = grub_config_path.join(grubconfigs::GRUBCONFIG_BACKUP);
        if !backup_config.exists() {
            // Backup the current GRUB config which is hopefully working right now
            println!(
                "Creating a backup of the current GRUB config '{}' in '{}'...",
                current_config, backup_config
            );
            std::fs::copy(&current_config, &backup_config)
                .context("Failed to backup GRUB config")?;
        }

        crate::grubconfigs::install(&destdir, None, true)?;

        // Remove the resolved config if grub.cfg was a symlink; keep it in place
        // if /boot/grub2/grub.cfg is a regular file
        if current_config != grub_config {
            println!("Removing {}", current_config);
            grub_config_dir.remove_file_optional(current_config.as_std_path())?;
        }

        // Synchronize the filesystem containing /boot/grub2 to disk.
        fsfreeze_thaw_cycle(grub_config_dir.open_file(".")?)?;

        Ok(())
    }

    fn adopt_update(
        &self,
        rootcxt: &RootContext,
        update: &ContentMetadata,
        with_static_config: bool,
    ) -> Result<Option<InstalledContent>> {
        let bios_devices = blockdev::find_colocated_bios_boot(&rootcxt.devices)?;
        let Some(meta) = self.query_adopt(&bios_devices)? else {
            return Ok(None);
        };

        for parent in rootcxt.devices.iter() {
            self.run_grub_install(rootcxt.path.as_str(), &parent)?;
            log::debug!("Installed grub modules on {parent}");
        }

        if with_static_config {
            // Install the static config if the OSTree bootloader is not set.
            if let Some(bootloader) = crate::ostreeutil::get_ostree_bootloader()? {
                println!(
                    "ostree repo 'sysroot.bootloader' config option is currently set to: '{bootloader}'",
                );
            } else {
                println!("ostree repo 'sysroot.bootloader' config option is not set yet");
                self.migrate_static_grub_config(rootcxt.path.as_str(), &rootcxt.sysroot)?;
            };
        }
        Ok(Some(InstalledContent {
            meta: update.clone(),
            filetree: None,
            adopted_from: Some(meta.version),
        }))
    }

    fn query_update(&self, sysroot: &openat::Dir) -> Result<Option<ContentMetadata>> {
        get_component_update(sysroot, self)
    }

    fn run_update(&self, rootcxt: &RootContext, _: &InstalledContent) -> Result<InstalledContent> {
        let updatemeta = self
            .query_update(&rootcxt.sysroot)?
            .expect("update available");

        for parent in rootcxt.devices.iter() {
            self.run_grub_install(rootcxt.path.as_str(), &parent)?;
            log::debug!("Installed grub modules on {parent}");
        }

        let adopted_from = None;
        Ok(InstalledContent {
            meta: updatemeta,
            filetree: None,
            adopted_from,
        })
    }

    fn validate(&self, _: &InstalledContent) -> Result<ValidationResult> {
        Ok(ValidationResult::Skip)
    }

    fn get_efi_vendor(&self, _: &openat::Dir) -> Result<Option<String>> {
        Ok(None)
    }
}

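As a quick orientation (sketch only, not in this commit), the BIOS component above is normally reached through the generic Component interface; see src/bootupd.rs for the real call sites. The helper name below is hypothetical.

// Hypothetical in-crate helper showing the Component-trait view of Bios.
fn query_bios_update_example() -> anyhow::Result<()> {
    let sysroot = openat::Dir::open("/")?;
    let component = crate::component::new_from_name("BIOS")?;
    match component.query_update(&sysroot)? {
        Some(meta) => println!("BIOS update payload available: {}", meta.version),
        None => println!("No BIOS update payload found"),
    }
    Ok(())
}
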
99
bootupd/src/blockdev.rs
Executable file
@ -0,0 +1,99 @@
use camino::Utf8Path;
use std::path::Path;

use anyhow::{Context, Result};
use bootc_internal_blockdev::PartitionTable;
use fn_error_context::context;

#[context("get parent devices from mount point boot or sysroot")]
pub fn get_devices<P: AsRef<Path>>(target_root: P) -> Result<Vec<String>> {
    let target_root = target_root.as_ref();
    let mut source = None;

    for path in ["boot", "sysroot"] {
        let target_path = target_root.join(path);
        if !target_path.exists() {
            continue;
        }

        let target_dir = openat::Dir::open(&target_path)
            .with_context(|| format!("Opening {}", target_path.display()))?;
        if let Ok(fsinfo) = crate::filesystem::inspect_filesystem(&target_dir, ".") {
            source = Some(fsinfo.source);
            break;
        }
    }

    let source = match source {
        Some(s) => s,
        None => anyhow::bail!("Failed to inspect filesystem from boot or sysroot"),
    };

    // Find the parent devices of the source path
    let parent_devices = bootc_internal_blockdev::find_parent_devices(&source)
        .with_context(|| format!("While looking for backing devices of {}", source))?;
    log::debug!("Found parent devices: {parent_devices:?}");
    Ok(parent_devices)
}

/// Find the ESP partition on the given device,
/// using sfdisk to read the partition table
pub fn get_esp_partition(device: &str) -> Result<Option<String>> {
    const ESP_TYPE_GUID: &str = "C12A7328-F81F-11D2-BA4B-00A0C93EC93B";
    let device_info: PartitionTable =
        bootc_internal_blockdev::partitions_of(Utf8Path::new(device))?;
    let esp = device_info
        .partitions
        .into_iter()
        .find(|p| p.parttype.as_str() == ESP_TYPE_GUID);
    if let Some(esp) = esp {
        return Ok(Some(esp.node));
    }
    Ok(None)
}

/// Find all ESP partitions on the devices
pub fn find_colocated_esps(devices: &Vec<String>) -> Result<Option<Vec<String>>> {
    // look for all ESPs on those devices
    let mut esps = Vec::new();
    for device in devices {
        if let Some(esp) = get_esp_partition(&device)? {
            esps.push(esp)
        }
    }
    if esps.is_empty() {
        return Ok(None);
    }
    log::debug!("Found esp partitions: {esps:?}");
    Ok(Some(esps))
}

/// Find the bios_boot partition on the given device
pub fn get_bios_boot_partition(device: &str) -> Result<Option<String>> {
    const BIOS_BOOT_TYPE_GUID: &str = "21686148-6449-6E6F-744E-656564454649";
    let device_info = bootc_internal_blockdev::partitions_of(Utf8Path::new(device))?;
    let bios_boot = device_info
        .partitions
        .into_iter()
        .find(|p| p.parttype.as_str() == BIOS_BOOT_TYPE_GUID);
    if let Some(bios_boot) = bios_boot {
        return Ok(Some(bios_boot.node));
    }
    Ok(None)
}

/// Find all bios_boot partitions on the devices
pub fn find_colocated_bios_boot(devices: &Vec<String>) -> Result<Option<Vec<String>>> {
    // look for all bios_boot parts on those devices
    let mut bios_boots = Vec::new();
    for device in devices {
        if let Some(bios) = get_bios_boot_partition(&device)? {
            bios_boots.push(bios)
        }
    }
    if bios_boots.is_empty() {
        return Ok(None);
    }
    log::debug!("Found bios_boot partitions: {bios_boots:?}");
    Ok(Some(bios_boots))
}

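Sketch only (hypothetical helper name, not part of this commit): the usual composition of the helpers above, mirroring how RootContext construction and the adopt paths consume the results.

// Hypothetical in-crate helper combining get_devices with the partition lookups.
fn locate_boot_partitions_example() -> anyhow::Result<()> {
    // Parent disks backing /boot (or /sysroot) of the running system.
    let devices = crate::blockdev::get_devices("/")?;
    if let Some(esps) = crate::blockdev::find_colocated_esps(&devices)? {
        println!("ESP partitions: {}", esps.join(" "));
    }
    if let Some(bios_boot) = crate::blockdev::find_colocated_bios_boot(&devices)? {
        println!("bios_boot partitions: {}", bios_boot.join(" "));
    }
    Ok(())
}
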
772
bootupd/src/bootupd.rs
Executable file
@ -0,0 +1,772 @@
#[cfg(any(target_arch = "x86_64", target_arch = "powerpc64"))]
use crate::bios;
use crate::component;
use crate::component::{Component, ValidationResult};
use crate::coreos;
#[cfg(any(
    target_arch = "x86_64",
    target_arch = "aarch64",
    target_arch = "riscv64"
))]
use crate::efi;
use crate::freezethaw::fsfreeze_thaw_cycle;
use crate::model::{ComponentStatus, ComponentUpdatable, ContentMetadata, SavedState, Status};
use crate::{ostreeutil, util};
use anyhow::{anyhow, Context, Result};
use camino::{Utf8Path, Utf8PathBuf};
use clap::crate_version;
use fn_error_context::context;
use libc::mode_t;
use libc::{S_IRGRP, S_IROTH, S_IRUSR, S_IWUSR};
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use std::collections::BTreeMap;
use std::fs::{self, File};
use std::io::{BufRead, BufReader, BufWriter, Write};
use std::path::{Path, PathBuf};

pub(crate) enum ConfigMode {
    None,
    Static,
    WithUUID,
}

impl ConfigMode {
    pub(crate) fn enabled_with_uuid(&self) -> Option<bool> {
        match self {
            ConfigMode::None => None,
            ConfigMode::Static => Some(false),
            ConfigMode::WithUUID => Some(true),
        }
    }
}

pub(crate) fn install(
    source_root: &str,
    dest_root: &str,
    device: Option<&str>,
    configs: ConfigMode,
    update_firmware: bool,
    target_components: Option<&[String]>,
    auto_components: bool,
) -> Result<()> {
    // TODO: Change this to an Option<&str>; though this probably balloons into having
    // DeviceComponent and FileBasedComponent
    let device = device.unwrap_or("");
    let source_root = openat::Dir::open(source_root).context("Opening source root")?;
    SavedState::ensure_not_present(dest_root)
        .context("failed to install, invalid re-install attempted")?;

    let all_components = get_components_impl(auto_components);
    if all_components.is_empty() {
        println!("No components available for this platform.");
        return Ok(());
    }
    let target_components = if let Some(target_components) = target_components {
        // Checked by CLI parser
        assert!(!auto_components);
        target_components
            .iter()
            .map(|name| {
                all_components
                    .get(name.as_str())
                    .ok_or_else(|| anyhow!("Unknown component: {name}"))
            })
            .collect::<Result<Vec<_>>>()?
    } else {
        all_components.values().collect()
    };

    if target_components.is_empty() && !auto_components {
        anyhow::bail!("No components specified");
    }

    let mut state = SavedState::default();
    let mut installed_efi_vendor = None;
    for &component in target_components.iter() {
        // skip for BIOS if device is empty
        if component.name() == "BIOS" && device.is_empty() {
            println!(
                "Skip installing component {} without target device",
                component.name()
            );
            continue;
        }

        let meta = component
            .install(&source_root, dest_root, device, update_firmware)
            .with_context(|| format!("installing component {}", component.name()))?;
        log::info!("Installed {} {}", component.name(), meta.meta.version);
        state.installed.insert(component.name().into(), meta);
        // Yes this is a hack...the Component thing just turns out to be too generic.
        if let Some(vendor) = component.get_efi_vendor(&source_root)? {
            assert!(installed_efi_vendor.is_none());
            installed_efi_vendor = Some(vendor);
        }
    }
    let sysroot = &openat::Dir::open(dest_root)?;

    match configs.enabled_with_uuid() {
        Some(uuid) => {
            let meta = get_static_config_meta()?;
            state.static_configs = Some(meta);
            #[cfg(any(
                target_arch = "x86_64",
                target_arch = "aarch64",
                target_arch = "powerpc64",
                target_arch = "riscv64"
            ))]
            crate::grubconfigs::install(sysroot, installed_efi_vendor.as_deref(), uuid)?;
            // On other architectures, assume that there's nothing to do.
        }
        None => {}
    }

    // Unmount the ESP, etc.
    drop(target_components);

    let mut state_guard =
        SavedState::unlocked(sysroot.try_clone()?).context("failed to acquire write lock")?;
    state_guard
        .update_state(&state)
        .context("failed to update state")?;

    Ok(())
}

#[context("Get static config metadata")]
fn get_static_config_meta() -> Result<ContentMetadata> {
    let self_bin_meta = std::fs::metadata("/proc/self/exe").context("Querying self meta")?;
    let self_meta = ContentMetadata {
        timestamp: self_bin_meta.modified()?.into(),
        version: crate_version!().into(),
    };
    Ok(self_meta)
}

type Components = BTreeMap<&'static str, Box<dyn Component>>;

#[allow(clippy::box_default)]
/// Return the set of known components; if `auto` is specified then the system
/// filters to the target booted state.
pub(crate) fn get_components_impl(auto: bool) -> Components {
    let mut components = BTreeMap::new();

    fn insert_component(components: &mut Components, component: Box<dyn Component>) {
        components.insert(component.name(), component);
    }

    #[cfg(target_arch = "x86_64")]
    {
        if auto {
            let is_efi_booted = crate::efi::is_efi_booted().unwrap();
            log::info!(
                "System boot method: {}",
                if is_efi_booted { "EFI" } else { "BIOS" }
            );
            if is_efi_booted {
                insert_component(&mut components, Box::new(efi::Efi::default()));
            } else {
                insert_component(&mut components, Box::new(bios::Bios::default()));
            }
        } else {
            insert_component(&mut components, Box::new(bios::Bios::default()));
            insert_component(&mut components, Box::new(efi::Efi::default()));
        }
    }
    #[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))]
    insert_component(&mut components, Box::new(efi::Efi::default()));

    #[cfg(target_arch = "powerpc64")]
    insert_component(&mut components, Box::new(bios::Bios::default()));

    components
}

pub(crate) fn get_components() -> Components {
    get_components_impl(false)
}

pub(crate) fn generate_update_metadata(sysroot_path: &str) -> Result<()> {
    // create bootupd update dir which will save component metadata files for both components
    let updates_dir = Path::new(sysroot_path).join(crate::model::BOOTUPD_UPDATES_DIR);
    std::fs::create_dir_all(&updates_dir)
        .with_context(|| format!("Failed to create updates dir {:?}", &updates_dir))?;
    for component in get_components().values() {
        let v = component.generate_update_metadata(sysroot_path)?;
        println!(
            "Generated update layout for {}: {}",
            component.name(),
            v.version,
        );
    }

    Ok(())
}

/// Return value from daemon → client for component update
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "kebab-case")]
pub(crate) enum ComponentUpdateResult {
    AtLatestVersion,
    Updated {
        previous: ContentMetadata,
        interrupted: Option<ContentMetadata>,
        new: ContentMetadata,
    },
}

fn ensure_writable_boot() -> Result<()> {
    util::ensure_writable_mount("/boot")
}

/// daemon implementation of component update
pub(crate) fn update(name: &str, rootcxt: &RootContext) -> Result<ComponentUpdateResult> {
    let mut state = SavedState::load_from_disk("/")?.unwrap_or_default();
    let component = component::new_from_name(name)?;
    let inst = if let Some(inst) = state.installed.get(name) {
        inst.clone()
    } else {
        anyhow::bail!("Component {} is not installed", name);
    };
    let sysroot = &rootcxt.sysroot;
    let update = component.query_update(sysroot)?;
    let update = match update.as_ref() {
        Some(p) if inst.meta.can_upgrade_to(p) => p,
        _ => return Ok(ComponentUpdateResult::AtLatestVersion),
    };

    ensure_writable_boot()?;

    let mut pending_container = state.pending.take().unwrap_or_default();
    let interrupted = pending_container.get(component.name()).cloned();
    pending_container.insert(component.name().into(), update.clone());
    let sysroot = sysroot.try_clone()?;
    let mut state_guard =
        SavedState::acquire_write_lock(sysroot).context("Failed to acquire write lock")?;
    state_guard
        .update_state(&state)
        .context("Failed to update state")?;

    let newinst = component
        .run_update(rootcxt, &inst)
        .with_context(|| format!("Failed to update {}", component.name()))?;
    state.installed.insert(component.name().into(), newinst);
    pending_container.remove(component.name());
    state_guard.update_state(&state)?;

    Ok(ComponentUpdateResult::Updated {
        previous: inst.meta,
        interrupted,
        new: update.clone(),
    })
}

/// daemon implementation of component adoption
pub(crate) fn adopt_and_update(
    name: &str,
    rootcxt: &RootContext,
    with_static_config: bool,
) -> Result<Option<ContentMetadata>> {
    let sysroot = &rootcxt.sysroot;
    let mut state = SavedState::load_from_disk("/")?.unwrap_or_default();
    let component = component::new_from_name(name)?;
    if state.installed.contains_key(name) {
        anyhow::bail!("Component {} is already installed", name);
    };

    ensure_writable_boot()?;

    let Some(update) = component.query_update(sysroot)? else {
        anyhow::bail!("Component {} has no available update", name);
    };

    let sysroot = sysroot.try_clone()?;
    let mut state_guard =
        SavedState::acquire_write_lock(sysroot).context("Failed to acquire write lock")?;

    let inst = component
        .adopt_update(&rootcxt, &update, with_static_config)
        .context("Failed adopt and update")?;
    if let Some(inst) = inst {
        state.installed.insert(component.name().into(), inst);
        // Set static_configs metadata and save
        if with_static_config && state.static_configs.is_none() {
            let meta = get_static_config_meta()?;
            state.static_configs = Some(meta);
            // Set bootloader to none
            ostreeutil::set_ostree_bootloader("none")?;

            println!("Static GRUB configuration has been adopted successfully.");
        }
        state_guard.update_state(&state)?;
        return Ok(Some(update));
    } else {
        // Nothing adopted, skip
        log::info!("Component '{}' skipped adoption", component.name());
        return Ok(None);
    }
}

/// daemon implementation of component validate
pub(crate) fn validate(name: &str) -> Result<ValidationResult> {
    let state = SavedState::load_from_disk("/")?.unwrap_or_default();
    let component = component::new_from_name(name)?;
    let Some(inst) = state.installed.get(name) else {
        anyhow::bail!("Component {} is not installed", name);
    };
    component.validate(inst)
}

pub(crate) fn status() -> Result<Status> {
    let mut ret: Status = Default::default();
    let mut known_components = get_components();
    let sysroot = openat::Dir::open("/")?;
    let state = SavedState::load_from_disk("/")?;
    if let Some(state) = state {
        for (name, ic) in state.installed.iter() {
            log::trace!("Gathering status for installed component: {}", name);
            let component = known_components
                .remove(name.as_str())
                .ok_or_else(|| anyhow!("Unknown component installed: {}", name))?;
            let component = component.as_ref();
            let interrupted = state.pending.as_ref().and_then(|p| p.get(name.as_str()));
            let update = component.query_update(&sysroot)?;
            let updatable = ComponentUpdatable::from_metadata(&ic.meta, update.as_ref());
            let adopted_from = ic.adopted_from.clone();
            ret.components.insert(
                name.to_string(),
                ComponentStatus {
                    installed: ic.meta.clone(),
                    interrupted: interrupted.cloned(),
                    update,
                    updatable,
                    adopted_from,
                },
            );
        }
    } else {
        log::trace!("No saved state");
    }

    // Process the remaining components not installed
    log::trace!("Remaining known components: {}", known_components.len());
    for (name, _) in known_components {
        // To determine if not-installed components can be adopted:
        //
        // `query_adopt_state()` checks for existing installation state,
        // such as a `version` in `/sysroot/.coreos-aleph-version.json`,
        // or the presence of `/ostree/deploy`.
        //
        // `component.query_adopt()` performs additional checks,
        // including hardware/device requirements.
        // For example, it will skip BIOS adoption if the system is booted via EFI
        // and lacks a BIOS_BOOT partition.
        //
        // Once a component is determined to be adoptable, it is added to the
        // adoptable list, and adoption proceeds automatically.
        //
        // Therefore, calling `query_adopt_state()` alone is sufficient.
        if let Some(adopt_ver) = crate::component::query_adopt_state()? {
            ret.adoptable.insert(name.to_string(), adopt_ver);
        } else {
            log::trace!("Not adoptable: {}", name);
        }
    }

    Ok(ret)
}

pub(crate) fn print_status_avail(status: &Status) -> Result<()> {
    let mut avail = Vec::new();
    for (name, component) in status.components.iter() {
        if let ComponentUpdatable::Upgradable = component.updatable {
            avail.push(name.as_str());
        }
    }
    for (name, adoptable) in status.adoptable.iter() {
        if adoptable.confident {
            avail.push(name.as_str());
        }
    }
    if !avail.is_empty() {
        println!("Updates available: {}", avail.join(" "));
    }
    Ok(())
}

pub(crate) fn print_status(status: &Status) -> Result<()> {
    if status.components.is_empty() {
        println!("No components installed.");
    }
    for (name, component) in status.components.iter() {
        println!("Component {}", name);
        println!(" Installed: {}", component.installed.version);

        if let Some(i) = component.interrupted.as_ref() {
            println!(
                " WARNING: Previous update to {} was interrupted",
                i.version
            );
        }
        let msg = match component.updatable {
            ComponentUpdatable::NoUpdateAvailable => Cow::Borrowed("No update found"),
            ComponentUpdatable::AtLatestVersion => Cow::Borrowed("At latest version"),
            ComponentUpdatable::WouldDowngrade => Cow::Borrowed("Ignoring downgrade"),
            ComponentUpdatable::Upgradable => Cow::Owned(format!(
                "Available: {}",
                component.update.as_ref().expect("update").version
            )),
        };
        println!(" Update: {}", msg);
    }

    if status.adoptable.is_empty() {
        println!("No components are adoptable.");
    }
    for (name, adopt) in status.adoptable.iter() {
        let ver = &adopt.version.version;
        if adopt.confident {
            println!("Detected: {}: {}", name, ver);
        } else {
            println!("Adoptable: {}: {}", name, ver);
        }
    }

    if let Some(coreos_aleph) = coreos::get_aleph_version(Path::new("/"))? {
        println!("CoreOS aleph version: {}", coreos_aleph.aleph.version);
    }

    #[cfg(any(
        target_arch = "x86_64",
        target_arch = "aarch64",
        target_arch = "riscv64"
    ))]
    {
        let boot_method = if efi::is_efi_booted()? { "EFI" } else { "BIOS" };
        println!("Boot method: {}", boot_method);
    }

    Ok(())
}

pub struct RootContext {
    pub sysroot: openat::Dir,
    pub path: Utf8PathBuf,
    pub devices: Vec<String>,
}

impl RootContext {
    fn new(sysroot: openat::Dir, path: &str, devices: Vec<String>) -> Self {
        Self {
            sysroot,
            path: Utf8Path::new(path).into(),
            devices,
        }
    }
}

/// Initialize parent devices to prepare the update
fn prep_before_update() -> Result<RootContext> {
    let path = "/";
    let sysroot = openat::Dir::open(path).context("Opening root dir")?;
    let devices = crate::blockdev::get_devices(path).context("get parent devices")?;
    Ok(RootContext::new(sysroot, path, devices))
}

pub(crate) fn client_run_update() -> Result<()> {
    crate::try_fail_point!("update");
    let rootcxt = prep_before_update()?;
    let status: Status = status()?;
    if status.components.is_empty() && status.adoptable.is_empty() {
        println!("No components installed.");
        return Ok(());
    }
    let mut updated = false;
    for (name, cstatus) in status.components.iter() {
        match cstatus.updatable {
            ComponentUpdatable::Upgradable => {}
            _ => continue,
        };
        match update(name, &rootcxt)? {
            ComponentUpdateResult::AtLatestVersion => {
                // Shouldn't happen unless we raced with another client
                eprintln!(
                    "warning: Expected update for {}, raced with a different client?",
                    name
                );
                continue;
            }
            ComponentUpdateResult::Updated {
                previous,
                interrupted,
                new,
            } => {
                if let Some(i) = interrupted {
                    eprintln!(
                        "warning: Continued from previous interrupted update: {}",
                        i.version,
                    );
                }
                println!("Previous {}: {}", name, previous.version);
                println!("Updated {}: {}", name, new.version);
            }
        }
        updated = true;
    }
    for (name, adoptable) in status.adoptable.iter() {
        if adoptable.confident {
            if let Some(r) = adopt_and_update(name, &rootcxt, false)? {
                println!("Adopted and updated: {}: {}", name, r.version);
                updated = true;
            }
        } else {
            println!("Component {} requires explicit adopt-and-update", name);
        }
    }
    if !updated {
        println!("No update available for any component.");
    }
    Ok(())
}

pub(crate) fn client_run_adopt_and_update(with_static_config: bool) -> Result<()> {
    let rootcxt = prep_before_update()?;
    let status: Status = status()?;
    if status.adoptable.is_empty() {
        println!("No components are adoptable.");
    } else {
        for (name, _) in status.adoptable.iter() {
            if let Some(r) = adopt_and_update(name, &rootcxt, with_static_config)? {
                println!("Adopted and updated: {}: {}", name, r.version);
            }
        }
    }
    Ok(())
}

pub(crate) fn client_run_validate() -> Result<()> {
    let status: Status = status()?;
    if status.components.is_empty() {
        println!("No components installed.");
        return Ok(());
    }
    let mut caught_validation_error = false;
    for (name, _) in status.components.iter() {
        match validate(name)? {
            ValidationResult::Valid => {
                println!("Validated: {}", name);
            }
            ValidationResult::Skip => {
                println!("Skipped: {}", name);
            }
            ValidationResult::Errors(errs) => {
                for err in errs {
                    eprintln!("{}", err);
                }
                caught_validation_error = true;
            }
        }
    }
    if caught_validation_error {
        anyhow::bail!("Caught validation errors");
    }
    Ok(())
}

#[context("Migrating to a static GRUB config")]
pub(crate) fn client_run_migrate_static_grub_config() -> Result<()> {
    // Did we already complete the migration?
    // We need to migrate if bootloader is not none (or not set)
    if let Some(bootloader) = ostreeutil::get_ostree_bootloader()? {
        if bootloader == "none" {
            println!("Already using a static GRUB config");
            return Ok(());
        }
        println!(
            "ostree repo 'sysroot.bootloader' config option is currently set to: '{}'",
            bootloader
        );
    } else {
        println!("ostree repo 'sysroot.bootloader' config option is not set yet");
    }

    // Remount /boot read write just for this unit (we are called in a slave mount namespace by systemd)
    ensure_writable_boot()?;

    let grub_config_dir = PathBuf::from("/boot/grub2");
    let dirfd = openat::Dir::open(&grub_config_dir).context("Opening /boot/grub2")?;

    // We mark the bootloader as BLS capable to disable the ostree-grub2 logic.
    // We can do that as we know that we are run after the bootloader has been
    // updated and all recent GRUB2 versions support reading BLS configs.
    // Ignore errors as this is not critical. This is a safety net if a user
    // manually overwrites the (soon) static GRUB config by calling `grub2-mkconfig`.
    // We need this until we can rely on ostree-grub2 being removed from the image.
    println!("Marking bootloader as BLS capable...");
    _ = File::create("/boot/grub2/.grub2-blscfg-supported");

    // Migrate /boot/grub2/grub.cfg to a static GRUB config if it is a symlink
    let grub_config_filename = PathBuf::from("/boot/grub2/grub.cfg");
    match dirfd.read_link("grub.cfg") {
        Err(_) => {
            println!(
                "'{}' is not a symlink, nothing to migrate",
                grub_config_filename.display()
            );
        }
        Ok(path) => {
            println!("Migrating to a static GRUB config...");

            // Resolve symlink location
            let mut current_config = grub_config_dir.clone();
            current_config.push(path);

            // Backup the current GRUB config which is hopefully working right now
            let backup_config = PathBuf::from("/boot/grub2/grub.cfg.backup");
            println!(
                "Creating a backup of the current GRUB config '{}' in '{}'...",
                current_config.display(),
                backup_config.display()
            );
            fs::copy(&current_config, &backup_config).context("Failed to backup GRUB config")?;

            // Read the current config, strip the ostree generated GRUB entries and
            // write the result to a temporary file
            println!("Stripping ostree generated entries from GRUB config...");
            let stripped_config = "grub.cfg.stripped";
            let current_config_file =
                File::open(current_config).context("Could not open current GRUB config")?;
            let content = BufReader::new(current_config_file);

            strip_grub_config_file(content, &dirfd, stripped_config)?;

            // Atomically replace the symlink
            dirfd
                .local_rename(stripped_config, "grub.cfg")
                .context("Failed to replace symlink with current GRUB config")?;

            fsfreeze_thaw_cycle(dirfd.open_file(".")?)?;

            println!("GRUB config symlink successfully replaced with the current config");
        }
    };

    println!("Setting 'sysroot.bootloader' to 'none' in ostree repo config...");
    ostreeutil::set_ostree_bootloader("none")?;

    println!("Static GRUB config migration completed successfully");
    Ok(())
}

/// Writes a stripped GRUB config to `stripped_config_name`, removing lines between
/// `### BEGIN /etc/grub.d/15_ostree ###` and `### END /etc/grub.d/15_ostree ###`.
fn strip_grub_config_file(
    current_config_content: impl BufRead,
    dirfd: &openat::Dir,
    stripped_config_name: &str,
) -> Result<()> {
    // mode = -rw-r--r-- (644)
    let mut writer = BufWriter::new(
        dirfd
            .write_file(
                stripped_config_name,
                (S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH) as mode_t,
            )
            .context("Failed to open temporary GRUB config")?,
    );

    let mut skip = false;
    for line in current_config_content.lines() {
        let line = line.context("Failed to read line from GRUB config")?;
        if line == "### END /etc/grub.d/15_ostree ###" {
            skip = false;
            continue;
        }
        if skip {
            continue;
        }
        if line == "### BEGIN /etc/grub.d/15_ostree ###" {
            skip = true;
            continue;
        }
        writer
            .write_all(line.as_bytes())
            .context("Failed to write stripped GRUB config")?;
        writer
            .write_all(b"\n")
            .context("Failed to write stripped GRUB config")?;
    }

    writer
        .into_inner()
        .context("Failed to flush stripped GRUB config")?
        .sync_data()
        .context("Failed to sync stripped GRUB config")?;

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_failpoint_update() {
        let guard = fail::FailScenario::setup();
        fail::cfg("update", "return").unwrap();
        let r = client_run_update();
        assert_eq!(r.is_err(), true);
        guard.teardown();
    }

    #[test]
    fn test_strip_grub_config_file() -> Result<()> {
        let root: &tempfile::TempDir = &tempfile::tempdir()?;
        let root_path = root.path();
        let rootd = openat::Dir::open(root_path)?;
        let stripped_config = root_path.join("stripped");
        let content = r"
### BEGIN /etc/grub.d/10_linux ###

### END /etc/grub.d/10_linux ###

### BEGIN /etc/grub.d/15_ostree ###
menuentry 'Red Hat Enterprise Linux CoreOS 4 (ostree)' --class gnu-linux --class gnu --class os --unrestricted 'ostree-0-a92522f9-74dc-456a-ae0c-05ba22bca976' {
load_video
set gfxpayload=keep
insmod gzio
insmod part_gpt
insmod ext2
if [ x$feature_platform_search_hint = xy ]; then
search --no-floppy --fs-uuid --set=root a92522f9-74dc-456a-ae0c-05ba22bca976
else
search --no-floppy --fs-uuid --set=root a92522f9-74dc-456a-ae0c-05ba22bca976
fi
linuxefi /ostree/rhcos-bf3b382/vmlinuz console=tty0 console=ttyS0,115200n8 rootflags=defaults,prjquota rw $ignition_firstboot root=UUID=cbac8cdc
initrdefi /ostree/rhcos-bf3b382/initramfs.img
}
### END /etc/grub.d/15_ostree ###

### BEGIN /etc/grub.d/20_linux_xen ###
### END /etc/grub.d/20_linux_xen ###";

        strip_grub_config_file(
            BufReader::new(std::io::Cursor::new(content)),
            &rootd,
            stripped_config.to_str().unwrap(),
        )?;
        let stripped_content = fs::read_to_string(stripped_config)?;
        let expected = r"
### BEGIN /etc/grub.d/10_linux ###

### END /etc/grub.d/10_linux ###


### BEGIN /etc/grub.d/20_linux_xen ###
### END /etc/grub.d/20_linux_xen ###
";
        assert_eq!(expected, stripped_content);
        Ok(())
    }
}

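For context (illustrative sketch, not part of this commit): roughly how the backend `install` verb is expected to drive `install()` above. Every literal argument here is an example value, not a default.

// Hypothetical driver for bootupd::install(); paths and device are examples only.
fn install_example() -> anyhow::Result<()> {
    crate::bootupd::install(
        "/",                // source root providing the update payloads
        "/target",          // destination root (example path)
        Some("/dev/vda"),   // target disk for the BIOS component (example)
        crate::bootupd::ConfigMode::Static,
        false,              // leave EFI firmware variables untouched
        None,               // no explicit component list ...
        true,               // ... auto-select components from the booted host
    )
}
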
222
bootupd/src/cli/bootupctl.rs
Executable file
@ -0,0 +1,222 @@
|
|||
use crate::bootupd;
|
||||
use anyhow::Result;
|
||||
use clap::Parser;
|
||||
use log::LevelFilter;
|
||||
|
||||
use std::os::unix::process::CommandExt;
|
||||
use std::process::{Command, Stdio};
|
||||
|
||||
static SYSTEMD_ARGS_BOOTUPD: &[&str] = &["--unit", "bootupd", "--pipe"];
|
||||
|
||||
/// Keep these properties (isolation/runtime state) in sync with
|
||||
/// the systemd units in contrib/packaging/*.service
|
||||
static SYSTEMD_PROPERTIES: &[&str] = &[
|
||||
"PrivateNetwork=yes",
|
||||
"ProtectHome=yes",
|
||||
// While only our main process during update catches SIGTERM, we don't
|
||||
// want systemd to send it to other processes.
|
||||
"KillMode=mixed",
|
||||
"MountFlags=slave",
|
||||
];
|
||||
|
||||
/// `bootupctl` sub-commands.
|
||||
#[derive(Debug, Parser)]
|
||||
#[clap(name = "bootupctl", about = "Bootupd client application", version)]
|
||||
pub struct CtlCommand {
|
||||
/// Verbosity level (higher is more verbose).
|
||||
#[clap(short = 'v', action = clap::ArgAction::Count, global = true)]
|
||||
verbosity: u8,
|
||||
|
||||
/// CLI sub-command.
|
||||
#[clap(subcommand)]
|
||||
pub cmd: CtlVerb,
|
||||
}
|
||||
|
||||
impl CtlCommand {
|
||||
/// Return the log-level set via command-line flags.
|
||||
pub(crate) fn loglevel(&self) -> LevelFilter {
|
||||
match self.verbosity {
|
||||
0 => LevelFilter::Warn,
|
||||
1 => LevelFilter::Info,
|
||||
2 => LevelFilter::Debug,
|
||||
_ => LevelFilter::Trace,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// CLI sub-commands.
|
||||
#[derive(Debug, Parser)]
|
||||
pub enum CtlVerb {
|
||||
// FIXME(lucab): drop this after refreshing
|
||||
// https://github.com/coreos/fedora-coreos-config/pull/595
|
||||
#[clap(name = "backend", hide = true, subcommand)]
|
||||
Backend(CtlBackend),
|
||||
#[clap(name = "status", about = "Show components status")]
|
||||
Status(StatusOpts),
|
||||
#[clap(name = "update", about = "Update all components")]
|
||||
Update,
|
||||
#[clap(name = "adopt-and-update", about = "Update all adoptable components")]
|
||||
AdoptAndUpdate(AdoptAndUpdateOpts),
|
||||
#[clap(name = "validate", about = "Validate system state")]
|
||||
Validate,
|
||||
#[clap(
|
||||
name = "migrate-static-grub-config",
|
||||
hide = true,
|
||||
about = "Migrate a system to a static GRUB config"
|
||||
)]
|
||||
MigrateStaticGrubConfig,
|
||||
}
|
||||
|
||||
#[derive(Debug, Parser)]
|
||||
pub enum CtlBackend {
|
||||
#[clap(name = "generate-update-metadata", hide = true)]
|
||||
Generate(super::bootupd::GenerateOpts),
|
||||
#[clap(name = "install", hide = true)]
|
||||
Install(super::bootupd::InstallOpts),
|
||||
}
|
||||
|
||||
#[derive(Debug, Parser)]
|
||||
pub struct StatusOpts {
|
||||
/// If there are updates available, output `Updates available: ` to standard output;
|
||||
/// otherwise output nothing. Avoid parsing this, just check whether or not
|
||||
/// the output is empty.
|
||||
#[clap(long, action)]
|
||||
print_if_available: bool,
|
||||
|
||||
/// Output JSON
|
||||
#[clap(long, action)]
|
||||
json: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Parser)]
|
||||
pub struct AdoptAndUpdateOpts {
|
||||
/// Install the static GRUB config files
|
||||
#[clap(long, action)]
|
    with_static_config: bool,
}

impl CtlCommand {
    /// Run CLI application.
    pub fn run(self) -> Result<()> {
        match self.cmd {
            CtlVerb::Status(opts) => Self::run_status(opts),
            CtlVerb::Update => Self::run_update(),
            CtlVerb::AdoptAndUpdate(opts) => Self::run_adopt_and_update(opts),
            CtlVerb::Validate => Self::run_validate(),
            CtlVerb::Backend(CtlBackend::Generate(opts)) => {
                super::bootupd::DCommand::run_generate_meta(opts)
            }
            CtlVerb::Backend(CtlBackend::Install(opts)) => {
                super::bootupd::DCommand::run_install(opts)
            }
            CtlVerb::MigrateStaticGrubConfig => Self::run_migrate_static_grub_config(),
        }
    }

    /// Runner for `status` verb.
    fn run_status(opts: StatusOpts) -> Result<()> {
        if crate::util::running_in_container() {
            return run_status_in_container(opts.json);
        }
        ensure_running_in_systemd()?;
        let r = bootupd::status()?;
        if opts.json {
            let stdout = std::io::stdout();
            let mut stdout = stdout.lock();
            serde_json::to_writer_pretty(&mut stdout, &r)?;
        } else if opts.print_if_available {
            bootupd::print_status_avail(&r)?;
        } else {
            bootupd::print_status(&r)?;
        }

        Ok(())
    }

    /// Runner for `update` verb.
    fn run_update() -> Result<()> {
        ensure_running_in_systemd()?;
        bootupd::client_run_update()
    }

    /// Runner for `adopt-and-update` verb.
    fn run_adopt_and_update(opts: AdoptAndUpdateOpts) -> Result<()> {
        ensure_running_in_systemd()?;
        bootupd::client_run_adopt_and_update(opts.with_static_config)
    }

    /// Runner for `validate` verb.
    fn run_validate() -> Result<()> {
        ensure_running_in_systemd()?;
        bootupd::client_run_validate()
    }

    /// Runner for `migrate-static-grub-config` verb.
    fn run_migrate_static_grub_config() -> Result<()> {
        ensure_running_in_systemd()?;
        bootupd::client_run_migrate_static_grub_config()
    }
}

/// Checks if the current process is (apparently at least)
/// running under systemd.
fn running_in_systemd() -> bool {
    std::env::var_os("INVOCATION_ID").is_some()
}

/// Require root permission
fn require_root_permission() -> Result<()> {
    if !rustix::process::getuid().is_root() {
        anyhow::bail!("This command requires root privileges")
    }
    Ok(())
}

/// Detect if we're running in systemd; if we're not, we re-exec ourselves via
/// systemd-run. Then we can just directly run code in what is now the daemon.
fn ensure_running_in_systemd() -> Result<()> {
    require_root_permission()?;
    let running_in_systemd = running_in_systemd();
    if !running_in_systemd {
        // Clear any failure status that may have happened previously
        let _r = Command::new("systemctl")
            .arg("reset-failed")
            .arg("bootupd.service")
            .stdout(Stdio::null())
            .stderr(Stdio::null())
            .spawn()?
            .wait()?;
        let r = Command::new("systemd-run")
            .args(SYSTEMD_ARGS_BOOTUPD)
            .args(
                SYSTEMD_PROPERTIES
                    .into_iter()
                    .flat_map(|&v| ["--property", v]),
            )
            .args(std::env::args())
            .exec();
        // If we got here, it's always an error
        return Err(r.into());
    }
    Ok(())
}

/// If running in a container, just print the available payloads
fn run_status_in_container(json_format: bool) -> Result<()> {
    let all_components = crate::bootupd::get_components();
    if all_components.is_empty() {
        return Ok(());
    }
    let avail: Vec<_> = all_components.keys().cloned().collect();
    if json_format {
        let stdout = std::io::stdout();
        let mut stdout = stdout.lock();
        let output: serde_json::Value = serde_json::json!({
            "components": avail
        });
        serde_json::to_writer(&mut stdout, &output)?;
    } else {
        println!("Available components: {}", avail.join(" "));
    }
    Ok(())
}
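// Illustrative sketch (not part of this commit) of the re-exec pattern used by
// ensure_running_in_systemd() above: outside systemd there is no INVOCATION_ID
// in the environment, so the process replaces itself with a transient unit via
// systemd-run. The unit name and flags below are made up for illustration; the
// real code passes the SYSTEMD_ARGS_BOOTUPD/SYSTEMD_PROPERTIES constants defined
// earlier in this file.
#[allow(dead_code)]
fn reexec_under_systemd_sketch() -> Result<()> {
    use std::os::unix::process::CommandExt;
    if std::env::var_os("INVOCATION_ID").is_some() {
        // Already running under systemd; nothing to do.
        return Ok(());
    }
    let err = Command::new("systemd-run")
        .args(["--unit", "bootupd-sketch.service", "--wait", "--pipe"])
        .args(std::env::args())
        .exec();
    // exec() only returns on failure.
    Err(err.into())
}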
125
bootupd/src/cli/bootupd.rs
Executable file
@ -0,0 +1,125 @@
use crate::bootupd::{self, ConfigMode};
use anyhow::{Context, Result};
use clap::Parser;
use log::LevelFilter;

/// `bootupd` sub-commands.
#[derive(Debug, Parser)]
#[clap(name = "bootupd", about = "Bootupd backend commands", version)]
pub struct DCommand {
    /// Verbosity level (higher is more verbose).
    #[clap(short = 'v', action = clap::ArgAction::Count, global = true)]
    verbosity: u8,

    /// CLI sub-command.
    #[clap(subcommand)]
    pub cmd: DVerb,
}

impl DCommand {
    /// Return the log-level set via command-line flags.
    pub(crate) fn loglevel(&self) -> LevelFilter {
        match self.verbosity {
            0 => LevelFilter::Warn,
            1 => LevelFilter::Info,
            2 => LevelFilter::Debug,
            _ => LevelFilter::Trace,
        }
    }
}

/// CLI sub-commands.
#[derive(Debug, Parser)]
pub enum DVerb {
    #[clap(name = "generate-update-metadata", about = "Generate metadata")]
    GenerateUpdateMetadata(GenerateOpts),
    #[clap(name = "install", about = "Install components")]
    Install(InstallOpts),
}

#[derive(Debug, Parser)]
pub struct InstallOpts {
    /// Source root
    #[clap(long, value_parser, default_value_t = String::from("/"))]
    src_root: String,
    /// Target root
    #[clap(value_parser)]
    dest_root: String,

    /// Target device, used by the BIOS bootloader installation
    #[clap(long)]
    device: Option<String>,

    /// Enable installation of the built-in static config files
    #[clap(long)]
    with_static_configs: bool,

    /// Implies `--with-static-configs`. When present, this also writes a
    /// file with the UUID of the target filesystems.
    #[clap(long)]
    write_uuid: bool,

    /// On EFI systems, invoke `efibootmgr` to update the firmware.
    #[clap(long)]
    update_firmware: bool,

    #[clap(long = "component", conflicts_with = "auto")]
    /// Only install these components
    components: Option<Vec<String>>,

    /// Automatically choose components based on booted host state.
    ///
    /// For example on x86_64, if the host system is booted via EFI,
    /// then only enable installation to the ESP.
    #[clap(long)]
    auto: bool,
}

#[derive(Debug, Parser)]
pub struct GenerateOpts {
    /// Physical root mountpoint
    #[clap(value_parser)]
    sysroot: Option<String>,
}

impl DCommand {
    /// Run CLI application.
    pub fn run(self) -> Result<()> {
        match self.cmd {
            DVerb::Install(opts) => Self::run_install(opts),
            DVerb::GenerateUpdateMetadata(opts) => Self::run_generate_meta(opts),
        }
    }

    /// Runner for `generate-update-metadata` verb.
    pub(crate) fn run_generate_meta(opts: GenerateOpts) -> Result<()> {
        let sysroot = opts.sysroot.as_deref().unwrap_or("/");
        if sysroot != "/" {
            anyhow::bail!("Using a non-default sysroot is not supported: {}", sysroot);
        }
        bootupd::generate_update_metadata(sysroot).context("generating metadata failed")?;
        Ok(())
    }

    /// Runner for `install` verb.
    pub(crate) fn run_install(opts: InstallOpts) -> Result<()> {
        let configmode = if opts.write_uuid {
            ConfigMode::WithUUID
        } else if opts.with_static_configs {
            ConfigMode::Static
        } else {
            ConfigMode::None
        };
        bootupd::install(
            &opts.src_root,
            &opts.dest_root,
            opts.device.as_deref(),
            configmode,
            opts.update_firmware,
            opts.components.as_deref(),
            opts.auto,
        )
        .context("boot data installation failed")?;
        Ok(())
    }
}
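// Illustrative sketch (not part of this commit): how the clap derives above parse
// an `install` invocation. The device and target paths are hypothetical.
#[cfg(test)]
mod install_cli_sketch {
    use super::*;
    use clap::Parser;

    #[test]
    fn parse_install() {
        let cmd = DCommand::parse_from([
            "bootupd",
            "install",
            "--device",
            "/dev/vda", // hypothetical whole-disk device
            "--with-static-configs",
            "/mnt/target", // hypothetical dest_root
        ]);
        assert!(matches!(cmd.cmd, DVerb::Install(_)));
    }
}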
107
bootupd/src/cli/mod.rs
Executable file
@ -0,0 +1,107 @@
//! Command-line interface (CLI) logic.

use anyhow::Result;
use clap::Parser;
use log::LevelFilter;
mod bootupctl;
mod bootupd;

/// Top-level multicall CLI.
#[derive(Debug, Parser)]
pub enum MultiCall {
    Ctl(bootupctl::CtlCommand),
    D(bootupd::DCommand),
}

impl MultiCall {
    pub fn from_args(args: Vec<String>) -> Self {
        use std::os::unix::ffi::OsStrExt;

        // This is a multicall binary, dispatched based on the introspected
        // filename found in argv[0].
        let exe_name = {
            let arg0 = args.get(0).cloned().unwrap_or_default();
            let exe_path = std::path::PathBuf::from(arg0);
            exe_path.file_name().unwrap_or_default().to_os_string()
        };
        #[allow(clippy::wildcard_in_or_patterns)]
        match exe_name.as_bytes() {
            b"bootupctl" => MultiCall::Ctl(bootupctl::CtlCommand::parse_from(args)),
            b"bootupd" | _ => MultiCall::D(bootupd::DCommand::parse_from(args)),
        }
    }

    pub fn run(self) -> Result<()> {
        match self {
            MultiCall::Ctl(ctl_cmd) => ctl_cmd.run(),
            MultiCall::D(d_cmd) => d_cmd.run(),
        }
    }

    /// Return the log-level set via command-line flags.
    pub fn loglevel(&self) -> LevelFilter {
        match self {
            MultiCall::Ctl(cmd) => cmd.loglevel(),
            MultiCall::D(cmd) => cmd.loglevel(),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn clap_apps() {
        use clap::CommandFactory;
        bootupctl::CtlCommand::command().debug_assert();
        bootupd::DCommand::command().debug_assert();
    }

    #[test]
    fn test_multicall_dispatch() {
        {
            let d_argv = vec![
                "/usr/bin/bootupd".to_string(),
                "generate-update-metadata".to_string(),
            ];
            let cli = MultiCall::from_args(d_argv);
            match cli {
                MultiCall::Ctl(cmd) => panic!("{:?}", cmd),
                MultiCall::D(_) => {}
            };
        }
        {
            let ctl_argv = vec!["/usr/bin/bootupctl".to_string(), "validate".to_string()];
            let cli = MultiCall::from_args(ctl_argv);
            match cli {
                MultiCall::Ctl(_) => {}
                MultiCall::D(cmd) => panic!("{:?}", cmd),
            };
        }
        {
            let ctl_argv = vec!["/bin-mount/bootupctl".to_string(), "validate".to_string()];
            let cli = MultiCall::from_args(ctl_argv);
            match cli {
                MultiCall::Ctl(_) => {}
                MultiCall::D(cmd) => panic!("{:?}", cmd),
            };
        }
    }

    #[test]
    fn test_verbosity() {
        let default = MultiCall::from_args(vec![
            "bootupd".to_string(),
            "generate-update-metadata".to_string(),
        ]);
        assert_eq!(default.loglevel(), LevelFilter::Warn);

        let info = MultiCall::from_args(vec![
            "bootupd".to_string(),
            "generate-update-metadata".to_string(),
            "-v".to_string(),
        ]);
        assert_eq!(info.loglevel(), LevelFilter::Info);
    }
}
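// Illustrative sketch (not part of this commit) of a multicall entrypoint wired
// to the types above; the real main() lives outside this module and likely also
// installs a logger. Packaging assumption: the single binary is exposed as both
// `bootupd` and `bootupctl` (e.g. `ln -s bootupd /usr/bin/bootupctl`), so the
// argv[0]-based dispatch in from_args() selects the right CLI.
#[allow(dead_code)]
fn multicall_main_sketch() -> Result<()> {
    let args: Vec<String> = std::env::args().collect();
    let cli = MultiCall::from_args(args);
    // Only adjusts the global max level; a real main() would install a logger too.
    log::set_max_level(cli.loglevel());
    cli.run()
}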
232
bootupd/src/component.rs
Executable file
@ -0,0 +1,232 @@
/*
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

use anyhow::{Context, Result};
use fn_error_context::context;
use openat_ext::OpenatDirExt;
use serde::{Deserialize, Serialize};
use std::path::{Path, PathBuf};

use crate::{bootupd::RootContext, model::*};

#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "kebab-case")]
pub(crate) enum ValidationResult {
    Valid,
    Skip,
    Errors(Vec<String>),
}

/// A component along with a possible update
pub(crate) trait Component {
    /// Returns the name of the component; this will be used for serialization
    /// and should remain stable.
    fn name(&self) -> &'static str;

    /// In an operating system whose initially booted disk image is not
    /// using bootupd, detect whether it looks like the component exists
    /// and "synthesize" content metadata from it.
    fn query_adopt(&self, devices: &Option<Vec<String>>) -> Result<Option<Adoptable>>;

    // Backup the current grub config, and install static grub config from tree
    fn migrate_static_grub_config(&self, sysroot_path: &str, destdir: &openat::Dir) -> Result<()>;

    /// Given an adoptable system and an update, perform the update.
    fn adopt_update(
        &self,
        rootcxt: &RootContext,
        update: &ContentMetadata,
        with_static_config: bool,
    ) -> Result<Option<InstalledContent>>;

    /// Implementation of `bootupd install` for a given component. This should
    /// gather data (or run binaries) from the source root, and install them
    /// into the target root. It is expected that sub-partitions (e.g. the ESP)
    /// are mounted at the expected place. For operations that require a block device instead
    /// of a filesystem root, the component should query the mount point to
    /// determine the block device.
    /// This will be run during a disk image build process.
    fn install(
        &self,
        src_root: &openat::Dir,
        dest_root: &str,
        device: &str,
        update_firmware: bool,
    ) -> Result<InstalledContent>;

    /// Implementation of `bootupd generate-update-metadata` for a given component.
    /// This expects to be run during an "image update build" process. For CoreOS
    /// this is an `rpm-ostree compose tree` for example. For a dual-partition
    /// style updater, this would be run as part of a postprocessing step
    /// while the filesystem for the partition is mounted.
    fn generate_update_metadata(&self, sysroot: &str) -> Result<ContentMetadata>;

    /// Used on the client to query for an update cached in the current booted OS.
    fn query_update(&self, sysroot: &openat::Dir) -> Result<Option<ContentMetadata>>;

    /// Used on the client to run an update.
    fn run_update(
        &self,
        rootcxt: &RootContext,
        current: &InstalledContent,
    ) -> Result<InstalledContent>;

    /// Used on the client to validate an installed version.
    fn validate(&self, current: &InstalledContent) -> Result<ValidationResult>;

    /// Locating efi vendor dir
    fn get_efi_vendor(&self, sysroot: &openat::Dir) -> Result<Option<String>>;
}

/// Given a component name, create an implementation.
pub(crate) fn new_from_name(name: &str) -> Result<Box<dyn Component>> {
    let r: Box<dyn Component> = match name {
        #[cfg(any(
            target_arch = "x86_64",
            target_arch = "aarch64",
            target_arch = "riscv64"
        ))]
        #[allow(clippy::box_default)]
        "EFI" => Box::new(crate::efi::Efi::default()),
        #[cfg(any(target_arch = "x86_64", target_arch = "powerpc64"))]
        #[allow(clippy::box_default)]
        "BIOS" => Box::new(crate::bios::Bios::default()),
        _ => anyhow::bail!("No component {}", name),
    };
    Ok(r)
}

/// Returns the path to the payload directory for an available update for
/// a component.
#[cfg(any(
    target_arch = "x86_64",
    target_arch = "aarch64",
    target_arch = "riscv64"
))]
pub(crate) fn component_updatedirname(component: &dyn Component) -> PathBuf {
    Path::new(BOOTUPD_UPDATES_DIR).join(component.name())
}

/// Returns the path to the payload directory for an available update for
/// a component.
#[cfg(any(
    target_arch = "x86_64",
    target_arch = "aarch64",
    target_arch = "riscv64"
))]
pub(crate) fn component_updatedir(sysroot: &str, component: &dyn Component) -> PathBuf {
    Path::new(sysroot).join(component_updatedirname(component))
}

/// Returns the name of the JSON file containing a component's available update metadata installed
/// into the booted operating system root.
fn component_update_data_name(component: &dyn Component) -> PathBuf {
    Path::new(&format!("{}.json", component.name())).into()
}

/// Helper method for writing an update file
pub(crate) fn write_update_metadata(
    sysroot: &str,
    component: &dyn Component,
    meta: &ContentMetadata,
) -> Result<()> {
    let sysroot = openat::Dir::open(sysroot)?;
    let dir = sysroot.sub_dir(BOOTUPD_UPDATES_DIR)?;
    let name = component_update_data_name(component);
    dir.write_file_with(name, 0o644, |w| -> Result<_> {
        Ok(serde_json::to_writer(w, &meta)?)
    })?;
    Ok(())
}

/// Given a component, return metadata on the available update (if any)
#[context("Loading update for component {}", component.name())]
pub(crate) fn get_component_update(
    sysroot: &openat::Dir,
    component: &dyn Component,
) -> Result<Option<ContentMetadata>> {
    let name = component_update_data_name(component);
    let path = Path::new(BOOTUPD_UPDATES_DIR).join(name);
    if let Some(f) = sysroot.open_file_optional(&path)? {
        let mut f = std::io::BufReader::new(f);
        let u = serde_json::from_reader(&mut f)
            .with_context(|| format!("failed to parse {:?}", &path))?;
        Ok(Some(u))
    } else {
        Ok(None)
    }
}

#[context("Querying adoptable state")]
pub(crate) fn query_adopt_state() -> Result<Option<Adoptable>> {
    // This would be extended with support for other operating systems later
    if let Some(coreos_aleph) = crate::coreos::get_aleph_version(Path::new("/"))? {
        let meta = ContentMetadata {
            timestamp: coreos_aleph.ts,
            version: coreos_aleph.aleph.version,
        };
        log::trace!("Adoptable: {:?}", &meta);
        return Ok(Some(Adoptable {
            version: meta,
            confident: true,
        }));
    } else {
        log::trace!("No CoreOS aleph detected");
    }
    let ostree_deploy_dir = Path::new("/ostree/deploy");
    if ostree_deploy_dir.exists() {
        let btime = ostree_deploy_dir.metadata()?.created()?;
        let timestamp = chrono::DateTime::from(btime);
        let meta = ContentMetadata {
            timestamp,
            version: "unknown".to_string(),
        };
        return Ok(Some(Adoptable {
            version: meta,
            confident: true,
        }));
    }
    Ok(None)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_get_efi_vendor() -> Result<()> {
        let td = tempfile::tempdir()?;
        let tdp = td.path();
        let tdp_updates = tdp.join("usr/lib/bootupd/updates");
        let td = openat::Dir::open(tdp)?;
        std::fs::create_dir_all(tdp_updates.join("EFI/BOOT"))?;
        std::fs::create_dir_all(tdp_updates.join("EFI/fedora"))?;
        std::fs::create_dir_all(tdp_updates.join("EFI/centos"))?;
        std::fs::write(
            tdp_updates.join("EFI/fedora").join(crate::efi::SHIM),
            "shim data",
        )?;
        std::fs::write(
            tdp_updates.join("EFI/centos").join(crate::efi::SHIM),
            "shim data",
        )?;

        let all_components = crate::bootupd::get_components();
        let target_components: Vec<_> = all_components.values().collect();
        for &component in target_components.iter() {
            if component.name() == "BIOS" {
                assert_eq!(component.get_efi_vendor(&td)?, None);
            }
            if component.name() == "EFI" {
                let x = component.get_efi_vendor(&td);
                assert!(x.is_err());
                std::fs::remove_dir_all(tdp_updates.join("EFI/centos"))?;
                assert_eq!(component.get_efi_vendor(&td)?, Some("fedora".to_string()));
            }
        }
        Ok(())
    }
}
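// Illustrative sketch (not part of this commit): the JSON round-trip performed by
// write_update_metadata()/get_component_update() above boils down to serializing a
// ContentMetadata as "<component>.json" under BOOTUPD_UPDATES_DIR and parsing it
// back on the client. The version string below is made up.
#[cfg(test)]
mod metadata_roundtrip_sketch {
    use super::*;

    #[test]
    fn roundtrip() -> Result<()> {
        let meta = ContentMetadata {
            timestamp: chrono::Utc::now(),
            // Hypothetical version string; real ones come from package queries.
            version: "grub2-efi-x64-2.06-123.fake".to_string(),
        };
        let serialized = serde_json::to_string(&meta)?;
        let parsed: ContentMetadata = serde_json::from_str(&serialized)?;
        assert_eq!(parsed.version, meta.version);
        Ok(())
    }
}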
123
bootupd/src/coreos.rs
Executable file
@ -0,0 +1,123 @@
//! Bits specific to Fedora CoreOS (and derivatives).
|
||||
|
||||
/*
|
||||
* Copyright (C) 2020 Red Hat, Inc.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use chrono::prelude::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::fs::File;
|
||||
use std::path::Path;
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, Hash, Ord, PartialOrd, PartialEq, Eq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// See https://github.com/coreos/fedora-coreos-tracker/blob/66d7d00bedd9d5eabc7287b9577f443dcefb7c04/internals/README-internals.md#aleph-version
|
||||
pub(crate) struct Aleph {
|
||||
#[serde(alias = "build")]
|
||||
pub(crate) version: String,
|
||||
}
|
||||
|
||||
pub(crate) struct AlephWithTimestamp {
|
||||
pub(crate) aleph: Aleph,
|
||||
#[allow(dead_code)]
|
||||
pub(crate) ts: chrono::DateTime<Utc>,
|
||||
}
|
||||
|
||||
/// Path to the file, see above
|
||||
const ALEPH_PATH: &str = "sysroot/.coreos-aleph-version.json";
|
||||
|
||||
pub(crate) fn get_aleph_version(root: &Path) -> Result<Option<AlephWithTimestamp>> {
|
||||
let path = &root.join(ALEPH_PATH);
|
||||
if !path.exists() {
|
||||
return Ok(None);
|
||||
}
|
||||
let statusf = File::open(path).with_context(|| format!("Opening {path:?}"))?;
|
||||
let meta = statusf.metadata()?;
|
||||
let bufr = std::io::BufReader::new(statusf);
|
||||
let aleph: Aleph = serde_json::from_reader(bufr)?;
|
||||
Ok(Some(AlephWithTimestamp {
|
||||
aleph,
|
||||
ts: meta.created()?.into(),
|
||||
}))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use anyhow::Result;
|
||||
|
||||
const V1_ALEPH_DATA: &str = r##"
|
||||
{
|
||||
"version": "32.20201002.dev.2",
|
||||
"ref": "fedora/x86_64/coreos/testing-devel",
|
||||
"ostree-commit": "b2ea6159d6274e1bbbb49aa0ef093eda5d53a75c8a793dbe184f760ed64dc862"
|
||||
}"##;
|
||||
|
||||
// Waiting on https://github.com/rust-lang/rust/pull/125692
|
||||
#[cfg(not(target_env = "musl"))]
|
||||
#[test]
|
||||
fn test_parse_from_root_empty() -> Result<()> {
|
||||
// Verify we're a no-op in an empty root
|
||||
let root: &tempfile::TempDir = &tempfile::tempdir()?;
|
||||
let root = root.path();
|
||||
assert!(get_aleph_version(root).unwrap().is_none());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Waiting on https://github.com/rust-lang/rust/pull/125692
|
||||
#[cfg(not(target_env = "musl"))]
|
||||
#[test]
|
||||
fn test_parse_from_root() -> Result<()> {
|
||||
let root: &tempfile::TempDir = &tempfile::tempdir()?;
|
||||
let root = root.path();
|
||||
let sysroot = &root.join("sysroot");
|
||||
std::fs::create_dir(sysroot).context("Creating sysroot")?;
|
||||
std::fs::write(root.join(ALEPH_PATH), V1_ALEPH_DATA).context("Writing aleph")?;
|
||||
let aleph = get_aleph_version(root).unwrap().unwrap();
|
||||
assert_eq!(aleph.aleph.version, "32.20201002.dev.2");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Waiting on https://github.com/rust-lang/rust/pull/125692
|
||||
#[cfg(not(target_env = "musl"))]
|
||||
#[test]
|
||||
fn test_parse_from_root_linked() -> Result<()> {
|
||||
let root: &tempfile::TempDir = &tempfile::tempdir()?;
|
||||
let root = root.path();
|
||||
let sysroot = &root.join("sysroot");
|
||||
std::fs::create_dir(sysroot).context("Creating sysroot")?;
|
||||
let target_name = ".new-ostree-aleph.json";
|
||||
let target = &sysroot.join(target_name);
|
||||
std::fs::write(root.join(target), V1_ALEPH_DATA).context("Writing aleph")?;
|
||||
std::os::unix::fs::symlink(target_name, root.join(ALEPH_PATH)).context("Symlinking")?;
|
||||
let aleph = get_aleph_version(root).unwrap().unwrap();
|
||||
assert_eq!(aleph.aleph.version, "32.20201002.dev.2");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_old_aleph() -> Result<()> {
|
||||
// What the aleph file looked like before we changed it in
|
||||
// https://github.com/osbuild/osbuild/pull/1475
|
||||
let alephdata = r##"
|
||||
{
|
||||
"build": "32.20201002.dev.2",
|
||||
"ref": "fedora/x86_64/coreos/testing-devel",
|
||||
"ostree-commit": "b2ea6159d6274e1bbbb49aa0ef093eda5d53a75c8a793dbe184f760ed64dc862",
|
||||
"imgid": "fedora-coreos-32.20201002.dev.2-qemu.x86_64.qcow2"
|
||||
}"##;
|
||||
let aleph: Aleph = serde_json::from_str(alephdata)?;
|
||||
assert_eq!(aleph.version, "32.20201002.dev.2");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_aleph() -> Result<()> {
|
||||
let aleph: Aleph = serde_json::from_str(V1_ALEPH_DATA)?;
|
||||
assert_eq!(aleph.version, "32.20201002.dev.2");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
896
bootupd/src/efi.rs
Executable file
@ -0,0 +1,896 @@
/*
|
||||
* Copyright (C) 2020 Red Hat, Inc.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
use std::cell::RefCell;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::Command;
|
||||
|
||||
use anyhow::{bail, Context, Result};
|
||||
use bootc_internal_utils::CommandRunExt;
|
||||
use camino::{Utf8Path, Utf8PathBuf};
|
||||
use cap_std::fs::Dir;
|
||||
use cap_std_ext::cap_std;
|
||||
use chrono::prelude::*;
|
||||
use fn_error_context::context;
|
||||
use openat_ext::OpenatDirExt;
|
||||
use os_release::OsRelease;
|
||||
use rustix::fd::BorrowedFd;
|
||||
use walkdir::WalkDir;
|
||||
use widestring::U16CString;
|
||||
|
||||
use crate::bootupd::RootContext;
|
||||
use crate::freezethaw::fsfreeze_thaw_cycle;
|
||||
use crate::model::*;
|
||||
use crate::ostreeutil;
|
||||
use crate::util;
|
||||
use crate::{blockdev, filetree, grubconfigs};
|
||||
use crate::{component::*, packagesystem};
|
||||
|
||||
/// Well-known paths to the ESP that may have been mounted external to us.
|
||||
pub(crate) const ESP_MOUNTS: &[&str] = &["boot/efi", "efi", "boot"];
|
||||
|
||||
/// New efi dir under usr/lib
|
||||
const EFILIB: &str = "usr/lib/efi";
|
||||
|
||||
/// The binary to change EFI boot ordering
|
||||
const EFIBOOTMGR: &str = "efibootmgr";
|
||||
#[cfg(target_arch = "aarch64")]
|
||||
pub(crate) const SHIM: &str = "shimaa64.efi";
|
||||
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
pub(crate) const SHIM: &str = "shimx64.efi";
|
||||
|
||||
#[cfg(target_arch = "riscv64")]
|
||||
pub(crate) const SHIM: &str = "shimriscv64.efi";
|
||||
|
||||
/// Systemd boot loader info EFI variable names
|
||||
const LOADER_INFO_VAR_STR: &str = "LoaderInfo-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f";
|
||||
const STUB_INFO_VAR_STR: &str = "StubInfo-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f";
|
||||
|
||||
/// Return `true` if the system is booted via EFI
|
||||
pub(crate) fn is_efi_booted() -> Result<bool> {
|
||||
Path::new("/sys/firmware/efi")
|
||||
.try_exists()
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub(crate) struct Efi {
|
||||
mountpoint: RefCell<Option<PathBuf>>,
|
||||
}
|
||||
|
||||
impl Efi {
|
||||
// Get the mount point for the ESP, if one is already mounted
|
||||
pub(crate) fn get_mounted_esp(&self, root: &Path) -> Result<Option<PathBuf>> {
|
||||
// First check all potential mount points without holding the borrow
|
||||
let mut found_mount = None;
|
||||
for &mnt in ESP_MOUNTS.iter() {
|
||||
let path = root.join(mnt);
|
||||
if !path.exists() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let st = rustix::fs::statfs(&path)?;
|
||||
if st.f_type == libc::MSDOS_SUPER_MAGIC {
|
||||
util::ensure_writable_mount(&path)?;
|
||||
found_mount = Some(path);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Only borrow mutably if we found a mount point
|
||||
if let Some(mnt) = found_mount {
|
||||
log::debug!("Reusing existing mount point {mnt:?}");
|
||||
*self.mountpoint.borrow_mut() = Some(mnt.clone());
|
||||
Ok(Some(mnt))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
// Mount the passed esp_device and return the mount point
|
||||
pub(crate) fn mount_esp_device(&self, root: &Path, esp_device: &Path) -> Result<PathBuf> {
|
||||
let mut mountpoint = None;
|
||||
|
||||
for &mnt in ESP_MOUNTS.iter() {
|
||||
let mnt = root.join(mnt);
|
||||
if !mnt.exists() {
|
||||
continue;
|
||||
}
|
||||
std::process::Command::new("mount")
|
||||
.arg(&esp_device)
|
||||
.arg(&mnt)
|
||||
.run()
|
||||
.with_context(|| format!("Failed to mount {:?}", esp_device))?;
|
||||
log::debug!("Mounted at {mnt:?}");
|
||||
mountpoint = Some(mnt);
|
||||
break;
|
||||
}
|
||||
let mnt = mountpoint.ok_or_else(|| anyhow::anyhow!("No mount point found"))?;
|
||||
*self.mountpoint.borrow_mut() = Some(mnt.clone());
|
||||
Ok(mnt)
|
||||
}
|
||||
|
||||
// First check whether the ESP is already mounted; otherwise mount the passed ESP device
|
||||
pub(crate) fn ensure_mounted_esp(&self, root: &Path, esp_device: &Path) -> Result<PathBuf> {
|
||||
if let Some(mountpoint) = self.mountpoint.borrow().as_deref() {
|
||||
return Ok(mountpoint.to_owned());
|
||||
}
|
||||
let destdir = if let Some(destdir) = self.get_mounted_esp(Path::new(root))? {
|
||||
destdir
|
||||
} else {
|
||||
self.mount_esp_device(root, esp_device)?
|
||||
};
|
||||
Ok(destdir)
|
||||
}
|
||||
|
||||
fn unmount(&self) -> Result<()> {
|
||||
if let Some(mount) = self.mountpoint.borrow_mut().take() {
|
||||
Command::new("umount")
|
||||
.arg(&mount)
|
||||
.run()
|
||||
.with_context(|| format!("Failed to unmount {mount:?}"))?;
|
||||
log::trace!("Unmounted");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[context("Updating EFI firmware variables")]
|
||||
fn update_firmware(&self, device: &str, espdir: &openat::Dir, vendordir: &str) -> Result<()> {
|
||||
if !is_efi_booted()? {
|
||||
log::debug!("Not booted via EFI, skipping firmware update");
|
||||
return Ok(());
|
||||
}
|
||||
let sysroot = Dir::open_ambient_dir("/", cap_std::ambient_authority())?;
|
||||
let product_name = get_product_name(&sysroot)?;
|
||||
log::debug!("Get product name: '{product_name}'");
|
||||
assert!(product_name.len() > 0);
|
||||
// clear all the boot entries that match the target name
|
||||
clear_efi_target(&product_name)?;
|
||||
create_efi_boot_entry(device, espdir, vendordir, &product_name)
|
||||
}
|
||||
}
|
||||
|
||||
#[context("Get product name")]
|
||||
fn get_product_name(sysroot: &Dir) -> Result<String> {
|
||||
let release_path = "etc/system-release";
|
||||
if sysroot.exists(release_path) {
|
||||
let content = sysroot.read_to_string(release_path)?;
|
||||
let re = regex::Regex::new(r" *release.*").unwrap();
|
||||
let name = re.replace_all(&content, "").trim().to_string();
|
||||
return Ok(name);
|
||||
}
|
||||
// Read /etc/os-release
|
||||
let release: OsRelease = OsRelease::new()?;
|
||||
Ok(release.name)
|
||||
}
|
||||
|
||||
/// Convert a nul-terminated UTF-16 byte array to a String.
fn string_from_utf16_bytes(slice: &[u8]) -> String {
    // For some reason, systemd appends 3 nul bytes after the string.
    // Drop the last byte if there's an odd number.
    let size = slice.len() / 2;
    let v: Vec<u16> = (0..size)
        .map(|i| u16::from_ne_bytes([slice[2 * i], slice[2 * i + 1]]))
        .collect();
    U16CString::from_vec(v).unwrap().to_string_lossy()
}

/// Read a nul-terminated UTF-16 string from an EFI variable.
fn read_efi_var_utf16_string(name: &str) -> Option<String> {
    let efivars = Path::new("/sys/firmware/efi/efivars");
    if !efivars.exists() {
        log::trace!("No efivars mount at {:?}", efivars);
        return None;
    }
    let path = efivars.join(name);
    if !path.exists() {
        log::trace!("No EFI variable {name}");
        return None;
    }
    match std::fs::read(&path) {
        Ok(buf) => {
            // Skip the first 4 bytes, those are the EFI variable attributes.
            if buf.len() < 4 {
                log::warn!("Read less than 4 bytes from {:?}", path);
                return None;
            }
            Some(string_from_utf16_bytes(&buf[4..]))
        }
        Err(reason) => {
            log::warn!("Failed reading {:?}: {reason}", path);
            None
        }
    }
}
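// Illustrative sketch (not part of this commit) of the byte layout handled above:
// efivarfs prepends a 4-byte attribute mask, which read_efi_var_utf16_string()
// strips before handing the native-endian UTF-16 payload to
// string_from_utf16_bytes(). The sample string here is made up.
#[cfg(test)]
mod efivar_decode_sketch {
    use super::*;

    #[test]
    fn decode_utf16_payload() {
        // Native-endian UTF-16 encoding of "sd" (no trailing nuls in this sample).
        let payload: Vec<u8> = ['s', 'd']
            .iter()
            .flat_map(|c| (*c as u16).to_ne_bytes())
            .collect();
        assert_eq!(string_from_utf16_bytes(&payload), "sd");
    }
}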
|
||||
/// Read the LoaderInfo EFI variable if it exists.
|
||||
fn get_loader_info() -> Option<String> {
|
||||
read_efi_var_utf16_string(LOADER_INFO_VAR_STR)
|
||||
}
|
||||
|
||||
/// Read the StubInfo EFI variable if it exists.
|
||||
fn get_stub_info() -> Option<String> {
|
||||
read_efi_var_utf16_string(STUB_INFO_VAR_STR)
|
||||
}
|
||||
|
||||
/// Whether to skip adoption if a systemd bootloader is found.
|
||||
fn skip_systemd_bootloaders() -> bool {
|
||||
if let Some(loader_info) = get_loader_info() {
|
||||
if loader_info.starts_with("systemd") {
|
||||
log::trace!("Skipping adoption for {:?}", loader_info);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
if let Some(stub_info) = get_stub_info() {
|
||||
log::trace!("Skipping adoption for {:?}", stub_info);
|
||||
return true;
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
impl Component for Efi {
|
||||
fn name(&self) -> &'static str {
|
||||
"EFI"
|
||||
}
|
||||
|
||||
fn query_adopt(&self, devices: &Option<Vec<String>>) -> Result<Option<Adoptable>> {
|
||||
if devices.is_none() {
|
||||
log::trace!("No ESP detected");
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
// Don't adopt if the system is booted with systemd-boot or
|
||||
// systemd-stub since those will be managed with bootctl.
|
||||
if skip_systemd_bootloaders() {
|
||||
return Ok(None);
|
||||
}
|
||||
crate::component::query_adopt_state()
|
||||
}
|
||||
|
||||
// Backup "/boot/efi/EFI/{vendor}/grub.cfg" to "/boot/efi/EFI/{vendor}/grub.cfg.bak"
|
||||
// Replace "/boot/efi/EFI/{vendor}/grub.cfg" with new static "grub.cfg"
|
||||
fn migrate_static_grub_config(&self, sysroot_path: &str, destdir: &openat::Dir) -> Result<()> {
|
||||
let sysroot =
|
||||
openat::Dir::open(sysroot_path).with_context(|| format!("Opening {sysroot_path}"))?;
|
||||
let Some(vendor) = self.get_efi_vendor(&sysroot)? else {
|
||||
anyhow::bail!("Failed to find efi vendor");
|
||||
};
|
||||
|
||||
// destdir is /boot/efi/EFI
|
||||
let efidir = destdir
|
||||
.sub_dir(&vendor)
|
||||
.with_context(|| format!("Opening EFI/{}", vendor))?;
|
||||
|
||||
if !efidir.exists(grubconfigs::GRUBCONFIG_BACKUP)? {
|
||||
println!("Creating a backup of the current GRUB config on EFI");
|
||||
efidir
|
||||
.copy_file(grubconfigs::GRUBCONFIG, grubconfigs::GRUBCONFIG_BACKUP)
|
||||
.context("Failed to backup GRUB config")?;
|
||||
}
|
||||
|
||||
grubconfigs::install(&sysroot, Some(&vendor), true)?;
|
||||
// Synchronize the filesystem containing /boot/efi/EFI/{vendor} to disk.
|
||||
fsfreeze_thaw_cycle(efidir.open_file(".")?)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Given an adoptable system and an update, perform the update.
|
||||
fn adopt_update(
|
||||
&self,
|
||||
rootcxt: &RootContext,
|
||||
updatemeta: &ContentMetadata,
|
||||
with_static_config: bool,
|
||||
) -> Result<Option<InstalledContent>> {
|
||||
let esp_devices = blockdev::find_colocated_esps(&rootcxt.devices)?;
|
||||
let Some(meta) = self.query_adopt(&esp_devices)? else {
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
let updated = rootcxt
|
||||
.sysroot
|
||||
.sub_dir(&component_updatedirname(self))
|
||||
.context("opening update dir")?;
|
||||
let updatef = filetree::FileTree::new_from_dir(&updated).context("reading update dir")?;
|
||||
|
||||
let esp_devices = esp_devices.unwrap_or_default();
|
||||
for esp in esp_devices {
|
||||
let destpath = &self.ensure_mounted_esp(rootcxt.path.as_ref(), Path::new(&esp))?;
|
||||
|
||||
let efidir = openat::Dir::open(&destpath.join("EFI")).context("opening EFI dir")?;
|
||||
validate_esp_fstype(&efidir)?;
|
||||
|
||||
// For adoption, we should only touch files that we know about.
|
||||
let diff = updatef.relative_diff_to(&efidir)?;
|
||||
log::trace!("applying adoption diff: {}", &diff);
|
||||
filetree::apply_diff(&updated, &efidir, &diff, None)
|
||||
.context("applying filesystem changes")?;
|
||||
|
||||
// Backup current config and install static config
|
||||
if with_static_config {
|
||||
// Install the static config if the OSTree bootloader is not set.
|
||||
if let Some(bootloader) = crate::ostreeutil::get_ostree_bootloader()? {
|
||||
println!(
|
||||
"ostree repo 'sysroot.bootloader' config option is currently set to: '{bootloader}'",
|
||||
);
|
||||
} else {
|
||||
println!("ostree repo 'sysroot.bootloader' config option is not set yet");
|
||||
self.migrate_static_grub_config(rootcxt.path.as_str(), &efidir)?;
|
||||
};
|
||||
}
|
||||
|
||||
// Do the sync before unmount
|
||||
fsfreeze_thaw_cycle(efidir.open_file(".")?)?;
|
||||
drop(efidir);
|
||||
self.unmount().context("unmount after adopt")?;
|
||||
}
|
||||
Ok(Some(InstalledContent {
|
||||
meta: updatemeta.clone(),
|
||||
filetree: Some(updatef),
|
||||
adopted_from: Some(meta.version),
|
||||
}))
|
||||
}
|
||||
|
||||
fn install(
|
||||
&self,
|
||||
src_root: &openat::Dir,
|
||||
dest_root: &str,
|
||||
device: &str,
|
||||
update_firmware: bool,
|
||||
) -> Result<InstalledContent> {
|
||||
let Some(meta) = get_component_update(src_root, self)? else {
|
||||
anyhow::bail!("No update metadata for component {} found", self.name());
|
||||
};
|
||||
log::debug!("Found metadata {}", meta.version);
|
||||
let srcdir_name = component_updatedirname(self);
|
||||
let ft = crate::filetree::FileTree::new_from_dir(&src_root.sub_dir(&srcdir_name)?)?;
|
||||
|
||||
// Let's attempt to use an already mounted ESP at the target
|
||||
// dest_root if one is already mounted there in a known ESP location.
|
||||
let destpath = if let Some(destdir) = self.get_mounted_esp(Path::new(dest_root))? {
|
||||
destdir
|
||||
} else {
|
||||
// Using `blockdev` to find the partition instead of partlabel because
|
||||
// we know the target install toplevel device already.
|
||||
if device.is_empty() {
|
||||
anyhow::bail!("Device value not provided");
|
||||
}
|
||||
let esp_device = blockdev::get_esp_partition(device)?
|
||||
.ok_or_else(|| anyhow::anyhow!("Failed to find ESP device"))?;
|
||||
self.mount_esp_device(Path::new(dest_root), Path::new(&esp_device))?
|
||||
};
|
||||
|
||||
let destd = &openat::Dir::open(&destpath)
|
||||
.with_context(|| format!("opening dest dir {}", destpath.display()))?;
|
||||
validate_esp_fstype(destd)?;
|
||||
|
||||
// TODO - add some sort of API that allows directly setting the working
|
||||
// directory to a file descriptor.
|
||||
std::process::Command::new("cp")
|
||||
.args(["-rp", "--reflink=auto"])
|
||||
.arg(&srcdir_name)
|
||||
.arg(destpath)
|
||||
.current_dir(format!("/proc/self/fd/{}", src_root.as_raw_fd()))
|
||||
.run()?;
|
||||
if update_firmware {
|
||||
if let Some(vendordir) = self.get_efi_vendor(&src_root)? {
|
||||
self.update_firmware(device, destd, &vendordir)?
|
||||
}
|
||||
}
|
||||
Ok(InstalledContent {
|
||||
meta,
|
||||
filetree: Some(ft),
|
||||
adopted_from: None,
|
||||
})
|
||||
}
|
||||
|
||||
fn run_update(
|
||||
&self,
|
||||
rootcxt: &RootContext,
|
||||
current: &InstalledContent,
|
||||
) -> Result<InstalledContent> {
|
||||
let currentf = current
|
||||
.filetree
|
||||
.as_ref()
|
||||
.ok_or_else(|| anyhow::anyhow!("No filetree for installed EFI found!"))?;
|
||||
let sysroot_dir = &rootcxt.sysroot;
|
||||
let updatemeta = self.query_update(sysroot_dir)?.expect("update available");
|
||||
let updated = sysroot_dir
|
||||
.sub_dir(&component_updatedirname(self))
|
||||
.context("opening update dir")?;
|
||||
let updatef = filetree::FileTree::new_from_dir(&updated).context("reading update dir")?;
|
||||
let diff = currentf.diff(&updatef)?;
|
||||
|
||||
let Some(esp_devices) = blockdev::find_colocated_esps(&rootcxt.devices)? else {
|
||||
anyhow::bail!("Failed to find all esp devices");
|
||||
};
|
||||
|
||||
for esp in esp_devices {
|
||||
let destpath = &self.ensure_mounted_esp(rootcxt.path.as_ref(), Path::new(&esp))?;
|
||||
let destdir = openat::Dir::open(&destpath.join("EFI")).context("opening EFI dir")?;
|
||||
validate_esp_fstype(&destdir)?;
|
||||
log::trace!("applying diff: {}", &diff);
|
||||
filetree::apply_diff(&updated, &destdir, &diff, None)
|
||||
.context("applying filesystem changes")?;
|
||||
|
||||
// Do the sync before unmount
|
||||
fsfreeze_thaw_cycle(destdir.open_file(".")?)?;
|
||||
drop(destdir);
|
||||
self.unmount().context("unmount after update")?;
|
||||
}
|
||||
|
||||
let adopted_from = None;
|
||||
Ok(InstalledContent {
|
||||
meta: updatemeta,
|
||||
filetree: Some(updatef),
|
||||
adopted_from,
|
||||
})
|
||||
}
|
||||
|
||||
fn generate_update_metadata(&self, sysroot: &str) -> Result<ContentMetadata> {
|
||||
let sysroot_path = Utf8Path::new(sysroot);
|
||||
|
||||
// copy EFI files to updates dir from usr/lib/efi
|
||||
let efilib_path = sysroot_path.join(EFILIB);
|
||||
let meta = if efilib_path.exists() {
|
||||
let mut packages = Vec::new();
|
||||
let sysroot_dir = Dir::open_ambient_dir(sysroot_path, cap_std::ambient_authority())?;
|
||||
let efi_components = get_efi_component_from_usr(&sysroot_path, EFILIB)?;
|
||||
if efi_components.len() == 0 {
|
||||
bail!("Failed to find EFI components from {efilib_path}");
|
||||
}
|
||||
for efi in efi_components {
|
||||
Command::new("cp")
|
||||
.args(["-rp", "--reflink=auto"])
|
||||
.arg(&efi.path)
|
||||
.arg(crate::model::BOOTUPD_UPDATES_DIR)
|
||||
.current_dir(format!("/proc/self/fd/{}", sysroot_dir.as_raw_fd()))
|
||||
.run()?;
|
||||
packages.push(format!("{}-{}", efi.name, efi.version));
|
||||
}
|
||||
|
||||
// Use the current time to work around https://github.com/coreos/bootupd/issues/933
|
||||
let timestamp = std::time::SystemTime::now();
|
||||
ContentMetadata {
|
||||
timestamp: chrono::DateTime::<Utc>::from(timestamp),
|
||||
version: packages.join(","),
|
||||
}
|
||||
} else {
|
||||
let ostreebootdir = sysroot_path.join(ostreeutil::BOOT_PREFIX);
|
||||
|
||||
// move EFI files to updates dir from /usr/lib/ostree-boot
|
||||
if ostreebootdir.exists() {
|
||||
let cruft = ["loader", "grub2"];
|
||||
for p in cruft.iter() {
|
||||
let p = ostreebootdir.join(p);
|
||||
if p.exists() {
|
||||
std::fs::remove_dir_all(&p)?;
|
||||
}
|
||||
}
|
||||
|
||||
let efisrc = ostreebootdir.join("efi/EFI");
|
||||
if !efisrc.exists() {
|
||||
bail!("Failed to find {:?}", &efisrc);
|
||||
}
|
||||
|
||||
let dest_efidir = component_updatedir(sysroot, self);
|
||||
let dest_efidir =
|
||||
Utf8PathBuf::from_path_buf(dest_efidir).expect("Path is invalid UTF-8");
|
||||
// Fork off mv() because on overlayfs one can't rename() a lower level
|
||||
// directory today, and this will handle the copy fallback.
|
||||
Command::new("mv").args([&efisrc, &dest_efidir]).run()?;
|
||||
|
||||
let efidir = openat::Dir::open(dest_efidir.as_std_path())
|
||||
.with_context(|| format!("Opening {}", dest_efidir))?;
|
||||
let files = crate::util::filenames(&efidir)?.into_iter().map(|mut f| {
|
||||
f.insert_str(0, "/boot/efi/EFI/");
|
||||
f
|
||||
});
|
||||
packagesystem::query_files(sysroot, files)?
|
||||
} else {
|
||||
anyhow::bail!("Failed to find {ostreebootdir}");
|
||||
}
|
||||
};
|
||||
|
||||
write_update_metadata(sysroot, self, &meta)?;
|
||||
Ok(meta)
|
||||
}
|
||||
|
||||
fn query_update(&self, sysroot: &openat::Dir) -> Result<Option<ContentMetadata>> {
|
||||
get_component_update(sysroot, self)
|
||||
}
|
||||
|
||||
fn validate(&self, current: &InstalledContent) -> Result<ValidationResult> {
|
||||
let devices = crate::blockdev::get_devices("/").context("get parent devices")?;
|
||||
let esp_devices = blockdev::find_colocated_esps(&devices)?;
|
||||
if !is_efi_booted()? && esp_devices.is_none() {
|
||||
return Ok(ValidationResult::Skip);
|
||||
}
|
||||
let currentf = current
|
||||
.filetree
|
||||
.as_ref()
|
||||
.ok_or_else(|| anyhow::anyhow!("No filetree for installed EFI found!"))?;
|
||||
|
||||
let mut errs = Vec::new();
|
||||
let esp_devices = esp_devices.unwrap_or_default();
|
||||
for esp in esp_devices.iter() {
|
||||
let destpath = &self.ensure_mounted_esp(Path::new("/"), Path::new(&esp))?;
|
||||
|
||||
let efidir = openat::Dir::open(&destpath.join("EFI"))
|
||||
.with_context(|| format!("opening EFI dir {}", destpath.display()))?;
|
||||
let diff = currentf.relative_diff_to(&efidir)?;
|
||||
|
||||
for f in diff.changes.iter() {
|
||||
errs.push(format!("Changed: {}", f));
|
||||
}
|
||||
for f in diff.removals.iter() {
|
||||
errs.push(format!("Removed: {}", f));
|
||||
}
|
||||
assert_eq!(diff.additions.len(), 0);
|
||||
drop(efidir);
|
||||
self.unmount().context("unmount after validate")?;
|
||||
}
|
||||
|
||||
if !errs.is_empty() {
|
||||
Ok(ValidationResult::Errors(errs))
|
||||
} else {
|
||||
Ok(ValidationResult::Valid)
|
||||
}
|
||||
}
|
||||
|
||||
fn get_efi_vendor(&self, sysroot: &openat::Dir) -> Result<Option<String>> {
|
||||
let updated = sysroot
|
||||
.sub_dir(&component_updatedirname(self))
|
||||
.context("opening update dir")?;
|
||||
let shim_files = find_file_recursive(updated.recover_path()?, SHIM)?;
|
||||
|
||||
// Multiple shims for EFI are not supported
|
||||
if shim_files.len() > 1 {
|
||||
anyhow::bail!("Found multiple {SHIM} in the image");
|
||||
}
|
||||
if let Some(p) = shim_files.first() {
|
||||
let p = p
|
||||
.parent()
|
||||
.unwrap()
|
||||
.file_name()
|
||||
.ok_or_else(|| anyhow::anyhow!("No file name found"))?;
|
||||
Ok(Some(p.to_string_lossy().into_owned()))
|
||||
} else {
|
||||
anyhow::bail!("Failed to find {SHIM} in the image")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for Efi {
|
||||
fn drop(&mut self) {
|
||||
log::debug!("Unmounting");
|
||||
let _ = self.unmount();
|
||||
}
|
||||
}
|
||||
|
||||
fn validate_esp_fstype(dir: &openat::Dir) -> Result<()> {
|
||||
let dir = unsafe { BorrowedFd::borrow_raw(dir.as_raw_fd()) };
|
||||
let stat = rustix::fs::fstatfs(&dir)?;
|
||||
if stat.f_type != libc::MSDOS_SUPER_MAGIC {
|
||||
bail!(
|
||||
"EFI mount is not a msdos filesystem, but is {:?}",
|
||||
stat.f_type
|
||||
);
|
||||
};
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
struct BootEntry {
|
||||
id: String,
|
||||
name: String,
|
||||
}
|
||||
|
||||
/// Parse boot entries from efibootmgr output
|
||||
fn parse_boot_entries(output: &str) -> Vec<BootEntry> {
|
||||
let mut entries = Vec::new();
|
||||
|
||||
for line in output.lines().filter_map(|line| line.strip_prefix("Boot")) {
|
||||
// Handle output that only has e.g. "Boot0000* UiApp" with no additional info
|
||||
if line.starts_with('0') {
|
||||
let parts = if let Some((parts, _)) = line.split_once('\t') {
|
||||
parts
|
||||
} else {
|
||||
line
|
||||
};
|
||||
if let Some((id, name)) = parts.split_once(' ') {
|
||||
let id = id.trim_end_matches('*').to_string();
|
||||
let name = name.trim().to_string();
|
||||
entries.push(BootEntry { id, name });
|
||||
}
|
||||
}
|
||||
}
|
||||
entries
|
||||
}
|
||||
|
||||
#[context("Clearing EFI boot entries that match target {target}")]
|
||||
pub(crate) fn clear_efi_target(target: &str) -> Result<()> {
|
||||
let target = target.to_lowercase();
|
||||
let output = Command::new(EFIBOOTMGR).output()?;
|
||||
if !output.status.success() {
|
||||
anyhow::bail!("Failed to invoke {EFIBOOTMGR}")
|
||||
}
|
||||
|
||||
let output = String::from_utf8(output.stdout)?;
|
||||
let boot_entries = parse_boot_entries(&output);
|
||||
for entry in boot_entries {
|
||||
if entry.name.to_lowercase() == target {
|
||||
log::debug!("Deleting matched target {:?}", entry);
|
||||
let mut cmd = Command::new(EFIBOOTMGR);
|
||||
cmd.args(["-b", entry.id.as_str(), "-B"]);
|
||||
println!("Executing: {cmd:?}");
|
||||
cmd.run_with_cmd_context()?;
|
||||
}
|
||||
}
|
||||
|
||||
anyhow::Ok(())
|
||||
}
|
||||
|
||||
#[context("Adding new EFI boot entry")]
|
||||
pub(crate) fn create_efi_boot_entry(
|
||||
device: &str,
|
||||
espdir: &openat::Dir,
|
||||
vendordir: &str,
|
||||
target: &str,
|
||||
) -> Result<()> {
|
||||
let fsinfo = crate::filesystem::inspect_filesystem(espdir, ".")?;
|
||||
let source = fsinfo.source;
|
||||
let devname = source
|
||||
.rsplit_once('/')
|
||||
.ok_or_else(|| anyhow::anyhow!("Failed to parse {source}"))?
|
||||
.1;
|
||||
let partition_path = format!("/sys/class/block/{devname}/partition");
|
||||
let partition_number = std::fs::read_to_string(&partition_path)
|
||||
.with_context(|| format!("Failed to read {partition_path}"))?;
|
||||
let shim = format!("{vendordir}/{SHIM}");
|
||||
    if !espdir.exists(&shim)? {
        anyhow::bail!("Failed to find {SHIM}");
    }
|
||||
let loader = format!("\\EFI\\{}\\{SHIM}", vendordir);
|
||||
log::debug!("Creating new EFI boot entry using '{target}'");
|
||||
let mut cmd = Command::new(EFIBOOTMGR);
|
||||
cmd.args([
|
||||
"--create",
|
||||
"--disk",
|
||||
device,
|
||||
"--part",
|
||||
partition_number.trim(),
|
||||
"--loader",
|
||||
loader.as_str(),
|
||||
"--label",
|
||||
target,
|
||||
]);
|
||||
println!("Executing: {cmd:?}");
|
||||
cmd.run_with_cmd_context()
|
||||
}
|
||||
|
||||
#[context("Find target file recursively")]
|
||||
fn find_file_recursive<P: AsRef<Path>>(dir: P, target_file: &str) -> Result<Vec<PathBuf>> {
|
||||
let mut result = Vec::new();
|
||||
|
||||
for entry in WalkDir::new(dir).into_iter().filter_map(|e| e.ok()) {
|
||||
if entry.file_type().is_file() {
|
||||
if let Some(file_name) = entry.file_name().to_str() {
|
||||
if file_name == target_file {
|
||||
if let Some(path) = entry.path().to_str() {
|
||||
result.push(path.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub struct EFIComponent {
|
||||
name: String,
|
||||
version: String,
|
||||
path: Utf8PathBuf,
|
||||
}
|
||||
|
||||
/// Get EFIComponents from e.g. usr/lib/efi, like "usr/lib/efi/<name>/<version>/EFI"
|
||||
fn get_efi_component_from_usr<'a>(
|
||||
sysroot: &'a Utf8Path,
|
||||
usr_path: &'a str,
|
||||
) -> Result<Vec<EFIComponent>> {
|
||||
let efilib_path = sysroot.join(usr_path);
|
||||
let skip_count = Utf8Path::new(usr_path).components().count();
|
||||
|
||||
let mut components: Vec<EFIComponent> = WalkDir::new(&efilib_path)
|
||||
.min_depth(3) // <name>/<version>/EFI: so 3 levels down
|
||||
.max_depth(3)
|
||||
.into_iter()
|
||||
.filter_map(|entry| {
|
||||
let entry = entry.ok()?;
|
||||
if !entry.file_type().is_dir() || entry.file_name() != "EFI" {
|
||||
return None;
|
||||
}
|
||||
|
||||
let abs_path = entry.path();
|
||||
let rel_path = abs_path.strip_prefix(sysroot).ok()?;
|
||||
let utf8_rel_path = Utf8PathBuf::from_path_buf(rel_path.to_path_buf()).ok()?;
|
||||
|
||||
let mut components = utf8_rel_path.components();
|
||||
|
||||
let name = components.nth(skip_count)?.to_string();
|
||||
let version = components.next()?.to_string();
|
||||
|
||||
Some(EFIComponent {
|
||||
name,
|
||||
version,
|
||||
path: utf8_rel_path,
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
components.sort_by(|a, b| a.name.cmp(&b.name));
|
||||
|
||||
Ok(components)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use cap_std_ext::dirext::CapStdExtDirExt;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_parse_boot_entries() -> Result<()> {
|
||||
let output = r"
|
||||
BootCurrent: 0003
|
||||
Timeout: 0 seconds
|
||||
BootOrder: 0003,0001,0000,0002
|
||||
Boot0000* UiApp FvVol(7cb8bdc9-f8eb-4f34-aaea-3ee4af6516a1)/FvFile(462caa21-7614-4503-836e-8ab6f4662331)
|
||||
Boot0001* UEFI Misc Device PciRoot(0x0)/Pci(0x3,0x0){auto_created_boot_option}
|
||||
Boot0002* EFI Internal Shell FvVol(7cb8bdc9-f8eb-4f34-aaea-3ee4af6516a1)/FvFile(7c04a583-9e3e-4f1c-ad65-e05268d0b4d1)
|
||||
Boot0003* Fedora HD(2,GPT,94ff4025-5276-4bec-adea-e98da271b64c,0x1000,0x3f800)/\EFI\fedora\shimx64.efi";
|
||||
let entries = parse_boot_entries(output);
|
||||
assert_eq!(
|
||||
entries,
|
||||
[
|
||||
BootEntry {
|
||||
id: "0000".to_string(),
|
||||
name: "UiApp".to_string()
|
||||
},
|
||||
BootEntry {
|
||||
id: "0001".to_string(),
|
||||
name: "UEFI Misc Device".to_string()
|
||||
},
|
||||
BootEntry {
|
||||
id: "0002".to_string(),
|
||||
name: "EFI Internal Shell".to_string()
|
||||
},
|
||||
BootEntry {
|
||||
id: "0003".to_string(),
|
||||
name: "Fedora".to_string()
|
||||
}
|
||||
]
|
||||
);
|
||||
let output = r"
|
||||
BootCurrent: 0003
|
||||
Timeout: 0 seconds
|
||||
BootOrder: 0003,0001,0000,0002";
|
||||
let entries = parse_boot_entries(output);
|
||||
assert_eq!(entries, []);
|
||||
|
||||
let output = r"
|
||||
BootCurrent: 0003
|
||||
Timeout: 0 seconds
|
||||
BootOrder: 0003,0001,0000,0002
|
||||
Boot0000* UiApp
|
||||
Boot0001* UEFI Misc Device
|
||||
Boot0002* EFI Internal Shell
|
||||
Boot0003* test";
|
||||
let entries = parse_boot_entries(output);
|
||||
assert_eq!(
|
||||
entries,
|
||||
[
|
||||
BootEntry {
|
||||
id: "0000".to_string(),
|
||||
name: "UiApp".to_string()
|
||||
},
|
||||
BootEntry {
|
||||
id: "0001".to_string(),
|
||||
name: "UEFI Misc Device".to_string()
|
||||
},
|
||||
BootEntry {
|
||||
id: "0002".to_string(),
|
||||
name: "EFI Internal Shell".to_string()
|
||||
},
|
||||
BootEntry {
|
||||
id: "0003".to_string(),
|
||||
name: "test".to_string()
|
||||
}
|
||||
]
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
#[cfg(test)]
|
||||
fn fixture() -> Result<cap_std_ext::cap_tempfile::TempDir> {
|
||||
let tempdir = cap_std_ext::cap_tempfile::tempdir(cap_std::ambient_authority())?;
|
||||
tempdir.create_dir("etc")?;
|
||||
Ok(tempdir)
|
||||
}
|
||||
#[test]
|
||||
fn test_get_product_name() -> Result<()> {
|
||||
let tmpd = fixture()?;
|
||||
{
|
||||
tmpd.atomic_write("etc/system-release", "Fedora release 40 (Forty)")?;
|
||||
let name = get_product_name(&tmpd)?;
|
||||
assert_eq!("Fedora", name);
|
||||
}
|
||||
{
|
||||
tmpd.atomic_write("etc/system-release", "CentOS Stream release 9")?;
|
||||
let name = get_product_name(&tmpd)?;
|
||||
assert_eq!("CentOS Stream", name);
|
||||
}
|
||||
{
|
||||
tmpd.atomic_write(
|
||||
"etc/system-release",
|
||||
"Red Hat Enterprise Linux CoreOS release 4",
|
||||
)?;
|
||||
let name = get_product_name(&tmpd)?;
|
||||
assert_eq!("Red Hat Enterprise Linux CoreOS", name);
|
||||
}
|
||||
{
|
||||
tmpd.atomic_write(
|
||||
"etc/system-release",
|
||||
"Red Hat Enterprise Linux CoreOS release 4
|
||||
",
|
||||
)?;
|
||||
let name = get_product_name(&tmpd)?;
|
||||
assert_eq!("Red Hat Enterprise Linux CoreOS", name);
|
||||
}
|
||||
{
|
||||
tmpd.remove_file("etc/system-release")?;
|
||||
let name = get_product_name(&tmpd)?;
|
||||
assert!(name.len() > 0);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_efi_component_from_usr() -> Result<()> {
|
||||
let tmpdir: &tempfile::TempDir = &tempfile::tempdir()?;
|
||||
let tpath = tmpdir.path();
|
||||
let efi_path = tpath.join("usr/lib/efi");
|
||||
std::fs::create_dir_all(efi_path.join("BAR/1.1/EFI"))?;
|
||||
std::fs::create_dir_all(efi_path.join("FOO/1.1/EFI"))?;
|
||||
std::fs::create_dir_all(efi_path.join("FOOBAR/1.1/test"))?;
|
||||
let utf8_tpath =
|
||||
Utf8Path::from_path(tpath).ok_or_else(|| anyhow::anyhow!("Path is not valid UTF-8"))?;
|
||||
let efi_comps = get_efi_component_from_usr(utf8_tpath, EFILIB)?;
|
||||
assert_eq!(
|
||||
efi_comps,
|
||||
vec![
|
||||
EFIComponent {
|
||||
name: "BAR".to_string(),
|
||||
version: "1.1".to_string(),
|
||||
path: Utf8PathBuf::from("usr/lib/efi/BAR/1.1/EFI"),
|
||||
},
|
||||
EFIComponent {
|
||||
name: "FOO".to_string(),
|
||||
version: "1.1".to_string(),
|
||||
path: Utf8PathBuf::from("usr/lib/efi/FOO/1.1/EFI"),
|
||||
},
|
||||
]
|
||||
);
|
||||
std::fs::remove_dir_all(efi_path.join("BAR/1.1/EFI"))?;
|
||||
std::fs::remove_dir_all(efi_path.join("FOO/1.1/EFI"))?;
|
||||
let efi_comps = get_efi_component_from_usr(utf8_tpath, EFILIB)?;
|
||||
assert_eq!(efi_comps, []);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
21
bootupd/src/failpoints.rs
Executable file
@ -0,0 +1,21 @@
//! Wrappers and utilities on top of the `fail` crate.
// SPDX-License-Identifier: Apache-2.0 OR MIT

/// TODO: Use https://github.com/tikv/fail-rs/pull/68 once it merges
/// Copied from https://github.com/coreos/rpm-ostree/commit/aa8d7fb0ceaabfaf10252180e2ddee049d07aae3#diff-adcc419e139605fae34d17b31418dbaf515af2fe9fb766fcbdb2eaad862b3daa
#[macro_export]
macro_rules! try_fail_point {
    ($name:expr) => {{
        if let Some(e) = fail::eval($name, |msg| {
            let msg = msg.unwrap_or_else(|| "synthetic failpoint".to_string());
            anyhow::Error::msg(msg)
        }) {
            return Err(From::from(e));
        }
    }};
    ($name:expr, $cond:expr) => {{
        if $cond {
            $crate::try_fail_point!($name);
        }
    }};
}
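// Illustrative usage sketch (not part of this commit): a function instrumented
// with the macro above. The failpoint name "sketch::copy" is made up; it only
// fires when configured via the `fail` crate (e.g. FAILPOINTS=sketch::copy=return).
#[allow(dead_code)]
fn copy_with_failpoint_sketch() -> anyhow::Result<()> {
    crate::try_fail_point!("sketch::copy");
    // ...real work would happen here...
    Ok(())
}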
40
bootupd/src/filesystem.rs
Executable file
@ -0,0 +1,40 @@
use std::os::fd::AsRawFd;
use std::os::unix::process::CommandExt;
use std::process::Command;

use anyhow::Result;
use bootc_internal_utils::CommandRunExt;
use fn_error_context::context;
use rustix::fd::BorrowedFd;
use serde::Deserialize;

#[derive(Deserialize, Debug)]
#[serde(rename_all = "kebab-case")]
#[allow(dead_code)]
pub(crate) struct Filesystem {
    pub(crate) source: String,
    pub(crate) fstype: String,
    pub(crate) options: String,
    pub(crate) uuid: Option<String>,
}

#[derive(Deserialize, Debug)]
pub(crate) struct Findmnt {
    pub(crate) filesystems: Vec<Filesystem>,
}

#[context("Inspecting filesystem {path:?}")]
pub(crate) fn inspect_filesystem(root: &openat::Dir, path: &str) -> Result<Filesystem> {
    let rootfd = unsafe { BorrowedFd::borrow_raw(root.as_raw_fd()) };
    // SAFETY: This is unsafe just for the pre_exec, when we port to cap-std we can use cap-std-ext
    let o: Findmnt = unsafe {
        Command::new("findmnt")
            .args(["-J", "-v", "--output=SOURCE,FSTYPE,OPTIONS,UUID", path])
            .pre_exec(move || rustix::process::fchdir(rootfd).map_err(Into::into))
            .run_and_parse_json()?
    };
    o.filesystems
        .into_iter()
        .next()
        .ok_or_else(|| anyhow::anyhow!("findmnt returned no data"))
}
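// Illustrative sketch (not part of this commit) of the findmnt JSON shape that
// inspect_filesystem() parses; the device, options and UUID values are made up.
#[cfg(test)]
mod findmnt_sketch {
    use super::*;

    #[test]
    fn parse_findmnt_json() -> Result<()> {
        let json = r#"{
            "filesystems": [
                {"source": "/dev/vda2", "fstype": "vfat",
                 "options": "rw,relatime", "uuid": "ABCD-1234"}
            ]
        }"#;
        let parsed: Findmnt = serde_json::from_str(json)?;
        assert_eq!(parsed.filesystems[0].fstype, "vfat");
        assert_eq!(parsed.filesystems[0].uuid.as_deref(), Some("ABCD-1234"));
        Ok(())
    }
}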
785
bootupd/src/filetree.rs
Executable file
@ -0,0 +1,785 @@
/*
|
||||
* Copyright (C) 2020 Red Hat, Inc.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
use crate::freezethaw::fsfreeze_thaw_cycle;
|
||||
#[cfg(any(
|
||||
target_arch = "x86_64",
|
||||
target_arch = "aarch64",
|
||||
target_arch = "riscv64"
|
||||
))]
|
||||
use anyhow::{bail, Context, Result};
|
||||
#[cfg(any(
|
||||
target_arch = "x86_64",
|
||||
target_arch = "aarch64",
|
||||
target_arch = "riscv64"
|
||||
))]
|
||||
use camino::{Utf8Path, Utf8PathBuf};
|
||||
#[cfg(any(
|
||||
target_arch = "x86_64",
|
||||
target_arch = "aarch64",
|
||||
target_arch = "riscv64"
|
||||
))]
|
||||
use openat_ext::OpenatDirExt;
|
||||
#[cfg(any(
|
||||
target_arch = "x86_64",
|
||||
target_arch = "aarch64",
|
||||
target_arch = "riscv64"
|
||||
))]
|
||||
use openssl::hash::{Hasher, MessageDigest};
|
||||
#[cfg(any(
|
||||
target_arch = "x86_64",
|
||||
target_arch = "aarch64",
|
||||
target_arch = "riscv64"
|
||||
))]
|
||||
use rustix::fd::BorrowedFd;
|
||||
use serde::{Deserialize, Serialize};
|
||||
#[allow(unused_imports)]
|
||||
use std::collections::{BTreeMap, HashMap, HashSet};
|
||||
use std::fmt::Display;
|
||||
#[cfg(any(
|
||||
target_arch = "x86_64",
|
||||
target_arch = "aarch64",
|
||||
target_arch = "riscv64"
|
||||
))]
|
||||
use std::os::unix::io::AsRawFd;
|
||||
|
||||
/// The prefix we apply to our temporary files.
|
||||
#[cfg(any(
|
||||
target_arch = "x86_64",
|
||||
target_arch = "aarch64",
|
||||
target_arch = "riscv64"
|
||||
))]
|
||||
pub(crate) const TMP_PREFIX: &str = ".btmp.";
|
||||
// This module doesn't handle modes right now, because
|
||||
// we're only targeting FAT filesystems for UEFI.
|
||||
// In FAT there are no Unix permission bits; usually
|
||||
// they're set by mount options.
|
||||
// See also https://github.com/coreos/fedora-coreos-config/commit/8863c2b34095a2ae5eae6fbbd121768a5f592091
|
||||
#[cfg(any(
|
||||
target_arch = "x86_64",
|
||||
target_arch = "aarch64",
|
||||
target_arch = "riscv64"
|
||||
))]
|
||||
const DEFAULT_FILE_MODE: u32 = 0o700;
|
||||
|
||||
use crate::sha512string::SHA512String;
|
||||
|
||||
/// Metadata for a single file
|
||||
#[derive(Clone, Serialize, Deserialize, Debug, Hash, PartialEq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub(crate) struct FileMetadata {
|
||||
/// File size in bytes
|
||||
pub(crate) size: u64,
|
||||
/// Content checksum; SHA-512 was chosen because there are only a few files here
|
||||
/// and a large checksum is acceptable.
|
||||
pub(crate) sha512: SHA512String,
|
||||
}
|
||||
|
||||
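/// A set of files, keyed by their path relative to the tree root.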
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub(crate) struct FileTree {
|
||||
pub(crate) children: BTreeMap<String, FileMetadata>,
|
||||
}
|
||||
|
||||
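/// The result of comparing two `FileTree`s: the paths that were added, removed, or changed.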
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub(crate) struct FileTreeDiff {
|
||||
pub(crate) additions: HashSet<String>,
|
||||
pub(crate) removals: HashSet<String>,
|
||||
pub(crate) changes: HashSet<String>,
|
||||
}
|
||||
|
||||
impl Display for FileTreeDiff {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error> {
|
||||
write!(
|
||||
f,
|
||||
"additions: {} removals: {} changes: {}",
|
||||
self.additions.len(),
|
||||
self.removals.len(),
|
||||
self.changes.len()
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
impl FileTreeDiff {
|
||||
pub(crate) fn count(&self) -> usize {
|
||||
self.additions.len() + self.removals.len() + self.changes.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl FileMetadata {
|
||||
#[cfg(any(
|
||||
target_arch = "x86_64",
|
||||
target_arch = "aarch64",
|
||||
target_arch = "riscv64"
|
||||
))]
|
||||
pub(crate) fn new_from_path<P: openat::AsPath>(
|
||||
dir: &openat::Dir,
|
||||
name: P,
|
||||
) -> Result<FileMetadata> {
|
||||
let mut r = dir.open_file(name)?;
|
||||
let meta = r.metadata()?;
|
||||
let mut hasher =
|
||||
Hasher::new(MessageDigest::sha512()).expect("openssl sha512 hasher creation failed");
|
||||
let _ = std::io::copy(&mut r, &mut hasher)?;
|
||||
let digest = SHA512String::from_hasher(&mut hasher);
|
||||
Ok(FileMetadata {
|
||||
size: meta.len(),
|
||||
sha512: digest,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl FileTree {
|
||||
// Internal helper to generate a sub-tree
|
||||
#[cfg(any(
|
||||
target_arch = "x86_64",
|
||||
target_arch = "aarch64",
|
||||
target_arch = "riscv64"
|
||||
))]
|
||||
fn unsorted_from_dir(dir: &openat::Dir) -> Result<HashMap<String, FileMetadata>> {
|
||||
let mut ret = HashMap::new();
|
||||
for entry in dir.list_dir(".")? {
|
||||
let entry = entry?;
|
||||
let Some(name) = entry.file_name().to_str() else {
|
||||
bail!("Invalid UTF-8 filename: {:?}", entry.file_name())
|
||||
};
|
||||
if name.starts_with(TMP_PREFIX) {
|
||||
bail!("File {} contains our temporary prefix!", name);
|
||||
}
|
||||
match dir.get_file_type(&entry)? {
|
||||
openat::SimpleType::File => {
|
||||
let meta = FileMetadata::new_from_path(dir, name)?;
|
||||
let _ = ret.insert(name.to_string(), meta);
|
||||
}
|
||||
openat::SimpleType::Dir => {
|
||||
let child = dir.sub_dir(name)?;
|
||||
for (mut k, v) in FileTree::unsorted_from_dir(&child)?.drain() {
|
||||
k.reserve(name.len() + 1);
|
||||
k.insert(0, '/');
|
||||
k.insert_str(0, name);
|
||||
let _ = ret.insert(k, v);
|
||||
}
|
||||
}
|
||||
openat::SimpleType::Symlink => {
|
||||
bail!("Unsupported symbolic link {:?}", entry.file_name())
|
||||
}
|
||||
openat::SimpleType::Other => {
|
||||
bail!("Unsupported non-file/directory {:?}", entry.file_name())
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(ret)
|
||||
}
|
||||
|
||||
/// Create a FileTree from the target directory.
|
||||
#[cfg(any(
|
||||
target_arch = "x86_64",
|
||||
target_arch = "aarch64",
|
||||
target_arch = "riscv64"
|
||||
))]
|
||||
pub(crate) fn new_from_dir(dir: &openat::Dir) -> Result<Self> {
|
||||
let mut children = BTreeMap::new();
|
||||
for (k, v) in Self::unsorted_from_dir(dir)?.drain() {
|
||||
children.insert(k, v);
|
||||
}
|
||||
|
||||
Ok(Self { children })
|
||||
}
|
||||
|
||||
/// Determine the changes *from* self to the updated tree
|
||||
#[cfg(any(
|
||||
target_arch = "x86_64",
|
||||
target_arch = "aarch64",
|
||||
target_arch = "riscv64"
|
||||
))]
|
||||
pub(crate) fn diff(&self, updated: &Self) -> Result<FileTreeDiff> {
|
||||
self.diff_impl(updated, true)
|
||||
}
|
||||
|
||||
/// Determine any changes only using the files tracked in self as
|
||||
/// a reference. In other words, this will ignore any unknown
|
||||
/// files and not count them as additions.
|
||||
#[cfg(test)]
|
||||
pub(crate) fn changes(&self, current: &Self) -> Result<FileTreeDiff> {
|
||||
self.diff_impl(current, false)
|
||||
}
|
||||
|
||||
/// The inverse of `changes` - determine if there are any files
|
||||
/// changed or added in `current` compared to self.
|
||||
#[cfg(test)]
|
||||
pub(crate) fn updates(&self, current: &Self) -> Result<FileTreeDiff> {
|
||||
current.diff_impl(self, false)
|
||||
}
|
||||
|
||||
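// Shared diff implementation: removals and content changes are always recorded,
// while additions are only recorded when `check_additions` is true.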
#[cfg(any(
|
||||
target_arch = "x86_64",
|
||||
target_arch = "aarch64",
|
||||
target_arch = "riscv64"
|
||||
))]
|
||||
fn diff_impl(&self, updated: &Self, check_additions: bool) -> Result<FileTreeDiff> {
|
||||
let mut additions = HashSet::new();
|
||||
let mut removals = HashSet::new();
|
||||
let mut changes = HashSet::new();
|
||||
|
||||
for (k, v1) in self.children.iter() {
|
||||
if let Some(v2) = updated.children.get(k) {
|
||||
if v1 != v2 {
|
||||
changes.insert(k.clone());
|
||||
}
|
||||
} else {
|
||||
removals.insert(k.clone());
|
||||
}
|
||||
}
|
||||
if check_additions {
|
||||
for k in updated.children.keys() {
|
||||
if self.children.contains_key(k) {
|
||||
continue;
|
||||
}
|
||||
additions.insert(k.clone());
|
||||
}
|
||||
}
|
||||
Ok(FileTreeDiff {
|
||||
additions,
|
||||
removals,
|
||||
changes,
|
||||
})
|
||||
}
|
||||
|
||||
/// Create a diff from a target directory. This will ignore
|
||||
/// any files or directories that are not part of the original tree.
|
||||
#[cfg(any(
|
||||
target_arch = "x86_64",
|
||||
target_arch = "aarch64",
|
||||
target_arch = "riscv64"
|
||||
))]
|
||||
pub(crate) fn relative_diff_to(&self, dir: &openat::Dir) -> Result<FileTreeDiff> {
|
||||
let mut removals = HashSet::new();
|
||||
let mut changes = HashSet::new();
|
||||
|
||||
for (path, info) in self.children.iter() {
|
||||
assert!(!path.starts_with('/'));
|
||||
|
||||
if let Some(meta) = dir.metadata_optional(path)? {
|
||||
match meta.simple_type() {
|
||||
openat::SimpleType::File => {
|
||||
let target_info = FileMetadata::new_from_path(dir, path)?;
|
||||
if info != &target_info {
|
||||
changes.insert(path.clone());
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
// The path is no longer a regular file (e.g. a file became a directory)
|
||||
changes.insert(path.clone());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
removals.insert(path.clone());
|
||||
}
|
||||
}
|
||||
Ok(FileTreeDiff {
|
||||
additions: HashSet::new(),
|
||||
removals,
|
||||
changes,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Recursively remove all files/dirs in the directory that start with our TMP_PREFIX
|
||||
#[cfg(any(
|
||||
target_arch = "x86_64",
|
||||
target_arch = "aarch64",
|
||||
target_arch = "riscv64"
|
||||
))]
|
||||
fn cleanup_tmp(dir: &openat::Dir) -> Result<()> {
|
||||
for entry in dir.list_dir(".")? {
|
||||
let entry = entry?;
|
||||
let Some(name) = entry.file_name().to_str() else {
|
||||
// Skip invalid UTF-8 for now; it will be rejected later when the tree is built.
|
||||
continue;
|
||||
};
|
||||
|
||||
match dir.get_file_type(&entry)? {
|
||||
openat::SimpleType::Dir => {
|
||||
if name.starts_with(TMP_PREFIX) {
|
||||
dir.remove_all(name)?;
|
||||
continue;
|
||||
} else {
|
||||
let child = dir.sub_dir(name)?;
|
||||
cleanup_tmp(&child)?;
|
||||
}
|
||||
}
|
||||
openat::SimpleType::File => {
|
||||
if name.starts_with(TMP_PREFIX) {
|
||||
dir.remove_file(name)?;
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
#[cfg(any(
|
||||
target_arch = "x86_64",
|
||||
target_arch = "aarch64",
|
||||
target_arch = "riscv64"
|
||||
))]
|
||||
pub(crate) struct ApplyUpdateOptions {
|
||||
pub(crate) skip_removals: bool,
|
||||
pub(crate) skip_sync: bool,
|
||||
}
|
||||
|
||||
/// Copy from src to dst, both relative to the root dir
|
||||
#[cfg(any(
|
||||
target_arch = "x86_64",
|
||||
target_arch = "aarch64",
|
||||
target_arch = "riscv64"
|
||||
))]
|
||||
fn copy_dir(root: &openat::Dir, src: &str, dst: &str) -> Result<()> {
|
||||
use bootc_internal_utils::CommandRunExt;
|
||||
use std::os::unix::process::CommandExt;
|
||||
use std::process::Command;
|
||||
|
||||
let rootfd = unsafe { BorrowedFd::borrow_raw(root.as_raw_fd()) };
|
||||
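// SAFETY: This is unsafe only because of pre_exec; the child fchdir()s into
// the root dir before exec'ing `cp`.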
unsafe {
|
||||
Command::new("cp")
|
||||
.args(["-a"])
|
||||
.arg(src)
|
||||
.arg(dst)
|
||||
.pre_exec(move || rustix::process::fchdir(rootfd).map_err(Into::into))
|
||||
.run()?
|
||||
};
|
||||
log::debug!("Copy {src} to {dst}");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get the first sub dir and its tmp sub dir for the path
|
||||
/// "fedora/foo/bar" -> ("fedora", ".btmp.fedora")
|
||||
/// "foo" -> ("foo", ".btmp.foo")
|
||||
#[cfg(any(
|
||||
target_arch = "x86_64",
|
||||
target_arch = "aarch64",
|
||||
target_arch = "riscv64"
|
||||
))]
|
||||
fn get_first_dir(path: &Utf8Path) -> Result<(&Utf8Path, String)> {
|
||||
let first = path
|
||||
.iter()
|
||||
.next()
|
||||
.ok_or_else(|| anyhow::anyhow!("Invalid path: {path}"))?;
|
||||
let mut tmp = first.to_owned();
|
||||
tmp.insert_str(0, TMP_PREFIX);
|
||||
Ok((first.into(), tmp))
|
||||
}
|
||||
|
||||
/// Given two directories, apply a diff generated from srcdir to destdir
|
||||
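/// Changed content is first staged under `TMP_PREFIX` names and then swapped into
/// place via rename/exchange, so an interrupted update leaves either the old or the
/// staged copy rather than a partially written target.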
#[cfg(any(
|
||||
target_arch = "x86_64",
|
||||
target_arch = "aarch64",
|
||||
target_arch = "riscv64"
|
||||
))]
|
||||
pub(crate) fn apply_diff(
|
||||
srcdir: &openat::Dir,
|
||||
destdir: &openat::Dir,
|
||||
diff: &FileTreeDiff,
|
||||
opts: Option<&ApplyUpdateOptions>,
|
||||
) -> Result<()> {
|
||||
let default_opts = ApplyUpdateOptions {
|
||||
..Default::default()
|
||||
};
|
||||
let opts = opts.unwrap_or(&default_opts);
|
||||
cleanup_tmp(destdir).context("cleaning up temporary files")?;
|
||||
|
||||
let mut updates = HashMap::new();
|
||||
// Stage removals in the temp dir, or remove directly if the file is not inside a subdirectory
|
||||
if !opts.skip_removals {
|
||||
for pathstr in diff.removals.iter() {
|
||||
let path = Utf8Path::new(pathstr);
|
||||
let (first_dir, first_dir_tmp) = get_first_dir(path)?;
|
||||
let path_tmp;
|
||||
if first_dir != path {
|
||||
path_tmp = Utf8Path::new(&first_dir_tmp).join(path.strip_prefix(&first_dir)?);
|
||||
// copy to temp dir and remember
|
||||
// skip copying if the dir does not exist in dest
|
||||
if !destdir.exists(&first_dir_tmp)? && destdir.exists(first_dir.as_std_path())? {
|
||||
copy_dir(destdir, first_dir.as_str(), &first_dir_tmp).with_context(|| {
|
||||
format!("copy {first_dir} to {first_dir_tmp} before removing {pathstr}")
|
||||
})?;
|
||||
updates.insert(first_dir, first_dir_tmp);
|
||||
}
|
||||
} else {
|
||||
path_tmp = path.to_path_buf();
|
||||
}
|
||||
destdir
|
||||
.remove_file_optional(path_tmp.as_std_path())
|
||||
.with_context(|| format!("removing {:?}", path_tmp))?;
|
||||
}
|
||||
}
|
||||
// Write changed or new files to temp dir or temp file
|
||||
for pathstr in diff.changes.iter().chain(diff.additions.iter()) {
|
||||
let path = Utf8Path::new(pathstr);
|
||||
let (first_dir, first_dir_tmp) = get_first_dir(path)?;
|
||||
let mut path_tmp = Utf8PathBuf::from(&first_dir_tmp);
|
||||
if first_dir != path {
|
||||
if !destdir.exists(&first_dir_tmp)? && destdir.exists(first_dir.as_std_path())? {
|
||||
// copy to the temp dir if it does not already exist
|
||||
copy_dir(destdir, first_dir.as_str(), &first_dir_tmp).with_context(|| {
|
||||
format!("copy {first_dir} to {first_dir_tmp} before updating {pathstr}")
|
||||
})?;
|
||||
}
|
||||
path_tmp = path_tmp.join(path.strip_prefix(&first_dir)?);
|
||||
// ensure the parent dir exists for newly added files
|
||||
if let Some(parent) = path_tmp.parent() {
|
||||
destdir.ensure_dir_all(parent.as_std_path(), DEFAULT_FILE_MODE)?;
|
||||
}
|
||||
// remove changed file before copying
|
||||
destdir
|
||||
.remove_file_optional(path_tmp.as_std_path())
|
||||
.with_context(|| format!("removing {path_tmp} before copying"))?;
|
||||
}
|
||||
updates.insert(first_dir, first_dir_tmp);
|
||||
srcdir
|
||||
.copy_file_at(path.as_std_path(), destdir, path_tmp.as_std_path())
|
||||
.with_context(|| format!("copying {:?} to {:?}", path, path_tmp))?;
|
||||
}
|
||||
|
||||
// do local exchange or rename
|
||||
for (dst, tmp) in updates.iter() {
|
||||
let dst = dst.as_std_path();
|
||||
log::trace!("doing local exchange for {} and {:?}", tmp, dst);
|
||||
if destdir.exists(dst)? {
|
||||
destdir
|
||||
.local_exchange(tmp, dst)
|
||||
.with_context(|| format!("exchange for {} and {:?}", tmp, dst))?;
|
||||
} else {
|
||||
destdir
|
||||
.local_rename(tmp, dst)
|
||||
.with_context(|| format!("rename for {} and {:?}", tmp, dst))?;
|
||||
}
|
||||
crate::try_fail_point!("update::exchange");
|
||||
}
|
||||
// Ensure all of the updates & changes are written persistently to disk
|
||||
if !opts.skip_sync {
|
||||
destdir.syncfs()?;
|
||||
}
|
||||
|
||||
// finally remove the temporary copies
|
||||
for (_, tmp) in updates.iter() {
|
||||
log::trace!("cleanup: {}", tmp);
|
||||
destdir.remove_all(tmp).context("clean up temp")?;
|
||||
}
|
||||
// A second full filesystem sync to narrow any races rather than
|
||||
// waiting for writeback to kick in.
|
||||
if !opts.skip_sync {
|
||||
fsfreeze_thaw_cycle(destdir.open_file(".")?)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::fs;
|
||||
use std::io::Write;
|
||||
use std::path::Path;
|
||||
|
||||
fn run_diff(a: &openat::Dir, b: &openat::Dir) -> Result<FileTreeDiff> {
|
||||
let ta = FileTree::new_from_dir(a)?;
|
||||
let tb = FileTree::new_from_dir(b)?;
|
||||
let diff = ta.diff(&tb)?;
|
||||
Ok(diff)
|
||||
}
|
||||
|
||||
fn test_one_apply<AP: AsRef<Path>, BP: AsRef<Path>>(
|
||||
a: AP,
|
||||
b: BP,
|
||||
opts: Option<&ApplyUpdateOptions>,
|
||||
) -> Result<()> {
|
||||
let a = a.as_ref();
|
||||
let b = b.as_ref();
|
||||
let t = tempfile::tempdir()?;
|
||||
let c = t.path().join("c");
|
||||
let r = std::process::Command::new("cp")
|
||||
.arg("-rp")
|
||||
.args([a, &c])
|
||||
.status()?;
|
||||
if !r.success() {
|
||||
bail!("failed to cp");
|
||||
};
|
||||
let c = openat::Dir::open(&c)?;
|
||||
let da = openat::Dir::open(a)?;
|
||||
let db = openat::Dir::open(b)?;
|
||||
let ta = FileTree::new_from_dir(&da)?;
|
||||
let tb = FileTree::new_from_dir(&db)?;
|
||||
let diff = ta.diff(&tb)?;
|
||||
let rdiff = tb.diff(&ta)?;
|
||||
assert_eq!(diff.count(), rdiff.count());
|
||||
assert_eq!(diff.additions.len(), rdiff.removals.len());
|
||||
assert_eq!(diff.changes.len(), rdiff.changes.len());
|
||||
apply_diff(&db, &c, &diff, opts)?;
|
||||
let tc = FileTree::new_from_dir(&c)?;
|
||||
let newdiff = tb.diff(&tc)?;
|
||||
let skip_removals = opts.map(|o| o.skip_removals).unwrap_or(false);
|
||||
if skip_removals {
|
||||
let n = newdiff.count();
|
||||
if n != 0 {
|
||||
assert_eq!(n, diff.removals.len());
|
||||
}
|
||||
for f in diff.removals.iter() {
|
||||
assert!(c.exists(f)?);
|
||||
assert!(da.exists(f)?);
|
||||
}
|
||||
} else {
|
||||
assert_eq!(newdiff.count(), 0);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn test_apply<AP: AsRef<Path>, BP: AsRef<Path>>(a: AP, b: BP) -> Result<()> {
|
||||
let a = a.as_ref();
|
||||
let b = b.as_ref();
|
||||
let skip_removals = ApplyUpdateOptions {
|
||||
skip_removals: true,
|
||||
..Default::default()
|
||||
};
|
||||
test_one_apply(a, b, None).context("testing apply (with removals)")?;
|
||||
test_one_apply(a, b, Some(&skip_removals)).context("testing apply (skipping removals)")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_filetree() -> Result<()> {
|
||||
let tmpd = tempfile::tempdir()?;
|
||||
let p = tmpd.path();
|
||||
let pa = p.join("a");
|
||||
let pb = p.join("b");
|
||||
std::fs::create_dir(&pa)?;
|
||||
std::fs::create_dir(&pb)?;
|
||||
let a = openat::Dir::open(&pa)?;
|
||||
let b = openat::Dir::open(&pb)?;
|
||||
let diff = run_diff(&a, &b)?;
|
||||
assert_eq!(diff.count(), 0);
|
||||
a.create_dir("foo", 0o755)?;
|
||||
let diff = run_diff(&a, &b)?;
|
||||
assert_eq!(diff.count(), 0);
|
||||
{
|
||||
let mut bar = a.write_file("foo/bar", 0o644)?;
|
||||
bar.write_all("foobarcontents".as_bytes())?;
|
||||
}
|
||||
let diff = run_diff(&a, &b)?;
|
||||
assert_eq!(diff.count(), 1);
|
||||
assert_eq!(diff.removals.len(), 1);
|
||||
let ta = FileTree::new_from_dir(&a)?;
|
||||
let tb = FileTree::new_from_dir(&b)?;
|
||||
let cdiff = ta.changes(&tb)?;
|
||||
assert_eq!(cdiff.count(), 1);
|
||||
assert_eq!(cdiff.removals.len(), 1);
|
||||
let udiff = ta.updates(&tb)?;
|
||||
assert_eq!(udiff.count(), 0);
|
||||
test_apply(&pa, &pb).context("testing apply 1")?;
|
||||
let rdiff = ta.relative_diff_to(&b)?;
|
||||
assert_eq!(rdiff.removals.len(), cdiff.removals.len());
|
||||
|
||||
b.create_dir("foo", 0o755)?;
|
||||
{
|
||||
let mut bar = b.write_file("foo/bar", 0o644)?;
|
||||
bar.write_all("foobarcontents".as_bytes())?;
|
||||
}
|
||||
let diff = run_diff(&a, &b)?;
|
||||
assert_eq!(diff.count(), 0);
|
||||
test_apply(&pa, &pb).context("testing apply 2")?;
|
||||
{
|
||||
let mut bar2 = b.write_file("foo/bar", 0o644)?;
|
||||
bar2.write_all("foobarcontents2".as_bytes())?;
|
||||
}
|
||||
let diff = run_diff(&a, &b)?;
|
||||
assert_eq!(diff.count(), 1);
|
||||
assert_eq!(diff.changes.len(), 1);
|
||||
let ta = FileTree::new_from_dir(&a)?;
|
||||
let rdiff = ta.relative_diff_to(&b)?;
|
||||
assert_eq!(rdiff.count(), diff.count());
|
||||
assert_eq!(rdiff.changes.len(), diff.changes.len());
|
||||
test_apply(&pa, &pb).context("testing apply 3")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_filetree2() -> Result<()> {
|
||||
let tmpd = tempfile::tempdir()?;
|
||||
let tmpdp = tmpd.path();
|
||||
let relp = "EFI/fedora";
|
||||
let a = tmpdp.join("a");
|
||||
let b = tmpdp.join("b");
|
||||
for d in &[&a, &b] {
|
||||
let efidir = d.join(relp);
|
||||
fs::create_dir_all(&efidir)?;
|
||||
let shimdata = "shim data";
|
||||
fs::write(efidir.join("shim.x64"), shimdata)?;
|
||||
let grubdata = "grub data";
|
||||
fs::write(efidir.join("grub.x64"), grubdata)?;
|
||||
}
|
||||
fs::write(b.join(relp).join("grub.x64"), "grub data 2")?;
|
||||
let newsubp = Path::new(relp).join("subdir");
|
||||
fs::create_dir_all(b.join(&newsubp))?;
|
||||
fs::write(b.join(&newsubp).join("newgrub.x64"), "newgrub data")?;
|
||||
fs::remove_file(b.join(relp).join("shim.x64"))?;
|
||||
{
|
||||
let a = openat::Dir::open(&a)?;
|
||||
let b = openat::Dir::open(&b)?;
|
||||
let ta = FileTree::new_from_dir(&a)?;
|
||||
let tb = FileTree::new_from_dir(&b)?;
|
||||
let diff = ta.diff(&tb)?;
|
||||
assert_eq!(diff.changes.len(), 1);
|
||||
assert_eq!(diff.additions.len(), 1);
|
||||
assert_eq!(diff.count(), 3);
|
||||
super::apply_diff(&b, &a, &diff, None)?;
|
||||
}
|
||||
assert_eq!(
|
||||
String::from_utf8(std::fs::read(a.join(relp).join("grub.x64"))?)?,
|
||||
"grub data 2"
|
||||
);
|
||||
assert_eq!(
|
||||
String::from_utf8(std::fs::read(a.join(&newsubp).join("newgrub.x64"))?)?,
|
||||
"newgrub data"
|
||||
);
|
||||
assert!(!a.join(relp).join("shim.x64").exists());
|
||||
Ok(())
|
||||
}
|
||||
#[test]
|
||||
fn test_get_first_dir() -> Result<()> {
|
||||
// test path
|
||||
let path = Utf8Path::new("foo/subdir/bar");
|
||||
let (tp, tp_tmp) = get_first_dir(path)?;
|
||||
assert_eq!(tp, Utf8Path::new("foo"));
|
||||
assert_eq!(tp_tmp, ".btmp.foo");
|
||||
// test file
|
||||
let path = Utf8Path::new("testfile");
|
||||
let (tp, tp_tmp) = get_first_dir(path)?;
|
||||
assert_eq!(tp, Utf8Path::new("testfile"));
|
||||
assert_eq!(tp_tmp, ".btmp.testfile");
|
||||
Ok(())
|
||||
}
|
||||
#[test]
|
||||
fn test_cleanup_tmp() -> Result<()> {
|
||||
let tmpd = tempfile::tempdir()?;
|
||||
let p = tmpd.path();
|
||||
let pa = p.join("a/.btmp.a");
|
||||
let pb = p.join(".btmp.b/b");
|
||||
std::fs::create_dir_all(&pa)?;
|
||||
std::fs::create_dir_all(&pb)?;
|
||||
let dp = openat::Dir::open(p)?;
|
||||
{
|
||||
let mut buf = dp.write_file("a/foo", 0o644)?;
|
||||
buf.write_all("foocontents".as_bytes())?;
|
||||
let mut buf = dp.write_file("a/.btmp.foo", 0o644)?;
|
||||
buf.write_all("foocontents".as_bytes())?;
|
||||
let mut buf = dp.write_file(".btmp.b/foo", 0o644)?;
|
||||
buf.write_all("foocontents".as_bytes())?;
|
||||
}
|
||||
assert!(dp.exists("a/.btmp.a")?);
|
||||
assert!(dp.exists("a/foo")?);
|
||||
assert!(dp.exists("a/.btmp.foo")?);
|
||||
assert!(dp.exists("a/.btmp.a")?);
|
||||
assert!(dp.exists(".btmp.b/b")?);
|
||||
assert!(dp.exists(".btmp.b/foo")?);
|
||||
cleanup_tmp(&dp)?;
|
||||
assert!(!dp.exists("a/.btmp.a")?);
|
||||
assert!(dp.exists("a/foo")?);
|
||||
assert!(!dp.exists("a/.btmp.foo")?);
|
||||
assert!(!dp.exists(".btmp.b")?);
|
||||
Ok(())
|
||||
}
|
||||
// Waiting on https://github.com/rust-lang/rust/pull/125692
|
||||
#[cfg(not(target_env = "musl"))]
|
||||
#[test]
|
||||
fn test_apply_with_file() -> Result<()> {
|
||||
let tmpd = tempfile::tempdir()?;
|
||||
let p = tmpd.path();
|
||||
let pa = p.join("a");
|
||||
let pb = p.join("b");
|
||||
std::fs::create_dir(&pa)?;
|
||||
std::fs::create_dir(&pb)?;
|
||||
let a = openat::Dir::open(&pa)?;
|
||||
let b = openat::Dir::open(&pb)?;
|
||||
a.create_dir("foo", 0o755)?;
|
||||
a.create_dir("bar", 0o755)?;
|
||||
let foo = Path::new("foo/bar");
|
||||
let bar = Path::new("bar/foo");
|
||||
let testfile = "testfile";
|
||||
{
|
||||
let mut buf = a.write_file(foo, 0o644)?;
|
||||
buf.write_all("foocontents".as_bytes())?;
|
||||
let mut buf = a.write_file(bar, 0o644)?;
|
||||
buf.write_all("barcontents".as_bytes())?;
|
||||
let mut buf = a.write_file(testfile, 0o644)?;
|
||||
buf.write_all("testfilecontents".as_bytes())?;
|
||||
}
|
||||
|
||||
let diff = run_diff(&a, &b)?;
|
||||
assert_eq!(diff.count(), 3);
|
||||
b.create_dir("foo", 0o755)?;
|
||||
{
|
||||
let mut buf = b.write_file(foo, 0o644)?;
|
||||
buf.write_all("foocontents".as_bytes())?;
|
||||
}
|
||||
let b_btime_foo = fs::metadata(pb.join(foo))?.created()?;
|
||||
|
||||
{
|
||||
let diff = run_diff(&b, &a)?;
|
||||
assert_eq!(diff.count(), 2);
|
||||
apply_diff(&a, &b, &diff, None).context("test additional files")?;
|
||||
assert_eq!(
|
||||
String::from_utf8(std::fs::read(pb.join(testfile))?)?,
|
||||
"testfilecontents"
|
||||
);
|
||||
assert_eq!(
|
||||
String::from_utf8(std::fs::read(pb.join(bar))?)?,
|
||||
"barcontents"
|
||||
);
|
||||
// creation time is not changed for the unchanged file
|
||||
let b_btime_foo_new = fs::metadata(pb.join(foo))?.created()?;
|
||||
assert_eq!(b_btime_foo_new, b_btime_foo);
|
||||
}
|
||||
{
|
||||
fs::write(pa.join(testfile), "newtestfile")?;
|
||||
fs::write(pa.join(bar), "newbar")?;
|
||||
let diff = run_diff(&b, &a)?;
|
||||
assert_eq!(diff.count(), 2);
|
||||
apply_diff(&a, &b, &diff, None).context("test changed files")?;
|
||||
assert_eq!(
|
||||
String::from_utf8(std::fs::read(pb.join(testfile))?)?,
|
||||
"newtestfile"
|
||||
);
|
||||
assert_eq!(String::from_utf8(std::fs::read(pb.join(bar))?)?, "newbar");
|
||||
// creation time is not changed for the unchanged file
|
||||
let b_btime_foo_new = fs::metadata(pb.join(foo))?.created()?;
|
||||
assert_eq!(b_btime_foo_new, b_btime_foo);
|
||||
}
|
||||
{
|
||||
b.remove_file(testfile)?;
|
||||
let ta = FileTree::new_from_dir(&a)?;
|
||||
let diff = ta.relative_diff_to(&b)?;
|
||||
assert_eq!(diff.removals.len(), 1);
|
||||
apply_diff(&a, &b, &diff, None).context("test removed files with relative_diff")?;
|
||||
assert_eq!(b.exists(testfile)?, false);
|
||||
}
|
||||
{
|
||||
a.remove_file(bar)?;
|
||||
let diff = run_diff(&b, &a)?;
|
||||
assert_eq!(diff.count(), 2);
|
||||
apply_diff(&a, &b, &diff, None).context("test removed files")?;
|
||||
assert_eq!(b.exists(testfile)?, true);
|
||||
assert_eq!(b.exists(bar)?, false);
|
||||
let diff = run_diff(&b, &a)?;
|
||||
assert_eq!(diff.count(), 0);
|
||||
// creation time is not changed for the unchanged file
|
||||
let b_btime_foo_new = fs::metadata(pb.join(foo))?.created()?;
|
||||
assert_eq!(b_btime_foo_new, b_btime_foo);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
50
bootupd/src/freezethaw.rs
Executable file
|
|
@ -0,0 +1,50 @@
|
|||
use rustix::fd::AsFd;
|
||||
use rustix::ffi as c;
|
||||
use rustix::io::Errno;
|
||||
use rustix::ioctl::opcode;
|
||||
use rustix::{io, ioctl};
|
||||
|
||||
use crate::util::SignalTerminationGuard;
|
||||
|
||||
fn ioctl_fifreeze<Fd: AsFd>(fd: Fd) -> io::Result<()> {
|
||||
// SAFETY: `FIFREEZE` is a no-argument opcode.
|
||||
// `FIFREEZE` is defined as `_IOWR('X', 119, int)`.
|
||||
unsafe {
|
||||
let ctl = ioctl::NoArg::<{ opcode::read_write::<c::c_int>(b'X', 119) }>::new();
|
||||
ioctl::ioctl(fd, ctl)
|
||||
}
|
||||
}
|
||||
|
||||
fn ioctl_fithaw<Fd: AsFd>(fd: Fd) -> io::Result<()> {
|
||||
// SAFETY: `FITHAW` is a no-argument opcode.
|
||||
// `FITHAW` is defined as `_IOWR('X', 120, int)`.
|
||||
unsafe {
|
||||
let ctl = ioctl::NoArg::<{ opcode::read_write::<c::c_int>(b'X', 120) }>::new();
|
||||
ioctl::ioctl(fd, ctl)
|
||||
}
|
||||
}
|
||||
|
||||
/// syncfs() doesn't flush the journal on XFS,
|
||||
/// and since GRUB2 can't read the XFS journal, the system
|
||||
/// could fail to boot.
|
||||
///
|
||||
/// http://marc.info/?l=linux-fsdevel&m=149520244919284&w=2
|
||||
/// https://github.com/ostreedev/ostree/pull/1049
|
||||
///
|
||||
/// This function always calls syncfs() first, then calls
|
||||
/// `ioctl(FIFREEZE)` and `ioctl(FITHAW)`, ignoring `EOPNOTSUPP` and `EPERM`
|
||||
pub(crate) fn fsfreeze_thaw_cycle<Fd: AsFd>(fd: Fd) -> anyhow::Result<()> {
|
||||
rustix::fs::syncfs(&fd)?;
|
||||
|
||||
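// Hold off termination signals for the duration of the freeze/thaw cycle so the
// filesystem is not left frozen (see SignalTerminationGuard in util.rs).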
let _guard = SignalTerminationGuard::new()?;
|
||||
|
||||
let freeze = ioctl_fifreeze(&fd);
|
||||
match freeze {
|
||||
// Ignore permissions errors (tests)
|
||||
Err(Errno::PERM) => Ok(()),
|
||||
// Ignore unsupported FS
|
||||
Err(Errno::NOTSUP) => Ok(()),
|
||||
Ok(()) => Ok(ioctl_fithaw(fd)?),
|
||||
_ => Ok(freeze?),
|
||||
}
|
||||
}
|
||||
3
bootupd/src/grub2/README.md
Executable file
|
|
@ -0,0 +1,3 @@
|
|||
# Static GRUB configuration files
|
||||
|
||||
These static files were taken from https://github.com/coreos/coreos-assembler/blob/5824720ec3a9ec291532b23b349b6d8d8b2e9edd/src/grub.cfg
|
||||
10
bootupd/src/grub2/configs.d/01_users.cfg
Executable file
|
|
@ -0,0 +1,10 @@
|
|||
# Keep the comment for grub2-set-password
|
||||
### BEGIN /etc/grub.d/01_users ###
|
||||
if [ -f ${prefix}/user.cfg ]; then
|
||||
source ${prefix}/user.cfg
|
||||
if [ -n "${GRUB2_PASSWORD}" ]; then
|
||||
set superusers="root"
|
||||
export superusers
|
||||
password_pbkdf2 root ${GRUB2_PASSWORD}
|
||||
fi
|
||||
fi
|
||||
1
bootupd/src/grub2/configs.d/10_blscfg.cfg
Executable file
|
|
@ -0,0 +1 @@
|
|||
blscfg
|
||||
8
bootupd/src/grub2/configs.d/14_menu_show_once.cfg
Executable file
|
|
@ -0,0 +1,8 @@
|
|||
# Force the menu to be shown once, with a timeout of ${menu_show_once_timeout}
|
||||
# if requested by setting ${menu_show_once_timeout} in the GRUB environment.
|
||||
if [ "${menu_show_once_timeout}" ]; then
|
||||
set timeout_style=menu
|
||||
set timeout="${menu_show_once_timeout}"
|
||||
unset menu_show_once_timeout
|
||||
save_env menu_show_once_timeout
|
||||
fi
|
||||
5
bootupd/src/grub2/configs.d/30_uefi-firmware.cfg
Executable file
|
|
@ -0,0 +1,5 @@
|
|||
if [ "$grub_platform" = "efi" ]; then
|
||||
menuentry 'UEFI Firmware Settings' $menuentry_id_option 'uefi-firmware' {
|
||||
fwsetup
|
||||
}
|
||||
fi
|
||||
3
bootupd/src/grub2/configs.d/41_custom.cfg
Executable file
|
|
@ -0,0 +1,3 @@
|
|||
if [ -f $prefix/custom.cfg ]; then
|
||||
source $prefix/custom.cfg
|
||||
fi
|
||||
4
bootupd/src/grub2/configs.d/README.md
Executable file
|
|
@ -0,0 +1,4 @@
|
|||
Add drop-in grub fragments into this directory to have
|
||||
them be installed into the final config.
|
||||
|
||||
The filenames must end in `.cfg`.
|
||||
24
bootupd/src/grub2/grub-static-efi.cfg
Executable file
|
|
@ -0,0 +1,24 @@
|
|||
if [ -e (md/md-boot) ]; then
|
||||
# The search command might pick a RAID component rather than the RAID,
|
||||
# since the /boot RAID currently uses superblock 1.0. See the comment in
|
||||
# the main grub.cfg.
|
||||
set prefix=md/md-boot
|
||||
else
|
||||
if [ -f ${config_directory}/bootuuid.cfg ]; then
|
||||
source ${config_directory}/bootuuid.cfg
|
||||
fi
|
||||
if [ -n "${BOOT_UUID}" ]; then
|
||||
search --fs-uuid "${BOOT_UUID}" --set prefix --no-floppy
|
||||
else
|
||||
search --label boot --set prefix --no-floppy
|
||||
fi
|
||||
fi
|
||||
if [ -d ($prefix)/grub2 ]; then
|
||||
set prefix=($prefix)/grub2
|
||||
configfile $prefix/grub.cfg
|
||||
else
|
||||
set prefix=($prefix)/boot/grub2
|
||||
configfile $prefix/grub.cfg
|
||||
fi
|
||||
boot
|
||||
|
||||
55
bootupd/src/grub2/grub-static-pre.cfg
Executable file
|
|
@ -0,0 +1,55 @@
|
|||
# This file is copied from https://github.com/coreos/coreos-assembler/blob/0eb25d1c718c88414c0b9aedd19dc56c09afbda8/src/grub.cfg
|
||||
# Changes:
|
||||
# - Dropped Ignition glue, which can be injected via platform.cfg
|
||||
# petitboot doesn't support -e and doesn't support an empty path part
|
||||
if [ -d (md/md-boot)/grub2 ]; then
|
||||
# fcct currently creates /boot RAID with superblock 1.0, which allows
|
||||
# component partitions to be read directly as filesystems. This is
|
||||
# necessary because transposefs doesn't yet rerun grub2-install on BIOS,
|
||||
# so GRUB still expects /boot to be a partition on the first disk.
|
||||
#
|
||||
# There are two consequences:
|
||||
# 1. On BIOS and UEFI, the search command might pick an individual RAID
|
||||
# component, but we want it to use the full RAID in case there are bad
|
||||
# sectors etc. The undocumented --hint option is supposed to support
|
||||
# this sort of override, but it doesn't seem to work, so we set $boot
|
||||
# directly.
|
||||
# 2. On BIOS, the "normal" module has already been loaded from an
|
||||
# individual RAID component, and $prefix still points there. We want
|
||||
# future module loads to come from the RAID, so we reset $prefix.
|
||||
# (On UEFI, the stub grub.cfg has already set $prefix properly.)
|
||||
set boot=md/md-boot
|
||||
set prefix=($boot)/grub2
|
||||
else
|
||||
if [ -f ${config_directory}/bootuuid.cfg ]; then
|
||||
source ${config_directory}/bootuuid.cfg
|
||||
fi
|
||||
if [ -n "${BOOT_UUID}" ]; then
|
||||
search --fs-uuid "${BOOT_UUID}" --set boot --no-floppy
|
||||
else
|
||||
search --label boot --set boot --no-floppy
|
||||
fi
|
||||
fi
|
||||
set root=$boot
|
||||
|
||||
if [ -f ${config_directory}/grubenv ]; then
|
||||
load_env -f ${config_directory}/grubenv
|
||||
elif [ -s $prefix/grubenv ]; then
|
||||
load_env
|
||||
fi
|
||||
|
||||
if [ -f $prefix/console.cfg ]; then
|
||||
# Source in any GRUB console settings if provided by the user/platform
|
||||
source $prefix/console.cfg
|
||||
fi
|
||||
|
||||
menuentry_id_option="--id"
|
||||
|
||||
function load_video {
|
||||
insmod all_video
|
||||
}
|
||||
|
||||
set timeout_style=menu
|
||||
set timeout=1
|
||||
|
||||
# Other package code will be injected from here
|
||||
180
bootupd/src/grubconfigs.rs
Executable file
|
|
@ -0,0 +1,180 @@
|
|||
use std::fmt::Write;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use bootc_internal_utils::CommandRunExt;
|
||||
use fn_error_context::context;
|
||||
use openat_ext::OpenatDirExt;
|
||||
|
||||
use crate::freezethaw::fsfreeze_thaw_cycle;
|
||||
|
||||
/// The subdirectory of /boot we use
|
||||
const GRUB2DIR: &str = "grub2";
|
||||
const CONFIGDIR: &str = "/usr/lib/bootupd/grub2-static";
|
||||
const DROPINDIR: &str = "configs.d";
|
||||
// The related grub files
|
||||
const GRUBENV: &str = "grubenv";
|
||||
pub(crate) const GRUBCONFIG: &str = "grub.cfg";
|
||||
pub(crate) const GRUBCONFIG_BACKUP: &str = "grub.cfg.backup";
|
||||
// File mode for /boot/grub2/grub.cfg
|
||||
// https://github.com/coreos/bootupd/issues/952
|
||||
const GRUBCONFIG_FILE_MODE: u32 = 0o600;
|
||||
|
||||
/// Install the static GRUB config files.
|
||||
#[context("Installing static GRUB configs")]
|
||||
pub(crate) fn install(
|
||||
target_root: &openat::Dir,
|
||||
installed_efi_vendor: Option<&str>,
|
||||
write_uuid: bool,
|
||||
) -> Result<()> {
|
||||
let bootdir = &target_root.sub_dir("boot").context("Opening /boot")?;
|
||||
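// Determine whether /boot is a separate filesystem by comparing device numbers.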
let boot_is_mount = {
|
||||
let root_dev = target_root.self_metadata()?.stat().st_dev;
|
||||
let boot_dev = bootdir.self_metadata()?.stat().st_dev;
|
||||
log::debug!("root_dev={root_dev} boot_dev={boot_dev}");
|
||||
root_dev != boot_dev
|
||||
};
|
||||
|
||||
if !bootdir.exists(GRUB2DIR)? {
|
||||
bootdir.create_dir(GRUB2DIR, 0o700)?;
|
||||
}
|
||||
|
||||
let mut config = String::from("# Generated by bootupd / do not edit\n\n");
|
||||
|
||||
let pre = std::fs::read_to_string(Path::new(CONFIGDIR).join("grub-static-pre.cfg"))?;
|
||||
config.push_str(pre.as_str());
|
||||
|
||||
let dropindir = openat::Dir::open(&Path::new(CONFIGDIR).join(DROPINDIR))?;
|
||||
// Sort the files for reproducibility
|
||||
let mut entries = dropindir
|
||||
.list_dir(".")?
|
||||
.map(|e| e.map_err(anyhow::Error::msg))
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
entries.sort_by(|a, b| a.file_name().cmp(b.file_name()));
|
||||
for ent in entries {
|
||||
let name = ent.file_name();
|
||||
let name = name
|
||||
.to_str()
|
||||
.ok_or_else(|| anyhow!("Invalid UTF-8: {name:?}"))?;
|
||||
if !name.ends_with(".cfg") {
|
||||
log::debug!("Ignoring {name}");
|
||||
continue;
|
||||
}
|
||||
writeln!(config, "\n### BEGIN {name} ###")?;
|
||||
let dropin = std::fs::read_to_string(Path::new(CONFIGDIR).join(DROPINDIR).join(name))?;
|
||||
config.push_str(dropin.as_str());
|
||||
writeln!(config, "### END {name} ###")?;
|
||||
println!("Added {name}");
|
||||
}
|
||||
|
||||
let grub2dir = bootdir.sub_dir(GRUB2DIR)?;
|
||||
grub2dir
|
||||
.write_file_contents("grub.cfg", GRUBCONFIG_FILE_MODE, config.as_bytes())
|
||||
.context("Copying grub-static.cfg")?;
|
||||
println!("Installed: grub.cfg");
|
||||
|
||||
write_grubenv(&bootdir).context("Create grubenv")?;
|
||||
|
||||
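// Optionally record the boot filesystem UUID so the static config can locate /boot
// via `search --fs-uuid` instead of falling back to the filesystem label.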
let uuid_path = if write_uuid {
|
||||
let target_fs = if boot_is_mount { bootdir } else { target_root };
|
||||
let bootfs_meta = crate::filesystem::inspect_filesystem(target_fs, ".")?;
|
||||
let bootfs_uuid = bootfs_meta
|
||||
.uuid
|
||||
.ok_or_else(|| anyhow::anyhow!("Failed to find UUID for boot"))?;
|
||||
let grub2_uuid_contents = format!("set BOOT_UUID=\"{bootfs_uuid}\"\n");
|
||||
let uuid_path = "bootuuid.cfg";
|
||||
grub2dir
|
||||
.write_file_contents(uuid_path, 0o644, grub2_uuid_contents)
|
||||
.context("Writing bootuuid.cfg")?;
|
||||
println!("Installed: bootuuid.cfg");
|
||||
Some(uuid_path)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
fsfreeze_thaw_cycle(grub2dir.open_file(".")?)?;
|
||||
|
||||
if let Some(vendordir) = installed_efi_vendor {
|
||||
log::debug!("vendordir={:?}", &vendordir);
|
||||
let vendor = PathBuf::from(vendordir);
|
||||
let target = &vendor.join("grub.cfg");
|
||||
let dest_efidir = target_root
|
||||
.sub_dir_optional("boot/efi/EFI")
|
||||
.context("Opening /boot/efi/EFI")?;
|
||||
if let Some(efidir) = dest_efidir {
|
||||
efidir
|
||||
.copy_file(&Path::new(CONFIGDIR).join("grub-static-efi.cfg"), target)
|
||||
.context("Copying static EFI")?;
|
||||
println!("Installed: {target:?}");
|
||||
if let Some(uuid_path) = uuid_path {
|
||||
let target = &vendor.join(uuid_path);
|
||||
grub2dir
|
||||
.copy_file_at(uuid_path, &efidir, target)
|
||||
.context("Writing bootuuid.cfg to efi dir")?;
|
||||
println!("Installed: {target:?}");
|
||||
}
|
||||
fsfreeze_thaw_cycle(efidir.open_file(".")?)?;
|
||||
} else {
|
||||
println!("Could not find /boot/efi/EFI when installing {target:?}");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[context("Create file boot/grub2/grubenv")]
|
||||
fn write_grubenv(bootdir: &openat::Dir) -> Result<()> {
|
||||
let grubdir = &bootdir.sub_dir(GRUB2DIR).context("Opening boot/grub2")?;
|
||||
|
||||
if grubdir.exists(GRUBENV)? {
|
||||
return Ok(());
|
||||
}
|
||||
let editenv = Path::new("/usr/bin/grub2-editenv");
|
||||
if !editenv.exists() {
|
||||
anyhow::bail!("Failed to find {:?}", editenv);
|
||||
}
|
||||
|
||||
std::process::Command::new(editenv)
|
||||
.args([GRUBENV, "create"])
|
||||
.current_dir(format!("/proc/self/fd/{}", grubdir.as_raw_fd()))
|
||||
.run_with_cmd_context()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
#[ignore]
|
||||
fn test_install() -> Result<()> {
|
||||
env_logger::init();
|
||||
let td = tempfile::tempdir()?;
|
||||
let tdp = td.path();
|
||||
let td = openat::Dir::open(tdp)?;
|
||||
std::fs::create_dir_all(tdp.join("boot/grub2"))?;
|
||||
std::fs::create_dir_all(tdp.join("boot/efi/EFI/BOOT"))?;
|
||||
std::fs::create_dir_all(tdp.join("boot/efi/EFI/fedora"))?;
|
||||
install(&td, Some("fedora"), false).unwrap();
|
||||
|
||||
assert!(td.exists("boot/grub2/grub.cfg")?);
|
||||
assert!(td.exists("boot/efi/EFI/fedora/grub.cfg")?);
|
||||
Ok(())
|
||||
}
|
||||
#[test]
|
||||
fn test_write_grubenv() -> Result<()> {
|
||||
// Skip this test if grub2-editenv is not installed
|
||||
let editenv = Path::new("/usr/bin/grub2-editenv");
|
||||
if !editenv.try_exists()? {
|
||||
return Ok(());
|
||||
}
|
||||
let td = tempfile::tempdir()?;
|
||||
let tdp = td.path();
|
||||
std::fs::create_dir_all(tdp.join("boot/grub2"))?;
|
||||
let td = openat::Dir::open(&tdp.join("boot"))?;
|
||||
write_grubenv(&td)?;
|
||||
|
||||
assert!(td.exists("grub2/grubenv")?);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
84
bootupd/src/main.rs
Executable file
|
|
@ -0,0 +1,84 @@
|
|||
/*!
|
||||
**Boot**loader **upd**ater.
|
||||
|
||||
This is an early prototype hidden/not-yet-standardized mechanism
|
||||
which just updates EFI for now (x86_64/aarch64/riscv64 only).
|
||||
|
||||
But in the future it will hopefully gain some independence from
|
||||
ostree and also support e.g. updating the MBR etc.
|
||||
|
||||
Refs:
|
||||
* <https://github.com/coreos/fedora-coreos-tracker/issues/510>
|
||||
!*/
|
||||
|
||||
#![deny(unused_must_use)]
|
||||
// The style lints are more annoying than useful
|
||||
#![allow(clippy::style)]
|
||||
#![deny(clippy::dbg_macro)]
|
||||
|
||||
mod backend;
|
||||
#[cfg(any(target_arch = "x86_64", target_arch = "powerpc64"))]
|
||||
mod bios;
|
||||
mod blockdev;
|
||||
mod bootupd;
|
||||
mod cli;
|
||||
mod component;
|
||||
mod coreos;
|
||||
#[cfg(any(
|
||||
target_arch = "x86_64",
|
||||
target_arch = "aarch64",
|
||||
target_arch = "riscv64"
|
||||
))]
|
||||
mod efi;
|
||||
mod failpoints;
|
||||
mod filesystem;
|
||||
mod filetree;
|
||||
mod freezethaw;
|
||||
#[cfg(any(
|
||||
target_arch = "x86_64",
|
||||
target_arch = "aarch64",
|
||||
target_arch = "powerpc64",
|
||||
target_arch = "riscv64"
|
||||
))]
|
||||
mod grubconfigs;
|
||||
mod model;
|
||||
mod model_legacy;
|
||||
mod ostreeutil;
|
||||
mod packagesystem;
|
||||
mod sha512string;
|
||||
mod util;
|
||||
|
||||
use clap::crate_name;
|
||||
|
||||
/// Binary entrypoint, for both daemon and client logic.
|
||||
fn main() {
|
||||
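// Configure failpoints from the FAILPOINTS environment variable (used by the test suite).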
let _scenario = fail::FailScenario::setup();
|
||||
let exit_code = run_cli();
|
||||
std::process::exit(exit_code);
|
||||
}
|
||||
|
||||
/// CLI logic.
|
||||
fn run_cli() -> i32 {
|
||||
// Parse command-line options.
|
||||
let args: Vec<_> = std::env::args().collect();
|
||||
let cli_opts = cli::MultiCall::from_args(args);
|
||||
|
||||
// Setup logging.
|
||||
env_logger::Builder::from_default_env()
|
||||
.format_timestamp(None)
|
||||
.format_module_path(false)
|
||||
.filter(Some(crate_name!()), cli_opts.loglevel())
|
||||
.init();
|
||||
|
||||
log::trace!("executing cli");
|
||||
|
||||
// Dispatch CLI subcommand.
|
||||
match cli_opts.run() {
|
||||
Ok(_) => libc::EXIT_SUCCESS,
|
||||
Err(e) => {
|
||||
// Use the alternative formatter to get everything on a single line... it reads better.
|
||||
eprintln!("error: {:#}", e);
|
||||
libc::EXIT_FAILURE
|
||||
}
|
||||
}
|
||||
}
|
||||
170
bootupd/src/model.rs
Executable file
|
|
@ -0,0 +1,170 @@
|
|||
/*
|
||||
* Copyright (C) 2020 Red Hat, Inc.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
use chrono::prelude::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
/// The directory where updates are stored
|
||||
pub(crate) const BOOTUPD_UPDATES_DIR: &str = "usr/lib/bootupd/updates";
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, Hash, PartialEq, Eq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub(crate) struct ContentMetadata {
|
||||
/// The timestamp, which is used to determine update availability
|
||||
pub(crate) timestamp: DateTime<Utc>,
|
||||
/// Human-readable version number; as in ostree, it is never parsed, only displayed
|
||||
pub(crate) version: String,
|
||||
}
|
||||
|
||||
impl ContentMetadata {
|
||||
/// Returns `true` if `target` is different and chronologically newer
|
||||
pub(crate) fn can_upgrade_to(&self, target: &Self) -> bool {
|
||||
if self.version == target.version {
|
||||
return false;
|
||||
}
|
||||
target.timestamp > self.timestamp
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub(crate) struct InstalledContent {
|
||||
/// Associated metadata
|
||||
pub(crate) meta: ContentMetadata,
|
||||
/// The tracked file tree (paths and checksums) for this component
|
||||
pub(crate) filetree: Option<crate::filetree::FileTree>,
|
||||
/// The version this was originally adopted from
|
||||
pub(crate) adopted_from: Option<ContentMetadata>,
|
||||
}
|
||||
|
||||
/// Will be serialized into /boot/bootupd-state.json
|
||||
#[derive(Serialize, Deserialize, Default, Debug)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub(crate) struct SavedState {
|
||||
/// Maps a component name to its currently installed version
|
||||
pub(crate) installed: BTreeMap<String, InstalledContent>,
|
||||
/// Maps a component name to an in progress update
|
||||
pub(crate) pending: Option<BTreeMap<String, ContentMetadata>>,
|
||||
/// If static bootloader configs are enabled, this contains the version
|
||||
pub(crate) static_configs: Option<ContentMetadata>,
|
||||
}
|
||||
|
||||
/// The status of an individual component.
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub(crate) enum ComponentUpdatable {
|
||||
NoUpdateAvailable,
|
||||
AtLatestVersion,
|
||||
Upgradable,
|
||||
WouldDowngrade,
|
||||
}
|
||||
|
||||
impl ComponentUpdatable {
|
||||
pub(crate) fn from_metadata(from: &ContentMetadata, to: Option<&ContentMetadata>) -> Self {
|
||||
match to {
|
||||
Some(to) => {
|
||||
if from.version == to.version {
|
||||
ComponentUpdatable::AtLatestVersion
|
||||
} else if from.can_upgrade_to(to) {
|
||||
ComponentUpdatable::Upgradable
|
||||
} else {
|
||||
ComponentUpdatable::WouldDowngrade
|
||||
}
|
||||
}
|
||||
None => ComponentUpdatable::NoUpdateAvailable,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The status of an individual component.
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub(crate) struct ComponentStatus {
|
||||
/// Currently installed version
|
||||
pub(crate) installed: ContentMetadata,
|
||||
/// In progress update that was interrupted
|
||||
pub(crate) interrupted: Option<ContentMetadata>,
|
||||
/// Update in the deployed filesystem tree
|
||||
pub(crate) update: Option<ContentMetadata>,
|
||||
/// Whether and how the component can be updated to the version in `update`
|
||||
pub(crate) updatable: ComponentUpdatable,
|
||||
/// Originally adopted version
|
||||
pub(crate) adopted_from: Option<ContentMetadata>,
|
||||
}
|
||||
|
||||
/// Information on a component that can be adopted
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub(crate) struct Adoptable {
|
||||
/// A synthetic version
|
||||
pub(crate) version: ContentMetadata,
|
||||
/// True if we are likely to be able to reliably update this system
|
||||
pub(crate) confident: bool,
|
||||
}
|
||||
|
||||
/// Representation of bootupd's worldview at a point in time.
|
||||
/// This is intended to be a stable format that is output by `bootupctl status --json`
|
||||
/// and parsed by higher level management tools. Transitively then
|
||||
/// everything referenced from here should also be stable.
|
||||
#[derive(Serialize, Deserialize, Default, Debug)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub(crate) struct Status {
|
||||
/// Maps a component name to status
|
||||
pub(crate) components: BTreeMap<String, ComponentStatus>,
|
||||
/// Components that appear to be installed, not via bootupd
|
||||
pub(crate) adoptable: BTreeMap<String, Adoptable>,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use anyhow::Result;
|
||||
use chrono::Duration;
|
||||
|
||||
#[test]
|
||||
fn test_meta_compare() {
|
||||
let t = Utc::now();
|
||||
let a = ContentMetadata {
|
||||
timestamp: t,
|
||||
version: "v1".into(),
|
||||
};
|
||||
let b = ContentMetadata {
|
||||
timestamp: t + Duration::try_seconds(1).unwrap(),
|
||||
version: "v2".into(),
|
||||
};
|
||||
assert!(a.can_upgrade_to(&b));
|
||||
assert!(!b.can_upgrade_to(&a));
|
||||
}
|
||||
|
||||
/// Validate we're not breaking the serialized format of /boot/bootupd-state.json
|
||||
#[test]
|
||||
fn test_deserialize_state() -> Result<()> {
|
||||
let data = include_str!("../tests/fixtures/example-state-v0.json");
|
||||
let state: SavedState = serde_json::from_str(data)?;
|
||||
let efi = state.installed.get("EFI").expect("EFI");
|
||||
assert_eq!(
|
||||
efi.meta.version,
|
||||
"grub2-efi-x64-1:2.04-23.fc32.x86_64,shim-x64-15-8.x86_64"
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Validate we're not breaking the serialized format of `bootupctl status --json`
|
||||
#[test]
|
||||
fn test_deserialize_status() -> Result<()> {
|
||||
let data = include_str!("../tests/fixtures/example-status-v0.json");
|
||||
let status: Status = serde_json::from_str(data)?;
|
||||
let efi = status.components.get("EFI").expect("EFI");
|
||||
assert_eq!(
|
||||
efi.installed.version,
|
||||
"grub2-efi-x64-1:2.04-23.fc32.x86_64,shim-x64-15-8.x86_64"
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
101
bootupd/src/model_legacy.rs
Executable file
|
|
@ -0,0 +1,101 @@
|
|||
/*
|
||||
* Copyright (C) 2020 Red Hat, Inc.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
//! Implementation of the original bootupd data format, which is the same
|
||||
//! as the current one except that the timestamp is stored without a timezone and is defined to be UTC.
|
||||
|
||||
use crate::model::ContentMetadata as NewContentMetadata;
|
||||
use crate::model::InstalledContent as NewInstalledContent;
|
||||
use crate::model::SavedState as NewSavedState;
|
||||
use chrono::prelude::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, Hash, PartialEq, Eq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub(crate) struct ContentMetadata01 {
|
||||
/// The timestamp, which is used to determine update availability
|
||||
pub(crate) timestamp: NaiveDateTime,
|
||||
/// Human-readable version number; as in ostree, it is never parsed, only displayed
|
||||
pub(crate) version: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub(crate) struct InstalledContent01 {
|
||||
/// Associated metadata
|
||||
pub(crate) meta: ContentMetadata01,
|
||||
/// File tree
|
||||
pub(crate) filetree: Option<crate::filetree::FileTree>,
|
||||
}
|
||||
|
||||
/// Will be serialized into /boot/bootupd-state.json
|
||||
#[derive(Serialize, Deserialize, Default, Debug)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub(crate) struct SavedState01 {
|
||||
/// Maps a component name to its currently installed version
|
||||
pub(crate) installed: BTreeMap<String, InstalledContent01>,
|
||||
/// Maps a component name to an in progress update
|
||||
pub(crate) pending: Option<BTreeMap<String, ContentMetadata01>>,
|
||||
}
|
||||
|
||||
impl ContentMetadata01 {
|
||||
pub(crate) fn upconvert(self) -> NewContentMetadata {
|
||||
let timestamp = self.timestamp.and_utc();
|
||||
NewContentMetadata {
|
||||
timestamp,
|
||||
version: self.version,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl InstalledContent01 {
|
||||
pub(crate) fn upconvert(self) -> NewInstalledContent {
|
||||
NewInstalledContent {
|
||||
meta: self.meta.upconvert(),
|
||||
filetree: self.filetree,
|
||||
adopted_from: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SavedState01 {
|
||||
pub(crate) fn upconvert(self) -> NewSavedState {
|
||||
let mut r: NewSavedState = Default::default();
|
||||
for (k, v) in self.installed {
|
||||
r.installed.insert(k, v.upconvert());
|
||||
}
|
||||
r
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use anyhow::Result;
|
||||
|
||||
/// Validate we're not breaking the serialized format of `bootupctl status --json`
|
||||
#[test]
|
||||
fn test_deserialize_status() -> Result<()> {
|
||||
let data = include_str!("../tests/fixtures/example-state-v0-legacy.json");
|
||||
let state: SavedState01 = serde_json::from_str(data)?;
|
||||
let efi = state.installed.get("EFI").expect("EFI");
|
||||
assert_eq!(
|
||||
efi.meta.version,
|
||||
"grub2-efi-x64-1:2.04-23.fc32.x86_64,shim-x64-15-8.x86_64"
|
||||
);
|
||||
let state: NewSavedState = state.upconvert();
|
||||
let efi = state.installed.get("EFI").expect("EFI");
|
||||
let t = chrono::DateTime::parse_from_rfc3339("2020-09-15T13:01:21Z")?;
|
||||
assert_eq!(t, efi.meta.timestamp);
|
||||
assert_eq!(
|
||||
efi.meta.version,
|
||||
"grub2-efi-x64-1:2.04-23.fc32.x86_64,shim-x64-15-8.x86_64"
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
104
bootupd/src/ostreeutil.rs
Executable file
|
|
@ -0,0 +1,104 @@
|
|||
/*
|
||||
* Copyright (C) 2020 Red Hat, Inc.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use log::debug;
|
||||
|
||||
/// https://github.com/coreos/rpm-ostree/pull/969/commits/dc0e8db5bd92e1f478a0763d1a02b48e57022b59
|
||||
#[cfg(any(
|
||||
target_arch = "x86_64",
|
||||
target_arch = "aarch64",
|
||||
target_arch = "riscv64"
|
||||
))]
|
||||
pub(crate) const BOOT_PREFIX: &str = "usr/lib/ostree-boot";
|
||||
const LEGACY_RPMOSTREE_DBPATH: &str = "usr/share/rpm";
|
||||
const SYSIMAGE_RPM_DBPATH: &str = "usr/lib/sysimage/rpm";
|
||||
|
||||
/// Returns true if the target directory contains at least one entry whose name does
|
||||
/// not start with `.`
|
||||
fn is_nonempty_dir(path: impl AsRef<Path>) -> Result<bool> {
|
||||
let path = path.as_ref();
|
||||
let it = match std::fs::read_dir(path) {
|
||||
Ok(r) => r,
|
||||
Err(e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(false),
|
||||
Err(e) => return Err(e.into()),
|
||||
};
|
||||
for ent in it {
|
||||
let ent = ent?;
|
||||
let name = ent.file_name();
|
||||
if name.as_encoded_bytes().starts_with(b".") {
|
||||
continue;
|
||||
}
|
||||
return Ok(true);
|
||||
}
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
pub(crate) fn rpm_cmd<P: AsRef<Path>>(sysroot: P) -> Result<std::process::Command> {
|
||||
let mut c = std::process::Command::new("rpm");
|
||||
let sysroot = sysroot.as_ref();
|
||||
// Take the first non-empty database path
|
||||
let mut arg = None;
|
||||
for dbpath in [SYSIMAGE_RPM_DBPATH, LEGACY_RPMOSTREE_DBPATH] {
|
||||
let dbpath = sysroot.join(dbpath);
|
||||
if !is_nonempty_dir(&dbpath)? {
|
||||
continue;
|
||||
}
|
||||
let mut s = std::ffi::OsString::new();
|
||||
s.push("--dbpath=");
|
||||
s.push(dbpath.as_os_str());
|
||||
arg = Some(s);
|
||||
break;
|
||||
}
|
||||
if let Some(arg) = arg {
|
||||
debug!("Using dbpath {arg:?}");
|
||||
c.arg(arg);
|
||||
} else {
|
||||
debug!("Failed to find dbpath");
|
||||
}
|
||||
Ok(c)
|
||||
}
|
||||
|
||||
/// Get sysroot.bootloader in ostree repo config.
|
||||
pub(crate) fn get_ostree_bootloader() -> Result<Option<String>> {
|
||||
let mut cmd = std::process::Command::new("ostree");
|
||||
let result = cmd
|
||||
.args([
|
||||
"config",
|
||||
"--repo=/sysroot/ostree/repo",
|
||||
"get",
|
||||
"sysroot.bootloader",
|
||||
])
|
||||
.output()
|
||||
.context("Querying ostree sysroot.bootloader")?;
|
||||
if !result.status.success() {
|
||||
// ostree will exit with a non-zero return code if the key does not exist
|
||||
return Ok(None);
|
||||
} else {
|
||||
let res = String::from_utf8(result.stdout)
|
||||
.with_context(|| "decoding as UTF-8 output of ostree command")?;
|
||||
let bootloader = res.trim_end().to_string();
|
||||
return Ok(Some(bootloader));
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn set_ostree_bootloader(bootloader: &str) -> Result<()> {
|
||||
let status = std::process::Command::new("ostree")
|
||||
.args([
|
||||
"config",
|
||||
"--repo=/sysroot/ostree/repo",
|
||||
"set",
|
||||
"sysroot.bootloader",
|
||||
bootloader,
|
||||
])
|
||||
.status()?;
|
||||
if !status.success() {
|
||||
anyhow::bail!("Failed to set 'sysroot.bootloader' to '{bootloader}' in ostree repo config");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
78
bootupd/src/packagesystem.rs
Executable file
|
|
@ -0,0 +1,78 @@
|
|||
use std::collections::{BTreeMap, BTreeSet};
|
||||
use std::io::Write;
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::{bail, Context, Result};
|
||||
use chrono::prelude::*;
|
||||
|
||||
use crate::model::*;
|
||||
use crate::ostreeutil;
|
||||
|
||||
/// Parse the output of `rpm -q`
|
||||
fn rpm_parse_metadata(stdout: &[u8]) -> Result<ContentMetadata> {
|
||||
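// Each whitespace-separated token is `NEVRA,BUILDTIME`, matching the
// `--queryformat "%{nevra},%{buildtime} "` used in `query_files` below.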
let pkgs = std::str::from_utf8(stdout)?
|
||||
.split_whitespace()
|
||||
.map(|s| -> Result<_> {
|
||||
let parts: Vec<_> = s.splitn(2, ',').collect();
|
||||
let name = parts[0];
|
||||
if let Some(ts) = parts.get(1) {
|
||||
let nt = DateTime::parse_from_str(ts, "%s")
|
||||
.context("Failed to parse rpm buildtime")?
|
||||
.with_timezone(&chrono::Utc);
|
||||
Ok((name, nt))
|
||||
} else {
|
||||
bail!("Failed to parse: {}", s);
|
||||
}
|
||||
})
|
||||
.collect::<Result<BTreeMap<&str, DateTime<Utc>>>>()?;
|
||||
if pkgs.is_empty() {
|
||||
bail!("Failed to find any RPM packages matching files in source efidir");
|
||||
}
|
||||
let timestamps: BTreeSet<&DateTime<Utc>> = pkgs.values().collect();
|
||||
// Unwrap safety: We validated pkgs has at least one value above
|
||||
let largest_timestamp = timestamps.iter().last().unwrap();
|
||||
let version = pkgs.keys().fold("".to_string(), |mut s, n| {
|
||||
if !s.is_empty() {
|
||||
s.push(',');
|
||||
}
|
||||
s.push_str(n);
|
||||
s
|
||||
});
|
||||
Ok(ContentMetadata {
|
||||
timestamp: **largest_timestamp,
|
||||
version,
|
||||
})
|
||||
}
|
||||
|
||||
/// Query the RPM database for the packages owning the given files, along with their build times.
|
||||
pub(crate) fn query_files<T>(
|
||||
sysroot_path: &str,
|
||||
paths: impl IntoIterator<Item = T>,
|
||||
) -> Result<ContentMetadata>
|
||||
where
|
||||
T: AsRef<Path>,
|
||||
{
|
||||
let mut c = ostreeutil::rpm_cmd(sysroot_path)?;
|
||||
c.args(["-q", "--queryformat", "%{nevra},%{buildtime} ", "-f"]);
|
||||
for arg in paths {
|
||||
c.arg(arg.as_ref());
|
||||
}
|
||||
|
||||
let rpmout = c.output()?;
|
||||
if !rpmout.status.success() {
|
||||
std::io::stderr().write_all(&rpmout.stderr)?;
|
||||
bail!("Failed to invoke rpm -qf");
|
||||
}
|
||||
|
||||
rpm_parse_metadata(&rpmout.stdout)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_rpmout() {
|
||||
let testdata = "grub2-efi-x64-1:2.06-95.fc38.x86_64,1681321788 grub2-efi-x64-1:2.06-95.fc38.x86_64,1681321788 shim-x64-15.6-2.x86_64,1657222566 shim-x64-15.6-2.x86_64,1657222566 shim-x64-15.6-2.x86_64,1657222566";
|
||||
let parsed = rpm_parse_metadata(testdata.as_bytes()).unwrap();
|
||||
assert_eq!(
|
||||
parsed.version,
|
||||
"grub2-efi-x64-1:2.06-95.fc38.x86_64,shim-x64-15.6-2.x86_64"
|
||||
);
|
||||
}
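For illustration only (not part of the commit): `query_files` is fed paths to bootloader payload files underneath a sysroot. The snippet below is a hypothetical sketch using made-up paths and the crate's own `packagesystem` module.

fn example_query_efi_owners() -> anyhow::Result<()> {
    let meta = crate::packagesystem::query_files(
        "/",
        ["boot/efi/EFI/fedora/grubx64.efi", "boot/efi/EFI/fedora/shimx64.efi"],
    )?;
    // ContentMetadata carries the newest build time and a comma-joined package list.
    println!("version={} timestamp={}", meta.version, meta.timestamp);
    Ok(())
}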
|
||||
42
bootupd/src/sha512string.rs
Executable file
42
bootupd/src/sha512string.rs
Executable file
|
|
@ -0,0 +1,42 @@
|
|||
/*
|
||||
* Copyright (C) 2020 Red Hat, Inc.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
use openssl::hash::Hasher;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::fmt;
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, Hash, Ord, PartialOrd, PartialEq, Eq)]
|
||||
pub(crate) struct SHA512String(pub(crate) String);
|
||||
|
||||
impl fmt::Display for SHA512String {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl SHA512String {
|
||||
#[allow(dead_code)]
|
||||
pub(crate) fn from_hasher(hasher: &mut Hasher) -> Self {
|
||||
Self(format!(
|
||||
"sha512:{}",
|
||||
hex::encode(hasher.finish().expect("completing hash"))
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use anyhow::Result;
|
||||
|
||||
#[test]
|
||||
fn test_empty() -> Result<()> {
|
||||
let mut h = Hasher::new(openssl::hash::MessageDigest::sha512())?;
|
||||
let s = SHA512String::from_hasher(&mut h);
|
||||
assert_eq!("sha512:cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e", format!("{}", s));
|
||||
Ok(())
|
||||
}
|
||||
}
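For illustration only (not part of the commit): a hypothetical helper that digests an arbitrary byte slice into the `sha512:`-prefixed form used by the state files above.

fn digest_bytes(buf: &[u8]) -> anyhow::Result<crate::sha512string::SHA512String> {
    use openssl::hash::{Hasher, MessageDigest};
    let mut h = Hasher::new(MessageDigest::sha512())?;
    h.update(buf)?;
    Ok(crate::sha512string::SHA512String::from_hasher(&mut h))
}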
|
||||
122
bootupd/src/util.rs
Executable file
122
bootupd/src/util.rs
Executable file
|
|
@ -0,0 +1,122 @@
|
|||
use std::collections::HashSet;
|
||||
use std::path::Path;
|
||||
use std::process::Command;
|
||||
|
||||
use anyhow::{bail, Context, Result};
|
||||
use openat_ext::OpenatDirExt;
|
||||
|
||||
/// Parse an environment variable as UTF-8
|
||||
#[allow(dead_code)]
|
||||
pub(crate) fn getenv_utf8(n: &str) -> Result<Option<String>> {
|
||||
if let Some(v) = std::env::var_os(n) {
|
||||
Ok(Some(
|
||||
v.to_str()
|
||||
.ok_or_else(|| anyhow::anyhow!("{} is invalid UTF-8", n))?
|
||||
.to_string(),
|
||||
))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn filenames(dir: &openat::Dir) -> Result<HashSet<String>> {
|
||||
let mut ret = HashSet::new();
|
||||
for entry in dir.list_dir(".")? {
|
||||
let entry = entry?;
|
||||
let Some(name) = entry.file_name().to_str() else {
|
||||
bail!("Invalid UTF-8 filename: {:?}", entry.file_name())
|
||||
};
|
||||
match dir.get_file_type(&entry)? {
|
||||
openat::SimpleType::File => {
|
||||
ret.insert(format!("/{name}"));
|
||||
}
|
||||
openat::SimpleType::Dir => {
|
||||
let child = dir.sub_dir(name)?;
|
||||
for mut k in filenames(&child)?.drain() {
|
||||
k.reserve(name.len() + 1);
|
||||
k.insert_str(0, name);
|
||||
k.insert(0, '/');
|
||||
ret.insert(k);
|
||||
}
|
||||
}
|
||||
openat::SimpleType::Symlink => {
|
||||
bail!("Unsupported symbolic link {:?}", entry.file_name())
|
||||
}
|
||||
openat::SimpleType::Other => {
|
||||
bail!("Unsupported non-file/directory {:?}", entry.file_name())
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(ret)
|
||||
}
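// Editor's illustration (not part of this commit): filenames() yields "/"-prefixed
// paths relative to the directory it is given, recursing into subdirectories, so a
// tree containing fedora/grubx64.efi produces "/fedora/grubx64.efi". The path below
// is the update directory referenced elsewhere in this repository.
#[allow(dead_code)]
fn example_list_efi_updates() -> Result<HashSet<String>> {
    let d = openat::Dir::open("/usr/lib/bootupd/updates/EFI")?;
    filenames(&d)
}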
|
||||
|
||||
pub(crate) fn ensure_writable_mount<P: AsRef<Path>>(p: P) -> Result<()> {
|
||||
let p = p.as_ref();
|
||||
let stat = rustix::fs::statvfs(p)?;
|
||||
if !stat.f_flag.contains(rustix::fs::StatVfsMountFlags::RDONLY) {
|
||||
return Ok(());
|
||||
}
|
||||
let status = std::process::Command::new("mount")
|
||||
.args(["-o", "remount,rw"])
|
||||
.arg(p)
|
||||
.status()?;
|
||||
if !status.success() {
|
||||
anyhow::bail!("Failed to remount {:?} writable", p);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Runs the provided Command object, captures its stdout, and swallows its stderr except on
|
||||
/// failure. Returns a Result<String> describing whether the command failed, and if not, its
|
||||
/// standard output. Output is assumed to be UTF-8. Errors are adequately prefixed with the full
|
||||
/// command.
|
||||
#[allow(dead_code)]
|
||||
pub(crate) fn cmd_output(cmd: &mut Command) -> Result<String> {
|
||||
let result = cmd
|
||||
.output()
|
||||
.with_context(|| format!("running {:#?}", cmd))?;
|
||||
if !result.status.success() {
|
||||
eprintln!("{}", String::from_utf8_lossy(&result.stderr));
|
||||
bail!("{:#?} failed with {}", cmd, result.status);
|
||||
}
|
||||
String::from_utf8(result.stdout)
|
||||
.with_context(|| format!("decoding as UTF-8 output of `{:#?}`", cmd))
|
||||
}
|
||||
|
||||
/// Copy from https://github.com/containers/bootc/blob/main/ostree-ext/src/container_utils.rs#L20
|
||||
/// Attempts to detect if the current process is running inside a container.
|
||||
/// This looks for the `container` environment variable or for podman's
/// `/run/.containerenv` and Docker's `/.dockerenv` marker files.
|
||||
/// This is a best-effort function, as there is not a 100% reliable way
|
||||
/// to determine this.
|
||||
pub fn running_in_container() -> bool {
|
||||
if std::env::var_os("container").is_some() {
|
||||
return true;
|
||||
}
|
||||
// https://stackoverflow.com/questions/20010199/how-to-determine-if-a-process-runs-inside-lxc-docker
|
||||
for p in ["/run/.containerenv", "/.dockerenv"] {
|
||||
if Path::new(p).exists() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
/// Suppress SIGTERM while active
|
||||
// TODO: In theory we could record if we got SIGTERM and exit
|
||||
// on drop, but in practice we don't care since we're going to exit anyways.
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct SignalTerminationGuard(signal_hook_registry::SigId);
|
||||
|
||||
impl SignalTerminationGuard {
|
||||
pub(crate) fn new() -> Result<Self> {
|
||||
let signal = unsafe { signal_hook_registry::register(libc::SIGTERM, || {})? };
|
||||
Ok(Self(signal))
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for SignalTerminationGuard {
|
||||
fn drop(&mut self) {
|
||||
signal_hook_registry::unregister(self.0);
|
||||
}
|
||||
}
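For illustration only (not part of the commit): a hypothetical sketch, written as if inside util.rs, that combines two helpers above by holding the SIGTERM guard while shelling out and capturing output.

fn example_guarded_command() -> Result<String> {
    // SIGTERM is ignored until _guard is dropped at the end of this function.
    let _guard = SignalTerminationGuard::new()?;
    let mut cmd = Command::new("uname");
    cmd.arg("-r");
    cmd_output(&mut cmd)
}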
|
||||
16
bootupd/systemd/bootloader-update.service
Executable file
16
bootupd/systemd/bootloader-update.service
Executable file
|
|
@ -0,0 +1,16 @@
|
|||
[Unit]
Description=Update bootloader on boot
Documentation=https://github.com/coreos/bootupd

[Service]
Type=oneshot
ExecStart=/usr/bin/bootupctl update
RemainAfterExit=yes
# Keep this stuff in sync with SYSTEMD_ARGS_BOOTUPD in general
PrivateNetwork=yes
ProtectHome=yes
KillMode=mixed
MountFlags=slave

[Install]
WantedBy=multi-user.target
|
||||
110
bootupd/tests/e2e-update/e2e-update-in-vm.sh
Executable file
110
bootupd/tests/e2e-update/e2e-update-in-vm.sh
Executable file
|
|
@ -0,0 +1,110 @@
|
|||
#!/bin/bash
|
||||
# Run inside the vm spawned from e2e.sh
|
||||
set -euo pipefail
|
||||
|
||||
dn=$(cd $(dirname $0) && pwd)
|
||||
bn=$(basename $0)
|
||||
. ${dn}/../kola/data/libtest.sh
|
||||
|
||||
cd $(mktemp -d)
|
||||
|
||||
echo "Starting $0"
|
||||
|
||||
current_commit=$(rpm-ostree status --json | jq -r .deployments[0].checksum)
|
||||
|
||||
stampfile=/etc/${bn}.upgraded
|
||||
if ! test -f ${stampfile}; then
|
||||
if test "${current_commit}" = ${TARGET_COMMIT}; then
|
||||
fatal "already at ${TARGET_COMMIT}"
|
||||
fi
|
||||
|
||||
current_grub=$(rpm -q --queryformat='%{nevra}\n' ${TARGET_GRUB_NAME})
|
||||
if test "${current_grub}" == "${TARGET_GRUB_PKG}"; then
|
||||
fatal "Current grub ${current_grub} is same as target ${TARGET_GRUB_PKG}"
|
||||
fi
|
||||
|
||||
# FIXME
|
||||
# https://github.com/coreos/rpm-ostree/issues/2210
|
||||
runv setenforce 0
|
||||
runv rpm-ostree rebase /run/cosadir/tmp/repo:${TARGET_COMMIT}
|
||||
runv touch ${stampfile}
|
||||
runv systemd-run -- systemctl reboot
|
||||
touch /run/rebooting
|
||||
sleep infinity
|
||||
else
|
||||
if test "${current_commit}" != ${TARGET_COMMIT}; then
|
||||
fatal "not at ${TARGET_COMMIT}"
|
||||
fi
|
||||
fi
|
||||
|
||||
# We did setenforce 0 above for https://github.com/coreos/rpm-ostree/issues/2210
|
||||
# Validate that on reboot we're still enforcing.
|
||||
semode=$(getenforce)
|
||||
if test "$semode" != Enforcing; then
|
||||
fatal "SELinux mode is ${semode}"
|
||||
fi
|
||||
|
||||
if ! test -n "${TARGET_GRUB_PKG}"; then
|
||||
fatal "Missing TARGET_GRUB_PKG"
|
||||
fi
|
||||
|
||||
bootupctl validate
|
||||
ok validate
|
||||
|
||||
bootupctl status | tee out.txt
|
||||
assert_file_has_content_literal out.txt 'Component EFI'
|
||||
assert_file_has_content_literal out.txt ' Installed: grub2-efi-x64-'
|
||||
assert_not_file_has_content out.txt ' Installed:.*test-bootupd-payload'
|
||||
assert_not_file_has_content out.txt ' Installed:.*'"${TARGET_GRUB_PKG}"
|
||||
assert_file_has_content out.txt 'Update: Available:.*'"${TARGET_GRUB_PKG}"
|
||||
assert_file_has_content out.txt 'Update: Available:.*test-bootupd-payload-1.0'
|
||||
bootupctl status --print-if-available > out.txt
|
||||
assert_file_has_content_literal 'out.txt' 'Updates available: BIOS EFI'
|
||||
ok update avail
|
||||
|
||||
# Mount the EFI partition.
|
||||
tmpefimount=$(mount_tmp_efi)
|
||||
|
||||
assert_not_has_file ${tmpefimount}/EFI/fedora/test-bootupd.efi
|
||||
|
||||
if env FAILPOINTS='update::exchange=return' bootupctl update -vvv 2>err.txt; then
|
||||
fatal "should have errored"
|
||||
fi
|
||||
assert_file_has_content err.txt "error: .*synthetic failpoint"
|
||||
|
||||
bootupctl update -vvv | tee out.txt
|
||||
assert_file_has_content out.txt "Previous EFI: .*"
|
||||
assert_file_has_content out.txt "Updated EFI: ${TARGET_GRUB_PKG}.*,test-bootupd-payload-1.0"
|
||||
|
||||
assert_file_has_content ${tmpefimount}/EFI/fedora/test-bootupd.efi test-payload
|
||||
|
||||
bootupctl status --print-if-available > out.txt
|
||||
if test -s out.txt; then
|
||||
fatal "Found available updates: $(cat out.txt)"
|
||||
fi
|
||||
ok update not avail
|
||||
|
||||
mount -o remount,rw /boot
|
||||
rm -f /boot/bootupd-state.json
|
||||
bootupctl adopt-and-update | tee out.txt
|
||||
assert_file_has_content out.txt "Adopted and updated: BIOS: .*"
|
||||
assert_file_has_content out.txt "Adopted and updated: EFI: .*"
|
||||
bootupctl validate
|
||||
ok adopt-and-update
|
||||
|
||||
# Verify that adoption does not fail when installed files are missing from the disk.
|
||||
# see https://github.com/coreos/bootupd/issues/762
|
||||
rm -f /boot/bootupd-state.json
|
||||
[ -f "${tmpefimount}/EFI/fedora/test-bootupd.efi" ] && rm -f ${tmpefimount}/EFI/fedora/test-bootupd.efi
|
||||
bootupctl adopt-and-update | tee out.txt
|
||||
assert_file_has_content out.txt "Adopted and updated: BIOS: .*"
|
||||
assert_file_has_content out.txt "Adopted and updated: EFI: .*"
|
||||
if bootupctl validate 2>err.txt; then
|
||||
fatal "unexpectedly passed validation"
|
||||
fi
|
||||
|
||||
tap_finish
|
||||
touch /run/testtmp/success
|
||||
sync
|
||||
# TODO maybe try to make this use more of the exttest infrastructure?
|
||||
exec poweroff -ff
|
||||
123
bootupd/tests/e2e-update/e2e-update.sh
Executable file
123
bootupd/tests/e2e-update/e2e-update.sh
Executable file
|
|
@ -0,0 +1,123 @@
|
|||
#!/bin/bash
|
||||
# Given a coreos-assembler dir (COSA_DIR) and assuming
|
||||
# the current dir is a git repository for bootupd,
|
||||
# synthesize a test update and upgrade to it. This
|
||||
# assumes that the latest cosa build is using the
|
||||
# code we want to test (as happens in CI).
|
||||
set -euo pipefail
|
||||
|
||||
dn=$(cd $(dirname $0) && pwd)
|
||||
testprefix=$(cd ${dn} && git rev-parse --show-prefix)
|
||||
. ${dn}/../kola/data/libtest.sh
|
||||
. ${dn}/testrpmbuild.sh
|
||||
|
||||
if test -z "${COSA_DIR:-}"; then
|
||||
fatal "COSA_DIR must be set"
|
||||
fi
|
||||
# Validate source directory
|
||||
bootupd_git=$(cd ${dn} && git rev-parse --show-toplevel)
|
||||
# https://github.com/coreos/bootupd/issues/551
|
||||
! test -f ${bootupd_git}/systemd/bootupd.service
|
||||
|
||||
testtmp=$(mktemp -d -p /var/tmp bootupd-e2e.XXXXXXX)
|
||||
export test_tmpdir=${testtmp}
|
||||
|
||||
# This is new content for our update
|
||||
test_bootupd_payload_file=/boot/efi/EFI/fedora/test-bootupd.efi
|
||||
test_bootupd_payload_file1=/boot/efi/EFI/BOOT/test-bootupd1.efi
|
||||
build_rpm test-bootupd-payload \
|
||||
files "${test_bootupd_payload_file}
|
||||
${test_bootupd_payload_file1}" \
|
||||
install "mkdir -p %{buildroot}/$(dirname ${test_bootupd_payload_file})
|
||||
echo test-payload > %{buildroot}/${test_bootupd_payload_file}
|
||||
mkdir -p %{buildroot}/$(dirname ${test_bootupd_payload_file1})
|
||||
echo test-payload1 > %{buildroot}/${test_bootupd_payload_file1}"
|
||||
|
||||
# Start in cosa dir
|
||||
cd ${COSA_DIR}
|
||||
test -d builds
|
||||
|
||||
overrides=${COSA_DIR}/overrides
|
||||
test -d "${overrides}"
|
||||
mkdir -p ${overrides}/rpm
|
||||
add_override() {
|
||||
override=$1
|
||||
shift
|
||||
# This relies on the "gold" grub build not being pruned from koji and being
# different from what's in the latest FCOS.
|
||||
(cd ${overrides}/rpm && runv koji download-build --arch=noarch --arch=$(arch) ${override})
|
||||
}
|
||||
|
||||
if test -z "${e2e_skip_build:-}"; then
|
||||
echo "Building starting image"
|
||||
rm -f ${overrides}/rpm/*.rpm
|
||||
# Version from F42 prior to GA
|
||||
add_override grub2-2.12-26.fc42
|
||||
runv cosa build
|
||||
prev_image=$(runv cosa meta --image-path qemu)
|
||||
# Modify manifest to include `test-bootupd-payload` RPM
|
||||
runv git -C src/config checkout manifest.yaml # first make sure it's clean
|
||||
echo "packages: [test-bootupd-payload]" >> src/config/manifest.yaml
|
||||
rm -f ${overrides}/rpm/*.rpm
|
||||
echo "Building update ostree"
|
||||
# Latest (current) version in F42
|
||||
add_override grub2-2.12-28.fc42
|
||||
mv ${test_tmpdir}/yumrepo/packages/$(arch)/*.rpm ${overrides}/rpm/
|
||||
# Only build ostree update
|
||||
runv cosa build ostree
|
||||
# Undo manifest modification
|
||||
runv git -C src/config checkout manifest.yaml
|
||||
fi
|
||||
echo "Preparing test"
|
||||
grubarch=
|
||||
case $(arch) in
|
||||
x86_64) grubarch=x64;;
|
||||
aarch64) grubarch=aa64;;
|
||||
*) fatal "Unhandled arch $(arch)";;
|
||||
esac
|
||||
target_grub_name=grub2-efi-${grubarch}
|
||||
target_grub_pkg=$(rpm -qp --queryformat='%{nevra}\n' ${overrides}/rpm/${target_grub_name}-2*.rpm)
|
||||
target_commit=$(cosa meta --get-value ostree-commit)
|
||||
echo "Target commit: ${target_commit}"
|
||||
# For some reason 9p can't write to tmpfs
|
||||
|
||||
cat >${testtmp}/test.bu << EOF
|
||||
variant: fcos
|
||||
version: 1.0.0
|
||||
systemd:
|
||||
units:
|
||||
- name: bootupd-test.service
|
||||
enabled: true
|
||||
contents: |
|
||||
[Unit]
|
||||
RequiresMountsFor=/run/testtmp
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
Environment=TARGET_COMMIT=${target_commit}
|
||||
Environment=TARGET_GRUB_NAME=${target_grub_name}
|
||||
Environment=TARGET_GRUB_PKG=${target_grub_pkg}
|
||||
Environment=SRCDIR=/run/bootupd-source
|
||||
# Run via shell because SELinux apparently denies systemd writing to 9p
|
||||
ExecStart=/bin/sh -c '/run/bootupd-source/${testprefix}/e2e-update-in-vm.sh &>>/run/testtmp/out.txt; test -f /run/rebooting || poweroff -ff'
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
runv butane -o ${testtmp}/test.ign ${testtmp}/test.bu
|
||||
cd ${testtmp}
|
||||
qemuexec_args=(kola qemuexec --propagate-initramfs-failure --qemu-image "${prev_image}" --qemu-firmware uefi \
|
||||
-i test.ign --bind-ro ${COSA_DIR},/run/cosadir --bind-ro ${bootupd_git},/run/bootupd-source --bind-rw ${testtmp},/run/testtmp)
|
||||
if test -n "${e2e_debug:-}"; then
|
||||
runv ${qemuexec_args[@]} --devshell
|
||||
else
|
||||
runv timeout 5m "${qemuexec_args[@]}" --console-to-file ${COSA_DIR}/tmp/console.txt
|
||||
fi
|
||||
if ! test -f ${testtmp}/success; then
|
||||
if test -s ${testtmp}/out.txt; then
|
||||
sed -e 's,^,# ,' < ${testtmp}/out.txt
|
||||
else
|
||||
echo "No out.txt created, systemd unit failed to start"
|
||||
fi
|
||||
fatal "test failed"
|
||||
fi
|
||||
echo "ok bootupd e2e"
|
||||
142
bootupd/tests/e2e-update/testrpmbuild.sh
Executable file
142
bootupd/tests/e2e-update/testrpmbuild.sh
Executable file
|
|
@ -0,0 +1,142 @@
|
|||
# Copied from rpm-ostree
|
||||
|
||||
# builds a new RPM and adds it to the testdir's repo
|
||||
# $1 - name
|
||||
# $2+ - optional, treated as directive/value pairs
|
||||
build_rpm() {
|
||||
local name=$1; shift
|
||||
# Unset, not zero https://github.com/projectatomic/rpm-ostree/issues/349
|
||||
local epoch=""
|
||||
local version=1.0
|
||||
local release=1
|
||||
local arch=x86_64
|
||||
|
||||
mkdir -p $test_tmpdir/yumrepo/{specs,packages}
|
||||
local spec=$test_tmpdir/yumrepo/specs/$name.spec
|
||||
|
||||
# write out the header
|
||||
cat > $spec << EOF
|
||||
Name: $name
|
||||
Summary: %{name}
|
||||
License: GPLv2+
|
||||
EOF
|
||||
|
||||
local build= install= files= pretrans= pre= post= posttrans= post_args=
|
||||
local verifyscript= uinfo=
|
||||
local transfiletriggerin= transfiletriggerin_patterns=
|
||||
local transfiletriggerin2= transfiletriggerin2_patterns=
|
||||
local transfiletriggerun= transfiletriggerun_patterns=
|
||||
while [ $# -ne 0 ]; do
|
||||
local section=$1; shift
|
||||
local arg=$1; shift
|
||||
case $section in
|
||||
requires)
|
||||
echo "Requires: $arg" >> $spec;;
|
||||
recommends)
|
||||
echo "Recommends: $arg" >> $spec;;
|
||||
provides)
|
||||
echo "Provides: $arg" >> $spec;;
|
||||
conflicts)
|
||||
echo "Conflicts: $arg" >> $spec;;
|
||||
post_args)
|
||||
post_args="$arg";;
|
||||
version|release|epoch|arch|build|install|files|pretrans|pre|post|posttrans|verifyscript|uinfo)
|
||||
declare $section="$arg";;
|
||||
transfiletriggerin)
|
||||
transfiletriggerin_patterns="$arg";
|
||||
declare $section="$1"; shift;;
|
||||
transfiletriggerin2)
|
||||
transfiletriggerin2_patterns="$arg";
|
||||
declare $section="$1"; shift;;
|
||||
transfiletriggerun)
|
||||
transfiletriggerun_patterns="$arg";
|
||||
declare $section="$1"; shift;;
|
||||
*)
|
||||
assert_not_reached "unhandled section $section";;
|
||||
esac
|
||||
done
|
||||
|
||||
cat >> $spec << EOF
|
||||
Version: $version
|
||||
Release: $release
|
||||
${epoch:+Epoch: $epoch}
|
||||
BuildArch: $arch
|
||||
|
||||
%description
|
||||
%{summary}
|
||||
|
||||
# by default, we create a /usr/bin/$name script which just outputs $name
|
||||
%build
|
||||
echo -e "#!/bin/sh\necho $name-$version-$release.$arch" > $name
|
||||
chmod a+x $name
|
||||
$build
|
||||
|
||||
${pretrans:+%pretrans}
|
||||
$pretrans
|
||||
|
||||
${pre:+%pre}
|
||||
$pre
|
||||
|
||||
${post:+%post} ${post_args}
|
||||
$post
|
||||
|
||||
${posttrans:+%posttrans}
|
||||
$posttrans
|
||||
|
||||
${transfiletriggerin:+%transfiletriggerin -- ${transfiletriggerin_patterns}}
|
||||
$transfiletriggerin
|
||||
|
||||
${transfiletriggerin2:+%transfiletriggerin -- ${transfiletriggerin2_patterns}}
|
||||
$transfiletriggerin2
|
||||
|
||||
${transfiletriggerun:+%transfiletriggerun -- ${transfiletriggerun_patterns}}
|
||||
$transfiletriggerun
|
||||
|
||||
${verifyscript:+%verifyscript}
|
||||
$verifyscript
|
||||
|
||||
%install
|
||||
mkdir -p %{buildroot}/usr/bin
|
||||
install $name %{buildroot}/usr/bin
|
||||
$install
|
||||
|
||||
%clean
|
||||
rm -rf %{buildroot}
|
||||
|
||||
%files
|
||||
/usr/bin/$name
|
||||
$files
|
||||
EOF
|
||||
|
||||
# because it'd be overkill to set up mock for this, let's just fool
|
||||
# rpmbuild using setarch
|
||||
local buildarch=$arch
|
||||
if [ "$arch" == "noarch" ]; then
|
||||
buildarch=$(uname -m)
|
||||
fi
|
||||
|
||||
(cd $test_tmpdir/yumrepo/specs &&
|
||||
setarch $buildarch rpmbuild --target $arch -ba $name.spec \
|
||||
--define "_topdir $PWD" \
|
||||
--define "_sourcedir $PWD" \
|
||||
--define "_specdir $PWD" \
|
||||
--define "_builddir $PWD/.build" \
|
||||
--define "_srcrpmdir $PWD" \
|
||||
--define "_rpmdir $test_tmpdir/yumrepo/packages" \
|
||||
--define "_buildrootdir $PWD")
|
||||
# use --keep-all-metadata to retain previous updateinfo
|
||||
(cd $test_tmpdir/yumrepo &&
|
||||
createrepo_c --no-database --update --keep-all-metadata .)
|
||||
# convenience function to avoid follow-up add-pkg
|
||||
if [ -n "$uinfo" ]; then
|
||||
uinfo_cmd add-pkg $uinfo $name 0 $version $release $arch
|
||||
fi
|
||||
if test '!' -f $test_tmpdir/yumrepo.repo; then
|
||||
cat > $test_tmpdir/yumrepo.repo.tmp << EOF
|
||||
[test-repo]
|
||||
name=test-repo
|
||||
baseurl=file:///$PWD/yumrepo
|
||||
EOF
|
||||
mv $test_tmpdir/yumrepo.repo{.tmp,}
|
||||
fi
|
||||
}
|
||||
33
bootupd/tests/fixtures/example-lsblk-output.json
vendored
Executable file
33
bootupd/tests/fixtures/example-lsblk-output.json
vendored
Executable file
|
|
@ -0,0 +1,33 @@
|
|||
{
|
||||
"blockdevices": [
|
||||
{
|
||||
"path": "/dev/sr0",
|
||||
"pttype": null,
|
||||
"parttypename": null
|
||||
},{
|
||||
"path": "/dev/zram0",
|
||||
"pttype": null,
|
||||
"parttypename": null
|
||||
},{
|
||||
"path": "/dev/vda",
|
||||
"pttype": "gpt",
|
||||
"parttypename": null
|
||||
},{
|
||||
"path": "/dev/vda1",
|
||||
"pttype": "gpt",
|
||||
"parttypename": "EFI System"
|
||||
},{
|
||||
"path": "/dev/vda2",
|
||||
"pttype": "gpt",
|
||||
"parttypename": "Linux extended boot"
|
||||
},{
|
||||
"path": "/dev/vda3",
|
||||
"pttype": "gpt",
|
||||
"parttypename": "Linux filesystem"
|
||||
},{
|
||||
"path": "/dev/mapper/luks-df2d5f95-5725-44dd-83e1-81bc4cdc49b8",
|
||||
"pttype": null,
|
||||
"parttypename": null
|
||||
}
|
||||
]
|
||||
}
|
||||
48
bootupd/tests/fixtures/example-state-v0-legacy.json
vendored
Executable file
48
bootupd/tests/fixtures/example-state-v0-legacy.json
vendored
Executable file
|
|
@ -0,0 +1,48 @@
|
|||
{
|
||||
"installed": {
|
||||
"EFI": {
|
||||
"meta": {
|
||||
"timestamp": "2020-09-15T13:01:21",
|
||||
"version": "grub2-efi-x64-1:2.04-23.fc32.x86_64,shim-x64-15-8.x86_64"
|
||||
},
|
||||
"filetree": {
|
||||
"timestamp": "1970-01-01T00:00:00",
|
||||
"children": {
|
||||
"BOOT/BOOTX64.EFI": {
|
||||
"size": 1210776,
|
||||
"sha512": "sha512:52e08b6e1686b19fea9e8f8d8ca51d22bba252467ceaf6db6ead8dd2dca4a0b0b02e547e50ddf1cdee225b8785f8514f6baa846bdf1ea0bf994e772daf70f2c3"
|
||||
},
|
||||
"BOOT/fbx64.efi": {
|
||||
"size": 357248,
|
||||
"sha512": "sha512:81fed5039bdd2bc53a203a1eaf56c6a6c9a95aa7ac88f037718a342205d83550f409741c8ef86b481f55ea7188ce0d661742548596f92ef97ba2a1695bc4caae"
|
||||
},
|
||||
"fedora/BOOTX64.CSV": {
|
||||
"size": 110,
|
||||
"sha512": "sha512:0c29b8ae73171ef683ba690069c1bae711e130a084a81169af33a83dfbae4e07d909c2482dbe89a96ab26e171f17c53f1de8cb13d558bc1535412ff8accf253f"
|
||||
},
|
||||
"fedora/grubx64.efi": {
|
||||
"size": 2528520,
|
||||
"sha512": "sha512:b35a6317658d07844d6bf0f96c35f2df90342b8b13a329b4429ac892351ff74fc794a97bc3d3e2d79bef4c234b49a8dd5147b71a3376f24bc956130994e9961c"
|
||||
},
|
||||
"fedora/mmx64.efi": {
|
||||
"size": 1159560,
|
||||
"sha512": "sha512:f83ea67756cfcc3ec4eb1c83104c719ba08e66abfadb94b4bd75891e237c448bbec0fdb5bd42826e291ccc3dee559af424900b3d642a7d11c5bc9f117718837a"
|
||||
},
|
||||
"fedora/shim.efi": {
|
||||
"size": 1210776,
|
||||
"sha512": "sha512:52e08b6e1686b19fea9e8f8d8ca51d22bba252467ceaf6db6ead8dd2dca4a0b0b02e547e50ddf1cdee225b8785f8514f6baa846bdf1ea0bf994e772daf70f2c3"
|
||||
},
|
||||
"fedora/shimx64-fedora.efi": {
|
||||
"size": 1204496,
|
||||
"sha512": "sha512:dc3656b90c0d1767365bea462cc94a2a3044899f510bd61a9a7ae1a9ca586e3d6189592b1ba1ee859f45614421297fa2f5353328caa615f51da5aed9ecfbf29c"
|
||||
},
|
||||
"fedora/shimx64.efi": {
|
||||
"size": 1210776,
|
||||
"sha512": "sha512:52e08b6e1686b19fea9e8f8d8ca51d22bba252467ceaf6db6ead8dd2dca4a0b0b02e547e50ddf1cdee225b8785f8514f6baa846bdf1ea0bf994e772daf70f2c3"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"pending": null
|
||||
}
|
||||
47
bootupd/tests/fixtures/example-state-v0.json
vendored
Executable file
47
bootupd/tests/fixtures/example-state-v0.json
vendored
Executable file
|
|
@ -0,0 +1,47 @@
|
|||
{
|
||||
"installed": {
|
||||
"EFI": {
|
||||
"meta": {
|
||||
"timestamp": "2020-09-15T13:01:21Z",
|
||||
"version": "grub2-efi-x64-1:2.04-23.fc32.x86_64,shim-x64-15-8.x86_64"
|
||||
},
|
||||
"filetree": {
|
||||
"children": {
|
||||
"BOOT/BOOTX64.EFI": {
|
||||
"size": 1210776,
|
||||
"sha512": "sha512:52e08b6e1686b19fea9e8f8d8ca51d22bba252467ceaf6db6ead8dd2dca4a0b0b02e547e50ddf1cdee225b8785f8514f6baa846bdf1ea0bf994e772daf70f2c3"
|
||||
},
|
||||
"BOOT/fbx64.efi": {
|
||||
"size": 357248,
|
||||
"sha512": "sha512:81fed5039bdd2bc53a203a1eaf56c6a6c9a95aa7ac88f037718a342205d83550f409741c8ef86b481f55ea7188ce0d661742548596f92ef97ba2a1695bc4caae"
|
||||
},
|
||||
"fedora/BOOTX64.CSV": {
|
||||
"size": 110,
|
||||
"sha512": "sha512:0c29b8ae73171ef683ba690069c1bae711e130a084a81169af33a83dfbae4e07d909c2482dbe89a96ab26e171f17c53f1de8cb13d558bc1535412ff8accf253f"
|
||||
},
|
||||
"fedora/grubx64.efi": {
|
||||
"size": 2528520,
|
||||
"sha512": "sha512:b35a6317658d07844d6bf0f96c35f2df90342b8b13a329b4429ac892351ff74fc794a97bc3d3e2d79bef4c234b49a8dd5147b71a3376f24bc956130994e9961c"
|
||||
},
|
||||
"fedora/mmx64.efi": {
|
||||
"size": 1159560,
|
||||
"sha512": "sha512:f83ea67756cfcc3ec4eb1c83104c719ba08e66abfadb94b4bd75891e237c448bbec0fdb5bd42826e291ccc3dee559af424900b3d642a7d11c5bc9f117718837a"
|
||||
},
|
||||
"fedora/shim.efi": {
|
||||
"size": 1210776,
|
||||
"sha512": "sha512:52e08b6e1686b19fea9e8f8d8ca51d22bba252467ceaf6db6ead8dd2dca4a0b0b02e547e50ddf1cdee225b8785f8514f6baa846bdf1ea0bf994e772daf70f2c3"
|
||||
},
|
||||
"fedora/shimx64-fedora.efi": {
|
||||
"size": 1204496,
|
||||
"sha512": "sha512:dc3656b90c0d1767365bea462cc94a2a3044899f510bd61a9a7ae1a9ca586e3d6189592b1ba1ee859f45614421297fa2f5353328caa615f51da5aed9ecfbf29c"
|
||||
},
|
||||
"fedora/shimx64.efi": {
|
||||
"size": 1210776,
|
||||
"sha512": "sha512:52e08b6e1686b19fea9e8f8d8ca51d22bba252467ceaf6db6ead8dd2dca4a0b0b02e547e50ddf1cdee225b8785f8514f6baa846bdf1ea0bf994e772daf70f2c3"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"pending": null
|
||||
}
|
||||
26
bootupd/tests/fixtures/example-status-v0.json
vendored
Executable file
26
bootupd/tests/fixtures/example-status-v0.json
vendored
Executable file
|
|
@ -0,0 +1,26 @@
|
|||
{
  "components": {
    "EFI": {
      "installed": {
        "timestamp": "2020-09-15T13:01:21Z",
        "version": "grub2-efi-x64-1:2.04-23.fc32.x86_64,shim-x64-15-8.x86_64"
      },
      "interrupted": null,
      "update": {
        "timestamp": "2020-09-15T13:01:21Z",
        "version": "grub2-efi-x64-1:2.04-23.fc32.x86_64,shim-x64-15-8.x86_64"
      },
      "updatable": "at-latest-version",
      "adopted-from": null
    }
  },
  "adoptable": {
    "BIOS": {
      "version": {
        "version": "grub2-bios-42.x86_64",
        "timestamp": "2020-09-15T13:01:21Z"
      },
      "confident": true
    }
  }
}
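For illustration only (not part of the commit): the status fixture above can be deserialized with serde. The types below are hypothetical stand-ins, not bootupd's actual model, and they assume serde_json plus chrono's serde feature are available.

use chrono::{DateTime, Utc};
use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct VersionStamp {
    timestamp: DateTime<Utc>,
    version: String,
}

fn parse_installed_efi(json: &str) -> anyhow::Result<VersionStamp> {
    // Pull out components.EFI.installed and decode it into the stand-in struct.
    let v: serde_json::Value = serde_json::from_str(json)?;
    Ok(serde_json::from_value(v["components"]["EFI"]["installed"].clone())?)
}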
|
||||
91
bootupd/tests/kola/data/libtest.sh
Executable file
91
bootupd/tests/kola/data/libtest.sh
Executable file
|
|
@ -0,0 +1,91 @@
|
|||
# Source library for shell script tests
|
||||
# Copyright (C) 2020 Red Hat, Inc.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
runv() {
|
||||
(set -x && "$@")
|
||||
}
|
||||
|
||||
N_TESTS=0
|
||||
ok() {
|
||||
echo "ok" $@
|
||||
N_TESTS=$((N_TESTS + 1))
|
||||
}
|
||||
|
||||
tap_finish() {
|
||||
echo "Completing TAP test with:"
|
||||
echo "1..${N_TESTS}"
|
||||
}
|
||||
|
||||
fatal() {
|
||||
echo error: $@ 1>&2; exit 1
|
||||
}
|
||||
|
||||
runv() {
|
||||
set -x
|
||||
"$@"
|
||||
}
|
||||
|
||||
# Dump ls -al + file contents to stderr, then fatal()
|
||||
_fatal_print_file() {
|
||||
file="$1"
|
||||
shift
|
||||
ls -al "$file" >&2
|
||||
sed -e 's/^/# /' < "$file" >&2
|
||||
fatal "$@"
|
||||
}
|
||||
|
||||
assert_not_has_file () {
|
||||
fpath=$1
|
||||
shift
|
||||
if test -e "$fpath"; then
|
||||
fatal "Path exists: ${fpath}"
|
||||
fi
|
||||
}
|
||||
|
||||
assert_file_has_content () {
|
||||
fpath=$1
|
||||
shift
|
||||
for re in "$@"; do
|
||||
if ! grep -q -e "$re" "$fpath"; then
|
||||
_fatal_print_file "$fpath" "File '$fpath' doesn't match regexp '$re'"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
assert_file_has_content_literal () {
|
||||
fpath=$1; shift
|
||||
for s in "$@"; do
|
||||
if ! grep -q -F -e "$s" "$fpath"; then
|
||||
_fatal_print_file "$fpath" "File '$fpath' doesn't match fixed string list '$s'"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
assert_not_file_has_content () {
|
||||
fpath=$1
|
||||
shift
|
||||
for re in "$@"; do
|
||||
if grep -q -e "$re" "$fpath"; then
|
||||
_fatal_print_file "$fpath" "File '$fpath' matches regexp '$re'"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
assert_not_file_has_content_literal () {
|
||||
fpath=$1; shift
|
||||
for s in "$@"; do
|
||||
if grep -q -F -e "$s" "$fpath"; then
|
||||
_fatal_print_file "$fpath" "File '$fpath' matches fixed string list '$s'"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
# Mount the EFI partition at a temporary location.
|
||||
efipart=/dev/disk/by-partlabel/EFI-SYSTEM
|
||||
mount_tmp_efi () {
|
||||
tmpmount=$(mktemp -d)
|
||||
mkdir -p ${tmpmount}
|
||||
mount ${efipart} ${tmpmount}
|
||||
echo ${tmpmount}
|
||||
}
|
||||
7
bootupd/tests/kola/raid1/config.bu
Executable file
7
bootupd/tests/kola/raid1/config.bu
Executable file
|
|
@ -0,0 +1,7 @@
|
|||
variant: fcos
version: 1.5.0
boot_device:
  mirror:
    devices:
      - /dev/vda
      - /dev/vdb
|
||||
1
bootupd/tests/kola/raid1/data/libtest.sh
Symbolic link
1
bootupd/tests/kola/raid1/data/libtest.sh
Symbolic link
|
|
@ -0,0 +1 @@
|
|||
../../data/libtest.sh
|
||||
42
bootupd/tests/kola/raid1/test.sh
Executable file
42
bootupd/tests/kola/raid1/test.sh
Executable file
|
|
@ -0,0 +1,42 @@
|
|||
#!/bin/bash
|
||||
## kola:
|
||||
## # additionalDisks is only supported on qemu.
|
||||
## platforms: qemu
|
||||
## # Root reprovisioning requires at least 4GiB of memory.
|
||||
## minMemory: 4096
|
||||
## # RAID 1 is set up across these disks.
|
||||
## additionalDisks: ["10G"]
|
||||
## # This test includes a lot of disk I/O and needs a higher
|
||||
## # timeout value than the default.
|
||||
## timeoutMin: 15
|
||||
## description: Verify updating multiple EFIs using RAID 1 works.
|
||||
|
||||
set -xeuo pipefail
|
||||
|
||||
# shellcheck disable=SC1091
|
||||
. "$KOLA_EXT_DATA/libtest.sh"
|
||||
|
||||
tmpdir=$(mktemp -d)
|
||||
cd ${tmpdir}
|
||||
|
||||
srcdev=$(findmnt -nvr /sysroot -o SOURCE)
|
||||
[[ ${srcdev} == "/dev/md126" ]]
|
||||
|
||||
blktype=$(lsblk -o TYPE "${srcdev}" --noheadings)
|
||||
[[ ${blktype} == "raid1" ]]
|
||||
|
||||
fstype=$(findmnt -nvr /sysroot -o FSTYPE)
|
||||
[[ ${fstype} == "xfs" ]]
|
||||
ok "source is XFS on RAID1 device"
|
||||
|
||||
mount -o remount,rw /boot
|
||||
rm -f -v /boot/bootupd-state.json
|
||||
|
||||
bootupctl adopt-and-update | tee out.txt
|
||||
assert_file_has_content out.txt "Adopted and updated: BIOS: .*"
|
||||
assert_file_has_content out.txt "Adopted and updated: EFI: .*"
|
||||
|
||||
bootupctl status | tee out.txt
|
||||
assert_file_has_content_literal out.txt 'Component BIOS'
|
||||
assert_file_has_content_literal out.txt 'Component EFI'
|
||||
ok "bootupctl adopt-and-update supports multiple EFIs on RAID1"
|
||||
122
bootupd/tests/kola/test-bootupd
Executable file
122
bootupd/tests/kola/test-bootupd
Executable file
|
|
@ -0,0 +1,122 @@
|
|||
#!/bin/bash
|
||||
set -xeuo pipefail
|
||||
|
||||
. ${KOLA_EXT_DATA}/libtest.sh
|
||||
|
||||
tmpdir=$(mktemp -d)
|
||||
cd ${tmpdir}
|
||||
echo "using tmpdir: ${tmpdir}"
|
||||
touch .testtmp
|
||||
trap cleanup EXIT
|
||||
function cleanup () {
|
||||
if test -z "${TEST_SKIP_CLEANUP:-}"; then
|
||||
if test -f "${tmpdir}"/.testtmp; then
|
||||
cd /
|
||||
rm "${tmpdir}" -rf
|
||||
fi
|
||||
else
|
||||
echo "Skipping cleanup of ${tmpdir}"
|
||||
fi
|
||||
}
|
||||
|
||||
# Mount the EFI partition.
|
||||
tmpefimount=$(mount_tmp_efi)
|
||||
bootmount=/boot
|
||||
tmpefidir=${tmpefimount}/EFI
|
||||
bootupdir=/usr/lib/bootupd/updates
|
||||
efiupdir=${bootupdir}/EFI
|
||||
ostbaseefi=/usr/lib/ostree-boot/efi/EFI
|
||||
efisubdir=fedora
|
||||
efidir=${efiupdir}/${efisubdir}
|
||||
ostefi=${ostbaseefi}/${efisubdir}
|
||||
shim=shimx64.efi
|
||||
|
||||
test -f "${efidir}/${shim}"
|
||||
|
||||
prepare_efi_update() {
|
||||
test -w /usr
|
||||
mkdir -p ${ostbaseefi}
|
||||
cp -a ${efiupdir}.orig/* ${ostbaseefi}/
|
||||
rm -rf ${efiupdir} ${bootupdir}/EFI.json
|
||||
}
|
||||
|
||||
bootupctl status > out.txt
|
||||
assert_file_has_content_literal out.txt 'Component EFI'
|
||||
assert_file_has_content_literal out.txt ' Installed: grub2-efi-x64-'
|
||||
assert_file_has_content_literal out.txt 'Update: At latest version'
|
||||
assert_file_has_content out.txt '^CoreOS aleph version:'
|
||||
ok status
|
||||
|
||||
bootupctl validate | tee out.txt
|
||||
ok validate
|
||||
|
||||
if env LANG=C.UTF-8 runuser -u bin bootupctl status 2>err.txt; then
|
||||
fatal "Was able to bootupctl status as non-root"
|
||||
fi
|
||||
assert_file_has_content err.txt 'error: This command requires root privileges'
|
||||
|
||||
# From here we'll fake updates
|
||||
test -w /usr || rpm-ostree usroverlay
|
||||
# Save a backup copy of the update dir
|
||||
cp -a ${efiupdir} ${efiupdir}.orig
|
||||
|
||||
prepare_efi_update
|
||||
# FIXME need to synthesize an RPM for this
|
||||
# echo somenewfile > ${ostefi}/somenew.efi
|
||||
rm -v ${ostefi}/shim.efi
|
||||
echo bootupd-test-changes >> ${ostefi}/grubx64.efi
|
||||
/usr/libexec/bootupd generate-update-metadata /
|
||||
ver=$(jq -r .version < ${bootupdir}/EFI.json)
|
||||
cat >ver.json << EOF
|
||||
{ "version": "${ver},test", "timestamp": "$(date -u --iso-8601=seconds)" }
|
||||
EOF
|
||||
jq -s add ${bootupdir}/EFI.json ver.json > new.json
|
||||
mv new.json ${bootupdir}/EFI.json
|
||||
|
||||
bootupctl status | tee out.txt
|
||||
assert_file_has_content_literal out.txt 'Component EFI'
|
||||
assert_file_has_content_literal out.txt ' Installed: grub2-efi-x64-'
|
||||
assert_not_file_has_content out.txt ' Installed: grub2-efi-x64.*,test'
|
||||
assert_file_has_content_literal out.txt 'Update: Available:'
|
||||
ok update avail
|
||||
|
||||
bootupctl status --json > status.json
|
||||
jq -r '.components.EFI.installed.version' < status.json > installed.txt
|
||||
assert_file_has_content installed.txt '^grub2-efi-x64'
|
||||
|
||||
bootupctl update | tee out.txt
|
||||
assert_file_has_content out.txt 'Updated EFI: grub2-efi-x64.*,test'
|
||||
|
||||
bootupctl status > out.txt
|
||||
assert_file_has_content_literal out.txt 'Component EFI'
|
||||
assert_file_has_content out.txt ' Installed: grub2-efi-x64.*,test'
|
||||
assert_file_has_content_literal out.txt 'Update: At latest version'
|
||||
ok status after update
|
||||
|
||||
bootupctl validate | tee out.txt
|
||||
ok validate after update
|
||||
|
||||
# FIXME see above
|
||||
# assert_file_has_content ${tmpefidir}/${efisubdir}/somenew.efi 'somenewfile'
|
||||
if test -f ${tmpefidir}/${efisubdir}/shim.efi; then
|
||||
fatal "failed to remove file"
|
||||
fi
|
||||
if ! grep -q 'bootupd-test-changes' ${tmpefidir}/${efisubdir}/grubx64.efi; then
|
||||
fatal "failed to update modified file"
|
||||
fi
|
||||
cmp ${tmpefidir}/${efisubdir}/shimx64.efi ${efiupdir}/${efisubdir}/shimx64.efi
|
||||
ok filesystem changes
|
||||
|
||||
bootupctl update | tee out.txt
|
||||
assert_file_has_content_literal out.txt 'No update available for any component'
|
||||
assert_not_file_has_content_literal out.txt 'Updated EFI'
|
||||
|
||||
echo "some additions" >> ${tmpefidir}/${efisubdir}/shimx64.efi
|
||||
if bootupctl validate 2>err.txt; then
|
||||
fatal "unexpectedly passed validation"
|
||||
fi
|
||||
assert_file_has_content err.txt "Changed: ${efisubdir}/shimx64.efi"
|
||||
test "$(grep -cEe '^Changed:' err.txt)" = "1"
|
||||
ok validate detected changes
|
||||
|
||||
tap_finish
|
||||
6
bootupd/tests/kolainst/Makefile
Executable file
6
bootupd/tests/kolainst/Makefile
Executable file
|
|
@ -0,0 +1,6 @@
|
|||
all:
	echo "No build step"

install:
	mkdir -p $(DESTDIR)/usr/lib/coreos-assembler/tests/kola/
	rsync -rlv ../kola $(DESTDIR)/usr/lib/coreos-assembler/tests/kola/bootupd
|
||||
28
bootupd/tests/tests/bootupctl-status-in-bootc.sh
Executable file
28
bootupd/tests/tests/bootupctl-status-in-bootc.sh
Executable file
|
|
@ -0,0 +1,28 @@
|
|||
#!/bin/bash
|
||||
set -xeuo pipefail
|
||||
|
||||
# Verify that bootupctl status works when run in a bootc container
|
||||
if [ ! -d "/sysroot/ostree/repo/" ]; then
|
||||
echo "Error: should run test in bootc container"
|
||||
exit 100
|
||||
fi
|
||||
|
||||
# check if running in container
|
||||
if [ "$container" ] || [ -f /run/.containerenv ] || [ -f /.dockerenv ]; then
|
||||
arch="$(uname --machine)"
|
||||
if [[ "${arch}" == "x86_64" ]]; then
|
||||
components_text='Available components: BIOS EFI'
|
||||
components_json='{"components":["BIOS","EFI"]}'
|
||||
else
|
||||
# Assume aarch64 for now
|
||||
components_text='Available components: EFI'
|
||||
components_json='{"components":["EFI"]}'
|
||||
fi
|
||||
|
||||
output=$(bootupctl status | tr -d '\r')
|
||||
[ "${components_text}" == "${output}" ]
|
||||
output=$(bootupctl status --json)
|
||||
[ "${components_json}" == "${output}" ]
|
||||
else
|
||||
echo "Skip running as not in container"
|
||||
fi
|
||||
28
bootupd/tests/tests/move-content-to-usr.sh
Executable file
28
bootupd/tests/tests/move-content-to-usr.sh
Executable file
|
|
@ -0,0 +1,28 @@
|
|||
#!/bin/bash
|
||||
set -xeuo pipefail
|
||||
|
||||
updates=/usr/lib/bootupd/updates
|
||||
rm -fv ${updates}/{BIOS,EFI}.json
|
||||
cp -r ${updates}/EFI /usr/lib/ostree-boot/efi
|
||||
# prepare /usr/lib/efi/<grub2|shim>/<ver>
|
||||
if [ ! -d "/usr/lib/efi" ]; then
|
||||
arch="$(uname --machine)"
|
||||
if [[ "${arch}" == "x86_64" ]]; then
|
||||
suffix="x64"
|
||||
else
|
||||
# Assume aarch64 for now
|
||||
suffix="aa64"
|
||||
fi
|
||||
|
||||
grub_ver=$(rpm -qa grub2-efi-${suffix} --queryformat '%{VERSION}-%{RELEASE}')
|
||||
mkdir -p /usr/lib/efi/grub2/${grub_ver}/EFI/centos
|
||||
mv ${updates}/EFI/centos/grub${suffix}.efi /usr/lib/efi/grub2/${grub_ver}/EFI/centos/
|
||||
|
||||
shim_ver=$(rpm -qa shim-${suffix} --queryformat '%{VERSION}-%{RELEASE}')
|
||||
mkdir -p /usr/lib/efi/shim/${shim_ver}/EFI/
|
||||
mv ${updates}/EFI /usr/lib/efi/shim/${shim_ver}/
|
||||
else
|
||||
rm -rf ${updates}/EFI
|
||||
fi
|
||||
bootupctl backend generate-update-metadata -vvv
|
||||
cat ${updates}/EFI.json | jq
|
||||
5
bootupd/xtask/.gitignore
vendored
Executable file
5
bootupd/xtask/.gitignore
vendored
Executable file
|
|
@ -0,0 +1,5 @@
|
|||
/target
fastbuild*.qcow2
_kola_temp
.cosa
Cargo.lock
|
||||
15
bootupd/xtask/Cargo.toml
Executable file
15
bootupd/xtask/Cargo.toml
Executable file
|
|
@ -0,0 +1,15 @@
|
|||
[package]
name = "xtask"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
anyhow = "1.0.68"
camino = "1.0"
chrono = { version = "0.4.23", default-features = false, features = ["std"] }
fn-error-context = "0.2.0"
toml = "0.8"
tempfile = "3.3"
xshell = { version = "0.2" }
|
||||
253
bootupd/xtask/src/main.rs
Executable file
253
bootupd/xtask/src/main.rs
Executable file
|
|
@ -0,0 +1,253 @@
|
|||
use std::fs::File;
|
||||
use std::io::{BufRead, BufReader, BufWriter, Write};
|
||||
use std::process::Command;
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use camino::{Utf8Path, Utf8PathBuf};
|
||||
use fn_error_context::context;
|
||||
use xshell::{cmd, Shell};
|
||||
|
||||
const NAME: &str = "bootupd";
|
||||
const VENDORPATH: &str = "vendor.tar.zstd";
|
||||
const TAR_REPRODUCIBLE_OPTS: &[&str] = &[
|
||||
"--sort=name",
|
||||
"--owner=0",
|
||||
"--group=0",
|
||||
"--numeric-owner",
|
||||
"--pax-option=exthdr.name=%d/PaxHeaders/%f,delete=atime,delete=ctime",
|
||||
];
|
||||
|
||||
fn main() {
|
||||
if let Err(e) = try_main() {
|
||||
eprintln!("error: {e:#}");
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
fn try_main() -> Result<()> {
|
||||
let task = std::env::args().nth(1);
|
||||
let sh = xshell::Shell::new()?;
|
||||
if let Some(cmd) = task.as_deref() {
|
||||
let f = match cmd {
|
||||
"vendor" => vendor,
|
||||
"package" => package,
|
||||
"package-srpm" => package_srpm,
|
||||
_ => print_help,
|
||||
};
|
||||
f(&sh)?;
|
||||
} else {
|
||||
print_help(&sh)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_target_dir() -> Result<Utf8PathBuf> {
|
||||
let target = Utf8Path::new("target");
|
||||
std::fs::create_dir_all(&target)?;
|
||||
Ok(target.to_owned())
|
||||
}
|
||||
|
||||
fn vendor(sh: &Shell) -> Result<()> {
|
||||
let _targetdir = get_target_dir()?;
|
||||
let target = VENDORPATH;
|
||||
cmd!(
|
||||
sh,
|
||||
"cargo vendor-filterer --prefix=vendor --format=tar.zstd {target}"
|
||||
)
|
||||
.run()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn gitrev_to_version(v: &str) -> String {
|
||||
let v = v.trim().trim_start_matches('v');
|
||||
v.replace('-', ".")
|
||||
}
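// Editor's illustration (not part of this commit): gitrev_to_version maps
// `git describe` output such as "v0.2.8-3-g1234abc" to "0.2.8.3.g1234abc",
// which is acceptable as an RPM Version field (no dashes).
#[cfg(test)]
mod gitrev_version_sketch {
    #[test]
    fn maps_describe_output() {
        assert_eq!(
            super::gitrev_to_version("v0.2.8-3-g1234abc"),
            "0.2.8.3.g1234abc"
        );
    }
}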
|
||||
|
||||
#[context("Finding gitrev")]
|
||||
fn gitrev(sh: &Shell) -> Result<String> {
|
||||
if let Ok(rev) = cmd!(sh, "git describe --tags").ignore_stderr().read() {
|
||||
Ok(gitrev_to_version(&rev))
|
||||
} else {
|
||||
let mut desc = cmd!(sh, "git describe --tags --always").read()?;
|
||||
desc.insert_str(0, "0.");
|
||||
Ok(desc)
|
||||
}
|
||||
}
|
||||
|
||||
/// Return a string-formatted version of the git commit timestamp, truncated to the
/// minute; we're not going to build more than once a second, so seconds are omitted.
|
||||
#[allow(dead_code)]
|
||||
#[context("Finding git timestamp")]
|
||||
fn git_timestamp(sh: &Shell) -> Result<String> {
|
||||
let ts = cmd!(sh, "git show -s --format=%ct").read()?;
|
||||
let ts = ts.trim().parse::<i64>()?;
|
||||
let ts = chrono::DateTime::from_timestamp(ts, 0)
|
||||
.ok_or_else(|| anyhow::anyhow!("Failed to parse timestamp"))?;
|
||||
Ok(ts.format("%Y%m%d%H%M").to_string())
|
||||
}
|
||||
|
||||
struct Package {
|
||||
version: String,
|
||||
srcpath: Utf8PathBuf,
|
||||
vendorpath: Utf8PathBuf,
|
||||
}
|
||||
|
||||
/// Return the timestamp of the latest git commit in seconds since the Unix epoch.
|
||||
fn git_source_date_epoch(dir: &Utf8Path) -> Result<u64> {
|
||||
let o = Command::new("git")
|
||||
.args(["log", "-1", "--pretty=%ct"])
|
||||
.current_dir(dir)
|
||||
.output()?;
|
||||
if !o.status.success() {
|
||||
anyhow::bail!("git exited with an error: {:?}", o);
|
||||
}
|
||||
let buf = String::from_utf8(o.stdout).context("Failed to parse git log output")?;
|
||||
let r = buf.trim().parse()?;
|
||||
Ok(r)
|
||||
}
|
||||
|
||||
|
||||
/// When using cargo-vendor-filterer --format=tar, the config generated has a bogus source
|
||||
/// directory. This edits it to refer to vendor/ as a stable relative reference.
|
||||
#[context("Editing vendor config")]
|
||||
fn edit_vendor_config(config: &str) -> Result<String> {
|
||||
let mut config: toml::Value = toml::from_str(config)?;
|
||||
let config = config.as_table_mut().unwrap();
|
||||
let source_table = config.get_mut("source").unwrap();
|
||||
let source_table = source_table.as_table_mut().unwrap();
|
||||
let vendored_sources = source_table.get_mut("vendored-sources").unwrap();
|
||||
let vendored_sources = vendored_sources.as_table_mut().unwrap();
|
||||
let previous =
|
||||
vendored_sources.insert("directory".into(), toml::Value::String("vendor".into()));
|
||||
assert!(previous.is_some());
|
||||
|
||||
Ok(config.to_string())
|
||||
}
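// Editor's illustration (not part of this commit): a sketch of the rewrite that
// edit_vendor_config performs, using a made-up input resembling what
// `cargo vendor-filterer` prints.
#[cfg(test)]
mod vendor_config_sketch {
    #[test]
    fn rewrites_vendored_sources_directory() {
        let input = r#"
[source.crates-io]
replace-with = "vendored-sources"

[source.vendored-sources]
directory = "/tmp/bogus/path/vendor"
"#;
        let edited = super::edit_vendor_config(input).unwrap();
        assert!(edited.contains(r#"directory = "vendor""#));
    }
}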
|
||||
|
||||
#[context("Packaging")]
|
||||
fn impl_package(sh: &Shell) -> Result<Package> {
|
||||
let source_date_epoch = git_source_date_epoch(".".into())?;
|
||||
let v = gitrev(sh)?;
|
||||
|
||||
let namev = format!("{NAME}-{v}");
|
||||
let p = Utf8Path::new("target").join(format!("{namev}.tar"));
|
||||
let prefix = format!("{namev}/");
|
||||
cmd!(sh, "git archive --format=tar --prefix={prefix} -o {p} HEAD").run()?;
|
||||
// Generate the vendor directory now, as we want to embed the generated config to use
|
||||
// it in our source.
|
||||
let vendorpath = Utf8Path::new("target").join(format!("{namev}-vendor.tar.zstd"));
|
||||
let vendor_config = cmd!(
|
||||
sh,
|
||||
"cargo vendor-filterer --prefix=vendor --format=tar.zstd {vendorpath}"
|
||||
)
|
||||
.read()?;
|
||||
let vendor_config = edit_vendor_config(&vendor_config)?;
|
||||
// Append .cargo/vendor-config.toml (a made-up filename) into the tar archive.
|
||||
{
|
||||
let tmpdir = tempfile::tempdir_in("target")?;
|
||||
let tmpdir_path = tmpdir.path();
|
||||
let path = tmpdir_path.join("vendor-config.toml");
|
||||
std::fs::write(&path, vendor_config)?;
|
||||
let source_date_epoch = format!("{source_date_epoch}");
|
||||
cmd!(
|
||||
sh,
|
||||
"tar -r -C {tmpdir_path} {TAR_REPRODUCIBLE_OPTS...} --mtime=@{source_date_epoch} --transform=s,^,{prefix}.cargo/, -f {p} vendor-config.toml"
|
||||
)
|
||||
.run()?;
|
||||
}
|
||||
// Compress with zstd
|
||||
let srcpath: Utf8PathBuf = format!("{p}.zstd").into();
|
||||
cmd!(sh, "zstd --rm -f {p} -o {srcpath}").run()?;
|
||||
|
||||
Ok(Package {
|
||||
version: v,
|
||||
srcpath,
|
||||
vendorpath,
|
||||
})
|
||||
}
|
||||
|
||||
fn package(sh: &Shell) -> Result<()> {
|
||||
let p = impl_package(sh)?.srcpath;
|
||||
println!("Generated: {p}");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn impl_srpm(sh: &Shell) -> Result<Utf8PathBuf> {
|
||||
let pkg = impl_package(sh)?;
|
||||
vendor(sh)?;
|
||||
let td = tempfile::tempdir_in("target").context("Allocating tmpdir")?;
|
||||
let td = td.into_path();
|
||||
let td: &Utf8Path = td.as_path().try_into().unwrap();
|
||||
let srcpath = td.join(pkg.srcpath.file_name().unwrap());
|
||||
std::fs::rename(pkg.srcpath, srcpath)?;
|
||||
let v = pkg.version;
|
||||
let vendorpath = td.join(format!("{NAME}-{v}-vendor.tar.zstd"));
|
||||
std::fs::rename(VENDORPATH, vendorpath)?;
|
||||
{
|
||||
let specin = File::open(format!("contrib/packaging/{NAME}.spec"))
|
||||
.map(BufReader::new)
|
||||
.context("Opening spec")?;
|
||||
let mut o = File::create(td.join(format!("{NAME}.spec"))).map(BufWriter::new)?;
|
||||
for line in specin.lines() {
|
||||
let line = line?;
|
||||
if line.starts_with("Version:") {
|
||||
writeln!(o, "# Replaced by cargo xtask package-srpm")?;
|
||||
writeln!(o, "Version: {v}")?;
|
||||
} else {
|
||||
writeln!(o, "{}", line)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
let d = sh.push_dir(td);
|
||||
let mut cmd = cmd!(sh, "rpmbuild");
|
||||
for k in [
|
||||
"_sourcedir",
|
||||
"_specdir",
|
||||
"_builddir",
|
||||
"_srcrpmdir",
|
||||
"_rpmdir",
|
||||
] {
|
||||
cmd = cmd.arg("--define");
|
||||
cmd = cmd.arg(format!("{k} {td}"));
|
||||
}
|
||||
let spec = format!("{NAME}.spec");
|
||||
cmd.arg("--define")
|
||||
.arg(format!("_buildrootdir {td}/.build"))
|
||||
.args(["-bs", spec.as_str()])
|
||||
.run()?;
|
||||
drop(d);
|
||||
let mut srpm = None;
|
||||
for e in std::fs::read_dir(td)? {
|
||||
let e = e?;
|
||||
let n = e.file_name();
|
||||
let n = if let Some(n) = n.to_str() {
|
||||
n
|
||||
} else {
|
||||
continue;
|
||||
};
|
||||
if n.ends_with(".src.rpm") {
|
||||
srpm = Some(td.join(n));
|
||||
break;
|
||||
}
|
||||
}
|
||||
let srpm = srpm.ok_or_else(|| anyhow::anyhow!("Failed to find generated .src.rpm"))?;
|
||||
let dest = Utf8Path::new("target").join(srpm.file_name().unwrap());
|
||||
std::fs::rename(&srpm, &dest)?;
|
||||
Ok(dest)
|
||||
}
|
||||
|
||||
fn package_srpm(sh: &Shell) -> Result<()> {
|
||||
let _targetdir = get_target_dir()?;
|
||||
let srpm = impl_srpm(sh)?;
|
||||
println!("Generated: {srpm}");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn print_help(_sh: &Shell) -> Result<()> {
|
||||
eprintln!(
|
||||
"Tasks:
|
||||
- vendor
|
||||
"
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||