ci: enable ostree tests on RHEL 9.0-beta

The ostree-ng test was updated to accommodate a change in the
edge-container image type
(see https://github.com/osbuild/osbuild-composer/pull/1595).
RHEL 9.0-beta still uses the old edge-container configuration; RHEL 9.0
GA, however, should eventually adopt the new container.

Copying the old ostree-ng test and running it separately is better than
having several conditions in the single test script to accommodate the
old behaviour and then reverting them when the changes land in RHEL 9.

The copy is modified from the old version to assume 'weldr-client' is in
use (it has a different JSON output schema) and to use RHEL 9
naming/versioning where necessary.
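
For illustration, weldr-client wraps API responses in a 'body' object, so
the test extracts IDs with jq as sketched below (the flat old-schema query
is an assumption based on the pre-weldr-client composer-cli output):

    # weldr-client schema, as used by this test:
    sudo composer-cli --json compose start-ostree --ref "$OSTREE_REF" container edge-container | jq -r '.body.build_id'
    # the old client kept the fields at the top level:
    # ... | jq -r '.build_id'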

The ansible checks were also copied to remove the kernel-rt check.

Signed-off-by: Achilleas Koutsou <achilleas@koutsou.net>
Achilleas Koutsou 2021-09-16 13:27:10 +02:00 committed by Ondřej Budai
parent 1d0f979525
commit 0ea491e9bd
3 changed files with 1190 additions and 0 deletions


@@ -179,6 +179,7 @@ OSTree:
          # - openstack/fedora-34-x86_64
          - openstack/rhel-8-x86_64
          - openstack/rhel-8.5-x86_64
          - openstack/rhel-9.0-beta-nightly-x86_64

New OSTree:
  stage: test

@@ -191,6 +192,17 @@ New OSTree:
      - RUNNER:
          - openstack/rhel-8.5-x86_64-large

New OSTree (RHEL9-BETA):
  stage: test
  extends: OSTree
  script:
    - schutzbot/deploy.sh
    - /usr/libexec/tests/osbuild-composer/ostree-ng-og.sh
  parallel:
    matrix:
      - RUNNER:
          - openstack/rhel-9.0-beta-nightly-x86_64

OSTree simplified installer:
  stage: test
  extends: OSTree

test/cases/ostree-ng-og.sh (new file, 648 lines)

@@ -0,0 +1,648 @@
#!/bin/bash
set -euo pipefail
# Provision the software under test.
/usr/libexec/osbuild-composer-test/provision.sh
# Get OS data.
source /etc/os-release
ARCH=$(uname -m)
# Colorful output.
function greenprint {
    echo -e "\033[1;32m${1}\033[0m"
}
# Start libvirtd and test it.
greenprint "🚀 Starting libvirt daemon"
sudo systemctl start libvirtd
sudo virsh list --all > /dev/null
# Set a customized dnsmasq configuration for libvirt so we always get the
# same address on bootup.
sudo tee /tmp/integration.xml > /dev/null << EOF
<network>
  <name>integration</name>
  <uuid>1c8fe98c-b53a-4ca4-bbdb-deb0f26b3579</uuid>
  <forward mode='nat'>
    <nat>
      <port start='1024' end='65535'/>
    </nat>
  </forward>
  <bridge name='integration' stp='on' delay='0'/>
  <mac address='52:54:00:36:46:ef'/>
  <ip address='192.168.100.1' netmask='255.255.255.0'>
    <dhcp>
      <range start='192.168.100.2' end='192.168.100.254'/>
      <host mac='34:49:22:B0:83:30' name='vm-bios' ip='192.168.100.50'/>
      <host mac='34:49:22:B0:83:31' name='vm-uefi' ip='192.168.100.51'/>
    </dhcp>
  </ip>
</network>
EOF
if ! sudo virsh net-info integration > /dev/null 2>&1; then
    sudo virsh net-define /tmp/integration.xml
fi
if [[ $(sudo virsh net-info integration | grep 'Active' | awk '{print $2}') == 'no' ]]; then
    sudo virsh net-start integration
fi
# Allow anyone in the adm group to talk to libvirt.
greenprint "🚪 Allowing users in adm group to talk to libvirt"
sudo tee /etc/polkit-1/rules.d/50-libvirt.rules > /dev/null << EOF
polkit.addRule(function(action, subject) {
    if (action.id == "org.libvirt.unix.manage" &&
        subject.isInGroup("adm")) {
            return polkit.Result.YES;
    }
});
EOF
# Set up variables.
OSTREE_REF="test/rhel/9/${ARCH}/edge"
OS_VARIANT="rhel9-unknown"
TEST_UUID=$(uuidgen)
IMAGE_KEY="osbuild-composer-ostree-test-${TEST_UUID}"
BIOS_GUEST_ADDRESS=192.168.100.50
UEFI_GUEST_ADDRESS=192.168.100.51
PROD_REPO_URL=http://192.168.100.1/repo
PROD_REPO=/var/www/html/repo
STAGE_REPO_ADDRESS=192.168.200.1
STAGE_REPO_URL="http://${STAGE_REPO_ADDRESS}/repo/"
QUAY_REPO_URL="docker://quay.io/osbuild/testing-rhel-edge-push"
QUAY_REPO_TAG=$(tr -dc a-z0-9 < /dev/urandom | head -c 4 ; echo '')
ARTIFACTS="ci-artifacts"
mkdir -p "${ARTIFACTS}"
# Set up temporary files.
TEMPDIR=$(mktemp -d)
BLUEPRINT_FILE=${TEMPDIR}/blueprint.toml
KS_FILE=${TEMPDIR}/ks.cfg
COMPOSE_START=${TEMPDIR}/compose-start-${IMAGE_KEY}.json
COMPOSE_INFO=${TEMPDIR}/compose-info-${IMAGE_KEY}.json
# SSH setup.
SSH_OPTIONS=(-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=5)
SSH_DATA_DIR=$(/usr/libexec/osbuild-composer-test/gen-ssh.sh)
SSH_KEY=${SSH_DATA_DIR}/id_rsa
SSH_KEY_PUB=$(cat "${SSH_KEY}".pub)
if [[ "${ID}-${VERSION_ID}" != "rhel-9.0" ]]; then
echo "unsupported distro: ${ID}-${VERSION_ID}"
exit 1
fi
CONTAINER_TYPE=edge-container
CONTAINER_FILENAME=container.tar
INSTALLER_TYPE=edge-installer
INSTALLER_FILENAME=installer.iso
# Get the compose log.
get_compose_log () {
    COMPOSE_ID=$1
    LOG_FILE=${ARTIFACTS}/osbuild-${ID}-${VERSION_ID}-${COMPOSE_ID}.log

    # Download the logs.
    sudo composer-cli compose log "$COMPOSE_ID" | tee "$LOG_FILE" > /dev/null
}

# Get the compose metadata.
get_compose_metadata () {
    COMPOSE_ID=$1
    METADATA_FILE=${ARTIFACTS}/osbuild-${ID}-${VERSION_ID}-${COMPOSE_ID}.json

    # Download the metadata.
    sudo composer-cli compose metadata "$COMPOSE_ID" > /dev/null

    # Find the tarball and extract it.
    TARBALL=$(basename "$(find . -maxdepth 1 -type f -name "*-metadata.tar")")
    tar -xf "$TARBALL" -C "${TEMPDIR}"
    rm -f "$TARBALL"

    # Move the JSON file into place.
    cat "${TEMPDIR}"/"${COMPOSE_ID}".json | jq -M '.' | tee "$METADATA_FILE" > /dev/null
}
# Build ostree image.
build_image() {
    blueprint_name=$1
    image_type=$2

    # Get the worker unit file so we can watch the journal.
    WORKER_UNIT=$(sudo systemctl list-units | grep -o -E "osbuild.*worker.*\.service")
    sudo journalctl -af -n 1 -u "${WORKER_UNIT}" &
    WORKER_JOURNAL_PID=$!
    # Stop watching the worker journal when exiting.
    trap 'sudo pkill -P ${WORKER_JOURNAL_PID}' EXIT

    # Start the compose.
    greenprint "🚀 Starting compose"
    if [ $# -eq 3 ]; then
        repo_url=$3
        sudo composer-cli --json compose start-ostree --ref "$OSTREE_REF" --url "$repo_url" "$blueprint_name" "$image_type" | tee "$COMPOSE_START"
    else
        sudo composer-cli --json compose start-ostree --ref "$OSTREE_REF" "$blueprint_name" "$image_type" | tee "$COMPOSE_START"
    fi
    # weldr-client wraps the response in a "body" object.
    COMPOSE_ID=$(jq -r '.body.build_id' "$COMPOSE_START")

    # Wait for the compose to finish.
    greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}"
    while true; do
        sudo composer-cli --json compose info "${COMPOSE_ID}" | tee "$COMPOSE_INFO" > /dev/null
        COMPOSE_STATUS=$(jq -r '.body.queue_status' "$COMPOSE_INFO")

        # Is the compose finished?
        if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
            break
        fi

        # Wait 5 seconds and try again.
        sleep 5
    done

    # Capture the compose logs from osbuild.
    greenprint "💬 Getting compose log and metadata"
    get_compose_log "$COMPOSE_ID"
    get_compose_metadata "$COMPOSE_ID"

    # Kill the journal monitor immediately and remove the trap.
    sudo pkill -P ${WORKER_JOURNAL_PID}
    trap - EXIT

    # Did the compose finish with success?
    if [[ $COMPOSE_STATUS != FINISHED ]]; then
        echo "Something went wrong with the compose. 😢"
        exit 1
    fi
}
# Wait for the SSH server to come up.
wait_for_ssh_up () {
    SSH_STATUS=$(sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" admin@"${1}" '/bin/bash -c "echo -n READY"')
    if [[ $SSH_STATUS == READY ]]; then
        echo 1
    else
        echo 0
    fi
}
# Clean up our mess.
clean_up () {
    greenprint "🧼 Cleaning up"

    # Remove the tag from the quay.io repo.
    skopeo delete --creds "${QUAY_USERNAME}:${QUAY_PASSWORD}" "${QUAY_REPO_URL}:${QUAY_REPO_TAG}"

    # Clear the VM.
    if [[ $(sudo virsh domstate "${IMAGE_KEY}-uefi") == "running" ]]; then
        sudo virsh destroy "${IMAGE_KEY}-uefi"
    fi
    sudo virsh undefine "${IMAGE_KEY}-uefi" --nvram
    # Remove the qcow2 file.
    sudo rm -f "$LIBVIRT_UEFI_IMAGE_PATH"

    # Remove any leftover containers, if they exist.
    sudo podman ps -a -q --format "{{.ID}}" | sudo xargs --no-run-if-empty podman rm -f
    # Remove all images.
    sudo podman rmi -f -a

    # Remove the prod repo.
    sudo rm -rf "$PROD_REPO"

    # Remove the tmp dir.
    sudo rm -rf "$TEMPDIR"

    # Stop the prod repo http service.
    sudo systemctl disable --now httpd
}
# Test result checking
check_result () {
    greenprint "🎏 Checking for test result"
    if [[ $RESULTS == 1 ]]; then
        greenprint "💚 Success"
    else
        greenprint "❌ Failed"
        clean_up
        exit 1
    fi
}
###########################################################
##
## Prepare edge prod and stage repo
##
###########################################################
greenprint "🔧 Prepare edge prod repo"
# Start prod repo web service
# osbuild-composer-tests has mod_ssl as a dependency. The package installs
# an example configuration which automatically enables httpd on port 443, but
# that port is already in use. Remove the default configuration as it is
# useless anyway.
sudo rm -f /etc/httpd/conf.d/ssl.conf
sudo systemctl enable --now httpd.service
# Have a clean prod repo
sudo rm -rf "$PROD_REPO"
sudo mkdir -p "$PROD_REPO"
sudo ostree --repo="$PROD_REPO" init --mode=archive
sudo ostree --repo="$PROD_REPO" remote add --no-gpg-verify edge-stage "$STAGE_REPO_URL"
# Prepare stage repo network
greenprint "🔧 Prepare stage repo network"
sudo podman network inspect edge >/dev/null 2>&1 || sudo podman network create --driver=bridge --subnet=192.168.200.0/24 --ip-range=192.168.200.0/24 --gateway=192.168.200.254 edge
##########################################################
##
## edge container image for building installer image
##
##########################################################
# Write a blueprint for ostree image.
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "container"
description = "A base rhel-edge container image"
version = "0.0.1"
modules = []
groups = []
[[packages]]
name = "python3"
version = "*"
[[customizations.user]]
name = "admin"
description = "Administrator account"
password = "\$6\$GRmb7S0p8vsYmXzH\$o0E020S.9JQGaHkszoog4ha4AQVs3sk8q0DvLjSMxoxHBKnB2FBXGQ/OkwZQfW/76ktHd0NX5nls2LPxPuUdl."
key = "${SSH_KEY_PUB}"
home = "/home/admin/"
groups = ["wheel"]
EOF
greenprint "📄 container blueprint"
cat "$BLUEPRINT_FILE"
# Prepare the blueprint for the compose.
greenprint "📋 Preparing container blueprint"
sudo composer-cli blueprints push "$BLUEPRINT_FILE"
sudo composer-cli blueprints depsolve container
# Build container image.
build_image container "${CONTAINER_TYPE}"
# Download the image
greenprint "📥 Downloading the container image"
sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null
# Clear stage repo running env
greenprint "🧹 Clearing stage repo running env"
# Remove any leftover containers, if they exist
sudo podman ps -a -q --format "{{.ID}}" | sudo xargs --no-run-if-empty podman rm -f
# Remove all images
sudo podman rmi -f -a
# Deal with stage repo image
greenprint "🗜 Pushing image to quay.io"
IMAGE_FILENAME="${COMPOSE_ID}-${CONTAINER_FILENAME}"
skopeo copy --dest-creds "${QUAY_USERNAME}:${QUAY_PASSWORD}" "oci-archive:${IMAGE_FILENAME}" "${QUAY_REPO_URL}:${QUAY_REPO_TAG}"
greenprint "Downloading image from quay.io"
sudo podman login quay.io --username "${QUAY_USERNAME}" --password "${QUAY_PASSWORD}"
sudo podman pull "${QUAY_REPO_URL}:${QUAY_REPO_TAG}"
sudo podman images
greenprint "🗜 Running the image"
sudo podman run -d --name rhel-edge --network edge --ip "$STAGE_REPO_ADDRESS" "${QUAY_REPO_URL}:${QUAY_REPO_TAG}"
# Clear image file
sudo rm -f "$IMAGE_FILENAME"
# Wait for container to be running
until [ "$(sudo podman inspect -f '{{.State.Running}}' rhel-edge)" == "true" ]; do
sleep 1;
done;
# Sync installer edge content
greenprint "📡 Sync installer content from stage repo"
sudo ostree --repo="$PROD_REPO" pull --mirror edge-stage "$OSTREE_REF"
# Clean compose and blueprints.
greenprint "🧽 Clean up container blueprint and compose"
sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
sudo composer-cli blueprints delete container > /dev/null
########################################################
##
## rhel-edge installer image building from container image
##
########################################################
# Write a blueprint for installer image.
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "installer"
description = "A rhel-edge installer image"
version = "0.0.1"
modules = []
groups = []
EOF
greenprint "📄 installer blueprint"
cat "$BLUEPRINT_FILE"
# Prepare the blueprint for the compose.
greenprint "📋 Preparing installer blueprint"
sudo composer-cli blueprints push "$BLUEPRINT_FILE"
sudo composer-cli blueprints depsolve installer
# Build installer image.
# Test the --url arg followed by a URL with a trailing slash for bz#1942029
build_image installer "${INSTALLER_TYPE}" "${PROD_REPO_URL}/"
# Download the image
greenprint "📥 Downloading the installer image"
sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null
ISO_FILENAME="${COMPOSE_ID}-${INSTALLER_FILENAME}"
sudo mv "${ISO_FILENAME}" /var/lib/libvirt/images
# Clean compose and blueprints.
greenprint "🧹 Clean up installer blueprint and compose"
sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
sudo composer-cli blueprints delete installer > /dev/null
########################################################
##
## install rhel-edge image with installer(ISO)
##
########################################################
# Ensure SELinux is happy with our new images.
greenprint "👿 Running restorecon on image directory"
sudo restorecon -Rv /var/lib/libvirt/images/
# Create qcow2 file for virt install.
greenprint "🖥 Create qcow2 file for virt install"
LIBVIRT_BIOS_IMAGE_PATH=/var/lib/libvirt/images/${IMAGE_KEY}-bios.qcow2
LIBVIRT_UEFI_IMAGE_PATH=/var/lib/libvirt/images/${IMAGE_KEY}-uefi.qcow2
sudo qemu-img create -f qcow2 "${LIBVIRT_BIOS_IMAGE_PATH}" 20G
sudo qemu-img create -f qcow2 "${LIBVIRT_UEFI_IMAGE_PATH}" 20G
# Write kickstart file for ostree image installation.
greenprint "📑 Generate kickstart file"
tee "$KS_FILE" > /dev/null << STOPHERE
text
network --bootproto=dhcp --device=link --activate --onboot=on
zerombr
clearpart --all --initlabel --disklabel=msdos
autopart --nohome --noswap --type=plain
ostreesetup --nogpg --osname=rhel-edge --remote=rhel-edge --url=file:///ostree/repo --ref=${OSTREE_REF}
poweroff
%post --log=/var/log/anaconda/post-install.log --erroronfail
# no sudo password for user admin
echo -e 'admin\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
# delete local ostree repo and add external prod edge repo
ostree remote delete rhel-edge
ostree remote add --no-gpg-verify --no-sign-verify rhel-edge ${PROD_REPO_URL}
%end
STOPHERE
##################################################
##
## Install and test Edge image on BIOS VM
##
##################################################
# Install ostree image via anaconda.
greenprint "💿 Install ostree image via installer(ISO) on BIOS VM"
sudo virt-install --initrd-inject="${KS_FILE}" \
    --extra-args="inst.ks=file:/ks.cfg console=ttyS0,115200" \
    --name="${IMAGE_KEY}-bios" \
    --disk path="${LIBVIRT_BIOS_IMAGE_PATH}",format=qcow2 \
    --ram 3072 \
    --vcpus 2 \
    --network network=integration,mac=34:49:22:B0:83:30 \
    --os-type linux \
    --os-variant ${OS_VARIANT} \
    --location "/var/lib/libvirt/images/${ISO_FILENAME}" \
    --nographics \
    --noautoconsole \
    --wait=-1 \
    --noreboot
# Start VM.
greenprint "📟 Start BIOS VM"
sudo virsh start "${IMAGE_KEY}-bios"
# Check for ssh ready to go.
greenprint "🛃 Checking for SSH is ready to go"
for LOOP_COUNTER in $(seq 0 30); do
    RESULTS="$(wait_for_ssh_up $BIOS_GUEST_ADDRESS)"
    if [[ $RESULTS == 1 ]]; then
        echo "SSH is ready now! 🥳"
        break
    fi
    sleep 10
done
# Check image installation result
check_result
# Get ostree commit value.
greenprint "🕹 Get ostree install commit value"
INSTALL_HASH=$(curl "${PROD_REPO_URL}/refs/heads/${OSTREE_REF}")
# Run Edge test on BIOS VM
# Add instance IP address into /etc/ansible/hosts
sudo tee "${TEMPDIR}"/inventory > /dev/null << EOF
[ostree_guest]
${BIOS_GUEST_ADDRESS}
[ostree_guest:vars]
ansible_python_interpreter=/usr/bin/python3
ansible_user=admin
ansible_private_key_file=${SSH_KEY}
ansible_ssh_common_args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
EOF
# Test IoT/Edge OS
greenprint "📼 Run Edge tests on BIOS VM"
sudo ANSIBLE_STDOUT_CALLBACK=debug ansible-playbook -v -i "${TEMPDIR}"/inventory -e image_type=rhel-edge -e ostree_commit="${INSTALL_HASH}" /usr/share/tests/osbuild-composer/ansible/check_ostree.yaml || RESULTS=0
check_result
# Clean up BIOS VM
greenprint "🧹 Clean up BIOS VM"
if [[ $(sudo virsh domstate "${IMAGE_KEY}-bios") == "running" ]]; then
    sudo virsh destroy "${IMAGE_KEY}-bios"
fi
sudo virsh undefine "${IMAGE_KEY}-bios"
sudo rm -f "$LIBVIRT_BIOS_IMAGE_PATH"
##################################################
##
## Install, upgrade and test Edge image on UEFI VM
##
##################################################
# Install ostree image via anaconda.
greenprint "💿 Install ostree image via installer(ISO) on UEFI VM"
sudo virt-install --initrd-inject="${KS_FILE}" \
    --extra-args="inst.ks=file:/ks.cfg console=ttyS0,115200" \
    --name="${IMAGE_KEY}-uefi" \
    --disk path="${LIBVIRT_UEFI_IMAGE_PATH}",format=qcow2 \
    --ram 3072 \
    --vcpus 2 \
    --network network=integration,mac=34:49:22:B0:83:31 \
    --os-type linux \
    --os-variant ${OS_VARIANT} \
    --location "/var/lib/libvirt/images/${ISO_FILENAME}" \
    --boot uefi,loader_ro=yes,loader_type=pflash,nvram_template=/usr/share/edk2/ovmf/OVMF_VARS.fd,loader_secure=no \
    --nographics \
    --noautoconsole \
    --wait=-1 \
    --noreboot
# Start VM.
greenprint "💻 Start UEFI VM"
sudo virsh start "${IMAGE_KEY}-uefi"
# Check for ssh ready to go.
greenprint "🛃 Checking for SSH is ready to go"
for LOOP_COUNTER in $(seq 0 30); do
    RESULTS="$(wait_for_ssh_up $UEFI_GUEST_ADDRESS)"
    if [[ $RESULTS == 1 ]]; then
        echo "SSH is ready now! 🥳"
        break
    fi
    sleep 10
done
# Check image installation result
check_result
##################################################
##
## upgrade rhel-edge with new upgrade commit
##
##################################################
# Write a blueprint for ostree image.
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "upgrade"
description = "An upgrade rhel-edge container image"
version = "0.0.2"
modules = []
groups = []
[[packages]]
name = "python3"
version = "*"
[[packages]]
name = "wget"
version = "*"
[[customizations.user]]
name = "admin"
description = "Administrator account"
password = "\$6\$GRmb7S0p8vsYmXzH\$o0E020S.9JQGaHkszoog4ha4AQVs3sk8q0DvLjSMxoxHBKnB2FBXGQ/OkwZQfW/76ktHd0NX5nls2LPxPuUdl."
key = "${SSH_KEY_PUB}"
home = "/home/admin/"
groups = ["wheel"]
EOF
greenprint "📄 upgrade blueprint"
cat "$BLUEPRINT_FILE"
# Prepare the blueprint for the compose.
greenprint "📋 Preparing upgrade blueprint"
sudo composer-cli blueprints push "$BLUEPRINT_FILE"
sudo composer-cli blueprints depsolve upgrade
# Build upgrade image.
build_image upgrade "${CONTAINER_TYPE}" "$PROD_REPO_URL"
# Download the image
greenprint "📥 Downloading the upgrade image"
sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null
# Clear stage repo running env
greenprint "🧹 Clearing stage repo running env"
# Remove any leftover containers, if they exist
sudo podman ps -a -q --format "{{.ID}}" | sudo xargs --no-run-if-empty podman rm -f
# Remove all images
sudo podman rmi -f -a
# Deal with stage repo container
greenprint "🗜 Extracting image"
IMAGE_FILENAME="${COMPOSE_ID}-${CONTAINER_FILENAME}"
sudo podman pull "oci-archive:${IMAGE_FILENAME}"
sudo podman images
# Clear image file
sudo rm -f "$IMAGE_FILENAME"
# Run edge stage repo
greenprint "🛰 Running edge stage repo"
# Get image id to run image
EDGE_IMAGE_ID=$(sudo podman images --filter "dangling=true" --format "{{.ID}}")
sudo podman run -d --name rhel-edge --network edge --ip "$STAGE_REPO_ADDRESS" "$EDGE_IMAGE_ID"
# Wait for container to be running
until [ "$(sudo podman inspect -f '{{.State.Running}}' rhel-edge)" == "true" ]; do
sleep 1;
done;
# Pull upgrade to prod mirror
greenprint "⛓ Pull upgrade to prod mirror"
sudo ostree --repo="$PROD_REPO" pull --mirror edge-stage "$OSTREE_REF"
sudo ostree --repo="$PROD_REPO" static-delta generate "$OSTREE_REF"
sudo ostree --repo="$PROD_REPO" summary -u
# Get ostree commit value.
greenprint "🕹 Get ostree upgrade commit value"
UPGRADE_HASH=$(curl "${PROD_REPO_URL}/refs/heads/${OSTREE_REF}")
# Clean compose and blueprints.
greenprint "🧽 Clean up upgrade blueprint and compose"
sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
sudo composer-cli blueprints delete upgrade > /dev/null
# Upgrade image/commit.
greenprint "🗳 Upgrade ostree image/commit"
sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" admin@${UEFI_GUEST_ADDRESS} 'sudo rpm-ostree upgrade'
sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" admin@${UEFI_GUEST_ADDRESS} 'nohup sudo systemctl reboot &>/dev/null & exit'
# Sleep 10 seconds to give the VM time to restart
sleep 10
# Check for ssh ready to go.
greenprint "🛃 Checking for SSH is ready to go"
# shellcheck disable=SC2034  # Unused variables left for readability
for LOOP_COUNTER in $(seq 0 30); do
    RESULTS="$(wait_for_ssh_up $UEFI_GUEST_ADDRESS)"
    if [[ $RESULTS == 1 ]]; then
        echo "SSH is ready now! 🥳"
        break
    fi
    sleep 10
done
# Check ostree upgrade result
check_result
# Add instance IP address into /etc/ansible/hosts
sudo tee "${TEMPDIR}"/inventory > /dev/null << EOF
[ostree_guest]
${UEFI_GUEST_ADDRESS}
[ostree_guest:vars]
ansible_python_interpreter=/usr/bin/python3
ansible_user=admin
ansible_private_key_file=${SSH_KEY}
ansible_ssh_common_args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
EOF
# Test IoT/Edge OS
sudo ANSIBLE_STDOUT_CALLBACK=debug ansible-playbook -v -i "${TEMPDIR}"/inventory -e image_type=rhel-edge -e ostree_commit="${UPGRADE_HASH}" /usr/share/tests/osbuild-composer/ansible/check_ostree_nort.yaml || RESULTS=0
check_result
# Final success clean up
clean_up
exit 0


@@ -0,0 +1,530 @@
---
- hosts: ostree_guest
  become: no
  vars:
    workspace: "{{ lookup('env', 'WORKSPACE') }}"
    total_counter: "0"
    failed_counter: "0"

  tasks:
    # current target host's IP address
    - debug: var=ansible_all_ipv4_addresses

    # default kernel or rt kernel
    - name: check installed kernel
      command: uname -r
      register: result_kernel

    # first installed or upgraded
    # a first install has one commit, but an upgrade has two
    - name: determine which stage the check is running on
      shell: rpm-ostree status --json | jq '.deployments | length'
      register: result_stage

    - set_fact:
        checking_stage: "{{ result_stage.stdout }}"
    # case: check ostree commit correctly updated
    - name: get deployed ostree commit
      command: rpm-ostree status --json
      register: result_commit

    - name: make a json result
      set_fact:
        deploy_commit: "{{ result_commit.stdout | from_json | json_query('deployments[0].checksum') }}"

    - name: check commit deployed and built
      block:
        - assert:
            that:
              - deploy_commit == ostree_commit
            fail_msg: "deployed ostree commit is not the commit built by osbuild-composer"
            success_msg: "successful build and deployment"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"
    # case from bug: https://bugzilla.redhat.com/show_bug.cgi?id=1848453
    - name: check ostree-remount status
      command: systemctl is-active ostree-remount.service
      register: result_remount

    - name: ostree-remount should be started
      block:
        - assert:
            that:
              - result_remount.stdout == "active"
            fail_msg: "ostree-remount is not started by default"
            success_msg: "starting ostree-remount successful"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"
    # bios or uefi
    - name: bios or uefi
      stat:
        path: /sys/firmware/efi
      register: result_uefi

    - set_fact:
        device_name: /dev/vda2

    - set_fact:
        device_name: /dev/vda3
      when: result_uefi.stat.exists

    # for edge-simplified-installer and edge-raw-image, /sysroot is mounted
    # on /dev/vda4, so set device_name to /dev/vda4 in that case
    - name: check if it is simplified-installer or raw-image
      command: df -h
      register: result_df

    - set_fact:
        device_name: /dev/vda4
      when: "'/dev/vda4' in result_df.stdout"
    # case: check /sysroot mount point
    - name: check /sysroot mount point
      command: findmnt -r -o SOURCE -n /sysroot
      register: result_sysroot_mount_point

    - name: "/sysroot should be mounted on {{ device_name }}"
      block:
        - assert:
            that:
              - result_sysroot_mount_point.stdout == "{{ device_name }}"
            fail_msg: "/sysroot does not mount on {{ device_name }}"
            success_msg: "/sysroot mounts on {{ device_name }}"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"

    # case: check /sysroot mount status
    - name: check /sysroot mount status
      shell: findmnt -r -o OPTIONS -n /sysroot | awk -F "," '{print $1}'
      register: result_sysroot_mount_status

    - name: /sysroot should be mounted with rw permission
      block:
        - assert:
            that:
              - result_sysroot_mount_status.stdout == "rw"
            fail_msg: "/sysroot is not mounted with rw permission"
            success_msg: "/sysroot is mounted with rw permission"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"
    # case: check /var mount point
    - name: check /var mount point
      command: findmnt -r -o SOURCE -n /var
      register: result_var_mount_point

    - name: "/var should be mounted on {{ device_name }}[/ostree/deploy/{{ image_type }}/var]"
      block:
        - assert:
            that:
              - result_var_mount_point.stdout == "{{ device_name }}[/ostree/deploy/{{ image_type }}/var]"
            fail_msg: "/var does not mount on {{ device_name }}[/ostree/deploy/{{ image_type }}/var]"
            success_msg: "/var mounts on {{ device_name }}[/ostree/deploy/{{ image_type }}/var]"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"

    # case: check /var mount status
    - name: check /var mount status
      shell: findmnt -r -o OPTIONS -n /var | awk -F "," '{print $1}'
      register: result_var_mount_status

    - name: /var should be mounted with rw permission
      block:
        - assert:
            that:
              - result_var_mount_status.stdout == "rw"
            fail_msg: "/var is not mounted with rw permission"
            success_msg: "/var is mounted with rw permission"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"
    # case: check /usr mount point
    - name: check /usr mount point
      command: findmnt -r -o SOURCE -n /usr
      register: result_usr_mount_point

    - name: "/usr should be mounted on {{ device_name }}[/ostree/deploy/{{ image_type }}/deploy/{{ deploy_commit }}.0/usr]"
      block:
        - assert:
            that:
              - result_usr_mount_point.stdout == "{{ device_name }}[/ostree/deploy/{{ image_type }}/deploy/{{ deploy_commit }}.0/usr]"
            fail_msg: "/usr does not mount on {{ device_name }}[/ostree/deploy/{{ image_type }}/deploy/{{ deploy_commit }}.0/usr]"
            success_msg: "/usr mounts on {{ device_name }}[/ostree/deploy/{{ image_type }}/deploy/{{ deploy_commit }}.0/usr]"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"

    # case: check /usr mount status
    - name: check /usr mount status
      shell: findmnt -r -o OPTIONS -n /usr | awk -F "," '{print $1}'
      register: result_usr_mount_status

    - name: /usr should be mounted with ro permission
      block:
        - assert:
            that:
              - result_usr_mount_status.stdout == "ro"
            fail_msg: "/usr is not mounted with ro permission"
            success_msg: "/usr is mounted with ro permission"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"
    - name: get the first 11 chars of the commit hash
      set_fact:
        commit_log: "{{ deploy_commit[:11] }}"
    # case: check wget installed after upgrade
    - name: check installed packages
      shell: rpm -qa | sort
      register: result_packages

    - name: check wget installed
      block:
        - assert:
            that:
              - "'wget' in result_packages.stdout"
            fail_msg: "wget not installed; the ostree upgrade may have failed"
            success_msg: "wget installed in ostree upgrade"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"
      when: checking_stage == "2"

    - name: save installed packages to log file
      copy:
        content: "{{ result_packages.stdout }}"
        dest: "{{ workspace }}/{{ commit_log }}.installed.ostree.log"
      delegate_to: localhost
    # case: check ostree-remount mount log
    - name: check ostree-remount mount log
      command: journalctl -u ostree-remount
      register: result_remount_journalctl

    - name: ostree-remount should remount /var and /sysroot
      block:
        - assert:
            that:
              - "'/sysroot' in result_remount_journalctl.stdout"
              - "'/var' in result_remount_journalctl.stdout"
            fail_msg: "/sysroot or /var is not remounted by ostree-remount"
            success_msg: "/sysroot and /var are remounted"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"
    # case: check dmesg error and failed log
    - name: check dmesg output
      command: dmesg
      register: result_dmesg

    - name: save dmesg output to log file
      copy:
        content: "{{ result_dmesg.stdout }}"
        dest: "{{ workspace }}/{{ commit_log }}.dmesg.ostree.log"
      delegate_to: localhost

    - name: check dmesg error and fail log
      shell: dmesg --notime | grep -i "error\|fail" || true
      register: result_dmesg_error
    # case: check running container with podman
    - name: run ubi8 image
      command: podman run registry.access.redhat.com/ubi8/ubi-minimal:latest cat /etc/redhat-release
      register: podman_result
      become: yes
      ignore_errors: yes  # due to https://bugzilla.redhat.com/show_bug.cgi?id=1903983

    - name: run container test
      block:
        - assert:
            that:
              - podman_result is succeeded
              - "'Red Hat Enterprise Linux release' in podman_result.stdout"
            fail_msg: "failed to run container with podman"
            success_msg: "running container with podman succeeded"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"
    # case: check dnf package; it should not be installed
    # https://github.com/osbuild/osbuild-composer/blob/master/internal/distro/rhel8/distro.go#L642
    - name: dnf should not be installed
      block:
        - name: dnf should not be installed
          shell: rpm -qa | grep dnf || echo -n PASS
          register: result_dnf

        - assert:
            that:
              - result_dnf.stdout == "PASS"
            fail_msg: "dnf is installed"
            success_msg: "No dnf installed"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"
      when: ansible_facts['distribution'] != 'RedHat' and ansible_facts['distribution_version'] != '8.5'
    # case: check installed greenboot packages
    # https://github.com/osbuild/osbuild-composer/blob/master/internal/distro/rhel8/distro.go#L634
    - name: greenboot and its related packages should be installed
      block:
        - name: greenboot and its related packages should be installed
          shell: rpm -qa | grep greenboot
          register: result_greenboot_packages

        - assert:
            that:
              - "'greenboot-0' in result_greenboot_packages.stdout"
              - "'greenboot-grub2' in result_greenboot_packages.stdout"
              - "'greenboot-rpm-ostree-grub2' in result_greenboot_packages.stdout"
              - "'greenboot-reboot' in result_greenboot_packages.stdout"
              - "'greenboot-status' in result_greenboot_packages.stdout"
            fail_msg: "Some of greenboot and its related packages are not installed"
            success_msg: "All greenboot and its related packages are installed"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"
    # case: check greenboot* services
    - name: a list of greenboot* services should be enabled
      block:
        - name: a list of greenboot* services should be enabled
          command: systemctl is-enabled greenboot-grub2-set-counter greenboot-grub2-set-success greenboot-healthcheck greenboot-rpm-ostree-grub2-check-fallback greenboot-status greenboot-task-runner redboot-auto-reboot redboot-task-runner
          register: result_greenboot_service

        - assert:
            that:
              - result_greenboot_service.stdout == 'enabled\nenabled\nenabled\nenabled\nenabled\nenabled\nenabled\nenabled'
            fail_msg: "Some of the greenboot* services are not enabled"
            success_msg: "All greenboot* services are enabled"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"
    # case: check greenboot* services log
    - name: all greenboot* services should run without error
      block:
        - name: all greenboot* services should run without error
          command: journalctl -b -0 -u boot-complete.target -u greenboot -u greenboot-healthcheck -u greenboot-rpm-ostree-grub2-check-fallback -u greenboot-grub2-set-counter -u greenboot-grub2-set-success -u greenboot-status -u redboot -u redboot-auto-reboot -u redboot.target
          register: result_greenboot_log

        - assert:
            that:
              - "'Script \\'00_required_scripts_start.sh\\' SUCCESS' in result_greenboot_log.stdout"
              - "'Script \\'00_wanted_scripts_start.sh\\' SUCCESS' in result_greenboot_log.stdout"
              - "'greenboot Health Checks Runner' in result_greenboot_log.stdout"
              - "'Reached target Boot Completion Check' in result_greenboot_log.stdout"
              - "'Mark boot as successful in grubenv' in result_greenboot_log.stdout"
              - "'Boot Status is GREEN - Health Check SUCCESS' in result_greenboot_log.stdout"
              - "'greenboot MotD Generator' in result_greenboot_log.stdout"
            fail_msg: "Some errors happened during service boot"
            success_msg: "All greenboot services booted successfully"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"
    # case: check grubenv variables
    - name: grubenv variables should contain boot_success=1
      block:
        - name: grubenv variables should contain boot_success=1
          command: grub2-editenv list
          register: result_grubenv
          become: yes

        - assert:
            that:
              - "'boot_success=1' in result_grubenv.stdout"
            fail_msg: "boot_success=1 not found"
            success_msg: "Found boot_success=1"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"
    # case: check rollback function if boot error found
    - name: install sanely failing health check unit to test red boot status behavior
      block:
        - name: install sanely failing health check unit to test red boot status behavior
          command: rpm-ostree install http://file-server-virt-qe-3rd.apps.ocp4.prod.psi.redhat.com/greenboot-failing-unit-1.0-1.el8.noarch.rpm
          become: yes

        - name: reboot to deploy new ostree commit
          reboot:
          become: yes

        - name: wait until instance is reachable
          wait_for:
            host: "{{ ansible_all_ipv4_addresses[0] }}"
            port: 22
            search_regex: OpenSSH
            delay: 10
          register: result_rollback

        - assert:
            that:
              - result_rollback is succeeded
            fail_msg: "Rollback failed"
            success_msg: "Rollback succeeded"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"
    # case: check ostree commit after rollback
    - name: check ostree commit after rollback
      block:
        - name: check ostree commit after rollback
          command: rpm-ostree status --json
          register: result_commit

        - name: make a json result
          set_fact:
            deploy_commit: "{{ result_commit.stdout | from_json | json_query('deployments[0].checksum') }}"

        - assert:
            that:
              - deploy_commit == ostree_commit
            fail_msg: "Did not roll back to the last commit"
            success_msg: "Rollback succeeded"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"
      when: result_rollback is succeeded
    # case: check greenboot* services log again
    - name: fallback log should be found here
      block:
        - name: fallback log should be found here
          command: journalctl -b -0 -u boot-complete.target -u greenboot -u greenboot-healthcheck -u greenboot-rpm-ostree-grub2-check-fallback -u greenboot-grub2-set-counter -u greenboot-grub2-set-success -u greenboot-status -u redboot -u redboot-auto-reboot -u redboot.target
          register: result_greenboot_log

        - assert:
            that:
              - "'FALLBACK BOOT DETECTED! Default rpm-ostree deployment has been rolled back' in result_greenboot_log.stdout"
              - "'Script \\'00_required_scripts_start.sh\\' SUCCESS' in result_greenboot_log.stdout"
              - "'Script \\'00_wanted_scripts_start.sh\\' SUCCESS' in result_greenboot_log.stdout"
              - "'greenboot Health Checks Runner' in result_greenboot_log.stdout"
              - "'Reached target Boot Completion Check' in result_greenboot_log.stdout"
              - "'Mark boot as successful in grubenv' in result_greenboot_log.stdout"
              - "'Boot Status is GREEN - Health Check SUCCESS' in result_greenboot_log.stdout"
              - "'greenboot MotD Generator' in result_greenboot_log.stdout"
            fail_msg: "Fallback log not found"
            success_msg: "Found fallback log"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"
      when: result_rollback is succeeded
    # case: check grubenv variables again
    - name: grubenv variables should contain boot_success=1
      block:
        - name: grubenv variables should contain boot_success=1
          command: grub2-editenv list
          register: result_grubenv
          become: yes

        - assert:
            that:
              - "'boot_success=1' in result_grubenv.stdout"
            fail_msg: "boot_success=1 not found"
            success_msg: "Found boot_success=1"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"
      when: result_rollback is succeeded
    - assert:
        that:
          - failed_counter == "0"
        fail_msg: "Ran {{ total_counter }} tests, but {{ failed_counter }} of them failed"
        success_msg: "All {{ total_counter }} tests passed"