tests: move the libvirt test logic out of Jenkinsfile

All tests in /usr/libexec/tests/osbuild-composer should be able to run without
any arguments. This was not the case for libvirt.sh - it required two arguments
set by logic in the Jenkinsfile.

This commit moves test/cases/libvirt.sh to tools/libvirt_test.sh and extracts
the logic that controls the test case from the Jenkinsfile into the new
test/cases/libvirt.sh.

Signed-off-by: Ondřej Budai <ondrej@budai.cz>
Ondřej Budai 2020-12-01 16:20:13 +01:00 committed by Ondřej Budai
parent 8963613e91
commit cbc9082fac
4 changed files with 369 additions and 373 deletions
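For reference, a minimal sketch of how the relocated scripts are expected to be invoked after this change, based on the paths installed by the spec file and called from the Jenkinsfile below (actual CI wiring may differ):

# The installed test case now takes no arguments and decides internally which
# image and boot types to exercise.
/usr/libexec/tests/osbuild-composer/libvirt.sh

# The bulk of the logic moves to this helper, which still accepts an image type
# and an optional boot type.
/usr/libexec/osbuild-composer-test/libvirt_test.sh qcow2
/usr/libexec/osbuild-composer-test/libvirt_test.sh qcow2 uefi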

@@ -174,6 +174,7 @@ install -m 0755 -vp tools/provision.sh %{buildroot}%{_l
install -m 0755 -vp tools/image-info %{buildroot}%{_libexecdir}/osbuild-composer-test/
install -m 0755 -vp tools/run-koji-container.sh %{buildroot}%{_libexecdir}/osbuild-composer-test/
install -m 0755 -vp tools/koji-compose.py %{buildroot}%{_libexecdir}/osbuild-composer-test/
install -m 0755 -vp tools/libvirt_test.sh %{buildroot}%{_libexecdir}/osbuild-composer-test/
install -m 0755 -vd %{buildroot}%{_libexecdir}/tests/osbuild-composer
install -m 0755 -vp test/cases/* %{buildroot}%{_libexecdir}/tests/osbuild-composer/

64
schutzbot/Jenkinsfile vendored
@@ -118,7 +118,7 @@ pipeline {
agent { label "f32cloudbase && x86_64 && aws" }
environment { TEST_TYPE = "base" }
steps {
run_tests('base', 'bios')
run_tests('base')
}
post {
always {
@@ -138,7 +138,7 @@ pipeline {
DISTRO_CODE = "fedora32"
}
steps {
run_tests('image', 'bios')
run_tests('image')
}
post {
always {
@@ -159,7 +159,7 @@ pipeline {
AWS_API_TEST_SHARE_ACCOUNT = credentials('aws-credentials-share-account')
}
steps {
run_tests('integration', 'bios')
run_tests('integration')
}
post {
always {
@@ -170,7 +170,7 @@ pipeline {
stage('F32 OSTree') {
agent { label "f32cloudbase && psi && x86_64" }
steps {
run_tests('ostree', 'bios')
run_tests('ostree')
}
post {
always {
@@ -193,7 +193,7 @@ pipeline {
agent { label "f33cloudbase && x86_64 && aws" }
environment { TEST_TYPE = "base" }
steps {
run_tests('base', 'bios')
run_tests('base')
}
post {
always {
@@ -213,7 +213,7 @@ pipeline {
DISTRO_CODE = "fedora33"
}
steps {
run_tests('image', 'bios')
run_tests('image')
}
post {
always {
@@ -234,7 +234,7 @@ pipeline {
AWS_API_TEST_SHARE_ACCOUNT = credentials('aws-credentials-share-account')
}
steps {
run_tests('integration', 'bios')
run_tests('integration')
}
post {
always {
@@ -245,7 +245,7 @@ pipeline {
stage('F33 OSTree') {
agent { label "f33cloudbase && psi && x86_64" }
steps {
run_tests('ostree', 'bios')
run_tests('ostree')
}
post {
always {
@@ -257,7 +257,7 @@ pipeline {
agent { label "f33cloudbase && aarch64 && aws" }
environment { TEST_TYPE = "base" }
steps {
run_tests('base', 'bios')
run_tests('base')
}
post {
always {
@@ -276,7 +276,7 @@ pipeline {
DISTRO_CODE = "fedora33"
}
steps {
run_tests('image', 'bios')
run_tests('image')
}
post {
always {
@@ -295,7 +295,7 @@ pipeline {
RHN_REGISTRATION_SCRIPT = credentials('rhn-register-script-production')
}
steps {
run_tests('base', 'bios')
run_tests('base')
}
post {
always {
@@ -316,7 +316,7 @@ pipeline {
DISTRO_CODE = "rhel8"
}
steps {
run_tests('image', 'bios')
run_tests('image')
}
post {
always {
@@ -338,7 +338,7 @@ pipeline {
AWS_API_TEST_SHARE_ACCOUNT = credentials('aws-credentials-share-account')
}
steps {
run_tests('integration', 'bios')
run_tests('integration')
}
post {
always {
@@ -349,7 +349,7 @@ pipeline {
stage('EL8 OSTree') {
agent { label "rhel8cloudbase && psi && x86_64" }
steps {
run_tests('ostree', 'bios')
run_tests('ostree')
}
post {
always {
@@ -374,7 +374,7 @@ pipeline {
TEST_TYPE = "base"
}
steps {
run_tests('base', 'hybrid')
run_tests('base')
}
post {
always {
@@ -394,7 +394,7 @@ pipeline {
DISTRO_CODE = "rhel84"
}
steps {
run_tests('image', 'hybrid')
run_tests('image')
}
post {
always {
@@ -415,7 +415,7 @@ pipeline {
AWS_IMAGE_TEST_CREDS = credentials('aws-credentials-osbuild-image-test')
}
steps {
run_tests('integration', 'hybrid')
run_tests('integration')
}
post {
always {
@@ -426,7 +426,7 @@ pipeline {
stage('EL8.4 OSTree') {
agent { label "rhel84cloudbase && psi && x86_64" }
steps {
run_tests('ostree', 'hybrid')
run_tests('ostree')
}
post {
always {
@@ -463,7 +463,7 @@ pipeline {
// Set up a function to hold the steps needed to run the tests so we don't
// need to copy/paste the same lines over and over above.
void run_tests(test_type, boot_type) {
void run_tests(test_type) {
// Get CI machine details.
sh (
@@ -506,30 +506,10 @@ void run_tests(test_type, boot_type) {
script: "/usr/libexec/tests/osbuild-composer/koji.sh"
)
// Run the qcow2 BIOS boot test.
// Run the libvirt test.
sh (
label: "Integration test: QCOW2, BIOS boot",
script: "/usr/libexec/tests/osbuild-composer/libvirt.sh qcow2"
)
if (boot_type == 'hybrid') {
// Run the qcow2 UEFI boot test.
sh (
label: "Integration test: QCOW2, UEFI boot",
script: "/usr/libexec/tests/osbuild-composer/libvirt.sh qcow2 uefi"
)
}
// Run the openstack test.
sh (
label: "Integration test: OpenStack",
script: "/usr/libexec/tests/osbuild-composer/libvirt.sh openstack"
)
// Run the VHD/Azure test.
sh (
label: "Integration test: VHD",
script: "/usr/libexec/tests/osbuild-composer/libvirt.sh vhd"
label: "Integration test: libvirt",
script: "/usr/libexec/tests/osbuild-composer/libvirt.sh"
)
// Run the AWS test.

339
test/cases/libvirt.sh Executable file → Normal file
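For readability, the new 18-line test/cases/libvirt.sh added in the hunk below amounts to roughly the following wrapper (a reconstruction from the added lines; exact spacing and blank lines may differ):

#!/bin/bash
set -euo pipefail
# Get OS data.
source /etc/os-release
# Provision the software under test.
/usr/libexec/osbuild-composer-test/provision.sh
# Test the images
/usr/libexec/osbuild-composer-test/libvirt_test.sh qcow2
/usr/libexec/osbuild-composer-test/libvirt_test.sh openstack
/usr/libexec/osbuild-composer-test/libvirt_test.sh vhd
# RHEL 8.4 images also supports uefi, check that
if [[ "${ID}-${VERSION_ID}" == "rhel-8.4" ]]; then
    /usr/libexec/osbuild-composer-test/libvirt_test.sh qcow2 uefi
fi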
@@ -1,341 +1,18 @@
#!/bin/bash
set -euo pipefail
OSBUILD_COMPOSER_TEST_DATA=/usr/share/tests/osbuild-composer/
# Get OS data.
source /etc/os-release
ARCH=$(uname -m)
# Provision the software under tet.
# Provision the software under test.
/usr/libexec/osbuild-composer-test/provision.sh
# Take the image type passed to the script or use qcow2 by default if nothing
# was passed.
IMAGE_TYPE=${1:-qcow2}
# Take the boot type passed to the script or use BIOS by default if nothing
# was passed.
BOOT_TYPE=${2:-bios}
# Test the images
/usr/libexec/osbuild-composer-test/libvirt_test.sh qcow2
/usr/libexec/osbuild-composer-test/libvirt_test.sh openstack
/usr/libexec/osbuild-composer-test/libvirt_test.sh vhd
# Select the file extension based on the image that we are building.
IMAGE_EXTENSION=$IMAGE_TYPE
if [[ $IMAGE_TYPE == 'openstack' ]]; then
IMAGE_EXTENSION=qcow2
# RHEL 8.4 images also supports uefi, check that
if [[ "${ID}-${VERSION_ID}" == "rhel-8.4" ]]; then
/usr/libexec/osbuild-composer-test/libvirt_test.sh qcow2 uefi
fi
# RHEL 8 cannot boot a VMDK using libvirt. See BZ 999789.
if [[ $IMAGE_TYPE == vmdk ]]; then
echo "🤷 libvirt cannot boot stream-optimized VMDK."
exit 0
fi
# Apply lorax patch to work around pytoml issues in RHEL 8.x.
# See BZ 1843704 or https://github.com/weldr/lorax/pull/1030 for more details.
if [[ $ID == rhel ]]; then
sudo sed -r -i 's#toml.load\(args\[3\]\)#toml.load(open(args[3]))#' \
/usr/lib/python3.6/site-packages/composer/cli/compose.py
sudo rm -f /usr/lib/python3.6/site-packages/composer/cli/compose.pyc
fi
# Colorful output.
function greenprint {
echo -e "\033[1;32m${1}\033[0m"
}
# Start libvirtd and test it.
greenprint "🚀 Starting libvirt daemon"
sudo systemctl start libvirtd
sudo virsh list --all > /dev/null
# Set a customized dnsmasq configuration for libvirt so we always get the
# same address on bootup.
sudo tee /tmp/integration.xml > /dev/null << EOF
<network>
<name>integration</name>
<uuid>1c8fe98c-b53a-4ca4-bbdb-deb0f26b3579</uuid>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='integration' stp='on' delay='0'/>
<mac address='52:54:00:36:46:ef'/>
<ip address='192.168.100.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.100.2' end='192.168.100.254'/>
<host mac='34:49:22:B0:83:30' name='vm' ip='192.168.100.50'/>
</dhcp>
</ip>
</network>
EOF
if ! sudo virsh net-info integration > /dev/null 2>&1; then
sudo virsh net-define /tmp/integration.xml
sudo virsh net-start integration
fi
# Allow anyone in the wheel group to talk to libvirt.
greenprint "🚪 Allowing users in wheel group to talk to libvirt"
WHEEL_GROUP=wheel
if [[ $ID == rhel ]]; then
WHEEL_GROUP=adm
fi
sudo tee /etc/polkit-1/rules.d/50-libvirt.rules > /dev/null << EOF
polkit.addRule(function(action, subject) {
if (action.id == "org.libvirt.unix.manage" &&
subject.isInGroup("${WHEEL_GROUP}")) {
return polkit.Result.YES;
}
});
EOF
# Set up variables.
TEST_UUID=$(uuidgen)
IMAGE_KEY=osbuild-composer-qemu-test-${TEST_UUID}
INSTANCE_ADDRESS=192.168.100.50
# Set up temporary files.
TEMPDIR=$(mktemp -d)
BLUEPRINT_FILE=${TEMPDIR}/blueprint.toml
COMPOSE_START=${TEMPDIR}/compose-start-${IMAGE_KEY}.json
COMPOSE_INFO=${TEMPDIR}/compose-info-${IMAGE_KEY}.json
# Check for the smoke test file on the AWS instance that we start.
smoke_test_check () {
# Ensure the ssh key has restricted permissions.
SSH_KEY=${OSBUILD_COMPOSER_TEST_DATA}keyring/id_rsa
SSH_OPTIONS=(-o StrictHostKeyChecking=no -o ConnectTimeout=5)
SMOKE_TEST=$(sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" redhat@"${1}" 'cat /etc/smoke-test.txt')
if [[ $SMOKE_TEST == smoke-test ]]; then
echo 1
else
echo 0
fi
}
# Get the compose log.
get_compose_log () {
COMPOSE_ID=$1
LOG_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-${IMAGE_TYPE}.log
# Download the logs.
sudo composer-cli compose log "$COMPOSE_ID" | tee "$LOG_FILE" > /dev/null
}
# Get the compose metadata.
get_compose_metadata () {
COMPOSE_ID=$1
METADATA_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-${IMAGE_TYPE}.json
# Download the metadata.
sudo composer-cli compose metadata "$COMPOSE_ID" > /dev/null
# Find the tarball and extract it.
TARBALL=$(basename "$(find . -maxdepth 1 -type f -name "*-metadata.tar")")
tar -xf "$TARBALL"
rm -f "$TARBALL"
# Move the JSON file into place.
cat "${COMPOSE_ID}".json | jq -M '.' | tee "$METADATA_FILE" > /dev/null
}
# Write a basic blueprint for our image.
# NOTE(mhayden): The service customization will always be required for QCOW2
# but it is needed for OpenStack due to issue #698 in osbuild-composer. 😭
# NOTE(mhayden): The cloud-init package isn't included in VHD/Azure images
# by default and it must be added here.
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "bash"
description = "A base system with bash"
version = "0.0.1"
[[packages]]
name = "bash"
[[packages]]
name = "cloud-init"
[customizations.services]
enabled = ["sshd", "cloud-init", "cloud-init-local", "cloud-config", "cloud-final"]
[customizations.kernel]
append = "LANG=en_US.UTF-8 net.ifnames=0 biosdevname=0"
EOF
# Prepare the blueprint for the compose.
greenprint "📋 Preparing blueprint"
sudo composer-cli blueprints push "$BLUEPRINT_FILE"
sudo composer-cli blueprints depsolve bash
# Get worker unit file so we can watch the journal.
WORKER_UNIT=$(sudo systemctl list-units | grep -o -E "osbuild.*worker.*\.service")
sudo journalctl -af -n 1 -u "${WORKER_UNIT}" &
WORKER_JOURNAL_PID=$!
# Start the compose
greenprint "🚀 Starting compose"
sudo composer-cli --json compose start bash "$IMAGE_TYPE" | tee "$COMPOSE_START"
COMPOSE_ID=$(jq -r '.build_id' "$COMPOSE_START")
# Wait for the compose to finish.
greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}"
while true; do
sudo composer-cli --json compose info "${COMPOSE_ID}" | tee "$COMPOSE_INFO" > /dev/null
COMPOSE_STATUS=$(jq -r '.queue_status' "$COMPOSE_INFO")
# Is the compose finished?
if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
break
fi
# Wait 30 seconds and try again.
sleep 5
done
# Capture the compose logs from osbuild.
greenprint "💬 Getting compose log and metadata"
get_compose_log "$COMPOSE_ID"
get_compose_metadata "$COMPOSE_ID"
# Did the compose finish with success?
if [[ $COMPOSE_STATUS != FINISHED ]]; then
echo "Something went wrong with the compose. 😢"
exit 1
fi
# Stop watching the worker journal.
sudo kill ${WORKER_JOURNAL_PID}
# Download the image.
greenprint "📥 Downloading the image"
# Current $PWD is inside /tmp, there may not be enough space for an image.
# Let's use a bigger temporary directory for this operation.
BIG_TEMP_DIR=/var/lib/osbuild-composer-tests
sudo rm -rf "${BIG_TEMP_DIR}" || true
sudo mkdir "${BIG_TEMP_DIR}"
pushd "${BIG_TEMP_DIR}"
sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null
IMAGE_FILENAME=$(basename "$(find . -maxdepth 1 -type f -name "*.${IMAGE_EXTENSION}")")
LIBVIRT_IMAGE_PATH=/var/lib/libvirt/images/${IMAGE_KEY}.${IMAGE_EXTENSION}
sudo mv "$IMAGE_FILENAME" "$LIBVIRT_IMAGE_PATH"
popd
# Prepare cloud-init data.
CLOUD_INIT_DIR=$(mktemp -d)
cp "${OSBUILD_COMPOSER_TEST_DATA}"/cloud-init/{meta,user}-data "${CLOUD_INIT_DIR}"/
cp "${OSBUILD_COMPOSER_TEST_DATA}"/cloud-init/network-config "${CLOUD_INIT_DIR}"/
# Set up a cloud-init ISO.
greenprint "💿 Creating a cloud-init ISO"
CLOUD_INIT_PATH=/var/lib/libvirt/images/seed.iso
rm -f $CLOUD_INIT_PATH
pushd "$CLOUD_INIT_DIR"
sudo genisoimage -o $CLOUD_INIT_PATH -V cidata \
-r -J user-data meta-data network-config > /dev/null 2>&1
popd
# Ensure SELinux is happy with our new images.
greenprint "👿 Running restorecon on image directory"
sudo restorecon -Rv /var/lib/libvirt/images/
# Run virt-install to import the QCOW and boot it.
greenprint "🚀 Booting the image with libvirt"
if [[ $ARCH == 'ppc64le' ]]; then
# ppc64le has some machine quirks that must be worked around.
sudo virt-install \
--name "$IMAGE_KEY" \
--memory 2048 \
--vcpus 2 \
--disk path="${LIBVIRT_IMAGE_PATH}" \
--disk path=${CLOUD_INIT_PATH},device=cdrom \
--import \
--os-variant rhel8-unknown \
--noautoconsole \
--network network=integration,mac=34:49:22:B0:83:30 \
--qemu-commandline="-machine pseries,cap-cfpc=broken,cap-sbbc=broken,cap-ibs=broken,cap-ccf-assist=off,cap-large-decr=off"
elif [[ $ARCH == 's390x' ]]; then
# Our s390x machines are highly constrained on resources.
sudo virt-install \
--name "$IMAGE_KEY" \
--memory 512 \
--vcpus 1 \
--disk path="${LIBVIRT_IMAGE_PATH}" \
--disk path=${CLOUD_INIT_PATH},device=cdrom \
--import \
--os-variant rhel8-unknown \
--noautoconsole \
--network network=integration,mac=34:49:22:B0:83:30
else
# Both aarch64 and x86_64 support hybrid boot
if [[ $BOOT_TYPE == 'uefi' ]]; then
sudo virt-install \
--name "$IMAGE_KEY" \
--memory 1024 \
--vcpus 2 \
--disk path="${LIBVIRT_IMAGE_PATH}" \
--disk path=${CLOUD_INIT_PATH},device=cdrom \
--import \
--os-variant rhel8-unknown \
--noautoconsole \
--boot uefi,nvram_template=/usr/share/edk2/ovmf/OVMF_VARS.fd \
--network network=integration,mac=34:49:22:B0:83:30
else
sudo virt-install \
--name "$IMAGE_KEY" \
--memory 1024 \
--vcpus 2 \
--disk path="${LIBVIRT_IMAGE_PATH}" \
--disk path=${CLOUD_INIT_PATH},device=cdrom \
--import \
--os-variant rhel8-unknown \
--noautoconsole \
--network network=integration,mac=34:49:22:B0:83:30
fi
fi
# Set a number of maximum loops to check for our smoke test file via ssh.
case $ARCH in
s390x)
# s390x needs more time to boot its VM.
MAX_LOOPS=60
;;
*)
MAX_LOOPS=30
;;
esac
# Check for our smoke test file.
greenprint "🛃 Checking for smoke test file in VM"
# shellcheck disable=SC2034 # Unused variables left for readability
for LOOP_COUNTER in $(seq 0 ${MAX_LOOPS}); do
RESULTS="$(smoke_test_check $INSTANCE_ADDRESS)"
if [[ $RESULTS == 1 ]]; then
echo "Smoke test passed! 🥳"
break
fi
sleep 10
done
# Clean up our mess.
greenprint "🧼 Cleaning up"
sudo virsh destroy "${IMAGE_KEY}"
if [[ $ARCH == aarch64 || $BOOT_TYPE == 'uefi' ]]; then
sudo virsh undefine "${IMAGE_KEY}" --nvram
else
sudo virsh undefine "${IMAGE_KEY}"
fi
sudo rm -f "$LIBVIRT_IMAGE_PATH" $CLOUD_INIT_PATH
# Also delete the compose so we don't run out of disk space
sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
# Use the return code of the smoke test to determine if we passed or failed.
if [[ $RESULTS == 1 ]]; then
greenprint "💚 Success"
else
greenprint "❌ Failed"
exit 1
fi
exit 0

338
tools/libvirt_test.sh Executable file
@@ -0,0 +1,338 @@
#!/bin/bash
set -euo pipefail
OSBUILD_COMPOSER_TEST_DATA=/usr/share/tests/osbuild-composer/
# Get OS data.
source /etc/os-release
ARCH=$(uname -m)
# Take the image type passed to the script or use qcow2 by default if nothing
# was passed.
IMAGE_TYPE=${1:-qcow2}
# Take the boot type passed to the script or use BIOS by default if nothing
# was passed.
BOOT_TYPE=${2:-bios}
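# Example invocations (both arguments are optional; these mirror how the new
# test/cases/libvirt.sh calls this script):
#   libvirt_test.sh                -> qcow2 image, BIOS boot
#   libvirt_test.sh openstack      -> openstack image, BIOS boot
#   libvirt_test.sh qcow2 uefi     -> qcow2 image, UEFI boot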
# Select the file extension based on the image that we are building.
IMAGE_EXTENSION=$IMAGE_TYPE
if [[ $IMAGE_TYPE == 'openstack' ]]; then
IMAGE_EXTENSION=qcow2
fi
# RHEL 8 cannot boot a VMDK using libvirt. See BZ 999789.
if [[ $IMAGE_TYPE == vmdk ]]; then
echo "🤷 libvirt cannot boot stream-optimized VMDK."
exit 0
fi
# Apply lorax patch to work around pytoml issues in RHEL 8.x.
# See BZ 1843704 or https://github.com/weldr/lorax/pull/1030 for more details.
if [[ $ID == rhel ]]; then
sudo sed -r -i 's#toml.load\(args\[3\]\)#toml.load(open(args[3]))#' \
/usr/lib/python3.6/site-packages/composer/cli/compose.py
sudo rm -f /usr/lib/python3.6/site-packages/composer/cli/compose.pyc
fi
# Colorful output.
function greenprint {
echo -e "\033[1;32m${1}\033[0m"
}
# Start libvirtd and test it.
greenprint "🚀 Starting libvirt daemon"
sudo systemctl start libvirtd
sudo virsh list --all > /dev/null
# Set a customized dnsmasq configuration for libvirt so we always get the
# same address on bootup.
sudo tee /tmp/integration.xml > /dev/null << EOF
<network>
<name>integration</name>
<uuid>1c8fe98c-b53a-4ca4-bbdb-deb0f26b3579</uuid>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='integration' stp='on' delay='0'/>
<mac address='52:54:00:36:46:ef'/>
<ip address='192.168.100.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.100.2' end='192.168.100.254'/>
<host mac='34:49:22:B0:83:30' name='vm' ip='192.168.100.50'/>
</dhcp>
</ip>
</network>
EOF
if ! sudo virsh net-info integration > /dev/null 2>&1; then
sudo virsh net-define /tmp/integration.xml
sudo virsh net-start integration
fi
# Allow anyone in the wheel group to talk to libvirt.
greenprint "🚪 Allowing users in wheel group to talk to libvirt"
WHEEL_GROUP=wheel
if [[ $ID == rhel ]]; then
WHEEL_GROUP=adm
fi
sudo tee /etc/polkit-1/rules.d/50-libvirt.rules > /dev/null << EOF
polkit.addRule(function(action, subject) {
if (action.id == "org.libvirt.unix.manage" &&
subject.isInGroup("${WHEEL_GROUP}")) {
return polkit.Result.YES;
}
});
EOF
# Set up variables.
TEST_UUID=$(uuidgen)
IMAGE_KEY=osbuild-composer-qemu-test-${TEST_UUID}
INSTANCE_ADDRESS=192.168.100.50
# Set up temporary files.
TEMPDIR=$(mktemp -d)
BLUEPRINT_FILE=${TEMPDIR}/blueprint.toml
COMPOSE_START=${TEMPDIR}/compose-start-${IMAGE_KEY}.json
COMPOSE_INFO=${TEMPDIR}/compose-info-${IMAGE_KEY}.json
# Check for the smoke test file on the AWS instance that we start.
smoke_test_check () {
# Ensure the ssh key has restricted permissions.
SSH_KEY=${OSBUILD_COMPOSER_TEST_DATA}keyring/id_rsa
SSH_OPTIONS=(-o StrictHostKeyChecking=no -o ConnectTimeout=5)
SMOKE_TEST=$(sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" redhat@"${1}" 'cat /etc/smoke-test.txt')
if [[ $SMOKE_TEST == smoke-test ]]; then
echo 1
else
echo 0
fi
}
# Get the compose log.
get_compose_log () {
COMPOSE_ID=$1
LOG_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-${IMAGE_TYPE}.log
# Download the logs.
sudo composer-cli compose log "$COMPOSE_ID" | tee "$LOG_FILE" > /dev/null
}
# Get the compose metadata.
get_compose_metadata () {
COMPOSE_ID=$1
METADATA_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-${IMAGE_TYPE}.json
# Download the metadata.
sudo composer-cli compose metadata "$COMPOSE_ID" > /dev/null
# Find the tarball and extract it.
TARBALL=$(basename "$(find . -maxdepth 1 -type f -name "*-metadata.tar")")
tar -xf "$TARBALL"
rm -f "$TARBALL"
# Move the JSON file into place.
cat "${COMPOSE_ID}".json | jq -M '.' | tee "$METADATA_FILE" > /dev/null
}
# Write a basic blueprint for our image.
# NOTE(mhayden): The service customization will always be required for QCOW2
# but it is needed for OpenStack due to issue #698 in osbuild-composer. 😭
# NOTE(mhayden): The cloud-init package isn't included in VHD/Azure images
# by default and it must be added here.
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "bash"
description = "A base system with bash"
version = "0.0.1"
[[packages]]
name = "bash"
[[packages]]
name = "cloud-init"
[customizations.services]
enabled = ["sshd", "cloud-init", "cloud-init-local", "cloud-config", "cloud-final"]
[customizations.kernel]
append = "LANG=en_US.UTF-8 net.ifnames=0 biosdevname=0"
EOF
# Prepare the blueprint for the compose.
greenprint "📋 Preparing blueprint"
sudo composer-cli blueprints push "$BLUEPRINT_FILE"
sudo composer-cli blueprints depsolve bash
# Get worker unit file so we can watch the journal.
WORKER_UNIT=$(sudo systemctl list-units | grep -o -E "osbuild.*worker.*\.service")
sudo journalctl -af -n 1 -u "${WORKER_UNIT}" &
WORKER_JOURNAL_PID=$!
# Start the compose
greenprint "🚀 Starting compose"
sudo composer-cli --json compose start bash "$IMAGE_TYPE" | tee "$COMPOSE_START"
COMPOSE_ID=$(jq -r '.build_id' "$COMPOSE_START")
# Wait for the compose to finish.
greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}"
while true; do
sudo composer-cli --json compose info "${COMPOSE_ID}" | tee "$COMPOSE_INFO" > /dev/null
COMPOSE_STATUS=$(jq -r '.queue_status' "$COMPOSE_INFO")
# Is the compose finished?
if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
break
fi
# Wait 30 seconds and try again.
sleep 5
done
# Capture the compose logs from osbuild.
greenprint "💬 Getting compose log and metadata"
get_compose_log "$COMPOSE_ID"
get_compose_metadata "$COMPOSE_ID"
# Did the compose finish with success?
if [[ $COMPOSE_STATUS != FINISHED ]]; then
echo "Something went wrong with the compose. 😢"
exit 1
fi
# Stop watching the worker journal.
sudo kill ${WORKER_JOURNAL_PID}
# Download the image.
greenprint "📥 Downloading the image"
# Current $PWD is inside /tmp, there may not be enough space for an image.
# Let's use a bigger temporary directory for this operation.
BIG_TEMP_DIR=/var/lib/osbuild-composer-tests
sudo rm -rf "${BIG_TEMP_DIR}" || true
sudo mkdir "${BIG_TEMP_DIR}"
pushd "${BIG_TEMP_DIR}"
sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null
IMAGE_FILENAME=$(basename "$(find . -maxdepth 1 -type f -name "*.${IMAGE_EXTENSION}")")
LIBVIRT_IMAGE_PATH=/var/lib/libvirt/images/${IMAGE_KEY}.${IMAGE_EXTENSION}
sudo mv "$IMAGE_FILENAME" "$LIBVIRT_IMAGE_PATH"
popd
# Prepare cloud-init data.
CLOUD_INIT_DIR=$(mktemp -d)
cp "${OSBUILD_COMPOSER_TEST_DATA}"/cloud-init/{meta,user}-data "${CLOUD_INIT_DIR}"/
cp "${OSBUILD_COMPOSER_TEST_DATA}"/cloud-init/network-config "${CLOUD_INIT_DIR}"/
# Set up a cloud-init ISO.
greenprint "💿 Creating a cloud-init ISO"
CLOUD_INIT_PATH=/var/lib/libvirt/images/seed.iso
rm -f $CLOUD_INIT_PATH
pushd "$CLOUD_INIT_DIR"
sudo genisoimage -o $CLOUD_INIT_PATH -V cidata \
-r -J user-data meta-data network-config > /dev/null 2>&1
popd
# Ensure SELinux is happy with our new images.
greenprint "👿 Running restorecon on image directory"
sudo restorecon -Rv /var/lib/libvirt/images/
# Run virt-install to import the QCOW and boot it.
greenprint "🚀 Booting the image with libvirt"
if [[ $ARCH == 'ppc64le' ]]; then
# ppc64le has some machine quirks that must be worked around.
sudo virt-install \
--name "$IMAGE_KEY" \
--memory 2048 \
--vcpus 2 \
--disk path="${LIBVIRT_IMAGE_PATH}" \
--disk path=${CLOUD_INIT_PATH},device=cdrom \
--import \
--os-variant rhel8-unknown \
--noautoconsole \
--network network=integration,mac=34:49:22:B0:83:30 \
--qemu-commandline="-machine pseries,cap-cfpc=broken,cap-sbbc=broken,cap-ibs=broken,cap-ccf-assist=off,cap-large-decr=off"
elif [[ $ARCH == 's390x' ]]; then
# Our s390x machines are highly constrained on resources.
sudo virt-install \
--name "$IMAGE_KEY" \
--memory 512 \
--vcpus 1 \
--disk path="${LIBVIRT_IMAGE_PATH}" \
--disk path=${CLOUD_INIT_PATH},device=cdrom \
--import \
--os-variant rhel8-unknown \
--noautoconsole \
--network network=integration,mac=34:49:22:B0:83:30
else
# Both aarch64 and x86_64 support hybrid boot
if [[ $BOOT_TYPE == 'uefi' ]]; then
sudo virt-install \
--name "$IMAGE_KEY" \
--memory 1024 \
--vcpus 2 \
--disk path="${LIBVIRT_IMAGE_PATH}" \
--disk path=${CLOUD_INIT_PATH},device=cdrom \
--import \
--os-variant rhel8-unknown \
--noautoconsole \
--boot uefi,nvram_template=/usr/share/edk2/ovmf/OVMF_VARS.fd \
--network network=integration,mac=34:49:22:B0:83:30
else
sudo virt-install \
--name "$IMAGE_KEY" \
--memory 1024 \
--vcpus 2 \
--disk path="${LIBVIRT_IMAGE_PATH}" \
--disk path=${CLOUD_INIT_PATH},device=cdrom \
--import \
--os-variant rhel8-unknown \
--noautoconsole \
--network network=integration,mac=34:49:22:B0:83:30
fi
fi
# Set a number of maximum loops to check for our smoke test file via ssh.
case $ARCH in
s390x)
# s390x needs more time to boot its VM.
MAX_LOOPS=60
;;
*)
MAX_LOOPS=30
;;
esac
# Check for our smoke test file.
greenprint "🛃 Checking for smoke test file in VM"
# shellcheck disable=SC2034 # Unused variables left for readability
for LOOP_COUNTER in $(seq 0 ${MAX_LOOPS}); do
RESULTS="$(smoke_test_check $INSTANCE_ADDRESS)"
if [[ $RESULTS == 1 ]]; then
echo "Smoke test passed! 🥳"
break
fi
sleep 10
done
# Clean up our mess.
greenprint "🧼 Cleaning up"
sudo virsh destroy "${IMAGE_KEY}"
if [[ $ARCH == aarch64 || $BOOT_TYPE == 'uefi' ]]; then
sudo virsh undefine "${IMAGE_KEY}" --nvram
else
sudo virsh undefine "${IMAGE_KEY}"
fi
sudo rm -f "$LIBVIRT_IMAGE_PATH" $CLOUD_INIT_PATH
# Also delete the compose so we don't run out of disk space
sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
# Use the return code of the smoke test to determine if we passed or failed.
if [[ $RESULTS == 1 ]]; then
greenprint "💚 Success"
else
greenprint "❌ Failed"
exit 1
fi
exit 0