Add native qcow2 boot test

Add an end-to-end qcow2 test that follows a customer's steps with
`composer-cli`. The image is booted with libvirt so that libvirt can
choose the best virtualization options, and the guest is attached to
libvirt's default network.
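
The manual workflow this automates is roughly (commands approximate):

    composer-cli blueprints push blueprint.toml
    composer-cli blueprints depsolve bash
    composer-cli compose start bash qcow2
    composer-cli compose info <compose id>
    composer-cli compose image <compose id>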

Signed-off-by: Major Hayden <major@redhat.com>

test/image-tests/qcow2.sh (new executable file)

@@ -0,0 +1,236 @@
#!/bin/bash
set -euo pipefail
# Booting an OpenStack and QCOW2 instance is almost identical, so we can
# handle both with the same script.
IMAGE_TYPE=${1:-qcow2}
# Get OS data.
source /etc/os-release
# Colorful output.
function greenprint {
    echo -e "\033[1;32m${1}\033[0m"
}
# We need jq for parsing composer-cli output.
if ! hash jq; then
    greenprint "📦 Installing jq"
    sudo dnf -qy install jq
fi
# Install required packages.
greenprint "📦 Installing required packages"
sudo dnf -qy install htop libvirt-client libvirt-daemon \
    libvirt-daemon-config-network libvirt-daemon-config-nwfilter \
    libvirt-daemon-driver-interface libvirt-daemon-driver-network \
    libvirt-daemon-driver-nodedev libvirt-daemon-driver-nwfilter \
    libvirt-daemon-driver-qemu libvirt-daemon-driver-secret \
    libvirt-daemon-driver-storage libvirt-daemon-driver-storage-disk \
    libvirt-daemon-kvm qemu-img qemu-kvm virt-install
# Start libvirtd and test it.
greenprint "🚀 Starting libvirt daemon"
sudo systemctl start libvirtd
sudo virsh list --all > /dev/null
# Allow anyone in the wheel group to talk to libvirt.
greenprint "🚪 Allowing users in wheel group to talk to libvirt"
WHEEL_GROUP=wheel
if [[ $ID == rhel ]]; then
    WHEEL_GROUP=adm
fi
sudo tee /etc/polkit-1/rules.d/50-libvirt.rules > /dev/null << EOF
polkit.addRule(function(action, subject) {
    if (action.id == "org.libvirt.unix.manage" &&
        subject.isInGroup("${WHEEL_GROUP}")) {
        return polkit.Result.YES;
    }
});
EOF
# Jenkins sets WORKSPACE to the job workspace, but if this script runs
# outside of Jenkins, we can set up a temporary directory instead.
if [[ ${WORKSPACE:-empty} == empty ]]; then
    WORKSPACE=$(mktemp -d)
fi
# Set up variables.
TEST_UUID=$(uuidgen)
IMAGE_KEY=osbuild-composer-aws-test-${TEST_UUID}
# Set up temporary files.
TEMPDIR=$(mktemp -d)
AWS_CONFIG=${TEMPDIR}/aws.toml
BLUEPRINT_FILE=${TEMPDIR}/blueprint.toml
COMPOSE_START=${TEMPDIR}/compose-start-${IMAGE_KEY}.json
COMPOSE_INFO=${TEMPDIR}/compose-info-${IMAGE_KEY}.json
DOMIF_CHECK=${TEMPDIR}/domifcheck-${IMAGE_KEY}.json
# Check for the smoke test file on the instance that we boot.
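# Usage: smoke_test_check <instance IP address>; prints 1 if the smoke test
# file is present with the expected contents, 0 otherwise.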
smoke_test_check () {
    # Ensure the ssh key has restricted permissions.
    SSH_KEY=${WORKSPACE}/test/keyring/id_rsa
    chmod 0600 $SSH_KEY
    SMOKE_TEST=$(ssh -i ${SSH_KEY} redhat@${1} 'cat /etc/smoke-test.txt')
    if [[ $SMOKE_TEST == smoke-test ]]; then
        echo 1
    else
        echo 0
    fi
}
# Write a basic blueprint for our image.
# NOTE(mhayden): The service customization will always be required for QCOW2,
# but it is only needed for OpenStack due to issue #698 in osbuild-composer. 😭
tee $BLUEPRINT_FILE > /dev/null << EOF
name = "bash"
description = "A base system with bash"
version = "0.0.1"
[[packages]]
name = "bash"
[customizations.services]
enabled = ["sshd", "cloud-init", "cloud-init-local", "cloud-config", "cloud-final"]
EOF
# Copy the internal Fedora repositories into place.
if curl -fs http://download.devel.redhat.com > /dev/null; then
    sudo cp ${WORKSPACE}/test/internal-repos/fedora-31.json \
        /usr/share/osbuild-composer/repositories/fedora-31.json
    sudo cp ${WORKSPACE}/test/internal-repos/fedora-32.json \
        /usr/share/osbuild-composer/repositories/fedora-32.json
    sudo systemctl stop osbuild-composer.service
fi
# Prepare the blueprint for the compose.
greenprint "📋 Preparing blueprint"
sudo composer-cli blueprints push $BLUEPRINT_FILE
sudo composer-cli blueprints depsolve bash
# Get worker unit file so we can watch the journal.
WORKER_UNIT=$(sudo systemctl list-units | egrep -o "osbuild.*worker.*\.service")
sudo journalctl -af -n 1 -u ${WORKER_UNIT} &
WORKER_JOURNAL_PID=$!
# Start the compose
greenprint "🚀 Starting compose"
sudo composer-cli --json compose start bash $IMAGE_TYPE | tee $COMPOSE_START > /dev/null
COMPOSE_ID=$(jq -r '.build_id' $COMPOSE_START)
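# A sketch of what the start JSON looks like (exact fields can vary by
# composer version); only build_id is used here:
#   {"build_id": "8e8014f8-xxxx-xxxx-xxxx-xxxxxxxxxxxx", "status": true}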
# Wait for the compose to finish.
greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}"
while true; do
    sudo composer-cli --json compose info ${COMPOSE_ID} | tee $COMPOSE_INFO > /dev/null
    COMPOSE_STATUS=$(jq -r '.queue_status' $COMPOSE_INFO)
    # Is the compose finished?
    if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
        break
    fi
    # Wait 30 seconds and try again.
    sleep 30
done
# Did the compose finish with success?
if [[ $COMPOSE_STATUS != FINISHED ]]; then
    echo "Something went wrong with the compose. 😢"
    exit 1
fi
# Stop watching the worker journal.
sudo kill ${WORKER_JOURNAL_PID}
# Download the image.
greenprint "📥 Downloading the image"
sudo composer-cli compose image ${COMPOSE_ID} > /dev/null
IMAGE_FILENAME=$(basename $(find . -maxdepth 1 -type f -name '*.qcow2'))
QCOW_IMAGE_PATH=/var/lib/libvirt/images/${IMAGE_KEY}.qcow2
sudo mv $IMAGE_FILENAME $QCOW_IMAGE_PATH
# Set up a cloud-init ISO.
greenprint "💿 Creating a cloud-init ISO"
CLOUD_INIT_PATH=/var/lib/libvirt/images/seed.iso
cp ${WORKSPACE}/test/cloud-init/*-data .
sudo genisoimage -o $CLOUD_INIT_PATH -V cidata -r -J user-data meta-data > /dev/null
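# The seed ISO follows cloud-init's NoCloud layout (volume label "cidata" plus
# user-data and meta-data). As a rough sketch, the user-data copied above is a
# #cloud-config that authorizes the test ssh key for the redhat user, and
# meta-data carries an instance-id and local-hostname.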
# Ensure SELinux is happy with our new images.
greenprint "👿 Running restorecon on image directory"
sudo restorecon -Rv /var/lib/libvirt/images/
# Run virt-install to import the QCOW and boot it.
greenprint "🚀 Booting the image with libvirt"
sudo virt-install \
    --name $IMAGE_KEY \
    --memory 2048 \
    --vcpus 2 \
    --disk path=${QCOW_IMAGE_PATH} \
    --disk path=${CLOUD_INIT_PATH} \
    --import \
    --os-variant rhel8-unknown \
    --noautoconsole \
    --cpu host-passthrough \
    --network network=default
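# The first --disk is the composed qcow2 image; the second attaches the cidata
# seed ISO so cloud-init can configure the guest on first boot.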
# Wait for the image to make a DHCP request.
greenprint "💻 Waiting for the instance to make a DHCP request."
for LOOP_COUNTER in {0..30}; do
    sudo virsh domifaddr ${IMAGE_KEY} | tee $DOMIF_CHECK > /dev/null
    # Check to see if the CIDR IP is in the output yet.
    if grep -oP "[0-9\.]*/[0-9]*" $DOMIF_CHECK > /dev/null; then
        INSTANCE_ADDRESS=$(grep -oP "[0-9\.]*/[0-9]*" $DOMIF_CHECK | sed 's#/.*$##')
        echo "Found instance address: ${INSTANCE_ADDRESS}"
        break
    fi
    # Wait 10 seconds and try again.
    sleep 10
done
# Wait for SSH to start.
greenprint "⏱ Waiting for instance to respond to ssh"
for LOOP_COUNTER in {0..30}; do
    if ssh-keyscan $INSTANCE_ADDRESS > /dev/null; then
        ssh-keyscan $INSTANCE_ADDRESS >> ~/.ssh/known_hosts
        break
    fi
    # ssh-keyscan has a 5 second timeout by default, so the pause per loop
    # is 10 seconds when you include the following `sleep`.
    sleep 5
done
# Check for our smoke test file.
greenprint "🛃 Checking for smoke test file"
for LOOP_COUNTER in {0..10}; do
    RESULTS="$(smoke_test_check $INSTANCE_ADDRESS)"
    if [[ $RESULTS == 1 ]]; then
        echo "Smoke test passed! 🥳"
        break
    fi
    sleep 5
done
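# If the smoke test never passed, hold everything for an hour (presumably to
# leave the failed instance up for inspection) before tearing it down.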
if [[ $RESULTS != 1 ]]; then
    sleep 3600
fi
# Clean up our mess.
greenprint "🧼 Cleaning up"
sudo virsh destroy ${IMAGE_KEY}
sudo virsh undefine ${IMAGE_KEY}
sudo rm -fv $QCOW_IMAGE_PATH $CLOUD_INIT_PATH
# Use the return code of the smoke test to determine if we passed or failed.
if [[ $RESULTS == 1 ]]; then
    greenprint "💚 Success"
else
    greenprint "❌ Failed"
    exit 1
fi
exit 0