tests: ship all tests in the -tests sub-package

Tests should no longer be run directly from git, but should instead be
installed onto the test system using rpm and run from there. This moves
towards unifying our two types of test cases.

The new structure is now:

`test/cmd`:   the executors, one for each test-case. This is installed
              into `/usr/libexec/test/osbuild-composer`.
`test/data`:  data and config used by the tests. This is installed into
              `/usr/share/tests/osbuild-composer`.
`schutzbot`:  configuration of the actual test run. In particular, this
              is where the distros and repositories to test against are
              configured.
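
For example, once the sub-package is installed on a test system, a
test case runs straight from the install location. A sketch (the
sub-package name `osbuild-composer-tests` and the image type argument
are assumptions, not part of this commit):

```bash
# Hypothetical invocation on a provisioned test system.
sudo dnf -y install osbuild-composer-tests
sudo /usr/libexec/test/osbuild-composer/qemu.sh qcow2
```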

This is very much still work in progress, and is only the first step
towards simplifying schutzbot. Apart from moving files around, this
should be a no-op.

Signed-off-by: Tom Gundersen <teg@jklm.no>
Author:    Tom Gundersen, 2020-10-03 13:48:03 +01:00
Committer: Ondřej Budai
Parent:    21e6ae5ef4
Commit:    805ae59151

81 changed files with 247 additions and 160 deletions

test/cmd/aws.sh (new executable file)

@@ -0,0 +1,387 @@
#!/bin/bash
set -euo pipefail
OSBUILD_COMPOSER_TEST_DATA=/usr/share/tests/osbuild-composer/
source /etc/os-release
# Fedora 31's composer-cli doesn't support doing image uploads.
if [[ $ID == fedora ]] && [[ $VERSION_ID == 31 ]]; then
echo "Fedora 31 does not support image uploads with composer-cli."
exit 0
fi
# Colorful output.
function greenprint {
echo -e "\033[1;32m${1}\033[0m"
}
# Apply lorax patch to work around pytoml issues in RHEL 8.x.
# See BZ 1843704 or https://github.com/weldr/lorax/pull/1030 for more details.
if [[ $ID == rhel ]]; then
sudo sed -r -i 's#toml.load\(args\[3\]\)#toml.load(open(args[3]))#' \
/usr/lib/python3.6/site-packages/composer/cli/compose.py
sudo rm -f /usr/lib/python3.6/site-packages/composer/cli/compose.pyc
fi
# We need jq for parsing composer-cli output.
if ! hash jq; then
greenprint "Installing jq"
sudo dnf -qy install jq
fi
# We need awscli to talk to AWS.
if ! hash aws; then
greenprint "Installing awscli"
sudo dnf -y install unzip
pushd /tmp
curl -Ls --retry 5 --output awscliv2.zip \
https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip
unzip awscliv2.zip > /dev/null
sudo ./aws/install > /dev/null
aws --version
popd
fi
TEST_UUID=$(uuidgen)
IMAGE_KEY=osbuild-composer-aws-test-${TEST_UUID}
AWS_CMD="aws --region $AWS_REGION --output json --color on"
# Jenkins sets WORKSPACE to the job workspace, but if this script runs
# outside of Jenkins, we can set up a temporary directory instead.
if [[ ${WORKSPACE:-empty} == empty ]]; then
WORKSPACE=$(mktemp -d)
fi
# Set up temporary files.
TEMPDIR=$(mktemp -d)
AWS_CONFIG=${TEMPDIR}/aws.toml
BLUEPRINT_FILE=${TEMPDIR}/blueprint.toml
AWS_INSTANCE_JSON=${TEMPDIR}/aws-instance.json
COMPOSE_START=${TEMPDIR}/compose-start-${IMAGE_KEY}.json
COMPOSE_INFO=${TEMPDIR}/compose-info-${IMAGE_KEY}.json
AMI_DATA=${TEMPDIR}/ami-data-${IMAGE_KEY}.json
INSTANCE_DATA=${TEMPDIR}/instance-data-${IMAGE_KEY}.json
INSTANCE_CONSOLE=${TEMPDIR}/instance-console-${IMAGE_KEY}.json
# Check for the smoke test file on the AWS instance that we start.
smoke_test_check () {
# Use the ssh key shipped with the test data.
SSH_KEY=${OSBUILD_COMPOSER_TEST_DATA}keyring/id_rsa
SMOKE_TEST=$(sudo ssh -i "${SSH_KEY}" redhat@"${1}" 'cat /etc/smoke-test.txt')
if [[ $SMOKE_TEST == smoke-test ]]; then
echo 1
else
echo 0
fi
}
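# smoke_test_check reports its result on stdout ("1" or "0") rather than
# via exit status, so callers capture it with command substitution (see the
# retry loop below).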
# Get the compose log.
get_compose_log () {
COMPOSE_ID=$1
LOG_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-aws.log
# Download the logs.
sudo composer-cli compose log "$COMPOSE_ID" | tee "$LOG_FILE" > /dev/null
}
# Get the compose metadata.
get_compose_metadata () {
COMPOSE_ID=$1
METADATA_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-aws.json
# Download the metadata.
sudo composer-cli compose metadata "$COMPOSE_ID" > /dev/null
# Find the tarball and extract it.
TARBALL=$(basename "$(find . -maxdepth 1 -type f -name "*-metadata.tar")")
tar -xf "$TARBALL"
rm -f "$TARBALL"
# Move the JSON file into place.
cat "${COMPOSE_ID}".json | jq -M '.' | tee "$METADATA_FILE" > /dev/null
}
# Get the console screenshot from the AWS instance.
store_instance_screenshot () {
INSTANCE_ID=${1}
LOOP_COUNTER=${2}
SCREENSHOT_FILE=${WORKSPACE}/console-screenshot-${ID}-${VERSION_ID}-${LOOP_COUNTER}.jpg
$AWS_CMD ec2 get-console-screenshot --instance-id "${INSTANCE_ID}" > "$INSTANCE_CONSOLE"
jq -r '.ImageData' "$INSTANCE_CONSOLE" | base64 -d - > "$SCREENSHOT_FILE"
}
# Write an AWS TOML file
tee "$AWS_CONFIG" > /dev/null << EOF
provider = "aws"
[settings]
accessKeyID = "${AWS_ACCESS_KEY_ID}"
secretAccessKey = "${AWS_SECRET_ACCESS_KEY}"
bucket = "${AWS_BUCKET}"
region = "${AWS_REGION}"
key = "${IMAGE_KEY}"
EOF
# Write a basic blueprint for our image.
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "bash"
description = "A base system with bash"
version = "0.0.1"
[[packages]]
name = "bash"
[customizations.services]
enabled = ["sshd", "cloud-init", "cloud-init-local", "cloud-config", "cloud-final"]
EOF
# Prepare the blueprint for the compose.
greenprint "📋 Preparing blueprint"
sudo composer-cli blueprints push "$BLUEPRINT_FILE"
sudo composer-cli blueprints depsolve bash
# Get worker unit file so we can watch the journal.
WORKER_UNIT=$(sudo systemctl list-units | grep -o -E "osbuild.*worker.*\.service")
sudo journalctl -af -n 1 -u "${WORKER_UNIT}" &
WORKER_JOURNAL_PID=$!
# Start the compose and upload to AWS.
greenprint "🚀 Starting compose"
sudo composer-cli --json compose start bash ami "$IMAGE_KEY" "$AWS_CONFIG" | tee "$COMPOSE_START"
COMPOSE_ID=$(jq -r '.build_id' "$COMPOSE_START")
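# The --json output of `compose start` is expected to carry the new compose
# UUID in .build_id (roughly {"build_id": "<uuid>", ...}); everything below
# polls and downloads by that ID.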
# Wait for the compose to finish.
greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}"
while true; do
sudo composer-cli --json compose info "${COMPOSE_ID}" | tee "$COMPOSE_INFO" > /dev/null
COMPOSE_STATUS=$(jq -r '.queue_status' "$COMPOSE_INFO")
# Is the compose finished?
if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
break
fi
# Wait 30 seconds and try again.
sleep 30
done
# Capture the compose logs from osbuild.
greenprint "💬 Getting compose log and metadata"
get_compose_log "$COMPOSE_ID"
get_compose_metadata "$COMPOSE_ID"
# Did the compose finish with success?
if [[ $COMPOSE_STATUS != FINISHED ]]; then
echo "Something went wrong with the compose. 😢"
exit 1
fi
# Find the image that we made in AWS.
greenprint "🔍 Search for created AMI"
$AWS_CMD ec2 describe-images \
--owners self \
--filters Name=name,Values="${IMAGE_KEY}" \
| tee "$AMI_DATA" > /dev/null
AMI_IMAGE_ID=$(jq -r '.Images[].ImageId' "$AMI_DATA")
# Stop watching the worker journal.
sudo kill ${WORKER_JOURNAL_PID}
# NOTE(mhayden): Getting TagSpecifications to play along with bash's
# parsing of curly braces and square brackets is nuts, so we just write some
# json and pass it to the aws command.
tee "$AWS_INSTANCE_JSON" > /dev/null << EOF
{
"TagSpecifications": [
{
"ResourceType": "instance",
"Tags": [
{
"Key": "Name",
"Value": "${IMAGE_KEY}"
}
]
}
]
}
EOF
# Build instance in AWS with our image.
greenprint "👷🏻 Building instance in AWS"
$AWS_CMD ec2 run-instances \
--associate-public-ip-address \
--key-name personal_servers \
--image-id "${AMI_IMAGE_ID}" \
--instance-type t3a.micro \
--user-data file://"${OSBUILD_COMPOSER_TEST_DATA}"/cloud-init/user-data \
--cli-input-json file://"${AWS_INSTANCE_JSON}" > /dev/null
# Wait for the instance to finish building.
greenprint "⏱ Waiting for AWS instance to be marked as running"
while true; do
$AWS_CMD ec2 describe-instances \
--filters Name=image-id,Values="${AMI_IMAGE_ID}" \
| tee "$INSTANCE_DATA" > /dev/null
INSTANCE_STATUS=$(jq -r '.Reservations[].Instances[].State.Name' "$INSTANCE_DATA")
# Break the loop if our instance is running.
if [[ $INSTANCE_STATUS == running ]]; then
break
fi
# Sleep for 10 seconds and try again.
sleep 10
done
# Get data about the instance we built.
INSTANCE_ID=$(jq -r '.Reservations[].Instances[].InstanceId' "$INSTANCE_DATA")
PUBLIC_IP=$(jq -r '.Reservations[].Instances[].PublicIpAddress' "$INSTANCE_DATA")
# Wait for the node to come online.
greenprint "⏱ Waiting for AWS instance to respond to ssh"
for LOOP_COUNTER in {0..30}; do
if ssh-keyscan "$PUBLIC_IP" > /dev/null 2>&1; then
echo "SSH is up!"
ssh-keyscan "$PUBLIC_IP" | sudo tee -a /root/.ssh/known_hosts
break
fi
# Get a screenshot of the instance console.
echo "Getting instance screenshot..."
store_instance_screenshot "$INSTANCE_ID" "$LOOP_COUNTER" || true
# ssh-keyscan has a 5 second timeout by default, so the pause per loop
# is 10 seconds when you include the following `sleep`.
echo "Retrying in 5 seconds..."
sleep 5
done
# Check for our smoke test file.
greenprint "🛃 Checking for smoke test file"
for LOOP_COUNTER in {0..10}; do
RESULTS="$(smoke_test_check "$PUBLIC_IP")"
if [[ $RESULTS == 1 ]]; then
echo "Smoke test passed! 🥳"
break
fi
sleep 5
done
# Clean up our mess.
greenprint "🧼 Cleaning up"
SNAPSHOT_ID=$(jq -r '.Images[].BlockDeviceMappings[].Ebs.SnapshotId' "$AMI_DATA")
$AWS_CMD ec2 terminate-instances --instance-id "${INSTANCE_ID}"
$AWS_CMD ec2 deregister-image --image-id "${AMI_IMAGE_ID}"
$AWS_CMD ec2 delete-snapshot --snapshot-id "${SNAPSHOT_ID}"
# Use the return code of the smoke test to determine if we passed or failed.
# On RHEL, continue with the cloud API test below.
if [[ $RESULTS == 1 ]] && [[ $ID != rhel ]]; then
greenprint "💚 Success"
exit 0
elif [[ $RESULTS != 1 ]]; then
greenprint "❌ Failed"
exit 1
fi
CLOUD_REQUEST_FILE=${TEMPDIR}/image_request.json
REPOSITORY_RHEL=repositories/rhel-8.json
if [[ $VERSION_ID == 8.3 ]]; then
REPOSITORY_RHEL=repositories/rhel-8-beta.json
fi
sudo systemctl stop osbuild-worker*
sudo systemctl start osbuild-remote-worker@localhost:8704
BASE_URL=$(jq -r '.x86_64[0].baseurl' "$REPOSITORY_RHEL")
APPSTREAM_URL=$(jq -r '.x86_64[1].baseurl' "$REPOSITORY_RHEL")
SNAPSHOT_NAME=$(cat /proc/sys/kernel/random/uuid)
tee "$CLOUD_REQUEST_FILE" > /dev/null << EOF
{
"distribution": "rhel-8",
"image_requests": [
{
"architecture": "x86_64",
"image_type": "qcow2",
"repositories": [
{
"baseurl": "${BASE_URL}",
"rhsm": true
},
{
"baseurl": "${APPSTREAM_URL}",
"rhsm": true
}
],
"upload_requests": [
{
"type": "aws",
"options": {
"region": "${AWS_REGION}",
"s3": {
"access_key_id": "${AWS_ACCESS_KEY_ID}",
"secret_access_key": "${AWS_SECRET_ACCESS_KEY}",
"bucket": "${AWS_BUCKET}"
},
"ec2": {
"access_key_id": "${AWS_ACCESS_KEY_ID}",
"secret_access_key": "${AWS_SECRET_ACCESS_KEY}",
"snapshot_name": "${SNAPSHOT_NAME}"
}
}
}
]
}
]
}
EOF
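# Submit the request to composer's cloud API, assumed here to listen on
# localhost:8703 (the remote worker started above connects via 8704).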
COMPOSE_ID=$(curl -sS -H 'Content-Type: application/json' -X POST -d @"$CLOUD_REQUEST_FILE" http://localhost:8703/compose | jq -r '.id')
# Wait for the compose to finish.
greenprint "⏱ Waiting for cloud compose to finish: ${COMPOSE_ID}"
for LOOP_COUNTER in {0..40}; do
COMPOSE_STATUS=$(curl -sS http://localhost:8703/compose/"$COMPOSE_ID" | jq -r '.status')
echo "Cloud compose $COMPOSE_ID status: $COMPOSE_STATUS"
if [[ $COMPOSE_STATUS == FAILED ]]; then
echo "Something went wrong with the cloudapi compose. 😢"
exit 1
elif [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
break
fi
sleep 30
done
# Find the image that we made in AWS.
greenprint "🔍 Search for created AMI"
$AWS_CMD ec2 describe-images \
--owners self \
--filters Name=name,Values="$SNAPSHOT_NAME" \
| tee "$AMI_DATA" > /dev/null
AMI_IMAGE_ID=$(jq -r '.Images[].ImageId' "$AMI_DATA")
SNAPSHOT_ID=$(jq -r '.Images[].BlockDeviceMappings[].Ebs.SnapshotId' "$AMI_DATA")
# Delete the image without running it
greenprint "🧼 Cleaning up composer cloud image"
$AWS_CMD ec2 deregister-image --image-id "$AMI_IMAGE_ID"
$AWS_CMD ec2 delete-snapshot --snapshot-id "$SNAPSHOT_ID"
# Use the return code of the smoke test to determine if we passed or failed.
if [[ $RESULTS == 1 ]]; then
greenprint "💚 Success"
else
greenprint "❌ Failed"
exit 1
fi
exit 0

test/cmd/koji.sh (new executable file)

@@ -0,0 +1,85 @@
#!/bin/bash
set -euo pipefail
OSBUILD_COMPOSER_TEST_DATA=/usr/share/tests/osbuild-composer/
# Get OS data.
source /etc/os-release
# Colorful output.
function greenprint {
echo -e "\033[1;32m${1}\033[0m"
}
if [[ $ID == rhel ]] && ! rpm -q epel-release; then
greenprint "📦 Setting up EPEL repository"
curl -Ls --retry 5 --output /tmp/epel.rpm \
https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm
sudo rpm -Uvh /tmp/epel.rpm
fi
greenprint "Installing required packages"
sudo dnf -y install \
container-selinux \
dnsmasq \
krb5-workstation \
koji \
podman \
python3 \
sssd-krb5
if [[ $ID == rhel ]]; then
greenprint "Tweaking podman, maybe."
sudo cp /usr/share/tests/osbuild-composer/vendor/87-podman-bridge.conflist /etc/cni/net.d/
sudo cp /usr/share/tests/osbuild-composer/vendor/dnsname /usr/libexec/cni/
fi
greenprint "Starting containers"
sudo ./internal/upload/koji/run-koji-container.sh start
greenprint "Copying custom worker config"
sudo mkdir -p /etc/osbuild-worker
sudo cp "${OSBUILD_COMPOSER_TEST_DATA}"/composer/osbuild-worker.toml \
/etc/osbuild-worker/
greenprint "Adding kerberos config"
sudo cp \
/tmp/osbuild-composer-koji-test/client.keytab \
/etc/osbuild-composer/client.keytab
sudo cp \
/tmp/osbuild-composer-koji-test/client.keytab \
/etc/osbuild-worker/client.keytab
sudo cp \
"${OSBUILD_COMPOSER_TEST_DATA}"/kerberos/krb5-local.conf \
/etc/krb5.conf.d/local
greenprint "Adding generated CA cert for Koji"
sudo cp \
/tmp/osbuild-composer-koji-test/ca-crt.pem \
/etc/pki/ca-trust/source/anchors/koji-ca-crt.pem
sudo update-ca-trust
greenprint "Restarting composer to pick up new config"
sudo systemctl restart osbuild-composer
sudo systemctl restart osbuild-worker\@1
greenprint "Testing Koji"
koji --server=http://localhost:8080/kojihub --user=osbuild --password=osbuildpass --authtype=password hello
greenprint "Creating Koji task"
koji --server=http://localhost:8080/kojihub --user kojiadmin --password kojipass --authtype=password make-task image
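# `hello` verifies that password authentication against the local hub
# works; `make-task image` pre-creates the task the compose below is
# expected to attach to (hence task ID 1 in the taskinfo call further down).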
greenprint "Pushing compose to Koji"
sudo ./tools/koji-compose.py "${ID}-${VERSION_ID%.*}"
greenprint "Show Koji task"
koji --server=http://localhost:8080/kojihub taskinfo 1
koji --server=http://localhost:8080/kojihub buildinfo 1
greenprint "Stopping containers"
sudo ./internal/upload/koji/run-koji-container.sh stop
greenprint "Removing generated CA cert"
sudo rm \
/etc/pki/ca-trust/source/anchors/koji-ca-crt.pem
sudo update-ca-trust

test/cmd/ostree.sh (new executable file)

@@ -0,0 +1,482 @@
#!/bin/bash
set -euo pipefail
OSBUILD_COMPOSER_TEST_DATA=/usr/share/tests/osbuild-composer/
# Get OS data.
source /etc/os-release
ARCH=$(uname -m)
# Set os-variant and boot location used by virt-install.
case "${ID}-${VERSION_ID}" in
# Bypass ostree test on fedora-31 and rhel 8.2
"fedora-31")
exit 0;;
"rhel-8.2")
exit 0;;
"fedora-32")
IMAGE_TYPE=fedora-iot-commit
OSTREE_REF="fedora/32/${ARCH}/iot"
OS_VARIANT="fedora32"
BOOT_LOCATION="https://mirrors.rit.edu/fedora/fedora/linux/releases/32/Everything/x86_64/os/";;
"rhel-8.3")
# Override old rhel-8-beta.json because test needs latest systemd and redhat-release
sudo cp schutzbot/repositories/rhel-8-beta.json /etc/osbuild-composer/repositories/
sudo systemctl restart osbuild-composer.socket
IMAGE_TYPE=rhel-edge-commit
OSTREE_REF="rhel/8/${ARCH}/edge"
OS_VARIANT="rhel8-unknown"
BOOT_LOCATION="http://download.devel.redhat.com/rhel-8/nightly/RHEL-8/latest-RHEL-8.3/compose/BaseOS/x86_64/os/";;
*) ;;
esac
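# NOTE: any other distro/version falls through the case above with
# IMAGE_TYPE, OSTREE_REF, OS_VARIANT, and BOOT_LOCATION left unset; with
# `set -u`, the script then aborts on their first use, which effectively
# restricts this test to the platforms listed.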
# Colorful output.
function greenprint {
echo -e "\033[1;32m${1}\033[0m"
}
# Mock is only available in EPEL for RHEL.
if [[ $ID == rhel ]] && ! rpm -q epel-release; then
greenprint "📦 Setting up EPEL repository"
curl -Ls --retry 5 --output /tmp/epel.rpm \
https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm
sudo rpm -Uvh /tmp/epel.rpm
fi
# Install required packages.
greenprint "📦 Installing required packages"
sudo dnf -y install jq libvirt-client libvirt-daemon \
libvirt-daemon-config-network libvirt-daemon-config-nwfilter \
libvirt-daemon-driver-interface libvirt-daemon-driver-network \
libvirt-daemon-driver-nodedev libvirt-daemon-driver-nwfilter \
libvirt-daemon-driver-qemu libvirt-daemon-driver-secret \
libvirt-daemon-driver-storage libvirt-daemon-driver-storage-disk \
libvirt-daemon-kvm qemu-img qemu-kvm virt-install expect \
python3-lxml ansible httpd
# Start libvirtd and test it.
greenprint "🚀 Starting libvirt daemon"
sudo systemctl start libvirtd
sudo virsh list --all > /dev/null
# Set a customized dnsmasq configuration for libvirt so we always get the
# same address on bootup.
sudo tee /tmp/integration.xml > /dev/null << EOF
<network>
<name>integration</name>
<uuid>1c8fe98c-b53a-4ca4-bbdb-deb0f26b3579</uuid>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='integration' stp='on' delay='0'/>
<mac address='52:54:00:36:46:ef'/>
<ip address='192.168.100.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.100.2' end='192.168.100.254'/>
<host mac='34:49:22:B0:83:30' name='vm' ip='192.168.100.50'/>
</dhcp>
</ip>
</network>
EOF
if ! sudo virsh net-info integration > /dev/null 2>&1; then
sudo virsh net-define /tmp/integration.xml
sudo virsh net-start integration
fi
# Allow anyone in the wheel group to talk to libvirt.
greenprint "🚪 Allowing users in wheel group to talk to libvirt"
WHEEL_GROUP=wheel
if [[ $ID == rhel ]]; then
WHEEL_GROUP=adm
fi
sudo tee /etc/polkit-1/rules.d/50-libvirt.rules > /dev/null << EOF
polkit.addRule(function(action, subject) {
if (action.id == "org.libvirt.unix.manage" &&
subject.isInGroup("${WHEEL_GROUP}")) {
return polkit.Result.YES;
}
});
EOF
# Set up variables.
TEST_UUID=$(uuidgen)
IMAGE_KEY="osbuild-composer-ostree-test-${TEST_UUID}"
GUEST_ADDRESS=192.168.100.50
# Set up temporary files.
TEMPDIR=$(mktemp -d)
BLUEPRINT_FILE=${TEMPDIR}/blueprint.toml
KS_FILE=${TEMPDIR}/ks.cfg
COMPOSE_START=${TEMPDIR}/compose-start-${IMAGE_KEY}.json
COMPOSE_INFO=${TEMPDIR}/compose-info-${IMAGE_KEY}.json
# SSH setup.
SSH_OPTIONS=(-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=5)
SSH_KEY=${OSBUILD_COMPOSER_TEST_DATA}keyring/id_rsa
# Get the compose log.
get_compose_log () {
COMPOSE_ID=$1
LOG_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-${COMPOSE_ID}.log
# Download the logs.
sudo composer-cli compose log "$COMPOSE_ID" | tee "$LOG_FILE" > /dev/null
}
# Get the compose metadata.
get_compose_metadata () {
COMPOSE_ID=$1
METADATA_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-${COMPOSE_ID}.json
# Download the metadata.
sudo composer-cli compose metadata "$COMPOSE_ID" > /dev/null
# Find the tarball and extract it.
TARBALL=$(basename "$(find . -maxdepth 1 -type f -name "*-metadata.tar")")
tar -xf "$TARBALL" -C "${TEMPDIR}"
rm -f "$TARBALL"
# Move the JSON file into place.
cat "${TEMPDIR}"/"${COMPOSE_ID}".json | jq -M '.' | tee "$METADATA_FILE" > /dev/null
}
# Build ostree image.
build_image() {
blueprint_file=$1
blueprint_name=$2
# Prepare the blueprint for the compose.
greenprint "📋 Preparing blueprint"
sudo composer-cli blueprints push "$blueprint_file"
sudo composer-cli blueprints depsolve "$blueprint_name"
# Get worker unit file so we can watch the journal.
WORKER_UNIT=$(sudo systemctl list-units | grep -o -E "osbuild.*worker.*\.service")
sudo journalctl -af -n 1 -u "${WORKER_UNIT}" &
WORKER_JOURNAL_PID=$!
# Start the compose.
greenprint "🚀 Starting compose"
if [[ $blueprint_name == upgrade ]]; then
# Leave new version composer-cli here in case it got updated.
# sudo composer-cli --json compose start-ostree --ref $OSTREE_REF --parent $COMMIT_HASH $blueprint_name $IMAGE_TYPE | tee $COMPOSE_START
sudo composer-cli --json compose start-ostree "$blueprint_name" $IMAGE_TYPE "$OSTREE_REF" "$COMMIT_HASH" | tee "$COMPOSE_START"
else
sudo composer-cli --json compose start "$blueprint_name" $IMAGE_TYPE | tee "$COMPOSE_START"
fi
COMPOSE_ID=$(jq -r '.build_id' "$COMPOSE_START")
# Wait for the compose to finish.
greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}"
while true; do
sudo composer-cli --json compose info "${COMPOSE_ID}" | tee "$COMPOSE_INFO" > /dev/null
COMPOSE_STATUS=$(jq -r '.queue_status' "$COMPOSE_INFO")
# Is the compose finished?
if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
break
fi
# Wait 5 seconds and try again.
sleep 5
done
# Capture the compose logs from osbuild.
greenprint "💬 Getting compose log and metadata"
get_compose_log "$COMPOSE_ID"
get_compose_metadata "$COMPOSE_ID"
# Did the compose finish with success?
if [[ $COMPOSE_STATUS != FINISHED ]]; then
echo "Something went wrong with the compose. 😢"
exit 1
fi
# Stop watching the worker journal.
sudo kill ${WORKER_JOURNAL_PID}
}
# Wait for the ssh server to come up.
wait_for_ssh_up () {
SSH_STATUS=$(sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" admin@"${1}" '/bin/bash -c "echo -n READY"')
if [[ $SSH_STATUS == READY ]]; then
echo 1
else
echo 0
fi
}
# Clean up our mess.
clean_up () {
greenprint "🧼 Cleaning up"
sudo virsh destroy "${IMAGE_KEY}"
if [[ $ARCH == aarch64 ]]; then
sudo virsh undefine "${IMAGE_KEY}" --nvram
else
sudo virsh undefine "${IMAGE_KEY}"
fi
# Remove qcow2 file.
sudo rm -f "$LIBVIRT_IMAGE_PATH"
# Remove extracted upgrade image-tar.
sudo rm -rf "$UPGRADE_PATH"
# Remove "remote" repo.
sudo rm -rf "${HTTPD_PATH}"/{repo,compose.json}
# Remove tmp dir.
sudo rm -rf "$TEMPDIR"
# Stop httpd
sudo systemctl disable httpd --now
}
# Test result checking
check_result () {
greenprint "Checking for test result"
if [[ $RESULTS == 1 ]]; then
greenprint "💚 Success"
else
greenprint "❌ Failed"
clean_up
exit 1
fi
}
##################################################
##
## ostree image/commit installation
##
##################################################
# Write a blueprint for ostree image.
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "ostree"
description = "A base ostree image"
version = "0.0.1"
modules = []
groups = []
[[packages]]
name = "python36"
version = "*"
EOF
# Build installation image.
build_image "$BLUEPRINT_FILE" ostree
# Start httpd to serve ostree repo.
greenprint "🚀 Starting httpd daemon"
sudo systemctl start httpd
# Download the image and extract tar into web server root folder.
greenprint "📥 Downloading and extracting the image"
sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null
IMAGE_FILENAME="${COMPOSE_ID}-commit.tar"
HTTPD_PATH="/var/www/html"
sudo tar -xf "${IMAGE_FILENAME}" -C ${HTTPD_PATH}
sudo rm -f "$IMAGE_FILENAME"
# Clean compose and blueprints.
greenprint "Clean up osbuild-composer"
sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
sudo composer-cli blueprints delete ostree > /dev/null
# Get ostree commit value.
greenprint "Get ostree image commit value"
COMMIT_HASH=$(jq -r '."ostree-commit"' < ${HTTPD_PATH}/compose.json)
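# compose.json sits next to the extracted repo and is expected to carry the
# commit checksum as {"ostree-commit": "<checksum>", ...}; this hash later
# becomes the parent for the upgrade compose.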
# Ensure SELinux is happy with our new images.
greenprint "👿 Running restorecon on image directory"
sudo restorecon -Rv /var/lib/libvirt/images/
# Create qcow2 file for virt install.
greenprint "Create qcow2 file for virt install"
LIBVIRT_IMAGE_PATH=/var/lib/libvirt/images/${IMAGE_KEY}.qcow2
sudo qemu-img create -f qcow2 "${LIBVIRT_IMAGE_PATH}" 20G
# Write kickstart file for ostree image installation.
greenprint "Generate kickstart file"
tee "$KS_FILE" > /dev/null << STOPHERE
text
lang en_US.UTF-8
keyboard us
timezone --utc Etc/UTC
selinux --enforcing
rootpw --lock --iscrypted locked
user --name=admin --groups=wheel --iscrypted --password=\$6\$1LgwKw9aOoAi/Zy9\$Pn3ErY1E8/yEanJ98evqKEW.DZp24HTuqXPJl6GYCm8uuobAmwxLv7rGCvTRZhxtcYdmC0.XnYRSR9Sh6de3p0
sshkey --username=admin "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC61wMCjOSHwbVb4VfVyl5sn497qW4PsdQ7Ty7aD6wDNZ/QjjULkDV/yW5WjDlDQ7UqFH0Sr7vywjqDizUAqK7zM5FsUKsUXWHWwg/ehKg8j9xKcMv11AkFoUoujtfAujnKODkk58XSA9whPr7qcw3vPrmog680pnMSzf9LC7J6kXfs6lkoKfBh9VnlxusCrw2yg0qI1fHAZBLPx7mW6+me71QZsS6sVz8v8KXyrXsKTdnF50FjzHcK9HXDBtSJS5wA3fkcRYymJe0o6WMWNdgSRVpoSiWaHHmFgdMUJaYoCfhXzyl7LtNb3Q+Sveg+tJK7JaRXBLMUllOlJ6ll5Hod root@localhost"
bootloader --timeout=1 --append="net.ifnames=0 modprobe.blacklist=vc4"
network --bootproto=dhcp --device=link --activate --onboot=on
zerombr
clearpart --all --initlabel --disklabel=msdos
autopart --nohome --noswap --type=plain
ostreesetup --nogpg --osname=${IMAGE_TYPE} --remote=${IMAGE_TYPE} --url=http://192.168.100.1/repo/ --ref=${OSTREE_REF}
poweroff
%post --log=/var/log/anaconda/post-install.log --erroronfail
# no sudo password for user admin
echo -e 'admin\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
# Remove any persistent NIC rules generated by udev
rm -vf /etc/udev/rules.d/*persistent-net*.rules
# And ensure that we will do DHCP on eth0 on startup
cat > /etc/sysconfig/network-scripts/ifcfg-eth0 << EOF
DEVICE="eth0"
BOOTPROTO="dhcp"
ONBOOT="yes"
TYPE="Ethernet"
PERSISTENT_DHCLIENT="yes"
EOF
echo "Packages within this iot or edge image:"
echo "-----------------------------------------------------------------------"
rpm -qa | sort
echo "-----------------------------------------------------------------------"
# Note that running rpm recreates the rpm db files which aren't needed/wanted
rm -f /var/lib/rpm/__db*
echo "Zeroing out empty space."
# This forces the filesystem to reclaim space from deleted files
dd bs=1M if=/dev/zero of=/var/tmp/zeros || :
rm -f /var/tmp/zeros
echo "(Don't worry -- that out-of-space error was expected.)"
%end
STOPHERE
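# The kickstart's `ostreesetup` line points anaconda at the repo served by
# httpd on 192.168.100.1, the gateway address that the integration network
# defined above assigns to the host.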
# Install ostree image via anaconda.
greenprint "Install ostree image via anaconda"
sudo virt-install --initrd-inject="${KS_FILE}" \
--extra-args="ks=file:/ks.cfg console=ttyS0,115200" \
--name="${IMAGE_KEY}"\
--disk path="${LIBVIRT_IMAGE_PATH}",format=qcow2 \
--ram 3072 \
--vcpus 2 \
--network network=integration,mac=34:49:22:B0:83:30 \
--os-type linux \
--os-variant ${OS_VARIANT} \
--location ${BOOT_LOCATION} \
--nographics \
--noautoconsole \
--wait=-1 \
--noreboot
# Start VM.
greenprint "Start VM"
sudo virsh start "${IMAGE_KEY}"
# Check for ssh ready to go.
greenprint "🛃 Checking for SSH is ready to go"
for LOOP_COUNTER in $(seq 0 30); do
RESULTS="$(wait_for_ssh_up $GUEST_ADDRESS)"
if [[ $RESULTS == 1 ]]; then
echo "SSH is ready now! 🥳"
break
fi
sleep 10
done
# Check image installation result
check_result
##################################################
##
## ostree image/commit upgrade
##
##################################################
# Write a blueprint for ostree image.
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "upgrade"
description = "An upgrade ostree image"
version = "0.0.2"
modules = []
groups = []
[[packages]]
name = "python36"
version = "*"
[[packages]]
name = "wget"
version = "*"
EOF
# Build upgrade image.
build_image "$BLUEPRINT_FILE" upgrade
# Download the image and extract tar into web server root folder.
greenprint "📥 Downloading and extracting the image"
sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null
IMAGE_FILENAME="${COMPOSE_ID}-commit.tar"
UPGRADE_PATH="$(pwd)/upgrade"
mkdir -p "$UPGRADE_PATH"
sudo tar -xf "$IMAGE_FILENAME" -C "$UPGRADE_PATH"
sudo rm -f "$IMAGE_FILENAME"
# Clean compose and blueprints.
greenprint "Clean up osbuild-composer again"
sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
sudo composer-cli blueprints delete upgrade > /dev/null
# Introduce new ostree commit into repo.
greenprint "Introduce new ostree commit into repo"
sudo ostree pull-local --repo "${HTTPD_PATH}/repo" "${UPGRADE_PATH}/repo" "$OSTREE_REF"
sudo ostree summary --update --repo "${HTTPD_PATH}/repo"
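# Regenerating the summary file is what makes the newly pulled commit
# discoverable to clients resolving refs against this "remote" repo.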
# Ensure SELinux is happy with all objects files.
greenprint "👿 Running restorecon on web server root folder"
sudo restorecon -Rv "${HTTPD_PATH}/repo" > /dev/null
# Get ostree commit value.
greenprint "Get ostree image commit value"
UPGRADE_HASH=$(jq -r '."ostree-commit"' < "${UPGRADE_PATH}"/compose.json)
# Upgrade image/commit.
greenprint "Upgrade ostree image/commit"
sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" admin@${GUEST_ADDRESS} 'sudo rpm-ostree upgrade'
sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" admin@${GUEST_ADDRESS} 'nohup sudo systemctl reboot &>/dev/null & exit'
# Sleep 10 seconds to give the VM time to start rebooting.
sleep 10
# Check for ssh ready to go.
greenprint "🛃 Checking for SSH is ready to go"
# shellcheck disable=SC2034 # Unused variables left for readability
for LOOP_COUNTER in $(seq 0 30); do
RESULTS="$(wait_for_ssh_up $GUEST_ADDRESS)"
if [[ $RESULTS == 1 ]]; then
echo "SSH is ready now! 🥳"
break
fi
sleep 10
done
# Check ostree upgrade result
check_result
# Add instance IP address into /etc/ansible/hosts
sudo tee "${TEMPDIR}"/inventory > /dev/null << EOF
[ostree_guest]
${GUEST_ADDRESS}
[ostree_guest:vars]
ansible_python_interpreter=/usr/bin/python3
ansible_user=admin
ansible_private_key_file=${SSH_KEY}
ansible_ssh_common_args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
EOF
# Test IoT/Edge OS
sudo ansible-playbook -v -i "${TEMPDIR}"/inventory -e image_type=${IMAGE_TYPE} -e ostree_commit="${UPGRADE_HASH}" /usr/share/tests/osbuild-composer/ansible/check_ostree.yaml || RESULTS=0
check_result
# Final success clean up
clean_up
exit 0

test/cmd/qemu.sh (new executable file)

@@ -0,0 +1,319 @@
#!/bin/bash
set -euo pipefail
OSBUILD_COMPOSER_TEST_DATA=/usr/share/tests/osbuild-composer/
# Get OS data.
source /etc/os-release
ARCH=$(uname -m)
# Take the image type passed to the script or use qcow2 by default if nothing
# was passed.
IMAGE_TYPE=${1:-qcow2}
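# Example (hypothetical) invocations:
#   sudo ./qemu.sh            # defaults to the qcow2 image type
#   sudo ./qemu.sh openstack  # openstack image; the artifact is a .qcow2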
# Select the file extension based on the image that we are building.
IMAGE_EXTENSION=$IMAGE_TYPE
if [[ $IMAGE_TYPE == 'openstack' ]]; then
IMAGE_EXTENSION=qcow2
fi
# RHEL 8 cannot boot a VMDK using libvirt. See BZ 999789.
if [[ $IMAGE_TYPE == vmdk ]]; then
echo "🤷 libvirt cannot boot stream-optimized VMDK."
exit 0
fi
# Apply lorax patch to work around pytoml issues in RHEL 8.x.
# See BZ 1843704 or https://github.com/weldr/lorax/pull/1030 for more details.
if [[ $ID == rhel ]]; then
sudo sed -r -i 's#toml.load\(args\[3\]\)#toml.load(open(args[3]))#' \
/usr/lib/python3.6/site-packages/composer/cli/compose.py
sudo rm -f /usr/lib/python3.6/site-packages/composer/cli/compose.pyc
fi
# Colorful output.
function greenprint {
echo -e "\033[1;32m${1}\033[0m"
}
# Install required packages.
greenprint "📦 Installing required packages"
sudo dnf -y install jq libvirt-client libvirt-daemon \
libvirt-daemon-config-network libvirt-daemon-config-nwfilter \
libvirt-daemon-driver-interface libvirt-daemon-driver-network \
libvirt-daemon-driver-nodedev libvirt-daemon-driver-nwfilter \
libvirt-daemon-driver-qemu libvirt-daemon-driver-secret \
libvirt-daemon-driver-storage libvirt-daemon-driver-storage-disk \
libvirt-daemon-kvm qemu-img qemu-kvm virt-install
# Start libvirtd and test it.
greenprint "🚀 Starting libvirt daemon"
sudo systemctl start libvirtd
sudo virsh list --all > /dev/null
# Set a customized dnsmasq configuration for libvirt so we always get the
# same address on bootup.
sudo tee /tmp/integration.xml > /dev/null << EOF
<network>
<name>integration</name>
<uuid>1c8fe98c-b53a-4ca4-bbdb-deb0f26b3579</uuid>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='integration' stp='on' delay='0'/>
<mac address='52:54:00:36:46:ef'/>
<ip address='192.168.100.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.100.2' end='192.168.100.254'/>
<host mac='34:49:22:B0:83:30' name='vm' ip='192.168.100.50'/>
</dhcp>
</ip>
</network>
EOF
if ! sudo virsh net-info integration > /dev/null 2>&1; then
sudo virsh net-define /tmp/integration.xml
sudo virsh net-start integration
fi
# Allow anyone in the wheel group to talk to libvirt.
greenprint "🚪 Allowing users in wheel group to talk to libvirt"
WHEEL_GROUP=wheel
if [[ $ID == rhel ]]; then
WHEEL_GROUP=adm
fi
sudo tee /etc/polkit-1/rules.d/50-libvirt.rules > /dev/null << EOF
polkit.addRule(function(action, subject) {
if (action.id == "org.libvirt.unix.manage" &&
subject.isInGroup("${WHEEL_GROUP}")) {
return polkit.Result.YES;
}
});
EOF
# Set up variables.
TEST_UUID=$(uuidgen)
IMAGE_KEY=osbuild-composer-aws-test-${TEST_UUID}
INSTANCE_ADDRESS=192.168.100.50
# Set up temporary files.
TEMPDIR=$(mktemp -d)
BLUEPRINT_FILE=${TEMPDIR}/blueprint.toml
COMPOSE_START=${TEMPDIR}/compose-start-${IMAGE_KEY}.json
COMPOSE_INFO=${TEMPDIR}/compose-info-${IMAGE_KEY}.json
# Check for the smoke test file on the VM that we boot.
smoke_test_check () {
# Use the ssh key shipped with the test data.
SSH_KEY=${OSBUILD_COMPOSER_TEST_DATA}keyring/id_rsa
SSH_OPTIONS=(-o StrictHostKeyChecking=no -o ConnectTimeout=5)
SMOKE_TEST=$(sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" redhat@"${1}" 'cat /etc/smoke-test.txt')
if [[ $SMOKE_TEST == smoke-test ]]; then
echo 1
else
echo 0
fi
}
# Get the compose log.
get_compose_log () {
COMPOSE_ID=$1
LOG_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-${IMAGE_TYPE}.log
# Download the logs.
sudo composer-cli compose log "$COMPOSE_ID" | tee "$LOG_FILE" > /dev/null
}
# Get the compose metadata.
get_compose_metadata () {
COMPOSE_ID=$1
METADATA_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-${IMAGE_TYPE}.json
# Download the metadata.
sudo composer-cli compose metadata "$COMPOSE_ID" > /dev/null
# Find the tarball and extract it.
TARBALL=$(basename "$(find . -maxdepth 1 -type f -name "*-metadata.tar")")
tar -xf "$TARBALL"
rm -f "$TARBALL"
# Move the JSON file into place.
cat "${COMPOSE_ID}".json | jq -M '.' | tee "$METADATA_FILE" > /dev/null
}
# Write a basic blueprint for our image.
# NOTE(mhayden): The service customization won't always be required for
# QCOW2, but it is needed for OpenStack due to issue #698 in osbuild-composer. 😭
# NOTE(mhayden): The cloud-init package isn't included in VHD/Azure images
# by default and it must be added here.
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "bash"
description = "A base system with bash"
version = "0.0.1"
[[packages]]
name = "bash"
[[packages]]
name = "cloud-init"
[customizations.services]
enabled = ["sshd", "cloud-init", "cloud-init-local", "cloud-config", "cloud-final"]
[customizations.kernel]
append = "LANG=en_US.UTF-8 net.ifnames=0 biosdevname=0"
EOF
# Prepare the blueprint for the compose.
greenprint "📋 Preparing blueprint"
sudo composer-cli blueprints push "$BLUEPRINT_FILE"
sudo composer-cli blueprints depsolve bash
# Get worker unit file so we can watch the journal.
WORKER_UNIT=$(sudo systemctl list-units | grep -o -E "osbuild.*worker.*\.service")
sudo journalctl -af -n 1 -u "${WORKER_UNIT}" &
WORKER_JOURNAL_PID=$!
# Start the compose
greenprint "🚀 Starting compose"
sudo composer-cli --json compose start bash "$IMAGE_TYPE" | tee "$COMPOSE_START"
COMPOSE_ID=$(jq -r '.build_id' "$COMPOSE_START")
# Wait for the compose to finish.
greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}"
while true; do
sudo composer-cli --json compose info "${COMPOSE_ID}" | tee "$COMPOSE_INFO" > /dev/null
COMPOSE_STATUS=$(jq -r '.queue_status' "$COMPOSE_INFO")
# Is the compose finished?
if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
break
fi
# Wait 5 seconds and try again.
sleep 5
done
# Capture the compose logs from osbuild.
greenprint "💬 Getting compose log and metadata"
get_compose_log "$COMPOSE_ID"
get_compose_metadata "$COMPOSE_ID"
# Did the compose finish with success?
if [[ $COMPOSE_STATUS != FINISHED ]]; then
echo "Something went wrong with the compose. 😢"
exit 1
fi
# Stop watching the worker journal.
sudo kill ${WORKER_JOURNAL_PID}
# Download the image.
greenprint "📥 Downloading the image"
sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null
IMAGE_FILENAME=$(basename "$(find . -maxdepth 1 -type f -name "*.${IMAGE_EXTENSION}")")
LIBVIRT_IMAGE_PATH=/var/lib/libvirt/images/${IMAGE_KEY}.${IMAGE_EXTENSION}
sudo mv "$IMAGE_FILENAME" "$LIBVIRT_IMAGE_PATH"
# Prepare cloud-init data.
CLOUD_INIT_DIR=$(mktemp -d)
cp "${OSBUILD_COMPOSER_TEST_DATA}"/cloud-init/{meta,user}-data "${CLOUD_INIT_DIR}"/
cp "${OSBUILD_COMPOSER_TEST_DATA}"/cloud-init/network-config "${CLOUD_INIT_DIR}"/
# Set up a cloud-init ISO.
greenprint "💿 Creating a cloud-init ISO"
CLOUD_INIT_PATH=/var/lib/libvirt/images/seed.iso
sudo rm -f $CLOUD_INIT_PATH
pushd "$CLOUD_INIT_DIR"
sudo genisoimage -o $CLOUD_INIT_PATH -V cidata \
-r -J user-data meta-data network-config > /dev/null 2>&1
popd
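# The `cidata` volume label is what cloud-init's NoCloud datasource looks
# for when scanning attached media, so the VM picks up user-data, meta-data,
# and network-config on first boot.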
# Ensure SELinux is happy with our new images.
greenprint "👿 Running restorecon on image directory"
sudo restorecon -Rv /var/lib/libvirt/images/
# Run virt-install to import the QCOW and boot it.
greenprint "🚀 Booting the image with libvirt"
if [[ $ARCH == 'ppc64le' ]]; then
# ppc64le has some machine quirks that must be worked around.
sudo virt-install \
--name "$IMAGE_KEY" \
--memory 2048 \
--vcpus 2 \
--disk path="${LIBVIRT_IMAGE_PATH}" \
--disk path=${CLOUD_INIT_PATH},device=cdrom \
--import \
--os-variant rhel8-unknown \
--noautoconsole \
--network network=integration,mac=34:49:22:B0:83:30 \
--qemu-commandline="-machine pseries,cap-cfpc=broken,cap-sbbc=broken,cap-ibs=broken,cap-ccf-assist=off,cap-large-decr=off"
elif [[ $ARCH == 's390x' ]]; then
# Our s390x machines are highly constrained on resources.
sudo virt-install \
--name "$IMAGE_KEY" \
--memory 512 \
--vcpus 1 \
--disk path="${LIBVIRT_IMAGE_PATH}" \
--disk path=${CLOUD_INIT_PATH},device=cdrom \
--import \
--os-variant rhel8-unknown \
--noautoconsole \
--network network=integration,mac=34:49:22:B0:83:30
else
sudo virt-install \
--name "$IMAGE_KEY" \
--memory 1024 \
--vcpus 2 \
--disk path="${LIBVIRT_IMAGE_PATH}" \
--disk path=${CLOUD_INIT_PATH},device=cdrom \
--import \
--os-variant rhel8-unknown \
--noautoconsole \
--network network=integration,mac=34:49:22:B0:83:30
fi
# Set a number of maximum loops to check for our smoke test file via ssh.
case $ARCH in
s390x)
# s390x needs more time to boot its VM.
MAX_LOOPS=60
;;
*)
MAX_LOOPS=30
;;
esac
# Check for our smoke test file.
greenprint "🛃 Checking for smoke test file in VM"
# shellcheck disable=SC2034 # Unused variables left for readability
for LOOP_COUNTER in $(seq 0 ${MAX_LOOPS}); do
RESULTS="$(smoke_test_check $INSTANCE_ADDRESS)"
if [[ $RESULTS == 1 ]]; then
echo "Smoke test passed! 🥳"
break
fi
sleep 10
done
# Clean up our mess.
greenprint "🧼 Cleaning up"
sudo virsh destroy "${IMAGE_KEY}"
if [[ $ARCH == aarch64 ]]; then
sudo virsh undefine "${IMAGE_KEY}" --nvram
else
sudo virsh undefine "${IMAGE_KEY}"
fi
sudo rm -f "$LIBVIRT_IMAGE_PATH" $CLOUD_INIT_PATH
# Use the return code of the smoke test to determine if we passed or failed.
if [[ $RESULTS == 1 ]]; then
greenprint "💚 Success"
else
greenprint "❌ Failed"
exit 1
fi
exit 0