#!/bin/bash

# Integration test: build a RHEL for Edge container + installer with
# osbuild-composer, install it into BIOS and UEFI libvirt guests, then
# upgrade the deployed commit and verify the result with Ansible.
set -euo pipefail

# Provision the software under test.
/usr/libexec/osbuild-composer-test/provision.sh

# Get OS data (provides $ID and $VERSION_ID used below).
source /etc/os-release
ARCH=$(uname -m)

# Colorful output.
function greenprint {
    echo -e "\033[1;32m[$(date -Isecond)] ${1}\033[0m"
}

# Start libvirtd and test it.
greenprint "🚀 Starting libvirt daemon"
sudo systemctl start libvirtd
sudo virsh list --all > /dev/null

# Set a customized dnsmasq configuration for libvirt so we always get the
# same address on bootup.
# NOTE(review): the XML heredoc body was garbled in this copy of the file
# (element tags stripped); the definition below is reconstructed from the
# guest MAC addresses and IPs used later in the script — confirm against
# the upstream test case before relying on it.
sudo tee /tmp/integration.xml > /dev/null << EOF
<network>
  <name>integration</name>
  <uuid>1c8fe98c-b53a-4ca4-bbdb-deb0f26b3579</uuid>
  <forward mode='nat'>
    <nat>
      <port start='1024' end='65535'/>
    </nat>
  </forward>
  <bridge name='integration' zone='trusted' stp='on' delay='0'/>
  <mac address='52:54:00:36:46:ef'/>
  <ip address='192.168.100.1' netmask='255.255.255.0'>
    <dhcp>
      <range start='192.168.100.2' end='192.168.100.254'/>
      <host mac='34:49:22:B0:83:30' name='vm-bios' ip='192.168.100.50'/>
      <host mac='34:49:22:B0:83:31' name='vm-uefi' ip='192.168.100.51'/>
    </dhcp>
  </ip>
</network>
EOF
# Define and start the network only if needed (idempotent re-runs).
if ! sudo virsh net-info integration > /dev/null 2>&1; then
    sudo virsh net-define /tmp/integration.xml
fi
if [[ $(sudo virsh net-info integration | grep 'Active' | awk '{print $2}') == 'no' ]]; then
    sudo virsh net-start integration
fi

# Allow unprivileged users to manage libvirt.
# NOTE(review): the log message says "wheel" but the polkit rule matches the
# "adm" group — this mismatch exists in the original script; the rule below
# is what takes effect.
greenprint "🚪 Allowing users in wheel group to talk to libvirt"
sudo tee /etc/polkit-1/rules.d/50-libvirt.rules > /dev/null << EOF
polkit.addRule(function(action, subject) {
    if (action.id == "org.libvirt.unix.manage" &&
        subject.isInGroup("adm")) {
            return polkit.Result.YES;
    }
});
EOF

# Set up variables.
OSTREE_REF="test/rhel/9/${ARCH}/edge"
OS_VARIANT="rhel9-unknown"
TEST_UUID=$(uuidgen)
IMAGE_KEY="osbuild-composer-ostree-test-${TEST_UUID}"
BIOS_GUEST_ADDRESS=192.168.100.50
UEFI_GUEST_ADDRESS=192.168.100.51
PROD_REPO_URL=http://192.168.100.1/repo
PROD_REPO=/var/www/html/repo
STAGE_REPO_ADDRESS=192.168.200.1
STAGE_REPO_URL="http://${STAGE_REPO_ADDRESS}/repo/"
QUAY_REPO_URL="docker://quay.io/osbuild/testing-rhel-edge-push"
# Random 4-character tag so concurrent CI runs do not collide on quay.io.
QUAY_REPO_TAG=$(tr -dc a-z0-9 < /dev/urandom | head -c 4 ; echo '')
ARTIFACTS="ci-artifacts"
mkdir -p "${ARTIFACTS}"

# Set up temporary files.
TEMPDIR=$(mktemp -d)
BLUEPRINT_FILE=${TEMPDIR}/blueprint.toml
KS_FILE=${TEMPDIR}/ks.cfg
COMPOSE_START=${TEMPDIR}/compose-start-${IMAGE_KEY}.json
COMPOSE_INFO=${TEMPDIR}/compose-info-${IMAGE_KEY}.json

# SSH setup.
SSH_OPTIONS=(-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=5)
SSH_DATA_DIR=$(/usr/libexec/osbuild-composer-test/gen-ssh.sh)
SSH_KEY=${SSH_DATA_DIR}/id_rsa
SSH_KEY_PUB=$(cat "${SSH_KEY}".pub)

# This test case only supports RHEL 9.0; bail out early on anything else.
if [[ "${ID}-${VERSION_ID}" != "rhel-9.0" ]]; then
    echo "unsupported distro: ${ID}-${VERSION_ID}"
    exit 1
fi

CONTAINER_TYPE=edge-container
CONTAINER_FILENAME=container.tar
INSTALLER_TYPE=edge-installer
INSTALLER_FILENAME=installer.iso


# Download the osbuild log of a compose into $ARTIFACTS.
#   $1 - compose UUID
get_compose_log () {
    COMPOSE_ID=$1
    LOG_FILE=${ARTIFACTS}/osbuild-${ID}-${VERSION_ID}-${COMPOSE_ID}.log

    # Download the logs.
    sudo composer-cli compose log "$COMPOSE_ID" | tee "$LOG_FILE" > /dev/null
}

# Download and unpack the metadata tarball of a compose into $ARTIFACTS.
#   $1 - compose UUID
get_compose_metadata () {
    COMPOSE_ID=$1
    METADATA_FILE=${ARTIFACTS}/osbuild-${ID}-${VERSION_ID}-${COMPOSE_ID}.json

    # Download the metadata (composer-cli drops a tarball into the cwd).
    sudo composer-cli compose metadata "$COMPOSE_ID" > /dev/null

    # Find the tarball and extract it.
    TARBALL=$(basename "$(find . -maxdepth 1 -type f -name "*-metadata.tar")")
    tar -xf "$TARBALL" -C "${TEMPDIR}"
    rm -f "$TARBALL"

    # Move the JSON file into place (pretty-printed via jq).
    jq -M '.' "${TEMPDIR}/${COMPOSE_ID}.json" | tee "$METADATA_FILE" > /dev/null
}

# Start an ostree compose and block until it finishes.
#   $1 - blueprint name
#   $2 - image type (e.g. edge-container, edge-installer)
#   $3 - (optional) ostree parent repo URL, passed as --url
# Sets COMPOSE_ID for the caller; exits the script if the compose fails.
build_image() {
    blueprint_name=$1
    image_type=$2

    # Get worker unit file so we can watch the journal.
    WORKER_UNIT=$(sudo systemctl list-units | grep -o -E "osbuild.*worker.*\.service")
    sudo journalctl -af -n 1 -u "${WORKER_UNIT}" &
    WORKER_JOURNAL_PID=$!
    # Stop watching the worker journal when exiting.
    trap 'sudo pkill -P "${WORKER_JOURNAL_PID}"' EXIT

    # Start the compose.
    greenprint "🚀 Starting compose"
    if [ $# -eq 3 ]; then
        repo_url=$3
        sudo composer-cli --json compose start-ostree --ref "$OSTREE_REF" --url "$repo_url" "$blueprint_name" "$image_type" | tee "$COMPOSE_START"
    else
        sudo composer-cli --json compose start-ostree --ref "$OSTREE_REF" "$blueprint_name" "$image_type" | tee "$COMPOSE_START"
    fi
    COMPOSE_ID=$(jq -r '.body.build_id' "$COMPOSE_START")

    # Wait for the compose to finish.
    greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}"
    while true; do
        sudo composer-cli --json compose info "${COMPOSE_ID}" | tee "$COMPOSE_INFO" > /dev/null
        COMPOSE_STATUS=$(jq -r '.body.queue_status' "$COMPOSE_INFO")

        # Is the compose finished?
        if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
            break
        fi

        # Wait 5 seconds and try again.
        sleep 5
    done

    # Capture the compose logs from osbuild.
    greenprint "💬 Getting compose log and metadata"
    get_compose_log "$COMPOSE_ID"
    get_compose_metadata "$COMPOSE_ID"

    # Kill the journal monitor immediately and remove the trap.
    sudo pkill -P "${WORKER_JOURNAL_PID}"
    trap - EXIT

    # Did the compose finish with success?
    if [[ $COMPOSE_STATUS != FINISHED ]]; then
        echo "Something went wrong with the compose. 😢"
        exit 1
    fi
}

# Wait for the ssh server to come up.
# Probe the guest at address $1 over SSH.
# Prints "1" when the guest answers READY, "0" otherwise.
wait_for_ssh_up () {
    SSH_STATUS=$(sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" admin@"${1}" '/bin/bash -c "echo -n READY"')
    if [[ $SSH_STATUS == READY ]]; then
        echo 1
    else
        echo 0
    fi
}

# Tear down everything this test created: quay tag, UEFI VM, containers,
# repos, temp files and the httpd service.
clean_up () {
    greenprint "🧼 Cleaning up"
    # Remove tag from quay.io repo.
    skopeo delete --creds "${V2_QUAY_USERNAME}:${V2_QUAY_PASSWORD}" "${QUAY_REPO_URL}:${QUAY_REPO_TAG}"

    # Destroy and undefine the UEFI VM.
    if [[ $(sudo virsh domstate "${IMAGE_KEY}-uefi") == "running" ]]; then
        sudo virsh destroy "${IMAGE_KEY}-uefi"
    fi
    sudo virsh undefine "${IMAGE_KEY}-uefi" --nvram
    # Remove qcow2 file.
    sudo rm -f "$LIBVIRT_UEFI_IMAGE_PATH"

    # Remove any leftover containers, then all images.
    sudo podman ps -a -q --format "{{.ID}}" | sudo xargs --no-run-if-empty podman rm -f
    sudo podman rmi -f -a

    # Remove prod repo.
    sudo rm -rf "$PROD_REPO"

    # Remove tmp dir.
    sudo rm -rf "$TEMPDIR"

    # Stop prod repo http service.
    sudo systemctl disable --now httpd
}

# Check $RESULTS; on failure clean up and abort the test run.
check_result () {
    greenprint "🎏 Checking for test result"
    if [[ $RESULTS == 1 ]]; then
        greenprint "💚 Success"
    else
        greenprint "❌ Failed"
        clean_up
        exit 1
    fi
}

###########################################################
##
## Prepare edge prod and stage repo
##
###########################################################
greenprint "🔧 Prepare edge prod repo"
# Start prod repo web service
# osbuild-composer-tests have mod_ssl as a dependency. The package installs
# an example configuration which automatically enabled httpd on port 443, but
# that one is already in use. Remove the default configuration as it is useless
# anyway.
sudo rm -f /etc/httpd/conf.d/ssl.conf
sudo systemctl enable --now httpd.service

# Start from a clean prod repo, in archive mode, mirroring the stage repo.
sudo rm -rf "$PROD_REPO"
sudo mkdir -p "$PROD_REPO"
sudo ostree --repo="$PROD_REPO" init --mode=archive
sudo ostree --repo="$PROD_REPO" remote add --no-gpg-verify edge-stage "$STAGE_REPO_URL"

# Create the podman bridge network for the stage repo container (idempotent).
greenprint "🔧 Prepare stage repo network"
sudo podman network inspect edge >/dev/null 2>&1 || sudo podman network create --driver=bridge --subnet=192.168.200.0/24 --gateway=192.168.200.254 edge

##########################################################
##
## edge container image for building installer image
##
##########################################################

# Write a blueprint for ostree image.
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "container"
description = "A base rhel-edge container image"
version = "0.0.1"
modules = []
groups = []

[[packages]]
name = "python3"
version = "*"

[[customizations.user]]
name = "admin"
description = "Administrator account"
password = "\$6\$GRmb7S0p8vsYmXzH\$o0E020S.9JQGaHkszoog4ha4AQVs3sk8q0DvLjSMxoxHBKnB2FBXGQ/OkwZQfW/76ktHd0NX5nls2LPxPuUdl."
key = "${SSH_KEY_PUB}"
home = "/home/admin/"
groups = ["wheel"]
EOF

greenprint "📄 container blueprint"
cat "$BLUEPRINT_FILE"

# Push the blueprint into composer and resolve its dependencies.
greenprint "📋 Preparing container blueprint"
sudo composer-cli blueprints push "$BLUEPRINT_FILE"
sudo composer-cli blueprints depsolve container

# Build container image.
build_image container "${CONTAINER_TYPE}"

# Download the image.
greenprint "📥 Downloading the container image"
sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null

# Clear any previous stage repo containers and images.
greenprint "🧹 Clearing stage repo running env"
sudo podman ps -a -q --format "{{.ID}}" | sudo xargs --no-run-if-empty podman rm -f
sudo podman rmi -f -a

# Push the edge container through quay.io and pull it back, exercising the
# registry round-trip before serving it as the stage repo.
greenprint "🗜 Pushing image to quay.io"
IMAGE_FILENAME="${COMPOSE_ID}-${CONTAINER_FILENAME}"
skopeo copy --dest-creds "${V2_QUAY_USERNAME}:${V2_QUAY_PASSWORD}" "oci-archive:${IMAGE_FILENAME}" "${QUAY_REPO_URL}:${QUAY_REPO_TAG}"
greenprint "Downloading image from quay.io"
sudo podman login quay.io --username "${V2_QUAY_USERNAME}" --password "${V2_QUAY_PASSWORD}"
sudo podman pull "${QUAY_REPO_URL}:${QUAY_REPO_TAG}"
sudo podman images
greenprint "🗜 Running the image"
sudo podman run -d --name rhel-edge --network edge --ip "$STAGE_REPO_ADDRESS" "${QUAY_REPO_URL}:${QUAY_REPO_TAG}"
# Clear image file.
sudo rm -f "$IMAGE_FILENAME"

# Wait for the stage repo container to be running.
until [ "$(sudo podman inspect -f '{{.State.Running}}' rhel-edge)" == "true" ]; do
    sleep 1;
done;

# Mirror the installer content from the stage repo into the prod repo.
greenprint "📡 Sync installer content from stage repo"
sudo ostree --repo="$PROD_REPO" pull --mirror edge-stage "$OSTREE_REF"

# Clean compose and blueprints.
greenprint "🧽 Clean up container blueprint and compose"
sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
sudo composer-cli blueprints delete container > /dev/null

########################################################
##
## rhel-edge installer image building from container image
##
########################################################

# Write a blueprint for installer image.
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "installer"
description = "A rhel-edge installer image"
version = "0.0.1"
modules = []
groups = []
EOF

greenprint "📄 installer blueprint"
cat "$BLUEPRINT_FILE"

# Push the blueprint into composer and resolve its dependencies.
greenprint "📋 Preparing installer blueprint"
sudo composer-cli blueprints push "$BLUEPRINT_FILE"
sudo composer-cli blueprints depsolve installer

# Build installer image.
# Test the --url arg followed by a URL with a trailing slash for bz#1942029.
build_image installer "${INSTALLER_TYPE}" "${PROD_REPO_URL}/"

# Download the ISO and move it where libvirt can use it.
greenprint "📥 Downloading the installer image"
sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null
ISO_FILENAME="${COMPOSE_ID}-${INSTALLER_FILENAME}"
sudo mv "${ISO_FILENAME}" /var/lib/libvirt/images

# Clean compose and blueprints.
greenprint "🧹 Clean up installer blueprint and compose"
sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
sudo composer-cli blueprints delete installer > /dev/null

########################################################
##
## install rhel-edge image with installer(ISO)
##
########################################################

# Ensure SELinux is happy with our new images.
greenprint "👿 Running restorecon on image directory"
sudo restorecon -Rv /var/lib/libvirt/images/

# Create empty disks for the BIOS and UEFI guests.
greenprint "🖥 Create qcow2 file for virt install"
LIBVIRT_BIOS_IMAGE_PATH=/var/lib/libvirt/images/${IMAGE_KEY}-bios.qcow2
LIBVIRT_UEFI_IMAGE_PATH=/var/lib/libvirt/images/${IMAGE_KEY}-uefi.qcow2
sudo qemu-img create -f qcow2 "${LIBVIRT_BIOS_IMAGE_PATH}" 20G
sudo qemu-img create -f qcow2 "${LIBVIRT_UEFI_IMAGE_PATH}" 20G

# Write kickstart file for ostree image installation.
-greenprint "๐Ÿ“‘ Generate kickstart file" -tee "$KS_FILE" > /dev/null << STOPHERE -text -network --bootproto=dhcp --device=link --activate --onboot=on - -zerombr -clearpart --all --initlabel --disklabel=msdos -autopart --nohome --noswap --type=plain -ostreesetup --nogpg --osname=rhel-edge --remote=rhel-edge --url=file:///ostree/repo --ref=${OSTREE_REF} -poweroff - -%post --log=/var/log/anaconda/post-install.log --erroronfail - -# no sudo password for user admin -echo -e 'admin\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers - -# delete local ostree repo and add external prod edge repo -ostree remote delete rhel-edge -ostree remote add --no-gpg-verify --no-sign-verify rhel-edge ${PROD_REPO_URL} -%end -STOPHERE - -################################################## -## -## Install and test Edge image on BIOS VM -## -################################################## -# Install ostree image via anaconda. -greenprint "๐Ÿ’ฟ Install ostree image via installer(ISO) on BIOS VM" -sudo virt-install --initrd-inject="${KS_FILE}" \ - --extra-args="inst.ks=file:/ks.cfg console=ttyS0,115200" \ - --name="${IMAGE_KEY}-bios" \ - --disk path="${LIBVIRT_BIOS_IMAGE_PATH}",format=qcow2 \ - --ram 3072 \ - --vcpus 2 \ - --network network=integration,mac=34:49:22:B0:83:30 \ - --os-type linux \ - --os-variant ${OS_VARIANT} \ - --location "/var/lib/libvirt/images/${ISO_FILENAME}" \ - --nographics \ - --noautoconsole \ - --wait=-1 \ - --noreboot - -# Start VM. -greenprint "๐Ÿ“Ÿ Start BIOS VM" -sudo virsh start "${IMAGE_KEY}-bios" - -# Check for ssh ready to go. -greenprint "๐Ÿ›ƒ Checking for SSH is ready to go" -for LOOP_COUNTER in $(seq 0 30); do - RESULTS="$(wait_for_ssh_up $BIOS_GUEST_ADDRESS)" - if [[ $RESULTS == 1 ]]; then - echo "SSH is ready now! ๐Ÿฅณ" - break - fi - sleep 10 -done - -# Check image installation result -check_result - -# Get ostree commit value. 
-greenprint "๐Ÿ•น Get ostree install commit value" -INSTALL_HASH=$(curl "${PROD_REPO_URL}/refs/heads/${OSTREE_REF}") - -# Run Edge test on BIOS VM -# Add instance IP address into /etc/ansible/hosts -sudo tee "${TEMPDIR}"/inventory > /dev/null << EOF -[ostree_guest] -${BIOS_GUEST_ADDRESS} - -[ostree_guest:vars] -ansible_python_interpreter=/usr/bin/python3 -ansible_user=admin -ansible_private_key_file=${SSH_KEY} -ansible_ssh_common_args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" -EOF - -# Test IoT/Edge OS -greenprint "๐Ÿ“ผ Run Edge tests on BIOS VM" -sudo ansible-playbook -v -i "${TEMPDIR}"/inventory -e image_type=rhel-edge -e ostree_commit="${INSTALL_HASH}" /usr/share/tests/osbuild-composer/ansible/check_ostree.yaml || RESULTS=0 -check_result - -# Clean up BIOS VM -greenprint "๐Ÿงน Clean up BIOS VM" -if [[ $(sudo virsh domstate "${IMAGE_KEY}-bios") == "running" ]]; then - sudo virsh destroy "${IMAGE_KEY}-bios" -fi -sudo virsh undefine "${IMAGE_KEY}-bios" -sudo rm -f "$LIBVIRT_BIOS_IMAGE_PATH" - -################################################## -## -## Install, upgrade and test Edge image on UEFI VM -## -################################################## -# Install ostree image via anaconda. -greenprint "๐Ÿ’ฟ Install ostree image via installer(ISO) on UEFI VM" -sudo virt-install --initrd-inject="${KS_FILE}" \ - --extra-args="inst.ks=file:/ks.cfg console=ttyS0,115200" \ - --name="${IMAGE_KEY}-uefi"\ - --disk path="${LIBVIRT_UEFI_IMAGE_PATH}",format=qcow2 \ - --ram 3072 \ - --vcpus 2 \ - --network network=integration,mac=34:49:22:B0:83:31 \ - --os-type linux \ - --os-variant ${OS_VARIANT} \ - --location "/var/lib/libvirt/images/${ISO_FILENAME}" \ - --boot uefi,loader_ro=yes,loader_type=pflash,nvram_template=/usr/share/edk2/ovmf/OVMF_VARS.fd,loader_secure=no \ - --nographics \ - --noautoconsole \ - --wait=-1 \ - --noreboot - -# Start VM. -greenprint "๐Ÿ’ป Start UEFI VM" -sudo virsh start "${IMAGE_KEY}-uefi" - -# Check for ssh ready to go. 
-greenprint "๐Ÿ›ƒ Checking for SSH is ready to go" -for LOOP_COUNTER in $(seq 0 30); do - RESULTS="$(wait_for_ssh_up $UEFI_GUEST_ADDRESS)" - if [[ $RESULTS == 1 ]]; then - echo "SSH is ready now! ๐Ÿฅณ" - break - fi - sleep 10 -done - -# Check image installation result -check_result - -################################################## -## -## upgrade rhel-edge with new upgrade commit -## -################################################## - -# Write a blueprint for ostree image. -tee "$BLUEPRINT_FILE" > /dev/null << EOF -name = "upgrade" -description = "An upgrade rhel-edge container image" -version = "0.0.2" -modules = [] -groups = [] - -[[packages]] -name = "python3" -version = "*" - -[[packages]] -name = "wget" -version = "*" - -[[customizations.user]] -name = "admin" -description = "Administrator account" -password = "\$6\$GRmb7S0p8vsYmXzH\$o0E020S.9JQGaHkszoog4ha4AQVs3sk8q0DvLjSMxoxHBKnB2FBXGQ/OkwZQfW/76ktHd0NX5nls2LPxPuUdl." -key = "${SSH_KEY_PUB}" -home = "/home/admin/" -groups = ["wheel"] -EOF - -greenprint "๐Ÿ“„ upgrade blueprint" -cat "$BLUEPRINT_FILE" - -# Prepare the blueprint for the compose. -greenprint "๐Ÿ“‹ Preparing upgrade blueprint" -sudo composer-cli blueprints push "$BLUEPRINT_FILE" -sudo composer-cli blueprints depsolve upgrade - -# Build upgrade image. 
build_image upgrade "${CONTAINER_TYPE}" "$PROD_REPO_URL"

# Download the image.
greenprint "📥 Downloading the upgrade image"
sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null

# Clear any previous stage repo containers and images.
greenprint "🧹 Clearing stage repo running env"
sudo podman ps -a -q --format "{{.ID}}" | sudo xargs --no-run-if-empty podman rm -f
sudo podman rmi -f -a

# Load the upgrade container from the oci archive, no registry this time.
greenprint "🗜 Extracting image"
IMAGE_FILENAME="${COMPOSE_ID}-${CONTAINER_FILENAME}"
sudo podman pull "oci-archive:${IMAGE_FILENAME}"
sudo podman images
# Clear image file.
sudo rm -f "$IMAGE_FILENAME"

# Run edge stage repo.
greenprint "🛰 Running edge stage repo"
# The freshly pulled archive image is untagged, so find it via dangling=true.
EDGE_IMAGE_ID=$(sudo podman images --filter "dangling=true" --format "{{.ID}}")
sudo podman run -d --name rhel-edge --network edge --ip "$STAGE_REPO_ADDRESS" "$EDGE_IMAGE_ID"
# Wait for container to be running.
until [ "$(sudo podman inspect -f '{{.State.Running}}' rhel-edge)" == "true" ]; do
    sleep 1;
done;

# Mirror the upgrade commit into the prod repo and refresh the summary.
greenprint "⛓ Pull upgrade to prod mirror"
sudo ostree --repo="$PROD_REPO" pull --mirror edge-stage "$OSTREE_REF"
sudo ostree --repo="$PROD_REPO" static-delta generate "$OSTREE_REF"
sudo ostree --repo="$PROD_REPO" summary -u

# Get ostree commit value.
greenprint "🕹 Get ostree upgrade commit value"
UPGRADE_HASH=$(curl "${PROD_REPO_URL}/refs/heads/${OSTREE_REF}")

# Clean compose and blueprints.
greenprint "🧽 Clean up upgrade blueprint and compose"
sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
sudo composer-cli blueprints delete upgrade > /dev/null

# Upgrade image/commit.
-greenprint "๐Ÿ—ณ Upgrade ostree image/commit" -sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" admin@${UEFI_GUEST_ADDRESS} 'sudo rpm-ostree upgrade' -sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" admin@${UEFI_GUEST_ADDRESS} 'nohup sudo systemctl reboot &>/dev/null & exit' - -# Sleep 10 seconds here to make sure vm restarted already -sleep 10 - -# Check for ssh ready to go. -greenprint "๐Ÿ›ƒ Checking for SSH is ready to go" -# shellcheck disable=SC2034 # Unused variables left for readability -for LOOP_COUNTER in $(seq 0 30); do - RESULTS="$(wait_for_ssh_up $UEFI_GUEST_ADDRESS)" - if [[ $RESULTS == 1 ]]; then - echo "SSH is ready now! ๐Ÿฅณ" - break - fi - sleep 10 -done - -# Check ostree upgrade result -check_result - -# Add instance IP address into /etc/ansible/hosts -sudo tee "${TEMPDIR}"/inventory > /dev/null << EOF -[ostree_guest] -${UEFI_GUEST_ADDRESS} - -[ostree_guest:vars] -ansible_python_interpreter=/usr/bin/python3 -ansible_user=admin -ansible_private_key_file=${SSH_KEY} -ansible_ssh_common_args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" -EOF - -# Test IoT/Edge OS -sudo ansible-playbook -v -i "${TEMPDIR}"/inventory -e image_type=rhel-edge -e ostree_commit="${UPGRADE_HASH}" /usr/share/tests/osbuild-composer/ansible/check_ostree_nort.yaml || RESULTS=0 -check_result - -# Final success clean up -clean_up - -exit 0