From 738e6877e0f7128816d6922dff06ead7bdf35358 Mon Sep 17 00:00:00 2001
From: Jakub Rusz
Date: Tue, 2 Aug 2022 14:25:26 +0200
Subject: [PATCH] tests/ostree-rebase: split to uefi and bios

This test used to spawn two VMs at the same time, which requires more
memory than the OpenStack CI medium runner can provide. We want to be
using only medium runners, so this change is necessary to allow that.
---
 .gitlab-ci.yml                                |  18 +-
 test/cases/ostree-rebase-bios.sh              | 530 ++++++++++++++++++
 ...ostree-rebase.sh => ostree-rebase-uefi.sh} |  91 +--
 3 files changed, 547 insertions(+), 92 deletions(-)
 create mode 100755 test/cases/ostree-rebase-bios.sh
 rename test/cases/{ostree-rebase.sh => ostree-rebase-uefi.sh} (84%)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 947c50385..be76ec339 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -378,12 +378,26 @@ OSTree raw image:
           - rhos-01/rhel-9.1-nightly-x86_64
           - rhos-01/centos-stream-9-x86_64
 
-Rebase OSTree:
+Rebase OSTree BIOS:
   stage: test
   extends: OSTree
   script:
     - schutzbot/deploy.sh
-    - /usr/libexec/tests/osbuild-composer/ostree-rebase.sh
+    - /usr/libexec/tests/osbuild-composer/ostree-rebase-bios.sh
+  parallel:
+    matrix:
+      - RUNNER:
+          - rhos-01/rhel-8.7-nightly-x86_64
+          - rhos-01/rhel-9.1-nightly-x86_64
+          - rhos-01/centos-stream-8-x86_64
+          - rhos-01/centos-stream-9-x86_64
+
+Rebase OSTree UEFI:
+  stage: test
+  extends: OSTree
+  script:
+    - schutzbot/deploy.sh
+    - /usr/libexec/tests/osbuild-composer/ostree-rebase-uefi.sh
   parallel:
     matrix:
       - RUNNER:
diff --git a/test/cases/ostree-rebase-bios.sh b/test/cases/ostree-rebase-bios.sh
new file mode 100755
index 000000000..2d508ea8a
--- /dev/null
+++ b/test/cases/ostree-rebase-bios.sh
@@ -0,0 +1,530 @@
+#!/bin/bash
+set -euo pipefail
+
+# Get OS data.
+source /usr/libexec/osbuild-composer-test/set-env-variables.sh
+
+# Get the compose URL if running on unsubscribed RHEL.
+if [[ ${ID} == "rhel" ]] && ! sudo subscription-manager status; then
+    source /usr/libexec/osbuild-composer-test/define-compose-url.sh
+fi
+
+# Provision the software under test.
+/usr/libexec/osbuild-composer-test/provision.sh none
+
+# Colorful output.
+function greenprint {
+    echo -e "\033[1;32m[$(date -Isecond)] ${1}\033[0m"
+}
+
+function get_build_info() {
+    key="$1"
+    fname="$2"
+    if rpm -q --quiet weldr-client; then
+        key=".body${key}"
+    fi
+    jq -r "${key}" "${fname}"
+}
+
+# Start libvirtd and test it.
+greenprint "🚀 Starting libvirt daemon"
+sudo systemctl start libvirtd
+sudo virsh list --all > /dev/null
+
+# Set a customized dnsmasq configuration for libvirt so we always get the
+# same address on bootup.
+sudo tee /tmp/integration.xml > /dev/null << EOF
+<network>
+  <name>integration</name>
+  <uuid>1c8fe98c-b53a-4ca4-bbdb-deb0f26b3579</uuid>
+  <forward mode='nat'>
+    <nat>
+      <port start='1024' end='65535'/>
+    </nat>
+  </forward>
+  <bridge name='integration' stp='on' delay='0'/>
+  <ip address='192.168.100.1' netmask='255.255.255.0'>
+    <dhcp>
+      <range start='192.168.100.2' end='192.168.100.254'/>
+      <host mac='34:49:22:B0:83:30' name='vm-bios' ip='192.168.100.50'/>
+    </dhcp>
+  </ip>
+</network>
+EOF
+if ! sudo virsh net-info integration > /dev/null 2>&1; then
+    sudo virsh net-define /tmp/integration.xml
+fi
+if [[ $(sudo virsh net-info integration | grep 'Active' | awk '{print $2}') == 'no' ]]; then
+    sudo virsh net-start integration
+fi
+
+# Allow anyone in the adm group to talk to libvirt.
+greenprint "🚪 Allowing users in adm group to talk to libvirt"
+sudo tee /etc/polkit-1/rules.d/50-libvirt.rules > /dev/null << EOF
+polkit.addRule(function(action, subject) {
+    if (action.id == "org.libvirt.unix.manage" &&
+        subject.isInGroup("adm")) {
+            return polkit.Result.YES;
+    }
+});
+EOF
+
+# Set up variables.
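+# Addressing used by this test: the libvirt "integration" network pins the
+# BIOS guest to 192.168.100.50 via the dnsmasq host entry above, while the
+# podman "edge" network (created further below) hosts the stage repo
+# container at 192.168.200.1.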
+TEST_UUID=$(uuidgen)
+IMAGE_KEY="edge-${TEST_UUID}"
+BIOS_GUEST_ADDRESS=192.168.100.50
+PROD_REPO_ADDRESS=192.168.200.1
+PROD_REPO_URL="http://${PROD_REPO_ADDRESS}:8080/repo/"
+ARTIFACTS="${ARTIFACTS:-/tmp/artifacts}"
+CONTAINER_TYPE=edge-container
+CONTAINER_FILENAME=container.tar
+
+# Set up temporary files.
+TEMPDIR=$(mktemp -d)
+BLUEPRINT_FILE=${TEMPDIR}/blueprint.toml
+KS_FILE=${TEMPDIR}/ks.cfg
+COMPOSE_START=${TEMPDIR}/compose-start-${IMAGE_KEY}.json
+COMPOSE_INFO=${TEMPDIR}/compose-info-${IMAGE_KEY}.json
+
+# SSH setup.
+SSH_OPTIONS=(-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=5)
+SSH_DATA_DIR=$(/usr/libexec/osbuild-composer-test/gen-ssh.sh)
+SSH_KEY=${SSH_DATA_DIR}/id_rsa
+SSH_KEY_PUB=$(cat "${SSH_KEY}".pub)
+
+case "${ID}-${VERSION_ID}" in
+    "rhel-8.7")
+        OSTREE_REF="rhel/8/${ARCH}/edge"
+        OS_VARIANT="rhel8-unknown"
+        # Use a stable installer image unless it's the nightly pipeline
+        BOOT_LOCATION="http://download.devel.redhat.com/released/rhel-8/RHEL-8/8.6.0/BaseOS/x86_64/os/"
+        if [ "${NIGHTLY:=false}" == "true" ]; then
+            BOOT_LOCATION="${COMPOSE_URL:-}/compose/BaseOS/x86_64/os/"
+        fi
+        PARENT_REF="rhel/8/${ARCH}/edge"
+        ;;
+    "rhel-9.1")
+        OSTREE_REF="rhel/9/${ARCH}/edge"
+        OS_VARIANT="rhel9-unknown"
+        # Use a stable installer image unless it's the nightly pipeline
+        BOOT_LOCATION="http://download.devel.redhat.com/released/rhel-9/RHEL-9/9.0.0/BaseOS/x86_64/os/"
+        if [ "${NIGHTLY:=false}" == "true" ]; then
+            BOOT_LOCATION="${COMPOSE_URL:-}/compose/BaseOS/x86_64/os/"
+        fi
+        PARENT_REF="rhel/9/${ARCH}/edge"
+        ;;
+    "centos-8")
+        OSTREE_REF="centos/8/${ARCH}/edge"
+        OS_VARIANT="centos8"
+        BOOT_LOCATION="http://mirror.centos.org/centos/8-stream/BaseOS/x86_64/os/"
+        PARENT_REF="centos/8/${ARCH}/edge"
+        ;;
+    "centos-9")
+        OSTREE_REF="centos/9/${ARCH}/edge"
+        OS_VARIANT="centos-stream9"
+        BOOT_LOCATION="https://odcs.stream.centos.org/production/latest-CentOS-Stream/compose/BaseOS/x86_64/os/"
+        PARENT_REF="centos/9/${ARCH}/edge"
+        ;;
+    *)
+        echo "unsupported distro: ${ID}-${VERSION_ID}"
+        exit 1;;
+esac
+
+# Get the compose log.
+get_compose_log () {
+    COMPOSE_ID=$1
+    LOG_FILE=${ARTIFACTS}/osbuild-${ID}-${VERSION_ID}-${COMPOSE_ID}.log
+
+    # Download the logs.
+    sudo composer-cli compose log "$COMPOSE_ID" | tee "$LOG_FILE" > /dev/null
+}
+
+# Get the compose metadata.
+get_compose_metadata () {
+    COMPOSE_ID=$1
+    METADATA_FILE=${ARTIFACTS}/osbuild-${ID}-${VERSION_ID}-${COMPOSE_ID}.json
+
+    # Download the metadata.
+    sudo composer-cli compose metadata "$COMPOSE_ID" > /dev/null
+
+    # Find the tarball and extract it.
+    TARBALL=$(basename "$(find . -maxdepth 1 -type f -name "*-metadata.tar")")
+    sudo tar -xf "$TARBALL" -C "${TEMPDIR}"
+    sudo rm -f "$TARBALL"
+
+    # Move the JSON file into place.
+    sudo cat "${TEMPDIR}"/"${COMPOSE_ID}".json | jq -M '.' | tee "$METADATA_FILE" > /dev/null
+}
+
+# Build ostree image.
+build_image() {
+    blueprint_name=$1
+    image_type=$2
+
+    # Get worker unit file so we can watch the journal.
+    WORKER_UNIT=$(sudo systemctl list-units | grep -o -E "osbuild.*worker.*\.service")
+    sudo journalctl -af -n 1 -u "${WORKER_UNIT}" &
+    WORKER_JOURNAL_PID=$!
+    # Stop watching the worker journal when exiting.
+    trap 'sudo pkill -P ${WORKER_JOURNAL_PID}' EXIT
+
+    # Start the compose.
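+    # The optional third and fourth arguments select how start-ostree is
+    # invoked: <repo-url> alone passes --url, and <repo-url> <parent-ref>
+    # additionally passes --parent to pin the parent commit.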
+    greenprint "🚀 Starting compose"
+    if [ $# -eq 3 ]; then
+        repo_url=$3
+        sudo composer-cli --json compose start-ostree --ref "$OSTREE_REF" --url "$repo_url" "$blueprint_name" "$image_type" | tee "$COMPOSE_START"
+    elif [ $# -eq 4 ]; then
+        repo_url=$3
+        parent_ref=$4
+        sudo composer-cli --json compose start-ostree --ref "$OSTREE_REF" --parent "$parent_ref" --url "$repo_url" "$blueprint_name" "$image_type" | tee "$COMPOSE_START"
+    else
+        sudo composer-cli --json compose start-ostree --ref "$OSTREE_REF" "$blueprint_name" "$image_type" | tee "$COMPOSE_START"
+    fi
+    COMPOSE_ID=$(get_build_info ".build_id" "$COMPOSE_START")
+
+    # Wait for the compose to finish.
+    greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}"
+    while true; do
+        sudo composer-cli --json compose info "${COMPOSE_ID}" | tee "$COMPOSE_INFO" > /dev/null
+        COMPOSE_STATUS=$(get_build_info ".queue_status" "$COMPOSE_INFO")
+
+        # Is the compose finished?
+        if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
+            break
+        fi
+
+        # Wait 5 seconds and try again.
+        sleep 5
+    done
+
+    # Capture the compose logs from osbuild.
+    greenprint "💬 Getting compose log and metadata"
+    get_compose_log "$COMPOSE_ID"
+    get_compose_metadata "$COMPOSE_ID"
+
+    # Kill the journal monitor immediately and remove the trap
+    sudo pkill -P ${WORKER_JOURNAL_PID}
+    trap - EXIT
+
+    # Did the compose finish with success?
+    if [[ $COMPOSE_STATUS != FINISHED ]]; then
+        echo "Something went wrong with the compose. 😢"
+        exit 1
+    fi
+}
+
+# Wait for the ssh server to be up.
+wait_for_ssh_up () {
+    SSH_STATUS=$(sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" admin@"${1}" '/bin/bash -c "echo -n READY"')
+    if [[ $SSH_STATUS == READY ]]; then
+        echo 1
+    else
+        echo 0
+    fi
+}
+
+# Clean up our mess.
+clean_up () {
+    greenprint "🧼 Cleaning up"
+
+    # Clear BIOS vm
+    if [[ $(sudo virsh domstate "${IMAGE_KEY}-bios") == "running" ]]; then
+        sudo virsh destroy "${IMAGE_KEY}-bios"
+    fi
+    sudo virsh undefine "${IMAGE_KEY}-bios"
+    # Remove qcow2 file.
+    sudo virsh vol-delete --pool images "${IMAGE_KEY}-bios.qcow2"
+
+    # Remove all containers and images if they exist
+    sudo podman system reset --force
+
+    # Remove tmp dir.
+    sudo rm -rf "$TEMPDIR"
+}
+
+# Test result checking
+check_result () {
+    greenprint "🎏 Checking for test result"
+    if [[ $RESULTS == 1 ]]; then
+        greenprint "💚 Success"
+    else
+        greenprint "❌ Failed"
+        clean_up
+        exit 1
+    fi
+}
+
+# Prepare stage repo network
+greenprint "🔧 Prepare stage repo network"
+sudo podman network inspect edge >/dev/null 2>&1 || sudo podman network create --driver=bridge --subnet=192.168.200.0/24 --gateway=192.168.200.254 edge
+
+##########################################################
+##
+## Build edge-container image and start it in podman
+##
+##########################################################
+
+# Write a blueprint for ostree image.
+tee "$BLUEPRINT_FILE" > /dev/null << EOF
+name = "container"
+description = "A base rhel-edge container image"
+version = "0.0.1"
+modules = []
+groups = []
+
+[[packages]]
+name = "python3"
+version = "*"
+
+[[packages]]
+name = "sssd"
+version = "*"
+
+[[customizations.user]]
+name = "admin"
+description = "Administrator account"
+password = "\$6\$GRmb7S0p8vsYmXzH\$o0E020S.9JQGaHkszoog4ha4AQVs3sk8q0DvLjSMxoxHBKnB2FBXGQ/OkwZQfW/76ktHd0NX5nls2LPxPuUdl."
+key = "${SSH_KEY_PUB}"
+home = "/home/admin/"
+groups = ["wheel"]
+EOF
+
+greenprint "📄 container blueprint"
+cat "$BLUEPRINT_FILE"
+
+# Prepare the blueprint for the compose.
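+# Pushing uploads the blueprint TOML to osbuild-composer; depsolving checks
+# that its package set resolves before we start the long container build.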
+greenprint "📋 Preparing container blueprint"
+sudo composer-cli blueprints push "$BLUEPRINT_FILE"
+sudo composer-cli blueprints depsolve container
+
+# Build container image.
+build_image container "${CONTAINER_TYPE}"
+
+# Download the image
+greenprint "📥 Downloading the container image"
+sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null
+
+# Clear stage repo running env
+greenprint "🧹 Clearing stage repo running env"
+# Remove any leftover containers if they exist
+sudo podman ps -a -q --format "{{.ID}}" | sudo xargs --no-run-if-empty podman rm -f
+# Remove all images
+sudo podman rmi -f -a
+
+# Deal with stage repo image
+greenprint "🗜 Starting container"
+IMAGE_FILENAME="${COMPOSE_ID}-${CONTAINER_FILENAME}"
+sudo podman pull "oci-archive:${IMAGE_FILENAME}"
+sudo podman images
+# Run edge stage repo
+greenprint "🛰 Running edge stage repo"
+# Get image id to run image
+EDGE_IMAGE_ID=$(sudo podman images --filter "dangling=true" --format "{{.ID}}")
+sudo podman run -d --name rhel-edge --network edge --ip "$PROD_REPO_ADDRESS" "$EDGE_IMAGE_ID"
+# Clear image file
+sudo rm -f "$IMAGE_FILENAME"
+
+# Wait for container to be running
+until [ "$(sudo podman inspect -f '{{.State.Running}}' rhel-edge)" == "true" ]; do
+    sleep 1
+done
+
+# Clean compose and blueprints.
+greenprint "🧽 Clean up container blueprint and compose"
+sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
+sudo composer-cli blueprints delete container > /dev/null
+
+# Ensure SELinux is happy with our new images.
+greenprint "👿 Running restorecon on image directory"
+sudo restorecon -Rv /var/lib/libvirt/images/
+
+# Create bios vm qcow2 file for virt install.
+greenprint "Create bios vm qcow2 file for virt install"
+LIBVIRT_IMAGE_PATH=/var/lib/libvirt/images/${IMAGE_KEY}-bios.qcow2
+sudo qemu-img create -f qcow2 "${LIBVIRT_IMAGE_PATH}" 20G
+
+# Write kickstart file for ostree image installation.
+greenprint "Generate kickstart file"
+tee "$KS_FILE" > /dev/null << STOPHERE
+text
+network --bootproto=dhcp --device=link --activate --onboot=on
+
+zerombr
+clearpart --all --initlabel --disklabel=msdos
+autopart --nohome --noswap --type=plain
+rootpw --lock --iscrypted locked
+ostreesetup --nogpg --osname=rhel-edge --remote=rhel-edge --url=${PROD_REPO_URL} --ref=${OSTREE_REF}
+poweroff
+
+%post --log=/var/log/anaconda/post-install.log --erroronfail
+# no sudo password for SSH user
+echo -e 'admin\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
+%end
+STOPHERE
+
+# Install ostree image via anaconda on BIOS vm.
+greenprint "Install ostree image via anaconda on BIOS vm"
+sudo virt-install --initrd-inject="${KS_FILE}" \
+    --extra-args="inst.ks=file:/ks.cfg console=ttyS0,115200" \
+    --name="${IMAGE_KEY}-bios" \
+    --disk path="${LIBVIRT_IMAGE_PATH}",format=qcow2 \
+    --ram 3072 \
+    --vcpus 2 \
+    --network network=integration,mac=34:49:22:B0:83:30 \
+    --os-type linux \
+    --os-variant ${OS_VARIANT} \
+    --location "${BOOT_LOCATION}" \
+    --nographics \
+    --noautoconsole \
+    --wait=-1 \
+    --noreboot
+
+# Start VM.
+greenprint "Start VM"
+sudo virsh start "${IMAGE_KEY}-bios"
+
+# Check for ssh ready to go.
+greenprint "🛃 Checking if SSH is ready to go"
+for LOOP_COUNTER in $(seq 0 30); do
+    RESULTS="$(wait_for_ssh_up $BIOS_GUEST_ADDRESS)"
+    if [[ $RESULTS == 1 ]]; then
+        echo "SSH is ready now! 🥳"
๐Ÿฅณ" + break + fi + sleep 10 +done + +# Check image installation result +check_result + +################################################################## +## +## Build upgrade image with new ref +## +################################################################## + +# Write a blueprint for ostree image. +tee "$BLUEPRINT_FILE" > /dev/null << EOF +name = "upgrade" +description = "An upgrade rhel-edge container image" +version = "0.0.2" +modules = [] +groups = [] + +[[packages]] +name = "python3" +version = "*" + +[[packages]] +name = "sssd" +version = "*" + +[[packages]] +name = "wget" +version = "*" + +[[customizations.user]] +name = "admin" +description = "Administrator account" +password = "\$6\$GRmb7S0p8vsYmXzH\$o0E020S.9JQGaHkszoog4ha4AQVs3sk8q0DvLjSMxoxHBKnB2FBXGQ/OkwZQfW/76ktHd0NX5nls2LPxPuUdl." +home = "/home/admin/" +key = "${SSH_KEY_PUB}" +groups = ["wheel"] + +[customizations.kernel] +name = "kernel-rt" +EOF + +greenprint "๐Ÿ“„ upgrade blueprint" +cat "$BLUEPRINT_FILE" + +# Prepare the blueprint for the compose. +greenprint "๐Ÿ“‹ Preparing upgrade blueprint" +sudo composer-cli blueprints push "$BLUEPRINT_FILE" +sudo composer-cli blueprints depsolve upgrade + +# Build upgrade image. +OSTREE_REF="test/rhel/x/${ARCH}/edge" +build_image upgrade "${CONTAINER_TYPE}" "$PROD_REPO_URL" "$PARENT_REF" + +# Download the image +greenprint "๐Ÿ“ฅ Downloading the upgrade image" +sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null + +# Clear stage repo running env +greenprint "๐Ÿงน Clearing stage repo running env" +# Remove any status containers if exist +sudo podman ps -a -q --format "{{.ID}}" | sudo xargs --no-run-if-empty podman rm -f +# Remove all images +sudo podman rmi -f -a + +# Deal with stage repo container +greenprint "๐Ÿ—œ Extracting image" +IMAGE_FILENAME="${COMPOSE_ID}-${CONTAINER_FILENAME}" +sudo podman pull "oci-archive:${IMAGE_FILENAME}" +sudo podman images +# Clear image file +sudo rm -f "$IMAGE_FILENAME" + +# Run edge stage repo +greenprint "๐Ÿ›ฐ Running edge stage repo" +# Get image id to run image +EDGE_IMAGE_ID=$(sudo podman images --filter "dangling=true" --format "{{.ID}}") +sudo podman run -d --name rhel-edge --network edge --ip "$PROD_REPO_ADDRESS" "$EDGE_IMAGE_ID" +# Wait for container to be running +until [ "$(sudo podman inspect -f '{{.State.Running}}' rhel-edge)" == "true" ]; do + sleep 1; +done; + +# Get ostree commit value. +greenprint "๐Ÿ•น Get ostree upgrade commit value" +UPGRADE_HASH=$(curl "${PROD_REPO_URL}refs/heads/${OSTREE_REF}") + +# Clean compose and blueprints. +greenprint "๐Ÿงฝ Clean up upgrade blueprint and compose" +sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null +sudo composer-cli blueprints delete upgrade > /dev/null + +# Rebase with new ref on BIOS vm +greenprint "๐Ÿ—ณ Rebase with new ref on BIOS vm" +sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" admin@${BIOS_GUEST_ADDRESS} "sudo rpm-ostree rebase rhel-edge:${OSTREE_REF}" +sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" admin@${BIOS_GUEST_ADDRESS} "nohup sudo -S systemctl reboot &>/dev/null & exit" + +# Sleep 10 seconds here to make sure vm restarted already +sleep 10 + +# Check for ssh ready to go. +greenprint "๐Ÿ›ƒ Checking for SSH is ready to go" +# shellcheck disable=SC2034 # Unused variables left for readability +for LOOP_COUNTER in $(seq 0 30); do + RESULTS="$(wait_for_ssh_up $BIOS_GUEST_ADDRESS)" + if [[ $RESULTS == 1 ]]; then + echo "SSH is ready now! 
๐Ÿฅณ" + break + fi + sleep 10 +done + +# Check ostree upgrade result +check_result + +# Add instance IP address into /etc/ansible/hosts +sudo tee "${TEMPDIR}"/inventory > /dev/null << EOF +[ostree_guest] +${BIOS_GUEST_ADDRESS} +[ostree_guest:vars] +ansible_python_interpreter=/usr/bin/python3 +ansible_user=admin +ansible_private_key_file=${SSH_KEY} +ansible_ssh_common_args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" +EOF + +# Test IoT/Edge OS +sudo ansible-playbook -v -i "${TEMPDIR}"/inventory -e image_type=rhel-edge -e ostree_commit="${UPGRADE_HASH}" -e ostree_ref="rhel-edge:${OSTREE_REF}" /usr/share/tests/osbuild-composer/ansible/check_ostree.yaml || RESULTS=0 +check_result + +# Final success clean up +clean_up + +exit 0 diff --git a/test/cases/ostree-rebase.sh b/test/cases/ostree-rebase-uefi.sh similarity index 84% rename from test/cases/ostree-rebase.sh rename to test/cases/ostree-rebase-uefi.sh index c6441309c..283bb1124 100755 --- a/test/cases/ostree-rebase.sh +++ b/test/cases/ostree-rebase-uefi.sh @@ -47,7 +47,6 @@ sudo tee /tmp/integration.xml > /dev/null << EOF - @@ -74,7 +73,6 @@ EOF # Set up variables. TEST_UUID=$(uuidgen) IMAGE_KEY="edge-${TEST_UUID}" -BIOS_GUEST_ADDRESS=192.168.100.50 UEFI_GUEST_ADDRESS=192.168.100.51 PROD_REPO_ADDRESS=192.168.200.1 PROD_REPO_URL="http://${PROD_REPO_ADDRESS}:8080/repo/" @@ -231,16 +229,7 @@ wait_for_ssh_up () { clean_up () { greenprint "๐Ÿงผ Cleaning up" - # Clear vm - # BIOS vm - if [[ $(sudo virsh domstate "${IMAGE_KEY}-bios") == "running" ]]; then - sudo virsh destroy "${IMAGE_KEY}-bios" - fi - sudo virsh undefine "${IMAGE_KEY}-bios" - # Remove qcow2 file. - sudo sudo virsh vol-delete --pool images "${IMAGE_KEY}-bios.qcow2" - - #UEFI vm + # Clear UEFI vm if [[ $(sudo virsh domstate "${IMAGE_KEY}-uefi") == "running" ]]; then sudo virsh destroy "${IMAGE_KEY}-uefi" fi @@ -351,11 +340,6 @@ sudo composer-cli blueprints delete container > /dev/null greenprint "๐Ÿ‘ฟ Running restorecon on image directory" sudo restorecon -Rv /var/lib/libvirt/images/ -# Create bios vm qcow2 file for virt install. -greenprint "Create bios vm qcow2 file for virt install" -LIBVIRT_IMAGE_PATH=/var/lib/libvirt/images/${IMAGE_KEY}-bios.qcow2 -sudo qemu-img create -f qcow2 "${LIBVIRT_IMAGE_PATH}" 20G - # Write kickstart file for ostree image installation. greenprint "Generate kickstart file" tee "$KS_FILE" > /dev/null << STOPHERE @@ -375,41 +359,6 @@ echo -e 'admin\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers %end STOPHERE -# Install ostree image via anaconda on BIOS vm. -greenprint "Install ostree image via anaconda on BIOS vm" -sudo virt-install --initrd-inject="${KS_FILE}" \ - --extra-args="inst.ks=file:/ks.cfg console=ttyS0,115200" \ - --name="${IMAGE_KEY}-bios"\ - --disk path="${LIBVIRT_IMAGE_PATH}",format=qcow2 \ - --ram 3072 \ - --vcpus 2 \ - --network network=integration,mac=34:49:22:B0:83:30 \ - --os-type linux \ - --os-variant ${OS_VARIANT} \ - --location "${BOOT_LOCATION}" \ - --nographics \ - --noautoconsole \ - --wait=-1 \ - --noreboot - -# Start VM. -greenprint "Start VM" -sudo virsh start "${IMAGE_KEY}-bios" - -# Check for ssh ready to go. -greenprint "๐Ÿ›ƒ Checking for SSH is ready to go" -for LOOP_COUNTER in $(seq 0 30); do - RESULTS="$(wait_for_ssh_up $BIOS_GUEST_ADDRESS)" - if [[ $RESULTS == 1 ]]; then - echo "SSH is ready now! ๐Ÿฅณ" - break - fi - sleep 10 -done - -# Check image installation result -check_result - # Create uefi vm qcow2 file for virt install. 
greenprint "Create uefi vm qcow2 file for virt install" LIBVIRT_IMAGE_PATH=/var/lib/libvirt/images/${IMAGE_KEY}-uefi.qcow2 @@ -539,44 +488,6 @@ greenprint "๐Ÿงฝ Clean up upgrade blueprint and compose" sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null sudo composer-cli blueprints delete upgrade > /dev/null -# Rebase with new ref on BIOS vm -greenprint "๐Ÿ—ณ Rebase with new ref on BIOS vm" -sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" admin@${BIOS_GUEST_ADDRESS} "sudo rpm-ostree rebase rhel-edge:${OSTREE_REF}" -sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" admin@${BIOS_GUEST_ADDRESS} "nohup sudo -S systemctl reboot &>/dev/null & exit" - -# Sleep 10 seconds here to make sure vm restarted already -sleep 10 - -# Check for ssh ready to go. -greenprint "๐Ÿ›ƒ Checking for SSH is ready to go" -# shellcheck disable=SC2034 # Unused variables left for readability -for LOOP_COUNTER in $(seq 0 30); do - RESULTS="$(wait_for_ssh_up $BIOS_GUEST_ADDRESS)" - if [[ $RESULTS == 1 ]]; then - echo "SSH is ready now! ๐Ÿฅณ" - break - fi - sleep 10 -done - -# Check ostree upgrade result -check_result - -# Add instance IP address into /etc/ansible/hosts -sudo tee "${TEMPDIR}"/inventory > /dev/null << EOF -[ostree_guest] -${BIOS_GUEST_ADDRESS} -[ostree_guest:vars] -ansible_python_interpreter=/usr/bin/python3 -ansible_user=admin -ansible_private_key_file=${SSH_KEY} -ansible_ssh_common_args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" -EOF - -# Test IoT/Edge OS -sudo ansible-playbook -v -i "${TEMPDIR}"/inventory -e image_type=rhel-edge -e ostree_commit="${UPGRADE_HASH}" -e ostree_ref="rhel-edge:${OSTREE_REF}" /usr/share/tests/osbuild-composer/ansible/check_ostree.yaml || RESULTS=0 -check_result - # Rebase with new ref on UEFI vm greenprint "๐Ÿ—ณ Rebase with new ref on UEFI vm" sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" admin@${UEFI_GUEST_ADDRESS} "sudo rpm-ostree rebase rhel-edge:${OSTREE_REF}"