We want to be able to safely gather artifacts without worrying about possible secret leaks. Every artifact that we want to upload now has to be placed in /tmp/artifacts; the executor then uploads that directory to S3 and prints links to the artifacts in the logs. Only people with access to our AWS account can see them.
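
For example (an illustrative sketch, not a prescribed API — the log name is made up), a test only needs to drop its files into that directory, the same way the script below does via its ARTIFACTS variable:

    ARTIFACTS="${ARTIFACTS:-/tmp/artifacts}"
    mkdir -p "${ARTIFACTS}"                  # in case the directory does not exist yet
    cp osbuild-compose.log "${ARTIFACTS}/"   # hypothetical log file

The executor picks the directory up, uploads it to S3, and prints the links.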
#!/bin/bash
set -euo pipefail

# Provision the software under test.
/usr/libexec/osbuild-composer-test/provision.sh

# Get OS data.
source /usr/libexec/osbuild-composer-test/set-env-variables.sh

# Colorful output.
function greenprint {
    echo -e "\033[1;32m[$(date -Isecond)] ${1}\033[0m"
}

function get_build_info() {
    key="$1"
    fname="$2"
    if rpm -q --quiet weldr-client; then
        key=".body${key}"
    fi
    jq -r "${key}" "${fname}"
}
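
# With weldr-client installed, `composer-cli --json` wraps its payload in a
# "body" object, i.e. roughly {"body": {"build_id": "..."}} rather than
# {"build_id": "..."} (illustrative shape, not an exact transcript). That is
# why get_build_info prepends ".body" to the jq key, e.g.:
#   COMPOSE_ID=$(get_build_info ".build_id" "$COMPOSE_START")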

# Install openshift client
greenprint "🔧 Installing openshift client (oc)"
curl https://osbuild-storage.s3.amazonaws.com/oc-4.9.0-linux.tar.gz | sudo tar -xz -C /usr/local/bin/

# Start libvirtd and test it.
greenprint "🚀 Starting libvirt daemon"
sudo systemctl start libvirtd
sudo virsh list --all > /dev/null

# Set a customized dnsmasq configuration for libvirt so we always get the
# same address on bootup.
sudo tee /tmp/integration.xml > /dev/null << EOF
<network>
    <name>integration</name>
    <uuid>1c8fe98c-b53a-4ca4-bbdb-deb0f26b3579</uuid>
    <forward mode='nat'>
        <nat>
            <port start='1024' end='65535'/>
        </nat>
    </forward>
    <bridge name='integration' stp='on' delay='0'/>
    <mac address='52:54:00:36:46:ef'/>
    <ip address='192.168.100.1' netmask='255.255.255.0'>
        <dhcp>
            <range start='192.168.100.2' end='192.168.100.254'/>
            <host mac='34:49:22:B0:83:30' name='vm-bios' ip='192.168.100.50'/>
            <host mac='34:49:22:B0:83:31' name='vm-uefi' ip='192.168.100.51'/>
        </dhcp>
    </ip>
</network>
EOF
if ! sudo virsh net-info integration > /dev/null 2>&1; then
    sudo virsh net-define /tmp/integration.xml
fi
if [[ $(sudo virsh net-info integration | grep 'Active' | awk '{print $2}') == 'no' ]]; then
    sudo virsh net-start integration
fi
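
# If the static leases ever look wrong, the effective network definition can
# be dumped for inspection (illustrative check, not part of the test flow):
#   sudo virsh net-dumpxml integration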

# Allow anyone in the adm group to talk to libvirt.
greenprint "🚪 Allowing users in adm group to talk to libvirt"
sudo tee /etc/polkit-1/rules.d/50-libvirt.rules > /dev/null << EOF
polkit.addRule(function(action, subject) {
    if (action.id == "org.libvirt.unix.manage" &&
        subject.isInGroup("adm")) {
            return polkit.Result.YES;
    }
});
EOF
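
# Illustrative check, not part of the test flow: the action this rule matches
# and its default (implicit) authorizations can be inspected with
#   pkaction --action-id org.libvirt.unix.manage --verbose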

# Set up variables.
TEST_UUID=$(uuidgen)
IMAGE_KEY="osbuild-composer-ostree-test-${TEST_UUID}"
BIOS_GUEST_ADDRESS=192.168.100.50
UEFI_GUEST_ADDRESS=192.168.100.51
PROD_REPO_URL=http://192.168.100.1/repo
PROD_REPO=/var/www/html/repo
STAGE_REPO_ADDRESS=192.168.200.1
STAGE_REPO_URL="http://${STAGE_REPO_ADDRESS}:8080/repo/"
QUAY_REPO_URL="quay.io/osbuild/testing-rhel-edge-push"
# Random 4-character tag; the trailing `echo ''` keeps the assignment's exit
# status zero even when head's early exit makes tr fail under pipefail.
QUAY_REPO_TAG=$(tr -dc a-z0-9 < /dev/urandom | head -c 4 ; echo '')
STAGE_OCP4_SERVER_NAME="edge-stage-server"
STAGE_OCP4_REPO_URL="http://${STAGE_OCP4_SERVER_NAME}-${QUAY_REPO_TAG}-frontdoor.apps.ocp.ci.centos.org/repo/"
ARTIFACTS="${ARTIFACTS:-/tmp/artifacts}"
# For CS8, CS9, RHEL 8.5 and above
CONTAINER_TYPE=edge-container
CONTAINER_FILENAME=container.tar
INSTALLER_TYPE=edge-installer
INSTALLER_FILENAME=installer.iso
ANSIBLE_USER_FOR_BIOS="installeruser"
OSTREE_OSNAME=rhel

# Set up temporary files.
TEMPDIR=$(mktemp -d)
BLUEPRINT_FILE=${TEMPDIR}/blueprint.toml
QUAY_CONFIG=${TEMPDIR}/quay_config.toml
COMPOSE_START=${TEMPDIR}/compose-start-${IMAGE_KEY}.json
COMPOSE_INFO=${TEMPDIR}/compose-info-${IMAGE_KEY}.json

# SSH setup.
SSH_OPTIONS=(-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=5)
SSH_DATA_DIR=$(/usr/libexec/osbuild-composer-test/gen-ssh.sh)
SSH_KEY=${SSH_DATA_DIR}/id_rsa
SSH_KEY_PUB=$(cat "${SSH_KEY}".pub)

case "${ID}-${VERSION_ID}" in
    "fedora-35")
        CONTAINER_TYPE=fedora-iot-container
        INSTALLER_TYPE=fedora-iot-installer
        OSTREE_REF="fedora/35/${ARCH}/iot"
        OSTREE_OSNAME=fedora
        OS_VARIANT="fedora35"
        ;;
    "rhel-8.7")
        OSTREE_REF="test/rhel/8/${ARCH}/edge"
        OS_VARIANT="rhel8-unknown"
        ;;
    "rhel-9.1")
        OSTREE_REF="test/rhel/9/${ARCH}/edge"
        OS_VARIANT="rhel9-unknown"
        ;;
    "centos-8")
        OSTREE_REF="test/centos/8/${ARCH}/edge"
        OS_VARIANT="centos8"
        ;;
    "centos-9")
        OSTREE_REF="test/centos/9/${ARCH}/edge"
        OS_VARIANT="centos-stream9"
        ;;
    *)
        echo "unsupported distro: ${ID}-${VERSION_ID}"
        exit 1;;
esac

# Modify the existing kickstart by prepending and appending commands.
function modksiso {
    sudo dnf install -y lorax  # for mkksiso
    isomount=$(mktemp -d)
    kspath=$(mktemp -d)

    iso="$1"
    newiso="$2"

    echo "Mounting ${iso} -> ${isomount}"
    sudo mount -v -o ro "${iso}" "${isomount}"

    cleanup() {
        sudo umount -v "${isomount}"
        rmdir -v "${isomount}"
        rm -rv "${kspath}"
    }

    trap cleanup RETURN

    ksfiles=("${isomount}"/*.ks)
    ksfile="${ksfiles[0]}" # there shouldn't be more than one anyway
    echo "Found kickstart file ${ksfile}"

    ksbase=$(basename "${ksfile}")
    newksfile="${kspath}/${ksbase}"
    oldks=$(cat "${ksfile}")
    echo "Preparing modified kickstart file"
    cat > "${newksfile}" << EOFKS
text
network --bootproto=dhcp --device=link --activate --onboot=on
zerombr
clearpart --all --initlabel --disklabel=msdos
autopart --nohome --noswap --type=plain
${oldks}
poweroff
%post --log=/var/log/anaconda/post-install.log --erroronfail
# no sudo password for user admin and installeruser
echo -e 'admin\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
echo -e 'installeruser\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
# delete local ostree repo and add external prod edge repo
ostree remote delete ${OSTREE_OSNAME}
ostree remote add --no-gpg-verify --no-sign-verify ${OSTREE_OSNAME} ${PROD_REPO_URL}
%end
EOFKS

    echo "Writing new ISO"
    sudo mkksiso -c "console=ttyS0,115200" "${newksfile}" "${iso}" "${newiso}"

    echo "==== NEW KICKSTART FILE ===="
    cat "${newksfile}"
    echo "============================"
}
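
# Usage (as invoked later in this script):
#   modksiso "${ISO_FILENAME}" "/var/lib/libvirt/images/${ISO_FILENAME}"
# The RETURN trap above runs cleanup() when the function returns, so the ISO
# mount and the temporary kickstart directory never outlive the call.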

# Get the compose log.
get_compose_log () {
    COMPOSE_ID=$1
    LOG_FILE=${ARTIFACTS}/osbuild-${ID}-${VERSION_ID}-${COMPOSE_ID}.log

    # Download the logs.
    sudo composer-cli compose log "$COMPOSE_ID" | tee "$LOG_FILE" > /dev/null
}

# Get the compose metadata.
get_compose_metadata () {
    COMPOSE_ID=$1
    METADATA_FILE=${ARTIFACTS}/osbuild-${ID}-${VERSION_ID}-${COMPOSE_ID}.json

    # Download the metadata.
    sudo composer-cli compose metadata "$COMPOSE_ID" > /dev/null

    # Find the tarball and extract it.
    TARBALL=$(basename "$(find . -maxdepth 1 -type f -name "*-metadata.tar")")
    sudo tar -xf "$TARBALL" -C "${TEMPDIR}"
    sudo rm -f "$TARBALL"

    # Move the JSON file into place.
    sudo cat "${TEMPDIR}"/"${COMPOSE_ID}".json | jq -M '.' | tee "$METADATA_FILE" > /dev/null
}

# Build ostree image.
build_image() {
    blueprint_name=$1
    image_type=$2

    # Get worker unit file so we can watch the journal.
    WORKER_UNIT=$(sudo systemctl list-units | grep -o -E "osbuild.*worker.*\.service")
    sudo journalctl -af -n 1 -u "${WORKER_UNIT}" &
    WORKER_JOURNAL_PID=$!
    # Stop watching the worker journal when exiting.
    trap 'sudo pkill -P ${WORKER_JOURNAL_PID}' EXIT

    # Start the compose.
    greenprint "🚀 Starting compose"
    if [ $# -eq 2 ]; then
        sudo composer-cli --json compose start-ostree --ref "$OSTREE_REF" "$blueprint_name" "$image_type" | tee "$COMPOSE_START"
    fi
    if [ $# -eq 3 ]; then
        repo_url=$3
        sudo composer-cli --json compose start-ostree --ref "$OSTREE_REF" --url "$repo_url" "$blueprint_name" "$image_type" | tee "$COMPOSE_START"
    fi
    if [ $# -eq 4 ]; then
        image_repo_url=$3
        registry_config=$4
        sudo composer-cli --json compose start-ostree --ref "$OSTREE_REF" "$blueprint_name" "$image_type" "$image_repo_url" "$registry_config" | tee "$COMPOSE_START"
    fi
    COMPOSE_ID=$(get_build_info ".build_id" "$COMPOSE_START")

    # Wait for the compose to finish.
    greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}"
    while true; do
        sudo composer-cli --json compose info "${COMPOSE_ID}" | tee "$COMPOSE_INFO" > /dev/null
        COMPOSE_STATUS=$(get_build_info ".queue_status" "$COMPOSE_INFO")

        # Is the compose finished?
        if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
            break
        fi

        # Wait 5 seconds and try again.
        sleep 5
    done

    # Capture the compose logs from osbuild.
    greenprint "💬 Getting compose log and metadata"
    get_compose_log "$COMPOSE_ID"
    get_compose_metadata "$COMPOSE_ID"

    # Kill the journal monitor immediately and remove the trap.
    sudo pkill -P ${WORKER_JOURNAL_PID}
    trap - EXIT

    # Did the compose finish with success?
    if [[ $COMPOSE_STATUS != FINISHED ]]; then
        echo "Something went wrong with the compose. 😢"
        exit 1
    fi
}
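
# build_image is invoked in three shapes further down; the optional arguments
# select the parent ostree repo or a registry push target:
#   build_image container "$CONTAINER_TYPE" "${QUAY_REPO_URL}:${QUAY_REPO_TAG}" "$QUAY_CONFIG"
#   build_image installer "${INSTALLER_TYPE}" "${PROD_REPO_URL}/"
#   build_image upgrade "${CONTAINER_TYPE}" "$PROD_REPO_URL"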

# Wait for the ssh server to come up.
# Test user admin added by edge-container bp.
wait_for_ssh_up () {
    SSH_STATUS=$(sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" admin@"${1}" '/bin/bash -c "echo -n READY"')
    if [[ $SSH_STATUS == READY ]]; then
        echo 1
    else
        echo 0
    fi
}
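
# Callers poll this helper in a loop, e.g. (the same pattern used below):
#   RESULTS="$(wait_for_ssh_up $BIOS_GUEST_ADDRESS)"
# Because bash does not propagate errexit into command substitutions by
# default, a refused connection just yields "0" here instead of killing the
# script, letting the loop retry.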

# Clean up our mess.
clean_up () {
    greenprint "🧼 Cleaning up"
    # Remove tag from quay.io repo.
    skopeo delete --creds "${V2_QUAY_USERNAME}:${V2_QUAY_PASSWORD}" "docker://${QUAY_REPO_URL}:${QUAY_REPO_TAG}"

    # Clear VM.
    if [[ $(sudo virsh domstate "${IMAGE_KEY}-uefi") == "running" ]]; then
        sudo virsh destroy "${IMAGE_KEY}-uefi"
    fi
    sudo virsh undefine "${IMAGE_KEY}-uefi" --nvram
    # Remove qcow2 file.
    sudo rm -f "$LIBVIRT_UEFI_IMAGE_PATH"

    # Remove any leftover containers if they exist.
    sudo podman ps -a -q --format "{{.ID}}" | sudo xargs --no-run-if-empty podman rm -f
    # Remove all images.
    sudo podman rmi -f -a

    # Remove prod repo.
    sudo rm -rf "$PROD_REPO"

    # Remove tmp dir.
    sudo rm -rf "$TEMPDIR"

    # Stop prod repo http service.
    sudo systemctl disable --now httpd
}

# Test result checking.
check_result () {
    greenprint "🎏 Checking for test result"
    if [[ $RESULTS == 1 ]]; then
        greenprint "💚 Success"
    else
        greenprint "❌ Failed"
        clean_up
        exit 1
    fi
}

###########################################################
##
## Prepare edge prod and stage repo
##
###########################################################
greenprint "🔧 Prepare edge prod repo"
# Start prod repo web service.
# osbuild-composer-tests has mod_ssl as a dependency. The package installs an
# example configuration which automatically enables httpd on port 443, but
# that port is already in use. Remove the default configuration, as it is
# useless anyway.
sudo rm -f /etc/httpd/conf.d/ssl.conf
sudo systemctl enable --now httpd.service

# Have a clean prod repo.
sudo rm -rf "$PROD_REPO"
sudo mkdir -p "$PROD_REPO"
sudo ostree --repo="$PROD_REPO" init --mode=archive
sudo ostree --repo="$PROD_REPO" remote add --no-gpg-verify edge-stage "$STAGE_REPO_URL"
sudo ostree --repo="$PROD_REPO" remote add --no-gpg-verify edge-stage-ocp4 "$STAGE_OCP4_REPO_URL"

# Prepare stage repo network.
greenprint "🔧 Prepare stage repo network"
sudo podman network inspect edge >/dev/null 2>&1 || sudo podman network create --driver=bridge --subnet=192.168.200.0/24 --gateway=192.168.200.254 edge
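
# STAGE_REPO_ADDRESS (192.168.200.1) lives inside this 192.168.200.0/24
# subnet, so the stage repo container started later can claim it as a static
# IP. Illustrative sanity check, not part of the test flow:
#   sudo podman network inspect edge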

##########################################################
##
## rhel-edge container image for building installer image
##
##########################################################

# Write a blueprint for ostree image.
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "container"
description = "A base rhel-edge container image"
version = "0.0.1"
modules = []
groups = []

[[packages]]
name = "python3"
version = "*"

[[packages]]
name = "sssd"
version = "*"

[[customizations.user]]
name = "admin"
description = "Administrator account"
password = "\$6\$GRmb7S0p8vsYmXzH\$o0E020S.9JQGaHkszoog4ha4AQVs3sk8q0DvLjSMxoxHBKnB2FBXGQ/OkwZQfW/76ktHd0NX5nls2LPxPuUdl."
key = "${SSH_KEY_PUB}"
home = "/home/admin/"
groups = ["wheel"]
EOF

greenprint "📄 container blueprint"
cat "$BLUEPRINT_FILE"

# Prepare the blueprint for the compose.
greenprint "📋 Preparing container blueprint"
sudo composer-cli blueprints push "$BLUEPRINT_FILE"
sudo composer-cli blueprints depsolve container

# Write the registry configuration.
greenprint "📄 Preparing quay.io config to push image"
tee "$QUAY_CONFIG" > /dev/null << EOF
provider = "container"
[settings]
username = "$V2_QUAY_USERNAME"
password = "$V2_QUAY_PASSWORD"
EOF

# Build container image.
build_image container "$CONTAINER_TYPE" "${QUAY_REPO_URL}:${QUAY_REPO_TAG}" "$QUAY_CONFIG"

# Run stage repo in OCP4.
greenprint "Running stage repo in OCP4"
oc login --token="${OCP_SA_TOKEN}" --server=https://api.ocp.ci.centos.org:6443 -n frontdoor
oc process -f /usr/share/tests/osbuild-composer/openshift/edge-stage-server-template.yaml -p EDGE_STAGE_REPO_TAG="${QUAY_REPO_TAG}" -p EDGE_STAGE_SERVER_NAME="${STAGE_OCP4_SERVER_NAME}" | oc apply -f - || true

# Wait until stage repo is ready to use.
greenprint "Wait until stage repo is ready"
for LOOP_COUNTER in $(seq 0 60); do
    RETURN_CODE=$(curl -o /dev/null -s -w "%{http_code}" "${STAGE_OCP4_REPO_URL}refs/heads/${OSTREE_REF}")
    if [[ $RETURN_CODE == 200 ]]; then
        echo "Stage repo is ready"
        break
    fi
    sleep 20
done
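
# The loop above allows the OCP4 deployment up to roughly 20 minutes
# (61 tries x 20 s) to start serving the ref. If it never answers 200, the
# script just falls through; the pull below then fails and `set -e` aborts
# the run.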

# Sync installer edge content.
greenprint "📡 Sync installer content from stage repo"
sudo ostree --repo="$PROD_REPO" pull --mirror edge-stage-ocp4 "$OSTREE_REF"

# Clean compose and blueprints.
greenprint "🧽 Clean up container blueprint and compose"
sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
sudo composer-cli blueprints delete container > /dev/null

# Clean up OCP4.
greenprint "Clean up OCP4"
oc delete pod,rc,service,route,dc -l app="${STAGE_OCP4_SERVER_NAME}-${QUAY_REPO_TAG}"

########################################################
##
## rhel-edge installer image building from container image
##
########################################################

# Write a blueprint for installer image.
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "installer"
description = "A rhel-edge installer image"
version = "0.0.1"
modules = []
groups = []

[[customizations.user]]
name = "installeruser"
description = "Added by installer blueprint"
password = "\$6\$GRmb7S0p8vsYmXzH\$o0E020S.9JQGaHkszoog4ha4AQVs3sk8q0DvLjSMxoxHBKnB2FBXGQ/OkwZQfW/76ktHd0NX5nls2LPxPuUdl."
key = "${SSH_KEY_PUB}"
home = "/home/installeruser/"
groups = ["wheel"]
EOF

greenprint "📄 installer blueprint"
cat "$BLUEPRINT_FILE"

# Prepare the blueprint for the compose.
greenprint "📋 Preparing installer blueprint"
sudo composer-cli blueprints push "$BLUEPRINT_FILE"
sudo composer-cli blueprints depsolve installer

# Build installer image.
# Test the --url arg followed by a URL with a trailing slash for bz#1942029.
build_image installer "${INSTALLER_TYPE}" "${PROD_REPO_URL}/"

# Download the image.
greenprint "📥 Downloading the installer image"
sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null
ISO_FILENAME="${COMPOSE_ID}-${INSTALLER_FILENAME}"
modksiso "${ISO_FILENAME}" "/var/lib/libvirt/images/${ISO_FILENAME}"
sudo rm "${ISO_FILENAME}"

# Clean compose and blueprints.
greenprint "🧹 Clean up installer blueprint and compose"
sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
sudo composer-cli blueprints delete installer > /dev/null

########################################################
##
## install rhel-edge image with installer(ISO)
##
########################################################

# Ensure SELinux is happy with our new images.
greenprint "👿 Running restorecon on image directory"
sudo restorecon -Rv /var/lib/libvirt/images/

# Create qcow2 files for virt-install.
greenprint "🖥 Create qcow2 files for virt-install"
LIBVIRT_BIOS_IMAGE_PATH=/var/lib/libvirt/images/${IMAGE_KEY}-bios.qcow2
LIBVIRT_UEFI_IMAGE_PATH=/var/lib/libvirt/images/${IMAGE_KEY}-uefi.qcow2
sudo qemu-img create -f qcow2 "${LIBVIRT_BIOS_IMAGE_PATH}" 20G
sudo qemu-img create -f qcow2 "${LIBVIRT_UEFI_IMAGE_PATH}" 20G

##################################################
##
## Install and test Edge image on BIOS VM
##
##################################################
# Install ostree image via anaconda.
greenprint "💿 Install ostree image via installer(ISO) on BIOS VM"
sudo virt-install --name="${IMAGE_KEY}-bios" \
    --disk path="${LIBVIRT_BIOS_IMAGE_PATH}",format=qcow2 \
    --ram 3072 \
    --vcpus 2 \
    --network network=integration,mac=34:49:22:B0:83:30 \
    --os-type linux \
    --os-variant ${OS_VARIANT} \
    --cdrom "/var/lib/libvirt/images/${ISO_FILENAME}" \
    --nographics \
    --noautoconsole \
    --wait=-1 \
    --noreboot

# Start VM.
greenprint "📟 Start BIOS VM"
sudo virsh start "${IMAGE_KEY}-bios"

# Check for ssh to be ready.
greenprint "🛃 Checking for SSH to be ready"
for LOOP_COUNTER in $(seq 0 30); do
    RESULTS="$(wait_for_ssh_up $BIOS_GUEST_ADDRESS)"
    if [[ $RESULTS == 1 ]]; then
        echo "SSH is ready now! 🥳"
        break
    fi
    sleep 10
done

# Check image installation result.
check_result

# Get ostree commit value.
greenprint "🕹 Get ostree install commit value"
INSTALL_HASH=$(curl "${PROD_REPO_URL}/refs/heads/${OSTREE_REF}")

# Run Edge test on BIOS VM.
# Add instance IP address into /etc/ansible/hosts.
# Run the BIOS VM test with installeruser, added by the edge-installer bp, as
# the ansible user.
sudo tee "${TEMPDIR}"/inventory > /dev/null << EOF
[ostree_guest]
${BIOS_GUEST_ADDRESS}

[ostree_guest:vars]
ansible_python_interpreter=/usr/bin/python3
ansible_user=${ANSIBLE_USER_FOR_BIOS}
ansible_private_key_file=${SSH_KEY}
ansible_ssh_common_args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
EOF

# Test IoT/Edge OS. `|| RESULTS=0` records a playbook failure for
# check_result instead of letting `set -e` kill the script outright.
greenprint "📼 Run Edge tests on BIOS VM"
sudo ansible-playbook -v -i "${TEMPDIR}"/inventory -e image_type="$OSTREE_OSNAME" -e ostree_commit="${INSTALL_HASH}" /usr/share/tests/osbuild-composer/ansible/check_ostree.yaml || RESULTS=0
check_result

# Clean up BIOS VM.
greenprint "🧹 Clean up BIOS VM"
if [[ $(sudo virsh domstate "${IMAGE_KEY}-bios") == "running" ]]; then
    sudo virsh destroy "${IMAGE_KEY}-bios"
fi
sudo virsh undefine "${IMAGE_KEY}-bios"
sudo rm -f "$LIBVIRT_BIOS_IMAGE_PATH"

##################################################
##
## Install, upgrade and test Edge image on UEFI VM
##
##################################################
# Install ostree image via anaconda.
greenprint "💿 Install ostree image via installer(ISO) on UEFI VM"
sudo virt-install --name="${IMAGE_KEY}-uefi" \
    --disk path="${LIBVIRT_UEFI_IMAGE_PATH}",format=qcow2 \
    --ram 3072 \
    --vcpus 2 \
    --network network=integration,mac=34:49:22:B0:83:31 \
    --os-type linux \
    --os-variant ${OS_VARIANT} \
    --cdrom "/var/lib/libvirt/images/${ISO_FILENAME}" \
    --boot uefi,loader_ro=yes,loader_type=pflash,nvram_template=/usr/share/edk2/ovmf/OVMF_VARS.fd,loader_secure=no \
    --nographics \
    --noautoconsole \
    --wait=-1 \
    --noreboot

# Start VM.
greenprint "💻 Start UEFI VM"
sudo virsh start "${IMAGE_KEY}-uefi"

# Check for ssh to be ready.
greenprint "🛃 Checking for SSH to be ready"
for LOOP_COUNTER in $(seq 0 30); do
    RESULTS="$(wait_for_ssh_up $UEFI_GUEST_ADDRESS)"
    if [[ $RESULTS == 1 ]]; then
        echo "SSH is ready now! 🥳"
        break
    fi
    sleep 10
done

# Check image installation result.
check_result

##################################################
##
## upgrade rhel-edge with new upgrade commit
##
##################################################

# Write a blueprint for ostree image.
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "upgrade"
description = "An upgrade rhel-edge container image"
version = "0.0.2"
modules = []
groups = []

[[packages]]
name = "python3"
version = "*"

[[packages]]
name = "sssd"
version = "*"

[[packages]]
name = "wget"
version = "*"
EOF

# No RT kernel in Fedora.
if [[ "$ID" != "fedora" ]]; then
    tee -a "$BLUEPRINT_FILE" > /dev/null << EOF
[customizations.kernel]
name = "kernel-rt"
EOF
fi

greenprint "📄 upgrade blueprint"
cat "$BLUEPRINT_FILE"

# Prepare the blueprint for the compose.
greenprint "📋 Preparing upgrade blueprint"
sudo composer-cli blueprints push "$BLUEPRINT_FILE"
sudo composer-cli blueprints depsolve upgrade

# Build upgrade image.
build_image upgrade "${CONTAINER_TYPE}" "$PROD_REPO_URL"

# Download the image.
greenprint "📥 Downloading the upgrade image"
sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null

# Clear stage repo running env.
greenprint "🧹 Clearing stage repo running env"
# Remove any leftover containers if they exist.
sudo podman ps -a -q --format "{{.ID}}" | sudo xargs --no-run-if-empty podman rm -f
# Remove all images.
sudo podman rmi -f -a

# Deal with stage repo container.
greenprint "🗜 Extracting image"
IMAGE_FILENAME="${COMPOSE_ID}-${CONTAINER_FILENAME}"
sudo podman pull "oci-archive:${IMAGE_FILENAME}"
sudo podman images
# Clear image file.
sudo rm -f "$IMAGE_FILENAME"

# Run edge stage repo.
greenprint "🛰 Running edge stage repo"
# Get image id to run image.
EDGE_IMAGE_ID=$(sudo podman images --filter "dangling=true" --format "{{.ID}}")
sudo podman run -d --name rhel-edge --network edge --ip "$STAGE_REPO_ADDRESS" "$EDGE_IMAGE_ID"
# Wait for the container to be running.
until [ "$(sudo podman inspect -f '{{.State.Running}}' rhel-edge)" == "true" ]; do
    sleep 1;
done;
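
# Pulling from an oci-archive leaves the image untagged, which is why the
# dangling filter above is how we find its ID. Illustrative check of the
# resulting container, not part of the test flow:
#   sudo podman ps --filter name=rhel-edge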

# Pull upgrade to prod mirror.
greenprint "⛓ Pull upgrade to prod mirror"
sudo ostree --repo="$PROD_REPO" pull --mirror edge-stage "$OSTREE_REF"
sudo ostree --repo="$PROD_REPO" static-delta generate "$OSTREE_REF"
sudo ostree --repo="$PROD_REPO" summary -u
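
# `static-delta generate` precomputes a delta from the previous commit to the
# new one so clients can fetch the upgrade efficiently, and `summary -u`
# regenerates the repo summary so the new ref (and delta) are advertised.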

# Get ostree commit value.
greenprint "🕹 Get ostree upgrade commit value"
UPGRADE_HASH=$(curl "${PROD_REPO_URL}/refs/heads/${OSTREE_REF}")

# Clean compose and blueprints.
greenprint "🧽 Clean up upgrade blueprint and compose"
sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
sudo composer-cli blueprints delete upgrade > /dev/null

# Upgrade image/commit.
# Test user admin added by edge-container bp.
greenprint "🗳 Upgrade ostree image/commit"
sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" admin@${UEFI_GUEST_ADDRESS} "sudo rpm-ostree upgrade --os=${OSTREE_OSNAME}"
sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" admin@${UEFI_GUEST_ADDRESS} 'nohup sudo systemctl reboot &>/dev/null & exit'

# Sleep 10 seconds here to make sure the VM has started rebooting.
sleep 10

# Check for ssh to be ready.
greenprint "🛃 Checking for SSH to be ready"
# shellcheck disable=SC2034 # Unused variables left for readability
for LOOP_COUNTER in $(seq 0 30); do
    RESULTS="$(wait_for_ssh_up $UEFI_GUEST_ADDRESS)"
    if [[ $RESULTS == 1 ]]; then
        echo "SSH is ready now! 🥳"
        break
    fi
    sleep 10
done

# Check ostree upgrade result.
check_result

# Add instance IP address into /etc/ansible/hosts.
# Test user installeruser added by the edge-installer bp.
# The installeruser user still exists after the upgrade even though the
# upgrade bp does not contain it.
sudo tee "${TEMPDIR}"/inventory > /dev/null << EOF
[ostree_guest]
${UEFI_GUEST_ADDRESS}

[ostree_guest:vars]
ansible_python_interpreter=/usr/bin/python3
ansible_user=${ANSIBLE_USER_FOR_BIOS}
ansible_private_key_file=${SSH_KEY}
ansible_ssh_common_args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
EOF

# Test IoT/Edge OS.
sudo ansible-playbook -v -i "${TEMPDIR}"/inventory -e image_type="$OSTREE_OSNAME" -e ostree_commit="${UPGRADE_HASH}" /usr/share/tests/osbuild-composer/ansible/check_ostree.yaml || RESULTS=0
check_result

# Final success clean up.
clean_up

exit 0