No tests should be run directly from git, but should rather be installed
onto the test system using rpm and run from there. This moves towards
unifying our two types of test cases.
The new structure is now:
`test/cmd`: the executors, one for each test-case. This is installed
into `/usr/libexec/test/osbuild-composer`.
`test/data`: data and config used by the tests. This is installed into
`/usr/share/tests/osbuild-composer`.
`schutzbot`: configuration of the actual test run. In particular, this
is where the distros and repositories to test against are
configured.
This is very much still work-in-progress, and is only the first step
towards simplifying schutzbot. Apart from moving files around, this
should be a noop.
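
For illustration, a run on a test system would then look roughly like
this (the package and test-case names below are placeholders, not
actual names):

    dnf install <package carrying the tests>
    /usr/libexec/test/osbuild-composer/<test-case>

with the test data read from /usr/share/tests/osbuild-composer.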
Signed-off-by: Tom Gundersen <teg@jklm.no>
#!/bin/bash
set -euo pipefail

OSBUILD_COMPOSER_TEST_DATA=/usr/share/tests/osbuild-composer/

# Get OS data.
source /etc/os-release
ARCH=$(uname -m)

# Set os-variant and boot location used by virt-install.
case "${ID}-${VERSION_ID}" in
    # Bypass the ostree test on fedora-31 and rhel-8.2.
    "fedora-31")
        exit 0;;
    "rhel-8.2")
        exit 0;;
    "fedora-32")
        IMAGE_TYPE=fedora-iot-commit
        OSTREE_REF="fedora/32/${ARCH}/iot"
        OS_VARIANT="fedora32"
        BOOT_LOCATION="https://mirrors.rit.edu/fedora/fedora/linux/releases/32/Everything/x86_64/os/";;
    "rhel-8.3")
        # Override the old rhel-8-beta.json because this test needs the latest systemd and redhat-release.
        sudo cp schutzbot/repositories/rhel-8-beta.json /etc/osbuild-composer/repositories/
        sudo systemctl restart osbuild-composer.socket
        IMAGE_TYPE=rhel-edge-commit
        OSTREE_REF="rhel/8/${ARCH}/edge"
        OS_VARIANT="rhel8-unknown"
        BOOT_LOCATION="http://download.devel.redhat.com/rhel-8/nightly/RHEL-8/latest-RHEL-8.3/compose/BaseOS/x86_64/os/";;
    *) ;;
esac

# Colorful output.
function greenprint {
    echo -e "\033[1;32m${1}\033[0m"
}

# Mock is only available in EPEL for RHEL.
if [[ $ID == rhel ]] && ! rpm -q epel-release; then
    greenprint "📦 Setting up EPEL repository"
    curl -Ls --retry 5 --output /tmp/epel.rpm \
        https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm
    sudo rpm -Uvh /tmp/epel.rpm
fi

# Install required packages.
greenprint "📦 Installing required packages"
sudo dnf -y install jq libvirt-client libvirt-daemon \
    libvirt-daemon-config-network libvirt-daemon-config-nwfilter \
    libvirt-daemon-driver-interface libvirt-daemon-driver-network \
    libvirt-daemon-driver-nodedev libvirt-daemon-driver-nwfilter \
    libvirt-daemon-driver-qemu libvirt-daemon-driver-secret \
    libvirt-daemon-driver-storage libvirt-daemon-driver-storage-disk \
    libvirt-daemon-kvm qemu-img qemu-kvm virt-install expect \
    python3-lxml ansible httpd

# Start libvirtd and test it.
greenprint "🚀 Starting libvirt daemon"
sudo systemctl start libvirtd
sudo virsh list --all > /dev/null

# Set a customized dnsmasq configuration for libvirt so we always get the
# same address on bootup.
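# The static host entry below ties the guest MAC (34:49:22:B0:83:30, also used
# by virt-install further down) to 192.168.100.50, which is what GUEST_ADDRESS
# expects.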
sudo tee /tmp/integration.xml > /dev/null << EOF
<network>
  <name>integration</name>
  <uuid>1c8fe98c-b53a-4ca4-bbdb-deb0f26b3579</uuid>
  <forward mode='nat'>
    <nat>
      <port start='1024' end='65535'/>
    </nat>
  </forward>
  <bridge name='integration' stp='on' delay='0'/>
  <mac address='52:54:00:36:46:ef'/>
  <ip address='192.168.100.1' netmask='255.255.255.0'>
    <dhcp>
      <range start='192.168.100.2' end='192.168.100.254'/>
      <host mac='34:49:22:B0:83:30' name='vm' ip='192.168.100.50'/>
    </dhcp>
  </ip>
</network>
EOF
if ! sudo virsh net-info integration > /dev/null 2>&1; then
    sudo virsh net-define /tmp/integration.xml
    sudo virsh net-start integration
fi

# Allow anyone in the wheel group to talk to libvirt.
greenprint "🚪 Allowing users in wheel group to talk to libvirt"
WHEEL_GROUP=wheel
if [[ $ID == rhel ]]; then
    WHEEL_GROUP=adm
fi
sudo tee /etc/polkit-1/rules.d/50-libvirt.rules > /dev/null << EOF
polkit.addRule(function(action, subject) {
    if (action.id == "org.libvirt.unix.manage" &&
        subject.isInGroup("${WHEEL_GROUP}")) {
            return polkit.Result.YES;
    }
});
EOF

# Set up variables.
TEST_UUID=$(uuidgen)
IMAGE_KEY="osbuild-composer-ostree-test-${TEST_UUID}"
GUEST_ADDRESS=192.168.100.50

# Set up temporary files.
TEMPDIR=$(mktemp -d)
BLUEPRINT_FILE=${TEMPDIR}/blueprint.toml
KS_FILE=${TEMPDIR}/ks.cfg
COMPOSE_START=${TEMPDIR}/compose-start-${IMAGE_KEY}.json
COMPOSE_INFO=${TEMPDIR}/compose-info-${IMAGE_KEY}.json

# SSH setup.
SSH_OPTIONS=(-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=5)
SSH_KEY=${OSBUILD_COMPOSER_TEST_DATA}keyring/id_rsa

# Get the compose log.
get_compose_log () {
    COMPOSE_ID=$1
    LOG_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-${COMPOSE_ID}.log

    # Download the logs.
    sudo composer-cli compose log "$COMPOSE_ID" | tee "$LOG_FILE" > /dev/null
}

# Get the compose metadata.
get_compose_metadata () {
    COMPOSE_ID=$1
    METADATA_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-${COMPOSE_ID}.json

    # Download the metadata.
    sudo composer-cli compose metadata "$COMPOSE_ID" > /dev/null

    # Find the tarball and extract it.
    TARBALL=$(basename "$(find . -maxdepth 1 -type f -name "*-metadata.tar")")
    tar -xf "$TARBALL" -C "${TEMPDIR}"
    rm -f "$TARBALL"

    # Move the JSON file into place.
    cat "${TEMPDIR}"/"${COMPOSE_ID}".json | jq -M '.' | tee "$METADATA_FILE" > /dev/null
}

# Build ostree image.
build_image() {
    blueprint_file=$1
    blueprint_name=$2

    # Prepare the blueprint for the compose.
    greenprint "📋 Preparing blueprint"
    sudo composer-cli blueprints push "$blueprint_file"
    sudo composer-cli blueprints depsolve "$blueprint_name"

    # Get the worker unit file so we can watch the journal.
    WORKER_UNIT=$(sudo systemctl list-units | grep -o -E "osbuild.*worker.*\.service")
    sudo journalctl -af -n 1 -u "${WORKER_UNIT}" &
    WORKER_JOURNAL_PID=$!

    # Start the compose.
    greenprint "🚀 Starting compose"
    if [[ $blueprint_name == upgrade ]]; then
        # Keep the newer composer-cli invocation around in case composer-cli gets updated:
        # sudo composer-cli --json compose start-ostree --ref $OSTREE_REF --parent $COMMIT_HASH $blueprint_name $IMAGE_TYPE | tee $COMPOSE_START
        sudo composer-cli --json compose start-ostree "$blueprint_name" $IMAGE_TYPE "$OSTREE_REF" "$COMMIT_HASH" | tee "$COMPOSE_START"
    else
        sudo composer-cli --json compose start "$blueprint_name" $IMAGE_TYPE | tee "$COMPOSE_START"
    fi
    COMPOSE_ID=$(jq -r '.build_id' "$COMPOSE_START")

    # Wait for the compose to finish.
    greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}"
    while true; do
        sudo composer-cli --json compose info "${COMPOSE_ID}" | tee "$COMPOSE_INFO" > /dev/null
        COMPOSE_STATUS=$(jq -r '.queue_status' "$COMPOSE_INFO")

        # Is the compose finished?
        if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
            break
        fi

        # Wait 5 seconds and try again.
        sleep 5
    done

    # Capture the compose logs from osbuild.
    greenprint "💬 Getting compose log and metadata"
    get_compose_log "$COMPOSE_ID"
    get_compose_metadata "$COMPOSE_ID"

    # Did the compose finish with success?
    if [[ $COMPOSE_STATUS != FINISHED ]]; then
        echo "Something went wrong with the compose. 😢"
        exit 1
    fi

    # Stop watching the worker journal.
    sudo kill ${WORKER_JOURNAL_PID}
}

# Wait for the ssh server to come up.
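# Prints 1 once the guest answers over SSH and 0 otherwise; callers capture
# this in RESULTS for check_result.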
wait_for_ssh_up () {
    SSH_STATUS=$(sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" admin@"${1}" '/bin/bash -c "echo -n READY"')
    if [[ $SSH_STATUS == READY ]]; then
        echo 1
    else
        echo 0
    fi
}

# Clean up our mess.
clean_up () {
    greenprint "🧼 Cleaning up"
    sudo virsh destroy "${IMAGE_KEY}"
    if [[ $ARCH == aarch64 ]]; then
        sudo virsh undefine "${IMAGE_KEY}" --nvram
    else
        sudo virsh undefine "${IMAGE_KEY}"
    fi
    # Remove the qcow2 file.
    sudo rm -f "$LIBVIRT_IMAGE_PATH"
    # Remove the extracted upgrade image tarball.
    sudo rm -rf "$UPGRADE_PATH"
    # Remove the "remote" repo.
    sudo rm -rf "${HTTPD_PATH}"/{repo,compose.json}
    # Remove the temporary directory.
    sudo rm -rf "$TEMPDIR"
    # Stop httpd.
    sudo systemctl disable httpd --now
}

# Test result checking.
check_result () {
    greenprint "Checking for test result"
    if [[ $RESULTS == 1 ]]; then
        greenprint "💚 Success"
    else
        greenprint "❌ Failed"
        clean_up
        exit 1
    fi
}

##################################################
##
## ostree image/commit installation
##
##################################################

# Write a blueprint for the ostree image.
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "ostree"
description = "A base ostree image"
version = "0.0.1"
modules = []
groups = []

[[packages]]
name = "python36"
version = "*"
EOF

# Build the installation image.
build_image "$BLUEPRINT_FILE" ostree

# Start httpd to serve the ostree repo.
greenprint "🚀 Starting httpd daemon"
sudo systemctl start httpd

# Download the image and extract the tarball into the web server root folder.
greenprint "📥 Downloading and extracting the image"
sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null
IMAGE_FILENAME="${COMPOSE_ID}-commit.tar"
HTTPD_PATH="/var/www/html"
sudo tar -xf "${IMAGE_FILENAME}" -C ${HTTPD_PATH}
sudo rm -f "$IMAGE_FILENAME"

# Clean up the compose and blueprints.
greenprint "Clean up osbuild-composer"
sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
sudo composer-cli blueprints delete ostree > /dev/null

# Get the ostree commit value.
greenprint "Get ostree image commit value"
COMMIT_HASH=$(jq -r '."ostree-commit"' < ${HTTPD_PATH}/compose.json)

# Ensure SELinux is happy with our new images.
greenprint "👿 Running restorecon on image directory"
sudo restorecon -Rv /var/lib/libvirt/images/

# Create the qcow2 file for virt-install.
greenprint "Create qcow2 file for virt install"
LIBVIRT_IMAGE_PATH=/var/lib/libvirt/images/${IMAGE_KEY}.qcow2
sudo qemu-img create -f qcow2 "${LIBVIRT_IMAGE_PATH}" 20G

# Write a kickstart file for the ostree image installation.
greenprint "Generate kickstart file"
tee "$KS_FILE" > /dev/null << STOPHERE
|
|
text
|
|
lang en_US.UTF-8
|
|
keyboard us
|
|
timezone --utc Etc/UTC
|
|
|
|
selinux --enforcing
|
|
rootpw --lock --iscrypted locked
|
|
user --name=admin --groups=wheel --iscrypted --password=\$6\$1LgwKw9aOoAi/Zy9\$Pn3ErY1E8/yEanJ98evqKEW.DZp24HTuqXPJl6GYCm8uuobAmwxLv7rGCvTRZhxtcYdmC0.XnYRSR9Sh6de3p0
|
|
sshkey --username=admin "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC61wMCjOSHwbVb4VfVyl5sn497qW4PsdQ7Ty7aD6wDNZ/QjjULkDV/yW5WjDlDQ7UqFH0Sr7vywjqDizUAqK7zM5FsUKsUXWHWwg/ehKg8j9xKcMv11AkFoUoujtfAujnKODkk58XSA9whPr7qcw3vPrmog680pnMSzf9LC7J6kXfs6lkoKfBh9VnlxusCrw2yg0qI1fHAZBLPx7mW6+me71QZsS6sVz8v8KXyrXsKTdnF50FjzHcK9HXDBtSJS5wA3fkcRYymJe0o6WMWNdgSRVpoSiWaHHmFgdMUJaYoCfhXzyl7LtNb3Q+Sveg+tJK7JaRXBLMUllOlJ6ll5Hod root@localhost"
|
|
|
|
bootloader --timeout=1 --append="net.ifnames=0 modprobe.blacklist=vc4"
|
|
|
|
network --bootproto=dhcp --device=link --activate --onboot=on
|
|
|
|
zerombr
|
|
clearpart --all --initlabel --disklabel=msdos
|
|
autopart --nohome --noswap --type=plain
|
|
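# Deploy the commit that the host serves over HTTP (the repo extracted into
# the web server root earlier in this script).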
ostreesetup --nogpg --osname=${IMAGE_TYPE} --remote=${IMAGE_TYPE} --url=http://192.168.100.1/repo/ --ref=${OSTREE_REF}
poweroff

%post --log=/var/log/anaconda/post-install.log --erroronfail

# no sudo password for user admin
echo -e 'admin\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers

# Remove any persistent NIC rules generated by udev
rm -vf /etc/udev/rules.d/*persistent-net*.rules
# And ensure that we will do DHCP on eth0 on startup
cat > /etc/sysconfig/network-scripts/ifcfg-eth0 << EOF
DEVICE="eth0"
BOOTPROTO="dhcp"
ONBOOT="yes"
TYPE="Ethernet"
PERSISTENT_DHCLIENT="yes"
EOF

echo "Packages within this iot or edge image:"
echo "-----------------------------------------------------------------------"
rpm -qa | sort
echo "-----------------------------------------------------------------------"
# Note that running rpm recreates the rpm db files which aren't needed/wanted
rm -f /var/lib/rpm/__db*

echo "Zeroing out empty space."
# This forces the filesystem to reclaim space from deleted files
dd bs=1M if=/dev/zero of=/var/tmp/zeros || :
rm -f /var/tmp/zeros
echo "(Don't worry -- that out-of-space error was expected.)"

%end
STOPHERE

# Install the ostree image via anaconda.
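# Anaconda reads the injected kickstart (ks=file:/ks.cfg), installs the commit
# unattended, and powers the guest off; --noreboot keeps it down until the
# explicit virsh start below.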
greenprint "Install ostree image via anaconda"
|
|
sudo virt-install --initrd-inject="${KS_FILE}" \
|
|
--extra-args="ks=file:/ks.cfg console=ttyS0,115200" \
|
|
--name="${IMAGE_KEY}"\
|
|
--disk path="${LIBVIRT_IMAGE_PATH}",format=qcow2 \
|
|
--ram 3072 \
|
|
--vcpus 2 \
|
|
--network network=integration,mac=34:49:22:B0:83:30 \
|
|
--os-type linux \
|
|
--os-variant ${OS_VARIANT} \
|
|
--location ${BOOT_LOCATION} \
|
|
--nographics \
|
|
--noautoconsole \
|
|
--wait=-1 \
|
|
--noreboot
|
|
|
|
# Start the VM.
greenprint "Start VM"
sudo virsh start "${IMAGE_KEY}"

# Check that SSH is ready to go.
greenprint "🛃 Checking if SSH is ready to go"
for LOOP_COUNTER in $(seq 0 30); do
    RESULTS="$(wait_for_ssh_up $GUEST_ADDRESS)"
    if [[ $RESULTS == 1 ]]; then
        echo "SSH is ready now! 🥳"
        break
    fi
    sleep 10
done

# Check the image installation result.
check_result

##################################################
##
## ostree image/commit upgrade
##
##################################################

# Write a blueprint for the upgrade ostree image.
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "upgrade"
description = "An upgrade ostree image"
version = "0.0.2"
modules = []
groups = []

[[packages]]
name = "python36"
version = "*"

[[packages]]
name = "wget"
version = "*"
EOF

# Build the upgrade image.
build_image "$BLUEPRINT_FILE" upgrade

# Download the image and extract the tarball into the web server root folder.
greenprint "📥 Downloading and extracting the image"
sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null
IMAGE_FILENAME="${COMPOSE_ID}-commit.tar"
UPGRADE_PATH="$(pwd)/upgrade"
mkdir -p "$UPGRADE_PATH"
sudo tar -xf "$IMAGE_FILENAME" -C "$UPGRADE_PATH"
sudo rm -f "$IMAGE_FILENAME"

# Clean up the compose and blueprints.
greenprint "Clean up osbuild-composer again"
sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
sudo composer-cli blueprints delete upgrade > /dev/null

# Introduce the new ostree commit into the repo.
greenprint "Introduce new ostree commit into repo"
sudo ostree pull-local --repo "${HTTPD_PATH}/repo" "${UPGRADE_PATH}/repo" "$OSTREE_REF"
sudo ostree summary --update --repo "${HTTPD_PATH}/repo"

# Ensure SELinux is happy with all object files.
greenprint "👿 Running restorecon on web server root folder"
sudo restorecon -Rv "${HTTPD_PATH}/repo" > /dev/null

# Get the ostree commit value.
greenprint "Get ostree image commit value"
UPGRADE_HASH=$(jq -r '."ostree-commit"' < "${UPGRADE_PATH}"/compose.json)

# Upgrade the image/commit.
greenprint "Upgrade ostree image/commit"
sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" admin@${GUEST_ADDRESS} 'sudo rpm-ostree upgrade'
sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" admin@${GUEST_ADDRESS} 'nohup sudo systemctl reboot &>/dev/null & exit'

# Sleep 10 seconds here to make sure the VM reboot is underway before polling SSH.
sleep 10

# Check that SSH is ready to go.
greenprint "🛃 Checking if SSH is ready to go"
# shellcheck disable=SC2034 # Unused variables left for readability
for LOOP_COUNTER in $(seq 0 30); do
    RESULTS="$(wait_for_ssh_up $GUEST_ADDRESS)"
    if [[ $RESULTS == 1 ]]; then
        echo "SSH is ready now! 🥳"
        break
    fi
    sleep 10
done

# Check the ostree upgrade result.
check_result

# Write an Ansible inventory with the instance IP address.
sudo tee "${TEMPDIR}"/inventory > /dev/null << EOF
[ostree_guest]
${GUEST_ADDRESS}

[ostree_guest:vars]
ansible_python_interpreter=/usr/bin/python3
ansible_user=admin
ansible_private_key_file=${SSH_KEY}
ansible_ssh_common_args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
EOF

# Test the IoT/Edge OS.
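# A playbook failure forces RESULTS to 0 so check_result reports it and cleans up.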
sudo ansible-playbook -v -i "${TEMPDIR}"/inventory -e image_type=${IMAGE_TYPE} -e ostree_commit="${UPGRADE_HASH}" /usr/share/tests/osbuild-composer/ansible/check_ostree.yaml || RESULTS=0
check_result

# Final clean-up on success.
clean_up

exit 0