*.sh: apply automatically all suggestions from shellcheck

This commit was automatically generated using:
```
$ fd --exclude vendor sh | xargs shellcheck -f diff | git apply
```
This commit is contained in:
Martin Sehnoutka 2020-09-17 15:15:16 +02:00 committed by Ondřej Budai
parent 4c476f32c2
commit b06e6dd916
8 changed files with 146 additions and 146 deletions

View file

@@ -141,10 +141,10 @@ else
exit 2
fi
if [ $1 == "start" ]; then
if [ "$1" == "start" ]; then
koji_start
fi
if [ $1 == "stop" ]; then
if [ "$1" == "stop" ]; then
koji_stop
fi

View file

@@ -23,8 +23,8 @@ source /etc/os-release
# Register RHEL if we are provided with a registration script.
if [[ -n "${RHN_REGISTRATION_SCRIPT:-}" ]] && ! sudo subscription-manager status; then
sudo chmod +x $RHN_REGISTRATION_SCRIPT
sudo $RHN_REGISTRATION_SCRIPT
sudo chmod +x "$RHN_REGISTRATION_SCRIPT"
sudo "$RHN_REGISTRATION_SCRIPT"
fi
# Restart systemd to work around some Fedora issues in cloud images.
@@ -80,6 +80,6 @@ sudo systemctl enable --now osbuild-composer-koji.socket
# Verify that the API is running.
sudo composer-cli status show
sudo composer-cli sources list
for SOURCE in `sudo composer-cli sources list`; do
sudo composer-cli sources info $SOURCE
for SOURCE in $(sudo composer-cli sources list); do
sudo composer-cli sources info "$SOURCE"
done

View file

@@ -21,8 +21,8 @@ fi
# Register RHEL if we are provided with a registration script.
if [[ -n "${RHN_REGISTRATION_SCRIPT:-}" ]] && ! sudo subscription-manager status; then
greenprint "🪙 Registering RHEL instance"
sudo chmod +x $RHN_REGISTRATION_SCRIPT
sudo $RHN_REGISTRATION_SCRIPT
sudo chmod +x "$RHN_REGISTRATION_SCRIPT"
sudo "$RHN_REGISTRATION_SCRIPT"
fi
# Install requirements for building RPMs in mock.
@@ -88,27 +88,27 @@ fi
# Compile RPMs in a mock chroot
greenprint "🎁 Building RPMs with mock"
sudo mock -r $MOCK_CONFIG --resultdir $REPO_DIR --with=tests \
sudo mock -r "$MOCK_CONFIG" --resultdir "$REPO_DIR" --with=tests \
rpmbuild/SRPMS/*.src.rpm osbuild/rpmbuild/SRPMS/*.src.rpm
# Change the ownership of all of our repo files from root to our CI user.
sudo chown -R $USER ${REPO_DIR%%/*}
sudo chown -R "$USER" "${REPO_DIR%%/*}"
# Move the logs out of the way.
greenprint "🧹 Retaining logs from mock build"
mv ${REPO_DIR}/*.log $WORKSPACE
mv "${REPO_DIR}"/*.log "$WORKSPACE"
# Create a repo of the built RPMs.
greenprint "⛓️ Creating dnf repository"
createrepo_c ${REPO_DIR}
createrepo_c "${REPO_DIR}"
# Copy the current build to the latest directory.
mkdir -p $REPO_DIR_LATEST
cp -arv ${REPO_DIR}/ ${REPO_DIR_LATEST}/
mkdir -p "$REPO_DIR_LATEST"
cp -arv "${REPO_DIR}"/ "${REPO_DIR_LATEST}"/
# Remove the previous latest build for this branch.
# Don't fail if the path is missing.
s3cmd --recursive rm s3://${REPO_BUCKET}/${JOB_NAME}/latest/${ID}${VERSION_ID//./}_${ARCH} || true
s3cmd --recursive rm s3://${REPO_BUCKET}/"${JOB_NAME}"/latest/"${ID}""${VERSION_ID//./}"_"${ARCH}" || true
# Upload repository to S3.
greenprint "☁ Uploading RPMs to S3"

View file

@@ -20,13 +20,13 @@ test_divider () {
# Run a test case and store the result as passed or failed.
run_test_case () {
TEST_NAME=$(basename $1)
TEST_NAME=$(basename "$1")
echo
test_divider
echo "🏃🏻 Running test: ${TEST_NAME}"
test_divider
if sudo ${1} -test.v | tee ${WORKSPACE}/${TEST_NAME}.log; then
if sudo "${1}" -test.v | tee "${WORKSPACE}"/"${TEST_NAME}".log; then
PASSED_TESTS+=($TEST_NAME)
else
FAILED_TESTS+=($TEST_NAME)
@@ -44,7 +44,7 @@ cd $WORKING_DIRECTORY
# Run each test case.
for TEST_CASE in "${TEST_CASES[@]}"; do
run_test_case ${TESTS_PATH}/$TEST_CASE
run_test_case ${TESTS_PATH}/"$TEST_CASE"
done
# Print a report of the test results.

View file

@@ -21,7 +21,7 @@ test_divider () {
get_test_cases () {
TEST_CASE_SELECTOR="${ID}_${VERSION_ID%.*}-${ARCH}*.json"
pushd $IMAGE_TEST_CASES_PATH > /dev/null
ls $TEST_CASE_SELECTOR
ls "$TEST_CASE_SELECTOR"
popd > /dev/null
}
@@ -29,7 +29,7 @@ get_test_cases () {
run_test_case () {
TEST_RUNNER=$1
TEST_CASE_FILENAME=$2
TEST_NAME=$(basename $TEST_CASE_FILENAME)
TEST_NAME=$(basename "$TEST_CASE_FILENAME")
echo
test_divider
@@ -58,11 +58,11 @@ run_test_case () {
AZURE_CREDS=${AZURE_CREDS-/dev/null}
OPENSTACK_CREDS=${OPENSTACK_CREDS-/dev/null}
VCENTER_CREDS=${VCENTER_CREDS-/dev/null}
TEST_CMD="env $(cat $AZURE_CREDS $OPENSTACK_CREDS $VCENTER_CREDS) BRANCH_NAME=${BRANCH_NAME-master} BUILD_ID=$BUILD_ID DISTRO_CODE=$DISTRO_CODE $TEST_RUNNER -test.v ${IMAGE_TEST_CASES_PATH}/${TEST_CASE_FILENAME}"
TEST_CMD="env $(cat "$AZURE_CREDS" "$OPENSTACK_CREDS" "$VCENTER_CREDS") BRANCH_NAME=${BRANCH_NAME-master} BUILD_ID=$BUILD_ID DISTRO_CODE=$DISTRO_CODE $TEST_RUNNER -test.v ${IMAGE_TEST_CASES_PATH}/${TEST_CASE_FILENAME}"
# Run the test and add the test name to the list of passed or failed
# tests depending on the result.
if sudo $TEST_CMD 2>&1 | tee ${WORKSPACE}/${TEST_NAME}.log; then
if sudo "$TEST_CMD" 2>&1 | tee "${WORKSPACE}"/"${TEST_NAME}".log; then
PASSED_TESTS+=("$TEST_NAME")
else
FAILED_TESTS+=("$TEST_NAME")
@@ -82,7 +82,7 @@ cd $WORKING_DIRECTORY
# Run each test case.
for TEST_CASE in $(get_test_cases); do
run_test_case $IMAGE_TEST_CASE_RUNNER $TEST_CASE
run_test_case $IMAGE_TEST_CASE_RUNNER "$TEST_CASE"
done
# Print a report of the test results.

View file

@@ -66,9 +66,9 @@ INSTANCE_CONSOLE=${TEMPDIR}/instance-console-${IMAGE_KEY}.json
smoke_test_check () {
# Ensure the ssh key has restricted permissions.
SSH_KEY=${WORKSPACE}/test/keyring/id_rsa
chmod 0600 $SSH_KEY
chmod 0600 "$SSH_KEY"
SMOKE_TEST=$(ssh -i ${SSH_KEY} redhat@${1} 'cat /etc/smoke-test.txt')
SMOKE_TEST=$(ssh -i "${SSH_KEY}" redhat@"${1}" 'cat /etc/smoke-test.txt')
if [[ $SMOKE_TEST == smoke-test ]]; then
echo 1
else
@@ -82,7 +82,7 @@ get_compose_log () {
LOG_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-aws.log
# Download the logs.
sudo composer-cli compose log $COMPOSE_ID | tee $LOG_FILE > /dev/null
sudo composer-cli compose log "$COMPOSE_ID" | tee "$LOG_FILE" > /dev/null
}
# Get the compose metadata.
@@ -91,15 +91,15 @@ get_compose_metadata () {
METADATA_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-aws.json
# Download the metadata.
sudo composer-cli compose metadata $COMPOSE_ID > /dev/null
sudo composer-cli compose metadata "$COMPOSE_ID" > /dev/null
# Find the tarball and extract it.
TARBALL=$(basename $(find . -maxdepth 1 -type f -name "*-metadata.tar"))
tar -xf $TARBALL
rm -f $TARBALL
tar -xf "$TARBALL"
rm -f "$TARBALL"
# Move the JSON file into place.
cat ${COMPOSE_ID}.json | jq -M '.' | tee $METADATA_FILE > /dev/null
cat "${COMPOSE_ID}".json | jq -M '.' | tee "$METADATA_FILE" > /dev/null
}
# Get the console screenshot from the AWS instance.
@@ -108,12 +108,12 @@ store_instance_screenshot () {
LOOP_COUNTER=${2}
SCREENSHOT_FILE=${WORKSPACE}/console-screenshot-${ID}-${VERSION_ID}-${LOOP_COUNTER}.jpg
$AWS_CMD ec2 get-console-screenshot --instance-id ${1} > $INSTANCE_CONSOLE
jq -r '.ImageData' $INSTANCE_CONSOLE | base64 -d - > $SCREENSHOT_FILE
$AWS_CMD ec2 get-console-screenshot --instance-id "${1}" > "$INSTANCE_CONSOLE"
jq -r '.ImageData' "$INSTANCE_CONSOLE" | base64 -d - > "$SCREENSHOT_FILE"
}
# Write an AWS TOML file
tee $AWS_CONFIG > /dev/null << EOF
tee "$AWS_CONFIG" > /dev/null << EOF
provider = "aws"
[settings]
@@ -125,7 +125,7 @@ key = "${IMAGE_KEY}"
EOF
# Write a basic blueprint for our image.
tee $BLUEPRINT_FILE > /dev/null << EOF
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "bash"
description = "A base system with bash"
version = "0.0.1"
@@ -139,24 +139,24 @@ EOF
# Prepare the blueprint for the compose.
greenprint "📋 Preparing blueprint"
sudo composer-cli blueprints push $BLUEPRINT_FILE
sudo composer-cli blueprints push "$BLUEPRINT_FILE"
sudo composer-cli blueprints depsolve bash
# Get worker unit file so we can watch the journal.
WORKER_UNIT=$(sudo systemctl list-units | egrep -o "osbuild.*worker.*\.service")
sudo journalctl -af -n 1 -u ${WORKER_UNIT} &
sudo journalctl -af -n 1 -u "${WORKER_UNIT}" &
WORKER_JOURNAL_PID=$!
# Start the compose and upload to AWS.
greenprint "🚀 Starting compose"
sudo composer-cli --json compose start bash ami $IMAGE_KEY $AWS_CONFIG | tee $COMPOSE_START
COMPOSE_ID=$(jq -r '.build_id' $COMPOSE_START)
sudo composer-cli --json compose start bash ami "$IMAGE_KEY" "$AWS_CONFIG" | tee "$COMPOSE_START"
COMPOSE_ID=$(jq -r '.build_id' "$COMPOSE_START")
# Wait for the compose to finish.
greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}"
while true; do
sudo composer-cli --json compose info ${COMPOSE_ID} | tee $COMPOSE_INFO > /dev/null
COMPOSE_STATUS=$(jq -r '.queue_status' $COMPOSE_INFO)
sudo composer-cli --json compose info "${COMPOSE_ID}" | tee "$COMPOSE_INFO" > /dev/null
COMPOSE_STATUS=$(jq -r '.queue_status' "$COMPOSE_INFO")
# Is the compose finished?
if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
@@ -169,8 +169,8 @@ done
# Capture the compose logs from osbuild.
greenprint "💬 Getting compose log and metadata"
get_compose_log $COMPOSE_ID
get_compose_metadata $COMPOSE_ID
get_compose_log "$COMPOSE_ID"
get_compose_metadata "$COMPOSE_ID"
# Did the compose finish with success?
if [[ $COMPOSE_STATUS != FINISHED ]]; then
@@ -182,10 +182,10 @@ fi
greenprint "🔍 Search for created AMI"
$AWS_CMD ec2 describe-images \
--owners self \
--filters Name=name,Values=${IMAGE_KEY} \
| tee $AMI_DATA > /dev/null
--filters Name=name,Values="${IMAGE_KEY}" \
| tee "$AMI_DATA" > /dev/null
AMI_IMAGE_ID=$(jq -r '.Images[].ImageId' $AMI_DATA)
AMI_IMAGE_ID=$(jq -r '.Images[].ImageId' "$AMI_DATA")
# Stop watching the worker journal.
sudo kill ${WORKER_JOURNAL_PID}
@@ -193,7 +193,7 @@ sudo kill ${WORKER_JOURNAL_PID}
# NOTE(mhayden): Getting TagSpecifications to play along with bash's
# parsing of curly braces and square brackets is nuts, so we just write some
# json and pass it to the aws command.
tee $AWS_INSTANCE_JSON > /dev/null << EOF
tee "$AWS_INSTANCE_JSON" > /dev/null << EOF
{
"TagSpecifications": [
{
@@ -214,19 +214,19 @@ greenprint "👷🏻 Building instance in AWS"
$AWS_CMD ec2 run-instances \
--associate-public-ip-address \
--key-name personal_servers \
--image-id ${AMI_IMAGE_ID} \
--image-id "${AMI_IMAGE_ID}" \
--instance-type t3a.micro \
--user-data file://${WORKSPACE}/test/cloud-init/user-data \
--cli-input-json file://${AWS_INSTANCE_JSON} > /dev/null
--user-data file://"${WORKSPACE}"/test/cloud-init/user-data \
--cli-input-json file://"${AWS_INSTANCE_JSON}" > /dev/null
# Wait for the instance to finish building.
greenprint "⏱ Waiting for AWS instance to be marked as running"
while true; do
$AWS_CMD ec2 describe-instances \
--filters Name=image-id,Values=${AMI_IMAGE_ID} \
| tee $INSTANCE_DATA > /dev/null
--filters Name=image-id,Values="${AMI_IMAGE_ID}" \
| tee "$INSTANCE_DATA" > /dev/null
INSTANCE_STATUS=$(jq -r '.Reservations[].Instances[].State.Name' $INSTANCE_DATA)
INSTANCE_STATUS=$(jq -r '.Reservations[].Instances[].State.Name' "$INSTANCE_DATA")
# Break the loop if our instance is running.
if [[ $INSTANCE_STATUS == running ]]; then
@@ -239,21 +239,21 @@ while true; do
done
# Get data about the instance we built.
INSTANCE_ID=$(jq -r '.Reservations[].Instances[].InstanceId' $INSTANCE_DATA)
PUBLIC_IP=$(jq -r '.Reservations[].Instances[].PublicIpAddress' $INSTANCE_DATA)
INSTANCE_ID=$(jq -r '.Reservations[].Instances[].InstanceId' "$INSTANCE_DATA")
PUBLIC_IP=$(jq -r '.Reservations[].Instances[].PublicIpAddress' "$INSTANCE_DATA")
# Wait for the node to come online.
greenprint "⏱ Waiting for AWS instance to respond to ssh"
for LOOP_COUNTER in {0..30}; do
if ssh-keyscan $PUBLIC_IP 2>&1 > /dev/null; then
if ssh-keyscan "$PUBLIC_IP" 2>&1 > /dev/null; then
echo "SSH is up!"
ssh-keyscan $PUBLIC_IP >> ~/.ssh/known_hosts
ssh-keyscan "$PUBLIC_IP" >> ~/.ssh/known_hosts
break
fi
# Get a screenshot of the instance console.
echo "Getting instance screenshot..."
store_instance_screenshot $INSTANCE_ID $LOOP_COUNTER || true
store_instance_screenshot "$INSTANCE_ID" $LOOP_COUNTER || true
# ssh-keyscan has a 5 second timeout by default, so the pause per loop
# is 10 seconds when you include the following `sleep`.
@@ -264,7 +264,7 @@ done
# Check for our smoke test file.
greenprint "🛃 Checking for smoke test file"
for LOOP_COUNTER in {0..10}; do
RESULTS="$(smoke_test_check $PUBLIC_IP)"
RESULTS="$(smoke_test_check "$PUBLIC_IP")"
if [[ $RESULTS == 1 ]]; then
echo "Smoke test passed! 🥳"
break
@@ -274,10 +274,10 @@ done
# Clean up our mess.
greenprint "🧼 Cleaning up"
SNAPSHOT_ID=$(jq -r '.Images[].BlockDeviceMappings[].Ebs.SnapshotId' $AMI_DATA)
$AWS_CMD ec2 terminate-instances --instance-id ${INSTANCE_ID}
$AWS_CMD ec2 deregister-image --image-id ${AMI_IMAGE_ID}
$AWS_CMD ec2 delete-snapshot --snapshot-id ${SNAPSHOT_ID}
SNAPSHOT_ID=$(jq -r '.Images[].BlockDeviceMappings[].Ebs.SnapshotId' "$AMI_DATA")
$AWS_CMD ec2 terminate-instances --instance-id "${INSTANCE_ID}"
$AWS_CMD ec2 deregister-image --image-id "${AMI_IMAGE_ID}"
$AWS_CMD ec2 delete-snapshot --snapshot-id "${SNAPSHOT_ID}"
# Use the return code of the smoke test to determine if we passed or failed.
if [[ $RESULTS == 1 ]]; then

View file

@@ -19,7 +19,7 @@ case "${ID}-${VERSION_ID}" in
BOOT_LOCATION="https://mirrors.rit.edu/fedora/fedora/linux/releases/32/Everything/x86_64/os/";;
"rhel-8.3")
# Override old rhel-8-beta.json because test needs latest systemd and redhat-release
sudo cp $(dirname $0)/rhel-8-beta.json /etc/osbuild-composer/repositories/
sudo cp $(dirname "$0")/rhel-8-beta.json /etc/osbuild-composer/repositories/
sudo systemctl restart osbuild-composer.socket
IMAGE_TYPE=rhel-edge-commit
OSTREE_REF="rhel/8/${ARCH}/edge"
@@ -114,7 +114,7 @@ COMPOSE_INFO=${TEMPDIR}/compose-info-${IMAGE_KEY}.json
# SSH setup.
SSH_OPTIONS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=5"
SSH_KEY=${WORKSPACE}/test/keyring/id_rsa
chmod 0600 $SSH_KEY
chmod 0600 "$SSH_KEY"
# Get the compose log.
get_compose_log () {
@@ -122,7 +122,7 @@ get_compose_log () {
LOG_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-${COMPOSE_ID}.log
# Download the logs.
sudo composer-cli compose log $COMPOSE_ID | tee $LOG_FILE > /dev/null
sudo composer-cli compose log "$COMPOSE_ID" | tee "$LOG_FILE" > /dev/null
}
# Get the compose metadata.
@@ -131,15 +131,15 @@ get_compose_metadata () {
METADATA_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-${COMPOSE_ID}.json
# Download the metadata.
sudo composer-cli compose metadata $COMPOSE_ID > /dev/null
sudo composer-cli compose metadata "$COMPOSE_ID" > /dev/null
# Find the tarball and extract it.
TARBALL=$(basename $(find . -maxdepth 1 -type f -name "*-metadata.tar"))
tar -xf $TARBALL -C ${TEMPDIR}
rm -f $TARBALL
tar -xf "$TARBALL" -C "${TEMPDIR}"
rm -f "$TARBALL"
# Move the JSON file into place.
cat ${TEMPDIR}/${COMPOSE_ID}.json | jq -M '.' | tee $METADATA_FILE > /dev/null
cat "${TEMPDIR}"/"${COMPOSE_ID}".json | jq -M '.' | tee "$METADATA_FILE" > /dev/null
}
# Build ostree image.
@@ -149,12 +149,12 @@ build_image() {
# Prepare the blueprint for the compose.
greenprint "📋 Preparing blueprint"
sudo composer-cli blueprints push $blueprint_file
sudo composer-cli blueprints depsolve $blueprint_name
sudo composer-cli blueprints push "$blueprint_file"
sudo composer-cli blueprints depsolve "$blueprint_name"
# Get worker unit file so we can watch the journal.
WORKER_UNIT=$(sudo systemctl list-units | egrep -o "osbuild.*worker.*\.service")
sudo journalctl -af -n 1 -u ${WORKER_UNIT} &
sudo journalctl -af -n 1 -u "${WORKER_UNIT}" &
WORKER_JOURNAL_PID=$!
# Start the compose.
@@ -162,17 +162,17 @@ build_image() {
if [[ $blueprint_name == upgrade ]]; then
# Leave new version composer-cli here in case it got updated.
# sudo composer-cli --json compose start-ostree --ref $OSTREE_REF --parent $COMMIT_HASH $blueprint_name $IMAGE_TYPE | tee $COMPOSE_START
sudo composer-cli --json compose start-ostree $blueprint_name $IMAGE_TYPE $OSTREE_REF $COMMIT_HASH | tee $COMPOSE_START
sudo composer-cli --json compose start-ostree "$blueprint_name" $IMAGE_TYPE "$OSTREE_REF" "$COMMIT_HASH" | tee "$COMPOSE_START"
else
sudo composer-cli --json compose start $blueprint_name $IMAGE_TYPE | tee $COMPOSE_START
sudo composer-cli --json compose start "$blueprint_name" $IMAGE_TYPE | tee "$COMPOSE_START"
fi
COMPOSE_ID=$(jq -r '.build_id' $COMPOSE_START)
COMPOSE_ID=$(jq -r '.build_id' "$COMPOSE_START")
# Wait for the compose to finish.
greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}"
while true; do
sudo composer-cli --json compose info ${COMPOSE_ID} | tee $COMPOSE_INFO > /dev/null
COMPOSE_STATUS=$(jq -r '.queue_status' $COMPOSE_INFO)
sudo composer-cli --json compose info "${COMPOSE_ID}" | tee "$COMPOSE_INFO" > /dev/null
COMPOSE_STATUS=$(jq -r '.queue_status' "$COMPOSE_INFO")
# Is the compose finished?
if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
@@ -185,8 +185,8 @@ build_image() {
# Capture the compose logs from osbuild.
greenprint "💬 Getting compose log and metadata"
get_compose_log $COMPOSE_ID
get_compose_metadata $COMPOSE_ID
get_compose_log "$COMPOSE_ID"
get_compose_metadata "$COMPOSE_ID"
# Did the compose finish with success?
if [[ $COMPOSE_STATUS != FINISHED ]]; then
@@ -200,7 +200,7 @@ build_image() {
# Wait for the ssh server up to be.
wait_for_ssh_up () {
SSH_STATUS=$(ssh $SSH_OPTIONS -i ${SSH_KEY} admin@${1} '/bin/bash -c "echo -n READY"')
SSH_STATUS=$(ssh "$SSH_OPTIONS" -i "${SSH_KEY}" admin@"${1}" '/bin/bash -c "echo -n READY"')
if [[ $SSH_STATUS == READY ]]; then
echo 1
else
@@ -211,20 +211,20 @@ wait_for_ssh_up () {
# Clean up our mess.
clean_up () {
greenprint "🧼 Cleaning up"
sudo virsh destroy ${IMAGE_KEY}
sudo virsh destroy "${IMAGE_KEY}"
if [[ $ARCH == aarch64 ]]; then
sudo virsh undefine ${IMAGE_KEY} --nvram
sudo virsh undefine "${IMAGE_KEY}" --nvram
else
sudo virsh undefine ${IMAGE_KEY}
sudo virsh undefine "${IMAGE_KEY}"
fi
# Remove qcow2 file.
sudo rm -f $LIBVIRT_IMAGE_PATH
sudo rm -f "$LIBVIRT_IMAGE_PATH"
# Remove extracted upgrade image-tar.
sudo rm -rf $UPGRADE_PATH
sudo rm -rf "$UPGRADE_PATH"
# Remove "remote" repo.
sudo rm -rf ${HTTPD_PATH}/{repo,compose.json}
sudo rm -rf "${HTTPD_PATH}"/{repo,compose.json}
# Remomve tmp dir.
sudo rm -rf $TEMPDIR
sudo rm -rf "$TEMPDIR"
# Stop httpd
sudo systemctl disable httpd --now
}
@@ -248,7 +248,7 @@ check_result () {
##################################################
# Write a blueprint for ostree image.
tee $BLUEPRINT_FILE > /dev/null << EOF
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "ostree"
description = "A base ostree image"
version = "0.0.1"
@@ -261,7 +261,7 @@ version = "*"
EOF
# Build installation image.
build_image $BLUEPRINT_FILE ostree
build_image "$BLUEPRINT_FILE" ostree
# Start httpd to serve ostree repo.
greenprint "🚀 Starting httpd daemon"
@@ -269,15 +269,15 @@ sudo systemctl start httpd
# Download the image and extract tar into web server root folder.
greenprint "📥 Downloading and extracting the image"
sudo composer-cli compose image ${COMPOSE_ID} > /dev/null
sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null
IMAGE_FILENAME="${COMPOSE_ID}-commit.tar"
HTTPD_PATH="/var/www/html"
sudo tar -xf ${IMAGE_FILENAME} -C ${HTTPD_PATH}
sudo rm -f $IMAGE_FILENAME
sudo tar -xf "${IMAGE_FILENAME}" -C ${HTTPD_PATH}
sudo rm -f "$IMAGE_FILENAME"
# Clean compose and blueprints.
greenprint "Clean up osbuild-composer"
sudo composer-cli compose delete ${COMPOSE_ID} > /dev/null
sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
sudo composer-cli blueprints delete ostree > /dev/null
# Get ostree commit value.
@@ -291,11 +291,11 @@ sudo restorecon -Rv /var/lib/libvirt/images/
# Create qcow2 file for virt install.
greenprint "Create qcow2 file for virt install"
LIBVIRT_IMAGE_PATH=/var/lib/libvirt/images/${IMAGE_KEY}.qcow2
sudo qemu-img create -f qcow2 ${LIBVIRT_IMAGE_PATH} 20G
sudo qemu-img create -f qcow2 "${LIBVIRT_IMAGE_PATH}" 20G
# Write kickstart file for ostree image installation.
greenprint "Generate kickstart file"
tee $KS_FILE > /dev/null << STOPHERE
tee "$KS_FILE" > /dev/null << STOPHERE
text
lang en_US.UTF-8
keyboard us
@@ -350,10 +350,10 @@ STOPHERE
# Install ostree image via anaconda.
greenprint "Install ostree image via anaconda"
sudo virt-install --initrd-inject=${KS_FILE} \
sudo virt-install --initrd-inject="${KS_FILE}" \
--extra-args="ks=file:/ks.cfg console=ttyS0,115200" \
--name=${IMAGE_KEY}\
--disk path=${LIBVIRT_IMAGE_PATH},format=qcow2 \
--name="${IMAGE_KEY}"\
--disk path="${LIBVIRT_IMAGE_PATH}",format=qcow2 \
--ram 3072 \
--vcpus 2 \
--network network=integration,mac=34:49:22:B0:83:30 \
@@ -367,11 +367,11 @@ sudo virt-install --initrd-inject=${KS_FILE} \
# Start VM.
greenprint "Start VM"
sudo virsh start ${IMAGE_KEY}
sudo virsh start "${IMAGE_KEY}"
# Check for ssh ready to go.
greenprint "🛃 Checking for SSH is ready to go"
for LOOP_COUNTER in `seq 0 30`; do
for LOOP_COUNTER in $(seq 0 30); do
RESULTS="$(wait_for_ssh_up $GUEST_ADDRESS)"
if [[ $RESULTS == 1 ]]; then
echo "SSH is ready now! 🥳"
@@ -390,7 +390,7 @@ check_result
##################################################
# Write a blueprint for ostree image.
tee $BLUEPRINT_FILE > /dev/null << EOF
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "upgrade"
description = "An upgrade ostree image"
version = "0.0.2"
@@ -407,25 +407,25 @@ version = "*"
EOF
# Build upgrade image.
build_image $BLUEPRINT_FILE upgrade
build_image "$BLUEPRINT_FILE" upgrade
# Download the image and extract tar into web server root folder.
greenprint "📥 Downloading and extracting the image"
sudo composer-cli compose image ${COMPOSE_ID} > /dev/null
sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null
IMAGE_FILENAME="${COMPOSE_ID}-commit.tar"
UPGRADE_PATH="$(pwd)/upgrade"
mkdir -p $UPGRADE_PATH
sudo tar -xf $IMAGE_FILENAME -C $UPGRADE_PATH
sudo rm -f $IMAGE_FILENAME
mkdir -p "$UPGRADE_PATH"
sudo tar -xf "$IMAGE_FILENAME" -C "$UPGRADE_PATH"
sudo rm -f "$IMAGE_FILENAME"
# Clean compose and blueprints.
greenprint "Clean up osbuild-composer again"
sudo composer-cli compose delete ${COMPOSE_ID} > /dev/null
sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
sudo composer-cli blueprints delete upgrade > /dev/null
# Introduce new ostree commit into repo.
greenprint "Introduce new ostree commit into repo"
sudo ostree pull-local --repo "${HTTPD_PATH}/repo" "${UPGRADE_PATH}/repo" $OSTREE_REF
sudo ostree pull-local --repo "${HTTPD_PATH}/repo" "${UPGRADE_PATH}/repo" "$OSTREE_REF"
sudo ostree summary --update --repo "${HTTPD_PATH}/repo"
# Ensure SELinux is happy with all objects files.
@@ -434,19 +434,19 @@ sudo restorecon -Rv "${HTTPD_PATH}/repo" > /dev/null
# Get ostree commit value.
greenprint "Get ostree image commit value"
UPGRADE_HASH=$(jq -r '."ostree-commit"' < ${UPGRADE_PATH}/compose.json)
UPGRADE_HASH=$(jq -r '."ostree-commit"' < "${UPGRADE_PATH}"/compose.json)
# Upgrade image/commit.
greenprint "Upgrade ostree image/commit"
ssh $SSH_OPTIONS -i ${SSH_KEY} admin@${GUEST_ADDRESS} 'sudo rpm-ostree upgrade'
ssh $SSH_OPTIONS -i ${SSH_KEY} admin@${GUEST_ADDRESS} 'nohup sudo systemctl reboot &>/dev/null & exit'
ssh "$SSH_OPTIONS" -i "${SSH_KEY}" admin@${GUEST_ADDRESS} 'sudo rpm-ostree upgrade'
ssh "$SSH_OPTIONS" -i "${SSH_KEY}" admin@${GUEST_ADDRESS} 'nohup sudo systemctl reboot &>/dev/null & exit'
# Sleep 10 seconds here to make sure vm restarted already
sleep 10
# Check for ssh ready to go.
greenprint "🛃 Checking for SSH is ready to go"
for LOOP_COUNTER in `seq 0 30`; do
for LOOP_COUNTER in $(seq 0 30); do
RESULTS="$(wait_for_ssh_up $GUEST_ADDRESS)"
if [[ $RESULTS == 1 ]]; then
echo "SSH is ready now! 🥳"
@@ -459,7 +459,7 @@ done
check_result
# Add instance IP address into /etc/ansible/hosts
sudo tee ${TEMPDIR}/inventory > /dev/null << EOF
sudo tee "${TEMPDIR}"/inventory > /dev/null << EOF
[ostree_guest]
${GUEST_ADDRESS}
@@ -471,7 +471,7 @@ ansible_ssh_common_args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/
EOF
# Test IoT/Edge OS
ansible-playbook -v -i ${TEMPDIR}/inventory -e image_type=${IMAGE_TYPE} -e ostree_commit=${UPGRADE_HASH} $(dirname $0)/check_ostree.yaml || RESULTS=0
ansible-playbook -v -i "${TEMPDIR}"/inventory -e image_type=${IMAGE_TYPE} -e ostree_commit="${UPGRADE_HASH}" $(dirname "$0")/check_ostree.yaml || RESULTS=0
check_result
# Final success clean up

View file

@@ -105,10 +105,10 @@ COMPOSE_INFO=${TEMPDIR}/compose-info-${IMAGE_KEY}.json
smoke_test_check () {
# Ensure the ssh key has restricted permissions.
SSH_KEY=${WORKSPACE}/test/keyring/id_rsa
chmod 0600 $SSH_KEY
chmod 0600 "$SSH_KEY"
SSH_OPTIONS="-o StrictHostKeyChecking=no -o ConnectTimeout=5"
SMOKE_TEST=$(ssh $SSH_OPTIONS -i ${SSH_KEY} redhat@${1} 'cat /etc/smoke-test.txt')
SMOKE_TEST=$(ssh "$SSH_OPTIONS" -i "${SSH_KEY}" redhat@"${1}" 'cat /etc/smoke-test.txt')
if [[ $SMOKE_TEST == smoke-test ]]; then
echo 1
else
@@ -122,7 +122,7 @@ get_compose_log () {
LOG_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-${IMAGE_TYPE}.log
# Download the logs.
sudo composer-cli compose log $COMPOSE_ID | tee $LOG_FILE > /dev/null
sudo composer-cli compose log "$COMPOSE_ID" | tee "$LOG_FILE" > /dev/null
}
# Get the compose metadata.
@@ -131,15 +131,15 @@ get_compose_metadata () {
METADATA_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-${IMAGE_TYPE}.json
# Download the metadata.
sudo composer-cli compose metadata $COMPOSE_ID > /dev/null
sudo composer-cli compose metadata "$COMPOSE_ID" > /dev/null
# Find the tarball and extract it.
TARBALL=$(basename $(find . -maxdepth 1 -type f -name "*-metadata.tar"))
tar -xf $TARBALL
rm -f $TARBALL
tar -xf "$TARBALL"
rm -f "$TARBALL"
# Move the JSON file into place.
cat ${COMPOSE_ID}.json | jq -M '.' | tee $METADATA_FILE > /dev/null
cat "${COMPOSE_ID}".json | jq -M '.' | tee "$METADATA_FILE" > /dev/null
}
# Write a basic blueprint for our image.
@@ -147,7 +147,7 @@ get_compose_metadata () {
# but it is needed for OpenStack due to issue #698 in osbuild-composer. 😭
# NOTE(mhayden): The cloud-init package isn't included in VHD/Azure images
# by default and it must be added here.
tee $BLUEPRINT_FILE > /dev/null << EOF
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "bash"
description = "A base system with bash"
version = "0.0.1"
@@ -167,24 +167,24 @@ EOF
# Prepare the blueprint for the compose.
greenprint "📋 Preparing blueprint"
sudo composer-cli blueprints push $BLUEPRINT_FILE
sudo composer-cli blueprints push "$BLUEPRINT_FILE"
sudo composer-cli blueprints depsolve bash
# Get worker unit file so we can watch the journal.
WORKER_UNIT=$(sudo systemctl list-units | egrep -o "osbuild.*worker.*\.service")
sudo journalctl -af -n 1 -u ${WORKER_UNIT} &
sudo journalctl -af -n 1 -u "${WORKER_UNIT}" &
WORKER_JOURNAL_PID=$!
# Start the compose
greenprint "🚀 Starting compose"
sudo composer-cli --json compose start bash $IMAGE_TYPE | tee $COMPOSE_START
COMPOSE_ID=$(jq -r '.build_id' $COMPOSE_START)
sudo composer-cli --json compose start bash "$IMAGE_TYPE" | tee "$COMPOSE_START"
COMPOSE_ID=$(jq -r '.build_id' "$COMPOSE_START")
# Wait for the compose to finish.
greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}"
while true; do
sudo composer-cli --json compose info ${COMPOSE_ID} | tee $COMPOSE_INFO > /dev/null
COMPOSE_STATUS=$(jq -r '.queue_status' $COMPOSE_INFO)
sudo composer-cli --json compose info "${COMPOSE_ID}" | tee "$COMPOSE_INFO" > /dev/null
COMPOSE_STATUS=$(jq -r '.queue_status' "$COMPOSE_INFO")
# Is the compose finished?
if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
@@ -197,8 +197,8 @@ done
# Capture the compose logs from osbuild.
greenprint "💬 Getting compose log and metadata"
get_compose_log $COMPOSE_ID
get_compose_metadata $COMPOSE_ID
get_compose_log "$COMPOSE_ID"
get_compose_metadata "$COMPOSE_ID"
# Did the compose finish with success?
if [[ $COMPOSE_STATUS != FINISHED ]]; then
@@ -211,21 +211,21 @@ sudo kill ${WORKER_JOURNAL_PID}
# Download the image.
greenprint "📥 Downloading the image"
sudo composer-cli compose image ${COMPOSE_ID} > /dev/null
sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null
IMAGE_FILENAME=$(basename $(find . -maxdepth 1 -type f -name "*.${IMAGE_EXTENSION}"))
LIBVIRT_IMAGE_PATH=/var/lib/libvirt/images/${IMAGE_KEY}.${IMAGE_EXTENSION}
sudo mv $IMAGE_FILENAME $LIBVIRT_IMAGE_PATH
sudo mv "$IMAGE_FILENAME" "$LIBVIRT_IMAGE_PATH"
# Prepare cloud-init data.
CLOUD_INIT_DIR=$(mktemp -d)
cp ${WORKSPACE}/test/cloud-init/{meta,user}-data ${CLOUD_INIT_DIR}/
cp ${WORKSPACE}/test/cloud-init/network-config ${CLOUD_INIT_DIR}/
cp "${WORKSPACE}"/test/cloud-init/{meta,user}-data "${CLOUD_INIT_DIR}"/
cp "${WORKSPACE}"/test/cloud-init/network-config "${CLOUD_INIT_DIR}"/
# Set up a cloud-init ISO.
greenprint "💿 Creating a cloud-init ISO"
CLOUD_INIT_PATH=/var/lib/libvirt/images/seed.iso
rm -f $CLOUD_INIT_PATH
pushd $CLOUD_INIT_DIR
pushd "$CLOUD_INIT_DIR"
sudo genisoimage -o $CLOUD_INIT_PATH -V cidata \
-r -J user-data meta-data network-config 2>&1 > /dev/null
popd
@@ -239,10 +239,10 @@ greenprint "🚀 Booting the image with libvirt"
if [[ $ARCH == 'ppc64le' ]]; then
# ppc64le has some machine quirks that must be worked around.
sudo virt-install \
--name $IMAGE_KEY \
--name "$IMAGE_KEY" \
--memory 2048 \
--vcpus 2 \
--disk path=${LIBVIRT_IMAGE_PATH} \
--disk path="${LIBVIRT_IMAGE_PATH}" \
--disk path=${CLOUD_INIT_PATH},device=cdrom \
--import \
--os-variant rhel8-unknown \
@@ -252,10 +252,10 @@ if [[ $ARCH == 'ppc64le' ]]; then
elif [[ $ARCH == 's390x' ]]; then
# Our s390x machines are highly constrained on resources.
sudo virt-install \
--name $IMAGE_KEY \
--name "$IMAGE_KEY" \
--memory 512 \
--vcpus 1 \
--disk path=${LIBVIRT_IMAGE_PATH} \
--disk path="${LIBVIRT_IMAGE_PATH}" \
--disk path=${CLOUD_INIT_PATH},device=cdrom \
--import \
--os-variant rhel8-unknown \
@@ -263,10 +263,10 @@ elif [[ $ARCH == 's390x' ]]; then
--network network=integration,mac=34:49:22:B0:83:30
else
sudo virt-install \
--name $IMAGE_KEY \
--name "$IMAGE_KEY" \
--memory 1024 \
--vcpus 2 \
--disk path=${LIBVIRT_IMAGE_PATH} \
--disk path="${LIBVIRT_IMAGE_PATH}" \
--disk path=${CLOUD_INIT_PATH},device=cdrom \
--import \
--os-variant rhel8-unknown \
@@ -287,7 +287,7 @@ esac
# Check for our smoke test file.
greenprint "🛃 Checking for smoke test file in VM"
for LOOP_COUNTER in `seq 0 ${MAX_LOOPS}`; do
for LOOP_COUNTER in $(seq 0 ${MAX_LOOPS}); do
RESULTS="$(smoke_test_check $INSTANCE_ADDRESS)"
if [[ $RESULTS == 1 ]]; then
echo "Smoke test passed! 🥳"
@@ -298,13 +298,13 @@ done
# Clean up our mess.
greenprint "🧼 Cleaning up"
sudo virsh destroy ${IMAGE_KEY}
sudo virsh destroy "${IMAGE_KEY}"
if [[ $ARCH == aarch64 ]]; then
sudo virsh undefine ${IMAGE_KEY} --nvram
sudo virsh undefine "${IMAGE_KEY}" --nvram
else
sudo virsh undefine ${IMAGE_KEY}
sudo virsh undefine "${IMAGE_KEY}"
fi
sudo rm -f $LIBVIRT_IMAGE_PATH $CLOUD_INIT_PATH
sudo rm -f "$LIBVIRT_IMAGE_PATH" $CLOUD_INIT_PATH
# Use the return code of the smoke test to determine if we passed or failed.
if [[ $RESULTS == 1 ]]; then