OSBuild - add support for generic S3 services
jobimpl-osbuild
---------------
Add GenericS3Creds to the struct
Add a method to create AWS with an Endpoint for Generic S3 (with its own credentials file)
Move uploading to S3 and result handling to a separate method (along with the special VMDK handling)
Adjust the AWS S3 case to the new method
Implement a new case for uploading to a generic S3 service

awscloud
--------
Add wrapper methods for endpoint support
Set the endpoint on the AWS session
Set s3ForcePathStyle to true if an endpoint was set

Target
------
Define a new target type for the GenericS3Target and its Options
Handle unmarshaling of the target options and result for the Generic S3

Weldr
-----
Add support for only uploading to AWS S3
Define new structures for AWS S3 and Generic S3 (based on AWS S3)
Handle unmarshaling of the provider settings' upload settings

main
----
Add a section in the main config for the Generic S3 service credentials
If provided, pass the credentials file name to the osbuild job implementation

Upload Utility
--------------
Add the upload-generic-s3 utility

Makefile
--------
Do not fail if the bin directory already exists

Tests
-----
Add test cases for both AWS and a generic S3 server
Add a generic s3_test.sh file for both test cases and add it to the tests RPM spec
Adjust the libvirt test case script to support already created images

GitLabCI
--------
Extend the libvirt test case to include the two new tests
parent 01880a76a2
commit bee14bf392
15 changed files with 684 additions and 137 deletions
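As a rough illustration of what the generic S3 support described above amounts to in practice (not part of this commit), an S3-compatible service such as MinIO can be exercised with the stock AWS CLI by overriding the endpoint and forcing path-style addressing, which mirrors the endpoint and s3ForcePathStyle handling added to awscloud. The endpoint, bucket, credentials path, and file names below are placeholders.

#!/bin/bash
# Hypothetical smoke test against an S3-compatible service using the AWS CLI.
# Every concrete value here is a placeholder, not something from this commit.
export AWS_SHARED_CREDENTIALS_FILE="${HOME}/.generic-s3-credentials"
S3_ENDPOINT="http://localhost:9000"   # e.g. a local MinIO instance
BUCKET="imagebuilder"

# Path-style addressing (http://host:port/bucket/key) is the CLI equivalent of
# the s3ForcePathStyle=true the worker sets once an endpoint is configured.
aws configure set default.s3.addressing_style path

aws --endpoint-url "${S3_ENDPOINT}" s3 cp ./disk.qcow2 "s3://${BUCKET}/disk.qcow2"
aws --endpoint-url "${S3_ENDPOINT}" s3 ls "s3://${BUCKET}/"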
@@ -17,6 +17,8 @@ IMAGE_TYPE=${1:-qcow2}
# Take the boot type passed to the script or use BIOS by default if nothing
# was passed.
BOOT_TYPE=${2:-bios}
# Take the image from the url passed to the script or build it by default if nothing was passed.
LIBVIRT_IMAGE_URL=${3:-""}

# Select the file extension based on the image that we are building.
IMAGE_EXTENSION=$IMAGE_TYPE
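With the new third argument, the libvirt test can either build the image itself or boot an image it downloads from a URL. A sketch of both invocations follows; the URL is a placeholder, and only the URL form is what the new s3_test.sh further down actually uses.

# Existing behaviour: compose the image, then boot it.
/usr/libexec/osbuild-composer-test/libvirt_test.sh qcow2 bios

# New behaviour: skip the compose and boot an already built image fetched
# from the given URL (placeholder URL, not from this commit).
/usr/libexec/osbuild-composer-test/libvirt_test.sh qcow2 bios "http://example.com/disk.qcow2"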
@@ -131,82 +133,90 @@ get_compose_metadata () {
sudo cat "${COMPOSE_ID}".json | jq -M '.' | tee "$METADATA_FILE" > /dev/null
}

# Write a basic blueprint for our image.
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "bp"
description = "A base system"
version = "0.0.1"
EOF

# Prepare the blueprint for the compose.
greenprint "📋 Preparing blueprint"
sudo composer-cli blueprints push "$BLUEPRINT_FILE"
sudo composer-cli blueprints depsolve bp

# Get worker unit file so we can watch the journal.
WORKER_UNIT=$(sudo systemctl list-units | grep -o -E "osbuild.*worker.*\.service")
sudo journalctl -af -n 1 -u "${WORKER_UNIT}" &
WORKER_JOURNAL_PID=$!
# Stop watching the worker journal when exiting.
trap 'sudo pkill -P ${WORKER_JOURNAL_PID}' EXIT

# Start the compose
greenprint "🚀 Starting compose"
sudo composer-cli --json compose start bp "$IMAGE_TYPE" | tee "$COMPOSE_START"
if rpm -q --quiet weldr-client; then
COMPOSE_ID=$(jq -r '.body.build_id' "$COMPOSE_START")
else
COMPOSE_ID=$(jq -r '.build_id' "$COMPOSE_START")
fi

# Wait for the compose to finish.
greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}"
while true; do
sudo composer-cli --json compose info "${COMPOSE_ID}" | tee "$COMPOSE_INFO" > /dev/null
if rpm -q --quiet weldr-client; then
COMPOSE_STATUS=$(jq -r '.body.queue_status' "$COMPOSE_INFO")
else
COMPOSE_STATUS=$(jq -r '.queue_status' "$COMPOSE_INFO")
fi

# Is the compose finished?
if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
break
fi

# Wait 5 seconds and try again.
sleep 5
done

# Capture the compose logs from osbuild.
greenprint "💬 Getting compose log and metadata"
get_compose_log "$COMPOSE_ID"
get_compose_metadata "$COMPOSE_ID"

# Kill the journal monitor immediately and remove the trap
sudo pkill -P ${WORKER_JOURNAL_PID}
trap - EXIT

# Did the compose finish with success?
if [[ $COMPOSE_STATUS != FINISHED ]]; then
echo "Something went wrong with the compose. 😢"
exit 1
fi

# Download the image.
greenprint "📥 Downloading the image"

# Current $PWD is inside /tmp, there may not be enough space for an image.
# Let's use a bigger temporary directory for this operation.
BIG_TEMP_DIR=/var/lib/osbuild-composer-tests
sudo rm -rf "${BIG_TEMP_DIR}" || true
sudo mkdir "${BIG_TEMP_DIR}"
pushd "${BIG_TEMP_DIR}"
sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null
IMAGE_FILENAME=$(basename "$(find . -maxdepth 1 -type f -name "*.${IMAGE_EXTENSION}")")

if [ -z "${LIBVIRT_IMAGE_URL}" ]; then
# Write a basic blueprint for our image.
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "bp"
description = "A base system"
version = "0.0.1"
EOF

# Prepare the blueprint for the compose.
greenprint "📋 Preparing blueprint"
sudo composer-cli blueprints push "$BLUEPRINT_FILE"
sudo composer-cli blueprints depsolve bp

# Get worker unit file so we can watch the journal.
WORKER_UNIT=$(sudo systemctl list-units | grep -o -E "osbuild.*worker.*\.service")
sudo journalctl -af -n 1 -u "${WORKER_UNIT}" &
WORKER_JOURNAL_PID=$!
# Stop watching the worker journal when exiting.
trap 'sudo pkill -P ${WORKER_JOURNAL_PID}' EXIT

# Start the compose
greenprint "🚀 Starting compose"
sudo composer-cli --json compose start bp "$IMAGE_TYPE" | tee "$COMPOSE_START"
if rpm -q --quiet weldr-client; then
COMPOSE_ID=$(jq -r '.body.build_id' "$COMPOSE_START")
else
COMPOSE_ID=$(jq -r '.build_id' "$COMPOSE_START")
fi

# Wait for the compose to finish.
greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}"
while true; do
sudo composer-cli --json compose info "${COMPOSE_ID}" | tee "$COMPOSE_INFO" > /dev/null
if rpm -q --quiet weldr-client; then
COMPOSE_STATUS=$(jq -r '.body.queue_status' "$COMPOSE_INFO")
else
COMPOSE_STATUS=$(jq -r '.queue_status' "$COMPOSE_INFO")
fi

# Is the compose finished?
if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
break
fi

# Wait 5 seconds and try again.
sleep 5
done

# Capture the compose logs from osbuild.
greenprint "💬 Getting compose log and metadata"
get_compose_log "$COMPOSE_ID"
get_compose_metadata "$COMPOSE_ID"

# Kill the journal monitor immediately and remove the trap
sudo pkill -P ${WORKER_JOURNAL_PID}
trap - EXIT

# Did the compose finish with success?
if [[ $COMPOSE_STATUS != FINISHED ]]; then
echo "Something went wrong with the compose. 😢"
exit 1
fi

# Download the image.
greenprint "📥 Downloading the image"

pushd "${BIG_TEMP_DIR}"
sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null
IMAGE_FILENAME=$(basename "$(find . -maxdepth 1 -type f -name "*.${IMAGE_EXTENSION}")")
LIBVIRT_IMAGE_PATH=/var/lib/libvirt/images/${IMAGE_KEY}.${IMAGE_EXTENSION}
sudo mv "$IMAGE_FILENAME" "$LIBVIRT_IMAGE_PATH"
popd
else
pushd "${BIG_TEMP_DIR}"
LIBVIRT_IMAGE_PATH=/var/lib/libvirt/images/${IMAGE_KEY}.${IMAGE_EXTENSION}
sudo mv "$IMAGE_FILENAME" "$LIBVIRT_IMAGE_PATH"
popd
sudo curl -o "${LIBVIRT_IMAGE_PATH}" "${LIBVIRT_IMAGE_URL}"
popd
fi

# Prepare cloud-init data.
CLOUD_INIT_DIR=$(mktemp -d)
@@ -323,8 +333,10 @@ else
fi
sudo rm -f "$LIBVIRT_IMAGE_PATH" $CLOUD_INIT_PATH

# Also delete the compose so we don't run out of disk space
sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
if [ -z "${LIBVIRT_IMAGE_URL}" ]; then
# Also delete the compose so we don't run out of disk space
sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
fi

# Use the return code of the smoke test to determine if we passed or failed.
if [[ $RESULTS == 1 ]]; then
tools/s3_test.sh (new executable file, 145 lines)
@@ -0,0 +1,145 @@
#!/bin/bash
set -euo pipefail

source /usr/libexec/osbuild-composer-test/set-env-variables.sh

TEST_ID=${1}
S3_PROVIDER_CONFIG_FILE=${2}
S3_CHECK_CMD=${3}
S3_GET_URL_CMD=${4}
S3_DELETE_CMD=${5:-""}

# Colorful output.
function greenprint {
    echo -e "\033[1;32m[$(date -Isecond)] ${1}\033[0m"
}

function get_build_info() {
    key="$1"
    fname="$2"
    if rpm -q --quiet weldr-client; then
        key=".body${key}"
    fi
    jq -r "${key}" "${fname}"
}

TEMPDIR=$(mktemp -d)
function cleanup() {
    sudo rm -rf "$TEMPDIR"
}
trap cleanup EXIT

# Jenkins sets WORKSPACE to the job workspace, but if this script runs
# outside of Jenkins, we can set up a temporary directory instead.
if [[ ${WORKSPACE:-empty} == empty ]]; then
    WORKSPACE=$(mktemp -d)
fi

# Set up temporary files.
BLUEPRINT_FILE=${TEMPDIR}/blueprint.toml
BLUEPRINT_NAME=empty
COMPOSE_START=${TEMPDIR}/compose-start-${TEST_ID}.json
COMPOSE_INFO=${TEMPDIR}/compose-info-${TEST_ID}.json

# Get the compose log.
get_compose_log () {
    COMPOSE_ID=$1
    LOG_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-aws.log

    # Download the logs.
    sudo composer-cli compose log "$COMPOSE_ID" | tee "$LOG_FILE" > /dev/null
}

# Get the compose metadata.
get_compose_metadata () {
    COMPOSE_ID=$1
    METADATA_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-aws.json

    # Download the metadata.
    sudo composer-cli compose metadata "$COMPOSE_ID" > /dev/null

    # Find the tarball and extract it.
    TARBALL=$(basename "$(find . -maxdepth 1 -type f -name "*-metadata.tar")")
    sudo tar -xf "$TARBALL"
    sudo rm -f "$TARBALL"

    # Move the JSON file into place.
    sudo cat "${COMPOSE_ID}".json | jq -M '.' | tee "$METADATA_FILE" > /dev/null
}

# Write a basic blueprint for our image.
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "${BLUEPRINT_NAME}"
description = "A base system with bash"
version = "0.0.1"
EOF

# Prepare the blueprint for the compose.
greenprint "📋 Preparing blueprint"
sudo composer-cli blueprints push "$BLUEPRINT_FILE"
sudo composer-cli blueprints depsolve ${BLUEPRINT_NAME}

# Get worker unit file so we can watch the journal.
WORKER_UNIT=$(sudo systemctl list-units | grep -o -E "osbuild.*worker.*\.service")
sudo journalctl -af -n 1 -u "${WORKER_UNIT}" &
WORKER_JOURNAL_PID=$!
# Stop watching the worker journal when exiting.
trap 'sudo pkill -P ${WORKER_JOURNAL_PID}' EXIT

# Start the compose and upload to AWS.
greenprint "🚀 Starting compose"
sudo composer-cli --json compose start ${BLUEPRINT_NAME} qcow2 "$TEST_ID" "$S3_PROVIDER_CONFIG_FILE" | tee "$COMPOSE_START"
COMPOSE_ID=$(get_build_info ".build_id" "$COMPOSE_START")

# Wait for the compose to finish.
greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}"
while true; do
    sudo composer-cli --json compose info "${COMPOSE_ID}" | tee "$COMPOSE_INFO" > /dev/null
    COMPOSE_STATUS=$(get_build_info ".queue_status" "$COMPOSE_INFO")

    # Is the compose finished?
    if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
        break
    fi

    # Wait 30 seconds and try again.
    sleep 30
done

# Capture the compose logs from osbuild.
greenprint "💬 Getting compose log and metadata"
get_compose_log "$COMPOSE_ID"
get_compose_metadata "$COMPOSE_ID"

# Kill the journal monitor immediately and remove the trap
sudo pkill -P ${WORKER_JOURNAL_PID}
trap - EXIT

# Did the compose finish with success?
if [[ $COMPOSE_STATUS != FINISHED ]]; then
    echo "Something went wrong with the compose. 😢"
    exit 1
fi

# Delete the compose so we don't run out of disk space
sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null

# Find the image that we made in the AWS Bucket
greenprint "🔍 Search for created image"
if ! bash -c "${S3_CHECK_CMD}"; then
    echo "Failed to find the image in the S3 Bucket"
    exit 1
fi

function removeImageFromS3() {
    bash -c "${S3_DELETE_CMD}"
}
if [ -n "${S3_DELETE_CMD}" ]; then
    trap removeImageFromS3 EXIT
fi

# Generate a URL for the image
QCOW2_IMAGE_URL=$(bash -c "${S3_GET_URL_CMD}")

# Run the image on KVM
/usr/libexec/osbuild-composer-test/libvirt_test.sh qcow2 bios "${QCOW2_IMAGE_URL}"
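To tie the pieces together, here is a sketch of how this script might be driven for the generic S3 case. The exact arguments used by the GitLab CI jobs are not part of this diff, so the test id, provider config path, endpoint, bucket, and object key below are all placeholders; the three command strings only need to check for, return a URL for, and delete the uploaded image.

# Hypothetical invocation against an S3-compatible server; every value below
# is a placeholder rather than something taken from this commit.
ENDPOINT="http://localhost:9000"
BUCKET="imagebuilder-ci"
KEY="generic-s3-test-image.qcow2"

/usr/libexec/osbuild-composer-test/s3_test.sh \
    generic-s3 \
    /tmp/generic-s3-upload.toml \
    "aws --endpoint-url ${ENDPOINT} s3api head-object --bucket ${BUCKET} --key ${KEY}" \
    "aws --endpoint-url ${ENDPOINT} s3 presign s3://${BUCKET}/${KEY}" \
    "aws --endpoint-url ${ENDPOINT} s3 rm s3://${BUCKET}/${KEY}"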