OSBuild - add support for generic S3 services

jobimpl-osbuild
---------------
Add a GenericS3Creds field to the job implementation struct
Add a method to create an AWS client with an endpoint for a generic S3 service (with its own credentials file)
Move the S3 upload and result handling into a separate method (along with the special VMDK handling)
Adjust the AWS S3 case to use the new method
Implement a new case for uploading to a generic S3 service (sketched below)
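A rough sketch of that split, with illustrative names only (neither the struct nor the method names are taken from this diff):

// Sketch under the assumptions stated above; identifiers are illustrative.
package osbuild

// s3Uploader stands in for the awscloud client the job implementation uses.
type s3Uploader interface {
	Upload(filename, bucket, key string) (location string, err error)
}

type OSBuildJobImpl struct {
	// ...existing fields (e.g. plain AWS credentials)...
	GenericS3Creds string // path to a credentials file for generic S3 services
}

// uploadToS3 holds the upload and result handling shared by the AWS S3
// case and the new generic S3 case; per the description above, the real
// method also performs the special VMDK handling before uploading.
func (impl *OSBuildJobImpl) uploadToS3(client s3Uploader, bucket, key, imagePath string) error {
	_, err := client.Upload(imagePath, bucket, key)
	return err
}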

awscloud
--------
Add wrapper methods for endpoint support
Set the endpoint on the AWS session
Set S3ForcePathStyle to true when an endpoint is set (see the sketch below)
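A hedged sketch of the endpoint handling with aws-sdk-go v1 (the aws.Config fields are real SDK API; the helper name is an assumption):

package awscloud

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
)

// newSessionWithEndpoint mirrors the plain session helper but sets the
// custom endpoint, and forces path-style addressing because most
// generic S3 implementations do not serve virtual-hosted bucket URLs.
func newSessionWithEndpoint(region, endpoint string, creds *credentials.Credentials) (*session.Session, error) {
	cfg := aws.Config{
		Region:      aws.String(region),
		Credentials: creds,
	}
	if endpoint != "" {
		cfg.Endpoint = aws.String(endpoint)
		cfg.S3ForcePathStyle = aws.Bool(true)
	}
	return session.NewSession(&cfg)
}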

Target
------
Define a new target type, GenericS3Target, along with its options
Handle unmarshaling of the target options and result for the generic S3 target (sketched below)
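Plausible shape of the new options, with field names taken from the provider configs in the test scripts below; the embedding itself is an assumption:

package target

// Field names follow the keys used by the provider configs in the test
// scripts on this page.
type AWSS3TargetOptions struct {
	Region          string `json:"region"`
	AccessKeyID     string `json:"accessKeyID"`
	SecretAccessKey string `json:"secretAccessKey"`
	Bucket          string `json:"bucket"`
	Key             string `json:"key"`
}

// GenericS3TargetOptions extends the AWS S3 options with the service
// endpoint; unmarshaling dispatches on the target name just like the
// existing target types.
type GenericS3TargetOptions struct {
	AWSS3TargetOptions
	Endpoint string `json:"endpoint"`
}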

Weldr
-----
Add support for only uploading to AWS S3 (the aws.s3 provider)
Define new structures for AWS S3 and generic S3 (based on AWS S3)
Handle unmarshaling of each provider's upload settings (see the sketch below)
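A hedged sketch of that unmarshaling; the provider strings "aws.s3" and "generic.s3" come from the test configs below, the rest is illustrative:

package weldr

import (
	"encoding/json"
	"fmt"
)

// Field shapes mirror the S3 target options; encoding/json matches
// field names case-insensitively, so tags are omitted in this sketch.
type awsS3UploadSettings struct {
	Region, AccessKeyID, SecretAccessKey, Bucket, Key string
}

type genericS3UploadSettings struct {
	awsS3UploadSettings
	Endpoint string
}

func unmarshalUploadSettings(provider string, raw json.RawMessage) (interface{}, error) {
	switch provider {
	case "aws.s3":
		settings := new(awsS3UploadSettings)
		return settings, json.Unmarshal(raw, settings)
	case "generic.s3":
		settings := new(genericS3UploadSettings)
		return settings, json.Unmarshal(raw, settings)
	default:
		return nil, fmt.Errorf("unknown provider %q", provider)
	}
}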

main
----
Add a section to the main config for the generic S3 service credentials
If provided, pass the credentials file name to the osbuild job implementation
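The diff excerpt does not show the new config keys, so the following shape is a placeholder for illustration only:

package main

// genericS3Config is a hypothetical shape for the new section; the
// real TOML key names may differ.
type genericS3Config struct {
	Credentials string `toml:"credentials"` // path to a shared credentials file
}

type composerConfig struct {
	// ...existing sections...
	GenericS3 *genericS3Config `toml:"generic_s3"`
}

When the section is present, its credentials path would feed the GenericS3Creds field sketched under jobimpl-osbuild.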

Upload Utility
--------------
Add upload-generic-s3 utility
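The utility's source is not shown on this page; a minimal stand-alone sketch with aws-sdk-go v1 might look as follows (all flag names are assumptions):

package main

import (
	"flag"
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	endpoint := flag.String("endpoint", "", "URL of the generic S3 service")
	region := flag.String("region", "us-east-1", "region name expected by the service")
	bucket := flag.String("bucket", "", "target bucket")
	key := flag.String("key", "", "object key for the uploaded image")
	image := flag.String("image", "", "path of the image file to upload")
	flag.Parse()

	sess, err := session.NewSession(&aws.Config{
		Endpoint:         aws.String(*endpoint),
		Region:           aws.String(*region),
		S3ForcePathStyle: aws.Bool(true), // generic S3 services usually need path-style URLs
		Credentials: credentials.NewStaticCredentials(
			os.Getenv("AWS_ACCESS_KEY_ID"),
			os.Getenv("AWS_SECRET_ACCESS_KEY"),
			"",
		),
	})
	if err != nil {
		log.Fatal(err)
	}

	f, err := os.Open(*image)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	result, err := s3manager.NewUploader(sess).Upload(&s3manager.UploadInput{
		Bucket: bucket,
		Key:    key,
		Body:   f,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("uploaded to %s", result.Location)
}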

Makefile
--------
Do not fail if the bin directory already exists

Tests
-----
Add test cases for both AWS S3 and a generic S3 server
Add a common s3_test.sh script used by both test cases and add it to the tests RPM spec
Adjust the libvirt test case script to support already-created images
GitLabCI - extend the libvirt test case to include the two new tests
Ygal Blum 2022-03-28 15:34:30 +03:00 committed by Tomáš Hozza
parent 01880a76a2
commit bee14bf392
15 changed files with 684 additions and 137 deletions

test/cases/aws_s3.sh (new executable file)

@@ -0,0 +1,72 @@
#!/bin/bash
set -euo pipefail
source /usr/libexec/osbuild-composer-test/set-env-variables.sh
# Container image used for cloud provider CLI tools
CONTAINER_IMAGE_CLOUD_TOOLS="quay.io/osbuild/cloud-tools:latest"
# Provision the software under test.
/usr/libexec/osbuild-composer-test/provision.sh
# Check available container runtime
if which podman 2>/dev/null >&2; then
CONTAINER_RUNTIME=podman
elif which docker 2>/dev/null >&2; then
CONTAINER_RUNTIME=docker
else
echo "No container runtime found, install podman or docker."
exit 2
fi
TEMPDIR=$(mktemp -d)
function cleanup() {
sudo rm -rf "$TEMPDIR"
}
trap cleanup EXIT
# Generate a string, which can be used as a predictable resource name,
# especially when running the test in CI where we may need to clean up
# resources in case the test unexpectedly fails or is canceled
CI="${CI:-false}"
if [[ "$CI" == true ]]; then
# in CI, imitate GenerateCIArtifactName() from internal/test/helpers.go
TEST_ID="$DISTRO_CODE-$ARCH-$CI_COMMIT_BRANCH-$CI_BUILD_ID"
else
# if not running in CI, generate an ID that does not rely on specific env variables
TEST_ID=$(uuidgen)
fi
# Set up temporary files.
AWS_S3_PROVIDER_CONFIG=${TEMPDIR}/aws.toml
# We need awscli to talk to AWS.
if ! hash aws; then
echo "Using 'awscli' from a container"
sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_IMAGE_CLOUD_TOOLS}
AWS_CMD="sudo ${CONTAINER_RUNTIME} run --rm \
-e AWS_ACCESS_KEY_ID=${V2_AWS_ACCESS_KEY_ID} \
-e AWS_SECRET_ACCESS_KEY=${V2_AWS_SECRET_ACCESS_KEY} \
${CONTAINER_IMAGE_CLOUD_TOOLS} aws --region $AWS_REGION"
else
echo "Using pre-installed 'aws' from the system"
AWS_CMD="aws --region $AWS_REGION"
fi
$AWS_CMD --version
# Write an AWS TOML file
tee "$AWS_S3_PROVIDER_CONFIG" > /dev/null << EOF
provider = "aws.s3"
[settings]
accessKeyID = "${V2_AWS_ACCESS_KEY_ID}"
secretAccessKey = "${V2_AWS_SECRET_ACCESS_KEY}"
bucket = "${AWS_BUCKET}"
region = "${AWS_REGION}"
key = "${TEST_ID}"
EOF
IMAGE_OBJECT_KEY="${AWS_BUCKET}/${TEST_ID}-disk.qcow2"
/usr/libexec/osbuild-composer-test/s3_test.sh "${TEST_ID}" "${AWS_S3_PROVIDER_CONFIG}" "${AWS_CMD} s3 ls ${IMAGE_OBJECT_KEY}" "${AWS_CMD} s3 presign ${IMAGE_OBJECT_KEY}" "${AWS_CMD} s3 rm s3://${IMAGE_OBJECT_KEY}"

test/cases/generic_s3.sh (new executable file)

@@ -0,0 +1,98 @@
#!/bin/bash
set -euo pipefail
source /usr/libexec/osbuild-composer-test/set-env-variables.sh
# Container images for MinIO Server and Client
CONTAINER_MINIO_CLIENT="quay.io/minio/mc:latest"
CONTAINER_MINIO_SERVER="quay.io/minio/minio:latest"
# Provision the software under test.
/usr/libexec/osbuild-composer-test/provision.sh
# Check available container runtime
if which podman 2>/dev/null >&2; then
CONTAINER_RUNTIME=podman
elif which docker 2>/dev/null >&2; then
CONTAINER_RUNTIME=docker
else
echo "No container runtime found, install podman or docker."
exit 2
fi
TEMPDIR=$(mktemp -d)
function cleanup() {
sudo rm -rf "$TEMPDIR"
}
trap cleanup EXIT
# Generate a string, which can be used as a predictable resource name,
# especially when running the test in CI where we may need to clean up
# resources in case the test unexpectedly fails or is canceled
CI="${CI:-false}"
if [[ "$CI" == true ]]; then
# in CI, imitate GenerateCIArtifactName() from internal/test/helpers.go
TEST_ID="$DISTRO_CODE-$ARCH-$CI_COMMIT_BRANCH-$CI_BUILD_ID"
else
# if not running in CI, generate an ID that does not rely on specific env variables
TEST_ID=$(uuidgen)
fi
# Set up temporary files.
MINIO_CONFIG_DIR=${TEMPDIR}/minio-config
MINIO_PROVIDER_CONFIG=${TEMPDIR}/minio.toml
# We need MinIO Client to talk to the MinIO Server.
if ! hash mc; then
echo "Using 'mc' from a container"
sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_MINIO_CLIENT}
MC_CMD="sudo ${CONTAINER_RUNTIME} run --rm \
-v ${MINIO_CONFIG_DIR}:${MINIO_CONFIG_DIR}:Z \
--network=host \
${CONTAINER_MINIO_CLIENT} --config-dir=${MINIO_CONFIG_DIR}"
else
echo "Using pre-installed 'mc' from the system"
MC_CMD="mc --config-dir=${MINIO_CONFIG_DIR}"
fi
mkdir "${MINIO_CONFIG_DIR}"
$MC_CMD --version
MINIO_CONTAINER_NAME="minio-server"
MINIO_ENDPOINT="http://localhost:9000"
MINIO_ROOT_USER="X29DU5Q6C5NKDQ8PLGVT"
MINIO_ROOT_PASSWORD=$(date +%s | sha256sum | base64 | head -c 32)
MINIO_SERVER_ALIAS=local
MINIO_BUCKET="ci-test"
MINIO_REGION="us-east-1"
# Write a generic S3 provider TOML file
tee "$MINIO_PROVIDER_CONFIG" > /dev/null << EOF
provider = "generic.s3"
[settings]
endpoint = "${MINIO_ENDPOINT}"
accessKeyID = "${MINIO_ROOT_USER}"
secretAccessKey = "${MINIO_ROOT_PASSWORD}"
bucket = "${MINIO_BUCKET}"
region = "${MINIO_REGION}"
key = "${TEST_ID}"
EOF
# Start the MinIO Server
sudo ${CONTAINER_RUNTIME} run --rm -d \
--name ${MINIO_CONTAINER_NAME} \
-p 9000:9000 \
-e MINIO_BROWSER=off \
-e MINIO_ROOT_USER="${MINIO_ROOT_USER}" \
-e MINIO_ROOT_PASSWORD="${MINIO_ROOT_PASSWORD}" \
${CONTAINER_MINIO_SERVER} server /data
# Kill the server once we're done; chain the tempdir cleanup, since a
# new EXIT trap replaces the one set earlier
trap 'sudo ${CONTAINER_RUNTIME} kill ${MINIO_CONTAINER_NAME}; cleanup' EXIT
# Configure the local server
${MC_CMD} alias set ${MINIO_SERVER_ALIAS} ${MINIO_ENDPOINT} ${MINIO_ROOT_USER} "${MINIO_ROOT_PASSWORD}"
# Create the bucket
${MC_CMD} mb ${MINIO_SERVER_ALIAS}/${MINIO_BUCKET}
IMAGE_OBJECT_KEY="${MINIO_SERVER_ALIAS}/${MINIO_BUCKET}/${TEST_ID}-disk.qcow2"
/usr/libexec/osbuild-composer-test/s3_test.sh "${TEST_ID}" "${MINIO_PROVIDER_CONFIG}" "${MC_CMD} ls ${IMAGE_OBJECT_KEY}" "${MC_CMD} --json share download ${IMAGE_OBJECT_KEY} | jq .share | tr -d '\"'"