debian-forge-composer/test/cases/generic_s3.sh
Tomas Hozza a4b0efb278 provision.sh: add none authentication method for on-premise scenario
`tools/provision.sh` always provisions the SUT in the same way for
both the Service scenario and the on-premise scenario. While this is
not causing any issues, it does not realistically represent how we
expect osbuild-composer and worker to be used in these scenarios.

The script currently supports the following authentication options:
- `none`
  - Intended for the on-premise scenario with Weldr API.
  - NO certificates are generated.
  - NO osbuild-composer configuration file is created.
  - NO osbuild-worker configuration file is created. This means that no
    cloud provider credentials are configured directly in the worker.
  - Only the local worker is started and used.
  - Only the Weldr API socket is started.
  - Appropriate repository definitions are copied to
    `/etc/osbuild-composer/repositories/`.
- `jwt`
  - Intended for the Service scenario with Cloud API.
  - Should be the only method supported in the Service scenario in the
    future.
  - Certificates are generated and copied to `/etc/osbuild-composer`.
  - osbuild-composer configuration file is created and configured for
    JWT authentication.
  - osbuild-worker configuration file is created, configured for JWT
    authentication and with appropriate cloud provider credentials.
  - Local worker unit is masked. Only the remote worker is used (the
    socket is started and one remote-worker instance is created).
  - Only the Cloud API socket is started (Weldr API socket is stopped).
  - NO repository definitions are copied to
    `/etc/osbuild-composer/repositories/`.
- `tls`
  - Intended for the Service scenario with Cloud API.
  - Should eventually go away.
  - Certificates are generated and copied to `/etc/osbuild-composer`.
  - osbuild-composer configuration file is created and configured for
    TLS client cert authentication.
  - osbuild-worker configuration file is created, configured for TLS
    authentication and with appropriate cloud provider credentials.
  - Services and sockets are started as they used to be originally:
    - Both local and remote worker sockets are started.
    - Both Weldr and Cloud API sockets are started.
    - Only the local worker unit will be started automatically.
  - NO repository definitions are copied to
    `/etc/osbuild-composer/repositories/`.
2022-08-04 11:55:43 +02:00

126 lines
4 KiB
Bash
Executable file

#!/bin/bash
# Integration test: upload a composed image to a generic S3-compatible object
# store, using a local MinIO server as the upload target.
# Presumably sets ${ID} and ${VERSION_ID} (distro identification) used below
# — TODO confirm against set-env-variables.sh.
source /usr/libexec/osbuild-composer-test/set-env-variables.sh
# In nightly pipelines, skip distros where this test is not enabled.
if [ "${NIGHTLY:=false}" == "true" ]; then
case "${ID}-${VERSION_ID}" in
"rhel-8.7" | "rhel-9.1")
echo "$0 is not enabled for ${ID}-${VERSION_ID} skipping..."
exit 0
;;
*)
;;
esac
fi
# Strict mode: abort on errors, unset variables, and failed pipeline stages.
set -euo pipefail
# Container images for MinIO Server and Client
CONTAINER_MINIO_CLIENT="quay.io/minio/mc:latest"
CONTAINER_MINIO_SERVER="quay.io/minio/minio:latest"
# Provision the software under test. "none" selects the on-premise scenario:
# no certificates, no composer/worker config files, only the local worker and
# the Weldr API socket are set up (see provision.sh).
/usr/libexec/osbuild-composer-test/provision.sh none
# Detect an available container runtime, preferring podman over docker.
# `command -v` is the POSIX-specified way to test for a tool; the legacy
# `which` is an external program with inconsistent behavior across distros.
if command -v podman >/dev/null 2>&1; then
    CONTAINER_RUNTIME=podman
elif command -v docker >/dev/null 2>&1; then
    CONTAINER_RUNTIME=docker
else
    # Diagnostics belong on stderr, not stdout.
    echo "No container runtime found, install podman or docker." >&2
    exit 2
fi
# Remove all scratch files on any exit path (success, failure, or signal).
cleanup() {
    echo "== Script execution stopped or finished - Cleaning up =="
    sudo rm -rf "$TEMPDIR"
}

# Throw-away directory for generated configuration files.
TEMPDIR=$(mktemp -d)
trap cleanup EXIT
# Generate a string, which can be used as a predictable resource name,
# especially when running the test in CI where we may need to clean up
# resources in case the test unexpectedly fails or is canceled
CI="${CI:-false}"
if [[ "$CI" == true ]]; then
# in CI, imitate GenerateCIArtifactName() from internal/test/helpers.go
TEST_ID="$DISTRO_CODE-$ARCH-$CI_COMMIT_BRANCH-$CI_BUILD_ID"
else
# if not running in Jenkins, generate ID not relying on specific env variables
TEST_ID=$(uuidgen);
fi
# Set up temporary files.
MINIO_CONFIG_DIR=${TEMPDIR}/minio-config
MINIO_PROVIDER_CONFIG=${TEMPDIR}/minio.toml
# We need MinIO Client to talk to the MinIO Server.
# If `mc` is not installed on the host, run it from a container instead.
# NOTE: MC_CMD is deliberately a plain string that is expanded UNQUOTED at the
# call sites below, so the shell word-splits it back into a command line.
if ! hash mc; then
echo "Using 'mc' from a container"
sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_MINIO_CLIENT}
MC_CMD="sudo ${CONTAINER_RUNTIME} run --rm \
-v ${MINIO_CONFIG_DIR}:${MINIO_CONFIG_DIR}:Z \
--network=host \
${CONTAINER_MINIO_CLIENT} --config-dir=${MINIO_CONFIG_DIR}"
else
echo "Using pre-installed 'mc' from the system"
MC_CMD="mc --config-dir=${MINIO_CONFIG_DIR}"
fi
mkdir "${MINIO_CONFIG_DIR}"
# Smoke-test the client invocation before using it for real work.
$MC_CMD --version
# MinIO server connection details used by both the server container and the
# composer upload configuration.
MINIO_CONTAINER_NAME="minio-server"
MINIO_ENDPOINT="http://localhost:9000"
MINIO_ROOT_USER="X29DU5Q6C5NKDQ8PLGVT"
# Generate a random 32-character alphanumeric password. The previous version
# derived it from the current timestamp (`date +%s | sha256sum`), which is
# predictable by anyone who can guess when the test ran. Read from
# /dev/urandom instead. A fixed 1000-byte read (rather than streaming until a
# trailing `head -c 32` closes the pipe) avoids a SIGPIPE failing the
# pipeline under `set -o pipefail`; 1000 random bytes yield ~240 alphanumeric
# characters, far more than the 32 we keep.
MINIO_ROOT_PASSWORD=$(head -c 1000 /dev/urandom | tr -dc 'A-Za-z0-9')
MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD:0:32}
MINIO_SERVER_ALIAS=local
MINIO_BUCKET="ci-test"
MINIO_REGION="us-east-1"
# Write an AWS TOML file
# This is the "generic.s3" upload-provider configuration consumed by
# s3_test.sh. The heredoc delimiter is unquoted, so all ${...} values are
# expanded now and the file contains the concrete credentials and bucket/key.
tee "$MINIO_PROVIDER_CONFIG" > /dev/null << EOF
provider = "generic.s3"
[settings]
endpoint = "${MINIO_ENDPOINT}"
accessKeyID = "${MINIO_ROOT_USER}"
secretAccessKey = "${MINIO_ROOT_PASSWORD}"
bucket = "${MINIO_BUCKET}"
region = "${MINIO_REGION}"
key = "${TEST_ID}"
EOF
# Start the MinIO Server in the background (detached, auto-removed on stop).
${CONTAINER_RUNTIME} run --rm -d \
    --name ${MINIO_CONTAINER_NAME} \
    -p 9000:9000 \
    -e MINIO_BROWSER=off \
    -e MINIO_ROOT_USER="${MINIO_ROOT_USER}" \
    -e MINIO_ROOT_PASSWORD="${MINIO_ROOT_PASSWORD}" \
    ${CONTAINER_MINIO_SERVER} server /data
# Kill the server once we're done.
# BUG FIX: `trap … EXIT` REPLACES any previous EXIT handler, so the original
# line silently dropped the earlier `trap cleanup EXIT` and leaked $TEMPDIR.
# Chain both actions in a single handler instead.
trap '${CONTAINER_RUNTIME} kill ${MINIO_CONTAINER_NAME}; cleanup' EXIT
# Configure the local server alias, retrying until the service is up.
MINIO_CONFIGURE_RETRY=0
MINIO_CONFIGURE_MAX_RETRY=5
MINIO_RETRY_INTERVAL=15
until [ "${MINIO_CONFIGURE_RETRY}" -ge "${MINIO_CONFIGURE_MAX_RETRY}" ]
do
    # MC_CMD is intentionally unquoted: it is a string holding a command line.
    ${MC_CMD} alias set ${MINIO_SERVER_ALIAS} ${MINIO_ENDPOINT} ${MINIO_ROOT_USER} "${MINIO_ROOT_PASSWORD}" && break
    # BUG FIX: this used to be $( ) command substitution, which tried to run
    # "0 + 1" as a command; the failing substitution aborted the whole script
    # under `set -e` on the first failed attempt, so no retry ever happened.
    # $(( )) is shell arithmetic.
    MINIO_CONFIGURE_RETRY=$((MINIO_CONFIGURE_RETRY + 1))
    echo "Retrying [${MINIO_CONFIGURE_RETRY}/${MINIO_CONFIGURE_MAX_RETRY}] in ${MINIO_RETRY_INTERVAL}(s) "
    sleep "${MINIO_RETRY_INTERVAL}"
done
if [ "${MINIO_CONFIGURE_RETRY}" -ge "${MINIO_CONFIGURE_MAX_RETRY}" ]; then
    echo "Failed to set MinIO alias after ${MINIO_CONFIGURE_MAX_RETRY} attempts!" >&2
    exit 1
fi
# Create the bucket
${MC_CMD} mb ${MINIO_SERVER_ALIAS}/${MINIO_BUCKET}
# Object key the built image is expected to land on.
IMAGE_OBJECT_KEY="${MINIO_SERVER_ALIAS}/${MINIO_BUCKET}/${TEST_ID}-disk.qcow2"
# Hand off to the shared S3 test driver. Arguments 3 and 4 are command
# strings the driver evaluates: one to check the object exists, one to print
# a plain (unquoted) pre-signed download URL for it.
/usr/libexec/osbuild-composer-test/s3_test.sh "${TEST_ID}" "${MINIO_PROVIDER_CONFIG}" "${MC_CMD} ls ${IMAGE_OBJECT_KEY}" "${MC_CMD} --json share download ${IMAGE_OBJECT_KEY} | jq .share | tr -d '\"'"