From 010a1f5022b7b65878cf2bafd949ae68e7e612ad Mon Sep 17 00:00:00 2001
From: Thomas Lavocat
Date: Thu, 23 Sep 2021 16:40:41 +0200
Subject: [PATCH] worker: Configure AWS credentials in the worker

---
 cmd/osbuild-worker/jobimpl-osbuild.go  |  16 +-
 cmd/osbuild-worker/main.go             |  12 +
 internal/upload/awsupload/awsupload.go |  24 +-
 test/cases/api_v2.sh                   | 735 +++++++++++++++++++++++++
 4 files changed, 781 insertions(+), 6 deletions(-)
 create mode 100755 test/cases/api_v2.sh

diff --git a/cmd/osbuild-worker/jobimpl-osbuild.go b/cmd/osbuild-worker/jobimpl-osbuild.go
index a53dcef63..d5d25cd4c 100644
--- a/cmd/osbuild-worker/jobimpl-osbuild.go
+++ b/cmd/osbuild-worker/jobimpl-osbuild.go
@@ -28,6 +28,7 @@ type OSBuildJobImpl struct {
 	KojiServers map[string]koji.GSSAPICredentials
 	GCPCreds    []byte
 	AzureCreds  *azure.Credentials
+	AWSCreds    string
 }
 
 func appendTargetError(res *worker.OSBuildJobResult, err error) {
@@ -36,6 +37,17 @@ func appendTargetError(res *worker.OSBuildJobResult, err error) {
 	res.TargetErrors = append(res.TargetErrors, errStr)
 }
 
+// getAWS returns an *awsupload.AWS object initialized with the credentials
+// from the request. If the request carries no credentials, it falls back to
+// the credentials file configured for the worker.
+func (impl *OSBuildJobImpl) getAWS(region string, accessId string, secret string, token string) (*awsupload.AWS, error) {
+	if accessId != "" && secret != "" {
+		return awsupload.New(region, accessId, secret, token)
+	} else {
+		return awsupload.NewFromFile(impl.AWSCreds, region)
+	}
+}
+
 func (impl *OSBuildJobImpl) Run(job worker.Job) error {
 	// Initialize variable needed for reporting back to osbuild-composer.
 	var osbuildJobResult *worker.OSBuildJobResult = &worker.OSBuildJobResult{
@@ -206,7 +218,7 @@ func (impl *OSBuildJobImpl) Run(job worker.Job) error {
 		osbuildJobResult.Success = true
 		osbuildJobResult.UploadStatus = "success"
 	case *target.AWSTargetOptions:
-		a, err := awsupload.New(options.Region, options.AccessKeyID, options.SecretAccessKey, options.SessionToken)
+		a, err := impl.getAWS(options.Region, options.AccessKeyID, options.SecretAccessKey, options.SessionToken)
 		if err != nil {
 			appendTargetError(osbuildJobResult, err)
 			return nil
@@ -242,7 +254,7 @@ func (impl *OSBuildJobImpl) Run(job worker.Job) error {
 		osbuildJobResult.Success = true
 		osbuildJobResult.UploadStatus = "success"
 	case *target.AWSS3TargetOptions:
-		a, err := awsupload.New(options.Region, options.AccessKeyID, options.SecretAccessKey, options.SessionToken)
+		a, err := impl.getAWS(options.Region, options.AccessKeyID, options.SecretAccessKey, options.SessionToken)
 		if err != nil {
 			appendTargetError(osbuildJobResult, err)
 			return nil
diff --git a/cmd/osbuild-worker/main.go b/cmd/osbuild-worker/main.go
index b52eaa249..b8aaaff79 100644
--- a/cmd/osbuild-worker/main.go
+++ b/cmd/osbuild-worker/main.go
@@ -97,6 +97,9 @@ func main() {
 		Azure *struct {
 			Credentials string `toml:"credentials"`
 		} `toml:"azure"`
+		AWS *struct {
+			Credentials string `toml:"credentials"`
+		} `toml:"aws"`
 		Authentication *struct {
 			OAuthURL         string `toml:"oauth_url"`
 			OfflineTokenPath string `toml:"offline_token"`
@@ -232,6 +235,14 @@ func main() {
 		}
 	}
 
+	// If the credentials are not provided in the configuration, the worker
+	// will look in $HOME/.aws/credentials or in the file pointed to by the
+	// "AWS_SHARED_CREDENTIALS_FILE" environment variable.
+	var awsCredentials = ""
+	if config.AWS != nil {
+		awsCredentials = config.AWS.Credentials
+	}
+
 	jobImpls := map[string]JobImplementation{
 		"osbuild": &OSBuildJobImpl{
 			Store:       store,
@@ -239,6 +250,7 @@ func main() {
 			KojiServers: kojiServers,
 			GCPCreds:    gcpCredentials,
 			AzureCreds:  azureCredentials,
+			AWSCreds:    awsCredentials,
 		},
 		"osbuild-koji": &OSBuildKojiJobImpl{
 			Store:       store,
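A minimal sketch of how an operator would wire up the new `[aws]` section,
assuming the worker's usual /etc/osbuild-worker/osbuild-worker.toml config
path; the credentials file location used here is only an example:

    # Point the worker at a shared-credentials file via the new [aws] section.
    sudo tee /etc/osbuild-worker/osbuild-worker.toml <<'EOF'
    [aws]
    credentials = "/etc/osbuild-worker/aws-credentials"
    EOF

    # The file itself uses the AWS shared-credentials INI format, read with
    # the "default" profile (see NewFromFile in the next diff). Values are
    # placeholders.
    sudo tee /etc/osbuild-worker/aws-credentials <<'EOF'
    [default]
    aws_access_key_id = AKIAEXAMPLEKEYID
    aws_secret_access_key = exampleSecretKey
    EOF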
diff --git a/internal/upload/awsupload/awsupload.go b/internal/upload/awsupload/awsupload.go
index abdc06bad..80739b919 100644
--- a/internal/upload/awsupload/awsupload.go
+++ b/internal/upload/awsupload/awsupload.go
@@ -21,10 +20,8 @@ type AWS struct {
 	s3  *s3.S3
 }
 
-func New(region, accessKeyID, accessKey, sessionToken string) (*AWS, error) {
-	// Session credentials
-	creds := credentials.NewStaticCredentials(accessKeyID, accessKey, sessionToken)
-
+// newAwsFromCreds creates a new session from the given credentials and region and returns an *AWS object initialized with it.
+func newAwsFromCreds(creds *credentials.Credentials, region string) (*AWS, error) {
 	// Create a Session with a custom region
 	sess, err := session.NewSession(&aws.Config{
 		Credentials: creds,
@@ -41,6 +39,24 @@ func New(region, accessKeyID, accessKey, sessionToken string) (*AWS, error) {
 	}, nil
 }
 
+// New initializes a new AWS object from individual credentials; the session token is optional.
+func New(region string, accessKeyID string, accessKey string, sessionToken string) (*AWS, error) {
+	return newAwsFromCreds(credentials.NewStaticCredentials(accessKeyID, accessKey, sessionToken), region)
+}
+
+// NewFromFile initializes a new AWS object with the credentials found in the
+// file at filename. The credentials file must use the AWS format, such as:
+// [default]
+// aws_access_key_id = secretString1
+// aws_secret_access_key = secretString2
+//
+// If filename is empty, the underlying function will look for the
+// "AWS_SHARED_CREDENTIALS_FILE" environment variable or will default to
+// $HOME/.aws/credentials.
+func NewFromFile(filename string, region string) (*AWS, error) {
+	return newAwsFromCreds(credentials.NewSharedCredentials(filename, "default"), region)
+}
+
 func (a *AWS) Upload(filename, bucket, key string) (*s3manager.UploadOutput, error) {
 	file, err := os.Open(filename)
 	if err != nil {
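Because NewFromFile hands the filename straight to the AWS SDK's shared
credentials provider, the empty-filename fallback resolves credentials the
same way the aws CLI does, which makes it easy to sanity-check. A sketch,
assuming the aws CLI is installed and the example path from above:

    # NewFromFile("", region) in the worker reads the same locations as this:
    AWS_SHARED_CREDENTIALS_FILE=/etc/osbuild-worker/aws-credentials \
        aws sts get-caller-identity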
diff --git a/test/cases/api_v2.sh b/test/cases/api_v2.sh
new file mode 100755
index 000000000..c8010a706
--- /dev/null
+++ b/test/cases/api_v2.sh
@@ -0,0 +1,735 @@
#!/usr/bin/bash

#
# Test osbuild-composer's main API endpoint by building a sample image and
# uploading it to the appropriate cloud provider. The test currently supports
# only AWS.
#
# This script sets `-x` and is meant to always be run like that. This is
# simpler than adding extensive error reporting, which would make this script
# considerably more complex. Also, the full trace this produces is very useful
# for the primary audience: developers of osbuild-composer looking at the log
# from a run on a remote continuous integration system.
#

set -euxo pipefail

ARTIFACTS=ci-artifacts
mkdir -p "${ARTIFACTS}"

source /etc/os-release
DISTRO_CODE="${DISTRO_CODE:-${ID}_${VERSION_ID//./}}"

# Container image used for cloud provider CLI tools
CONTAINER_IMAGE_CLOUD_TOOLS="quay.io/osbuild/cloud-tools:latest"

#
# Provision the software under test.
#

/usr/libexec/osbuild-composer-test/provision.sh

#
# Set up the database queue
#
if which podman 2>/dev/null >&2; then
    CONTAINER_RUNTIME=podman
elif which docker 2>/dev/null >&2; then
    CONTAINER_RUNTIME=docker
else
    echo "No container runtime found, install podman or docker."
    exit 2
fi

# Start the db
sudo ${CONTAINER_RUNTIME} run -d --name osbuild-composer-db \
    --health-cmd "pg_isready -U postgres -d osbuildcomposer" --health-interval 2s \
    --health-timeout 2s --health-retries 10 \
    -e POSTGRES_USER=postgres \
    -e POSTGRES_PASSWORD=foobar \
    -e POSTGRES_DB=osbuildcomposer \
    -p 5432:5432 \
    quay.io/osbuild/postgres:13-alpine

# Dump the logs once to have a little more output
sudo ${CONTAINER_RUNTIME} logs osbuild-composer-db

# Initialize a module in a temp dir so we can get tern without introducing
# vendoring inconsistency
pushd "$(mktemp -d)"
sudo dnf install -y go
go mod init temp
go get github.com/jackc/tern
PGUSER=postgres PGPASSWORD=foobar PGDATABASE=osbuildcomposer PGHOST=localhost PGPORT=5432 \
    go run github.com/jackc/tern migrate -m /usr/share/tests/osbuild-composer/schemas
popd
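# Optional sanity check, purely illustrative: list the tables the migration
# just created (the "jobs" table queried later in this script should be among
# them). The test does not depend on this step.
sudo ${CONTAINER_RUNTIME} exec osbuild-composer-db \
    psql -U postgres -d osbuildcomposer -c '\dt'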
function configure_composer() {
    # Point osbuild-composer at the postgres job queue started above.
    cat <<EOF | sudo tee "/etc/osbuild-composer/osbuild-composer.toml"
[koji]
allowed_domains = [ "localhost", "client.osbuild.org" ]
ca = "/etc/osbuild-composer/ca-crt.pem"

[worker]
allowed_domains = [ "localhost", "worker.osbuild.org" ]
ca = "/etc/osbuild-composer/ca-crt.pem"
pg_host = "localhost"
pg_port = "5432"
pg_database = "osbuildcomposer"
pg_user = "postgres"
pg_password = "foobar"
pg_ssl_mode = "disable"
EOF

    sudo systemctl restart osbuild-composer.service
}

configure_composer

# Select the cloud provider to test against; AWS is the default.
CLOUD_PROVIDER_AWS="aws"
CLOUD_PROVIDER_AWS_S3="aws.s3"
CLOUD_PROVIDER_GCP="gcp"
CLOUD_PROVIDER_AZURE="azure"
CLOUD_PROVIDER="${CLOUD_PROVIDER:-$CLOUD_PROVIDER_AWS}"

# Check that needed variables are set to access AWS
function checkEnvAWS() {
    printenv AWS_REGION AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY \
        AWS_API_TEST_SHARE_ACCOUNT AWS_SNAPSHOT_NAME > /dev/null
}

# Check that needed variables are set to register to RHSM (RHEL only)
function checkEnvSubscription() {
    printenv API_TEST_SUBSCRIPTION_ORG_ID API_TEST_SUBSCRIPTION_ACTIVATION_KEY > /dev/null
}

case $CLOUD_PROVIDER in
    "$CLOUD_PROVIDER_AWS" | "$CLOUD_PROVIDER_AWS_S3")
        checkEnvAWS
        ;;
esac
[[ "$ID" == "rhel" ]] && checkEnvSubscription

#
# Create a temporary directory and ensure it gets deleted when this script
# terminates in any way.
#

function cleanupAWS() {
    # since this function can be called at any time, ensure that we don't expand unbound variables
    AWS_CMD="${AWS_CMD:-}"
    AWS_INSTANCE_ID="${AWS_INSTANCE_ID:-}"
    AMI_IMAGE_ID="${AMI_IMAGE_ID:-}"
    AWS_SNAPSHOT_ID="${AWS_SNAPSHOT_ID:-}"

    if [ -n "$AWS_CMD" ]; then
        set +e
        $AWS_CMD ec2 terminate-instances --instance-ids "$AWS_INSTANCE_ID"
        $AWS_CMD ec2 deregister-image --image-id "$AMI_IMAGE_ID"
        $AWS_CMD ec2 delete-snapshot --snapshot-id "$AWS_SNAPSHOT_ID"
        $AWS_CMD ec2 delete-key-pair --key-name "key-for-$AMI_IMAGE_ID"
        set -e
    fi
}

function cleanupAWSS3() {
    echo "mock cleanup"
}

function cleanupGCP() {
    echo "mock cleanup"
}

function cleanupAzure() {
    echo "mock cleanup"
}

WORKDIR=$(mktemp -d)
KILL_PIDS=()
function cleanup() {
    case $CLOUD_PROVIDER in
        "$CLOUD_PROVIDER_AWS")
            cleanupAWS
            ;;
        "$CLOUD_PROVIDER_AWS_S3")
            cleanupAWSS3
            ;;
        "$CLOUD_PROVIDER_GCP")
            cleanupGCP
            ;;
        "$CLOUD_PROVIDER_AZURE")
            cleanupAzure
            ;;
    esac

    sudo rm -rf "$WORKDIR"

    for P in "${KILL_PIDS[@]}"; do
        sudo pkill -P "$P"
    done
}
trap cleanup EXIT

#
# Install the necessary cloud provider client tools
#

function installClientAWS() {
    if ! hash aws; then
        echo "Using 'awscli' from a container"
        sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_IMAGE_CLOUD_TOOLS}

        AWS_CMD="sudo ${CONTAINER_RUNTIME} run --rm \
            -e AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
            -e AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
            -v ${WORKDIR}:${WORKDIR}:Z \
            ${CONTAINER_IMAGE_CLOUD_TOOLS} aws --region $AWS_REGION --output json --color on"
    else
        echo "Using pre-installed 'aws' from the system"
        AWS_CMD="aws --region $AWS_REGION --output json --color on"
    fi
    $AWS_CMD --version
}

case $CLOUD_PROVIDER in
    "$CLOUD_PROVIDER_AWS")
        installClientAWS
        ;;
esac

#
# Make sure the /openapi endpoint returns success
#

curl \
    --silent \
    --show-error \
    --cacert /etc/osbuild-composer/ca-crt.pem \
    --key /etc/osbuild-composer/client-key.pem \
    --cert /etc/osbuild-composer/client-crt.pem \
    https://localhost/api/image-builder-composer/v2/openapi | jq .

#
# Prepare a request to be sent to the composer API.
#

REQUEST_FILE="${WORKDIR}/request.json"
ARCH=$(uname -m)
SSH_USER=

# Generate a temporary SSH key pair for the user customization checks below.
rm -f /tmp/usertest /tmp/usertest.pub
ssh-keygen -t rsa -f /tmp/usertest -C "usertest" -N ""

case $(set +x; . /etc/os-release; echo "$ID-$VERSION_ID") in
    "rhel-9.0")
        DISTRO="rhel-90"
        if [[ "$CLOUD_PROVIDER" == "$CLOUD_PROVIDER_AWS" ]]; then
            SSH_USER="ec2-user"
        else
            SSH_USER="cloud-user"
        fi
        ;;
    "rhel-8.5")
        DISTRO="rhel-85"
        if [[ "$CLOUD_PROVIDER" == "$CLOUD_PROVIDER_AWS" ]]; then
            SSH_USER="ec2-user"
        else
            SSH_USER="cloud-user"
        fi
        ;;
    "rhel-8.4")
        DISTRO="rhel-84"
        SSH_USER="cloud-user"
        ;;
    "rhel-8.2" | "rhel-8.3")
        DISTRO="rhel-8"
        SSH_USER="cloud-user"
        ;;
    "fedora-33")
        DISTRO="fedora-33"
        SSH_USER="fedora"
        ;;
    "centos-8")
        DISTRO="centos-8"
        SSH_USER="cloud-user"
        ;;
esac

# Only RHEL needs the subscription block.
if [[ "$ID" == "rhel" ]]; then
    SUBSCRIPTION_BLOCK=$(cat <<EndOfMessage
,
    "subscription": {
      "organization": "${API_TEST_SUBSCRIPTION_ORG_ID}",
      "activation_key": "${API_TEST_SUBSCRIPTION_ACTIVATION_KEY}",
      "base_url": "https://cdn.redhat.com/",
      "server_url": "subscription.rhsm.redhat.com",
      "insights": true
    }
EndOfMessage
)
else
    SUBSCRIPTION_BLOCK=''
fi

function createReqFileAWS() {
    cat > "$REQUEST_FILE" << EOF
{
  "distribution": "$DISTRO",
  "customizations": {
    "packages": [
      "postgresql"
    ]${SUBSCRIPTION_BLOCK},
    "users":[
      {
        "name": "user1",
        "groups": ["wheel"],
        "key": "$(cat /tmp/usertest.pub)"
      },
      {
        "name": "user2",
        "key": "$(cat /tmp/usertest.pub)"
      }
    ]
  },
  "image_requests": [
    {
      "architecture": "$ARCH",
      "image_type": "aws",
      "repositories": $(jq ".\"$ARCH\"" /usr/share/tests/osbuild-composer/repositories/"$DISTRO".json),
      "upload_options": {
        "region": "${AWS_REGION}",
        "share_with_accounts": ["${AWS_API_TEST_SHARE_ACCOUNT}"],
        "snapshot_name": "${AWS_SNAPSHOT_NAME}"
      }
    }
  ]
}
EOF
}

case $CLOUD_PROVIDER in
    "$CLOUD_PROVIDER_AWS")
        createReqFileAWS
        ;;
esac
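# The request file is assembled from several shell expansions (the
# subscription block, the SSH key, the repository snippet), so validate that
# the splice produced well-formed JSON before sending it. jq exits non-zero
# on a parse error, and set -e stops the test here if something went wrong.
jq . "$REQUEST_FILE"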
#
# Send the request and wait for the job to finish.
#
# Separate `curl` and `jq` commands here, because piping them together hides
# the server's response in case of an error.
#

function collectMetrics(){
    METRICS_OUTPUT=$(curl \
        --cacert /etc/osbuild-composer/ca-crt.pem \
        --key /etc/osbuild-composer/client-key.pem \
        --cert /etc/osbuild-composer/client-crt.pem \
        https://localhost/metrics)

    echo "$METRICS_OUTPUT" | grep "^total_compose_requests" | cut -f2 -d' '
}

function sendCompose() {
    if [[ "$ID" == "rhel" ]]; then
        echo "rhel --------------------------------- "
        cat "$REQUEST_FILE"
        echo "rhel --------------------------------- "
    fi
    OUTPUT=$(curl \
        --silent \
        --show-error \
        --cacert /etc/osbuild-composer/ca-crt.pem \
        --key /etc/osbuild-composer/client-key.pem \
        --cert /etc/osbuild-composer/client-crt.pem \
        --header 'Content-Type: application/json' \
        --request POST \
        --data @"$REQUEST_FILE" \
        https://localhost/api/image-builder-composer/v2/compose)

    COMPOSE_ID=$(echo "$OUTPUT" | jq -r '.id')
}

function waitForState() {
    local DESIRED_STATE="${1:-success}"
    while true
    do
        OUTPUT=$(curl \
            --silent \
            --show-error \
            --cacert /etc/osbuild-composer/ca-crt.pem \
            --key /etc/osbuild-composer/client-key.pem \
            --cert /etc/osbuild-composer/client-crt.pem \
            https://localhost/api/image-builder-composer/v2/compose/"$COMPOSE_ID")

        COMPOSE_STATUS=$(echo "$OUTPUT" | jq -r '.image_status.status')
        UPLOAD_STATUS=$(echo "$OUTPUT" | jq -r '.image_status.upload_status.status')
        UPLOAD_TYPE=$(echo "$OUTPUT" | jq -r '.image_status.upload_status.type')
        UPLOAD_OPTIONS=$(echo "$OUTPUT" | jq -r '.image_status.upload_status.options')

        case "$COMPOSE_STATUS" in
            "$DESIRED_STATE")
                break
                ;;
            # all valid status values for a compose which hasn't finished yet
            "pending"|"building"|"uploading"|"registering")
                ;;
            # default undesired state
            "failure")
                echo "Image compose failed"
                exit 1
                ;;
            *)
                echo "API returned unexpected image_status.status value: '$COMPOSE_STATUS'"
                exit 1
                ;;
        esac

        sleep 30
    done
}

sendCompose

# A crashed/stopped/killed worker should result in a failed state
waitForState "building"
sudo systemctl stop "osbuild-worker@*"
waitForState "failure"
sudo systemctl start "osbuild-worker@1"

# Full integration case
INIT_COMPOSES="$(collectMetrics)"
sendCompose
waitForState
SUBS_COMPOSES="$(collectMetrics)"

test "$UPLOAD_STATUS" = "success"
test "$UPLOAD_TYPE" = "$CLOUD_PROVIDER"
test $((INIT_COMPOSES+1)) = "$SUBS_COMPOSES"


# Make sure we get 1 job entry in the db per compose
sudo ${CONTAINER_RUNTIME} exec osbuild-composer-db psql -U postgres -d osbuildcomposer -c "SELECT COUNT(*) FROM jobs;"

#
# Save the manifest from the osbuild-composer store.
# NOTE: The rest of the job data can contain sensitive information.
#
# Suppressing shellcheck. See https://github.com/koalaman/shellcheck/wiki/SC2024#exceptions
sudo ${CONTAINER_RUNTIME} exec osbuild-composer-db psql -U postgres -d osbuildcomposer -c "SELECT args->>'Manifest' FROM jobs" | sudo tee "${ARTIFACTS}/manifest.json"

#
# Verify the cloud-provider specific upload_status options
#

function checkUploadStatusOptionsAWS() {
    local AMI
    AMI=$(echo "$UPLOAD_OPTIONS" | jq -r '.ami')
    local REGION
    REGION=$(echo "$UPLOAD_OPTIONS" | jq -r '.region')

    # An AMI ID consists of the "ami-" resource prefix followed by a
    # 17-character identifier
    echo "$AMI" | grep -e 'ami-[[:alnum:]]\{17\}' -
    test "$REGION" = "$AWS_REGION"
}

case $CLOUD_PROVIDER in
    "$CLOUD_PROVIDER_AWS")
        checkUploadStatusOptionsAWS
        ;;
esac
#
# Verify the image landed in the appropriate cloud provider, and delete it.
#

# Reusable function which waits for a given host to respond to SSH
function _instanceWaitSSH() {
    local HOST="$1"

    for LOOP_COUNTER in {0..30}; do
        if ssh-keyscan "$HOST" > /dev/null 2>&1; then
            echo "SSH is up!"
            # ssh-keyscan "$PUBLIC_IP" | sudo tee -a /root/.ssh/known_hosts
            break
        fi
        echo "Retrying in 5 seconds... $LOOP_COUNTER"
        sleep 5
    done
}

function _instanceCheck() {
    echo "✔️ Instance checking"
    local _ssh="$1"

    # Check if postgres is installed
    $_ssh rpm -q postgresql

    # Verify the subscription status. Loop the check, since the system may
    # not be registered this early. (RHEL only)
    if [[ "$ID" == "rhel" ]]; then
        set +eu
        for LOOP_COUNTER in {1..10}; do
            subscribe_org_id=$($_ssh sudo subscription-manager identity | grep 'org ID')
            if [[ "$subscribe_org_id" == "org ID: $API_TEST_SUBSCRIPTION_ORG_ID" ]]; then
                echo "System is subscribed."
                break
            else
                echo "System is not subscribed. Retrying in 30 seconds...($LOOP_COUNTER/10)"
                sleep 30
            fi
        done
        set -eu
        [[ "$subscribe_org_id" == "org ID: $API_TEST_SUBSCRIPTION_ORG_ID" ]]

        # Unregister the subscription
        $_ssh sudo subscription-manager unregister
    else
        echo "Not RHEL OS. Skipping subscription check."
    fi
}

# Verify image in EC2 on AWS
function verifyInAWS() {
    $AWS_CMD ec2 describe-images \
        --owners self \
        --filters Name=name,Values="$AWS_SNAPSHOT_NAME" \
        > "$WORKDIR/ami.json"

    AMI_IMAGE_ID=$(jq -r '.Images[].ImageId' "$WORKDIR/ami.json")
    AWS_SNAPSHOT_ID=$(jq -r '.Images[].BlockDeviceMappings[].Ebs.SnapshotId' "$WORKDIR/ami.json")
    SHARE_OK=1

    # Verify that the ec2 snapshot was shared
    $AWS_CMD ec2 describe-snapshot-attribute --snapshot-id "$AWS_SNAPSHOT_ID" --attribute createVolumePermission > "$WORKDIR/snapshot-attributes.json"

    SHARED_ID=$(jq -r '.CreateVolumePermissions[0].UserId' "$WORKDIR/snapshot-attributes.json")
    if [ "$AWS_API_TEST_SHARE_ACCOUNT" != "$SHARED_ID" ]; then
        SHARE_OK=0
    fi

    # Verify that the ec2 ami was shared
    $AWS_CMD ec2 describe-image-attribute --image-id "$AMI_IMAGE_ID" --attribute launchPermission > "$WORKDIR/ami-attributes.json"

    SHARED_ID=$(jq -r '.LaunchPermissions[0].UserId' "$WORKDIR/ami-attributes.json")
    if [ "$AWS_API_TEST_SHARE_ACCOUNT" != "$SHARED_ID" ]; then
        SHARE_OK=0
    fi

    if [ "$SHARE_OK" != 1 ]; then
        echo "EC2 snapshot wasn't shared with the AWS_API_TEST_SHARE_ACCOUNT. 😢"
        exit 1
    fi

    # Create key-pair
    $AWS_CMD ec2 create-key-pair --key-name "key-for-$AMI_IMAGE_ID" --query 'KeyMaterial' --output text > keypair.pem
    chmod 400 ./keypair.pem

    # Create an instance based on the ami
    $AWS_CMD ec2 run-instances --image-id "$AMI_IMAGE_ID" --count 1 --instance-type t2.micro --key-name "key-for-$AMI_IMAGE_ID" > "$WORKDIR/instances.json"
    AWS_INSTANCE_ID=$(jq -r '.Instances[].InstanceId' "$WORKDIR/instances.json")

    $AWS_CMD ec2 wait instance-running --instance-ids "$AWS_INSTANCE_ID"

    $AWS_CMD ec2 describe-instances --instance-ids "$AWS_INSTANCE_ID" > "$WORKDIR/instances.json"
    HOST=$(jq -r '.Reservations[].Instances[].PublicIpAddress' "$WORKDIR/instances.json")

    echo "⏱ Waiting for AWS instance to respond to ssh"
    _instanceWaitSSH "$HOST"

    # Verify image
    _ssh="ssh -oStrictHostKeyChecking=no -i ./keypair.pem $SSH_USER@$HOST"
    _instanceCheck "$_ssh"

    # Check access for user1 and user2
    check_groups=$(ssh -oStrictHostKeyChecking=no -i /tmp/usertest "user1@$HOST" -t 'groups')
    if [[ $check_groups =~ "wheel" ]]; then
        echo "✔️ user1 has the group wheel"
    else
        echo 'user1 should have the group wheel 😢'
        exit 1
    fi
    check_groups=$(ssh -oStrictHostKeyChecking=no -i /tmp/usertest "user2@$HOST" -t 'groups')
    if [[ $check_groups =~ "wheel" ]]; then
        echo 'user2 should not have the group wheel 😢'
        exit 1
    else
        echo "✔️ user2 does not have the group wheel"
    fi
}


case $CLOUD_PROVIDER in
    "$CLOUD_PROVIDER_AWS")
        verifyInAWS
        ;;
esac
# Verify that the selected package (postgresql) is included in the package list
function verifyPackageList() {
    # Save build metadata to the artifacts directory for troubleshooting
    curl --silent \
        --show-error \
        --cacert /etc/osbuild-composer/ca-crt.pem \
        --key /etc/osbuild-composer/client-key.pem \
        --cert /etc/osbuild-composer/client-crt.pem \
        https://localhost/api/image-builder-composer/v2/compose/"$COMPOSE_ID"/metadata --output "${ARTIFACTS}/metadata.json"
    local PACKAGENAMES
    PACKAGENAMES=$(jq -rM '.packages[].name' "${ARTIFACTS}/metadata.json")

    if ! grep -q postgresql <<< "${PACKAGENAMES}"; then
        echo "'postgresql' not found in compose package list 😠"
        exit 1
    fi
}

verifyPackageList

#
# Make sure that requesting a nonexistent package returns a 400 error
#
REQUEST_FILE2="${WORKDIR}/request2.json"
jq '.customizations.packages = [ "jesuisunpaquetquinexistepas" ]' "$REQUEST_FILE" > "$REQUEST_FILE2"

[ "$(curl \
    --silent \
    --cacert /etc/osbuild-composer/ca-crt.pem \
    --key /etc/osbuild-composer/client-key.pem \
    --cert /etc/osbuild-composer/client-crt.pem \
    --output /dev/null \
    --write-out '%{http_code}' \
    -H "Content-Type: application/json" \
    --data @"$REQUEST_FILE2" \
    https://localhost/api/image-builder-composer/v2/compose)" = "400" ]

#
# Make sure that a request that makes dnf-json crash returns a 500 error
#
sudo cp -f /usr/libexec/osbuild-composer/dnf-json /usr/libexec/osbuild-composer/dnf-json.bak
cat << EOF | sudo tee /usr/libexec/osbuild-composer/dnf-json
#!/usr/bin/python3
raise Exception()
EOF
[ "$(curl \
    --silent \
    --cacert /etc/osbuild-composer/ca-crt.pem \
    --key /etc/osbuild-composer/client-key.pem \
    --cert /etc/osbuild-composer/client-crt.pem \
    --output /dev/null \
    --write-out '%{http_code}' \
    -H "Content-Type: application/json" \
    --data @"$REQUEST_FILE2" \
    https://localhost/api/image-builder-composer/v2/compose)" = "500" ]

sudo mv -f /usr/libexec/osbuild-composer/dnf-json.bak /usr/libexec/osbuild-composer/dnf-json

#
# Verify oauth2
#
cat <