test/api: split into smaller files

Each cloud now has its own file that's sourced on demand by the main api.sh
script. The main goal of this commit is to reduce the amount of clutter in
api.sh: I personally find 1300 lines of bash overwhelming, and this is a
reasonable first step toward cleaning things up.
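
As a rough sketch of what this enables (the real api.sh diff is suppressed
below, so this is an assumption about its shape, not the actual code), the
main script can now source the per-cloud file on demand; CLOUD_PROVIDER here
is an assumed stand-in for however api.sh selects its target cloud:

#!/usr/bin/bash
# sketch only: dispatch to the per-cloud test file installed by the spec
API_TESTS_DIR=/usr/libexec/tests/osbuild-composer/api

case "${CLOUD_PROVIDER}" in
    aws|aws.s3|azure|gcp)
        # each per-cloud file provides checkEnv, installClient, createReqFile,
        # checkUploadStatusOptions, verify and cleanup (directly or via common/)
        source "${API_TESTS_DIR}/${CLOUD_PROVIDER}.sh"
        ;;
    *)
        echo "Unknown cloud provider: ${CLOUD_PROVIDER}" >&2
        exit 1
        ;;
esac

checkEnv
installClient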

Signed-off-by: Ondřej Budai <ondrej@budai.cz>
Ondřej Budai 2022-02-22 17:39:03 +01:00 committed by Achilleas Koutsou
parent 767283b2d9
commit cb7c0283a5
8 changed files with 783 additions and 1208 deletions

osbuild-composer.spec

@@ -214,6 +214,12 @@ install -m 0755 -vp tools/test-case-generators/generate-test-cases %{buildroot}%
install -m 0755 -vd %{buildroot}%{_libexecdir}/tests/osbuild-composer
install -m 0755 -vp test/cases/* %{buildroot}%{_libexecdir}/tests/osbuild-composer/
install -m 0755 -vd %{buildroot}%{_libexecdir}/tests/osbuild-composer/api
install -m 0755 -vp test/cases/api/* %{buildroot}%{_libexecdir}/tests/osbuild-composer/api/
install -m 0755 -vd %{buildroot}%{_libexecdir}/tests/osbuild-composer/api/common
install -m 0755 -vp test/cases/api/common/* %{buildroot}%{_libexecdir}/tests/osbuild-composer/api/common/
install -m 0755 -vd %{buildroot}%{_datadir}/tests/osbuild-composer/ansible
install -m 0644 -vp test/data/ansible/* %{buildroot}%{_datadir}/tests/osbuild-composer/ansible/

File diff suppressed because it is too large (test/cases/api.sh)

test/cases/api/aws.s3.sh Normal file

@@ -0,0 +1,188 @@
#!/usr/bin/bash
source /usr/libexec/tests/osbuild-composer/api/common/aws.sh
#
# Global var for ostree ref
#
OSTREE_REF="test/rhel/8/edge"
function cleanup() {
local S3_URL
S3_URL=$(echo "$UPLOAD_OPTIONS" | jq -r '.url')
# extract filename component from URL
local S3_FILENAME
S3_FILENAME=$(echo "${S3_URL}" | grep -oP '(?<=/)[^/]+(?=\?)')
# prepend bucket
local S3_URI
S3_URI="s3://${AWS_BUCKET}/${S3_FILENAME}"
# since this function can be called at any time, ensure that we don't expand unbound variables
AWS_CMD="${AWS_CMD:-}"
if [ -n "$AWS_CMD" ]; then
$AWS_CMD s3 rm "${S3_URI}"
fi
}
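# Write the compose request: install extra packages, create two users and
# upload the resulting image to S3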
function createReqFile() {
cat > "$REQUEST_FILE" << EOF
{
"distribution": "$DISTRO",
"customizations": {
"payload_repositories": [
{
"baseurl": "$PAYLOAD_REPO_URL"
}
],
"packages": [
"postgresql",
"dummy"
]${SUBSCRIPTION_BLOCK},
"users":[
{
"name": "user1",
"groups": ["wheel"],
"key": "$(cat "${WORKDIR}/usertest.pub")"
},
{
"name": "user2",
"key": "$(cat "${WORKDIR}/usertest.pub")"
}
]
},
"image_request": {
"architecture": "$ARCH",
"image_type": "${IMAGE_TYPE}",
"repositories": $(jq ".\"$ARCH\"" /usr/share/tests/osbuild-composer/repositories/"$DISTRO".json),
"ostree": {
"ref": "${OSTREE_REF}"
},
"upload_options": {
"region": "${AWS_REGION}"
}
}
}
EOF
}
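# Check that the upload options in the compose status contain the expected S3 bucket and region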
function checkUploadStatusOptions() {
local S3_URL
S3_URL=$(echo "$UPLOAD_OPTIONS" | jq -r '.url')
# S3 URL contains region and bucket name
echo "$S3_URL" | grep -F "$AWS_BUCKET" -
echo "$S3_URL" | grep -F "$AWS_REGION" -
}
# verify edge commit content
function verifyEdgeCommit() {
filename="$1"
greenprint "Verifying contents of ${filename}"
# extract tarball and save the file list to the artifacts directory
local COMMIT_DIR
COMMIT_DIR="${WORKDIR}/edge-commit"
mkdir -p "${COMMIT_DIR}"
tar xvf "${filename}" -C "${COMMIT_DIR}" > "${ARTIFACTS}/edge-commit-filelist.txt"
# Verify that the commit contains the ref we defined in the request
sudo dnf install -y ostree
local COMMIT_REF
COMMIT_REF=$(ostree refs --repo "${COMMIT_DIR}/repo")
if [[ "${COMMIT_REF}" != "${OSTREE_REF}" ]]; then
echo "Commit ref in archive does not match request 😠"
exit 1
fi
local TAR_COMMIT_ID
TAR_COMMIT_ID=$(ostree rev-parse --repo "${COMMIT_DIR}/repo" "${OSTREE_REF}")
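# Fetch the commit ID the v2 API recorded for this compose and compare it with the one in the tarball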
API_COMMIT_ID_V2=$(curl \
--silent \
--show-error \
--cacert /etc/osbuild-composer/ca-crt.pem \
--key /etc/osbuild-composer/client-key.pem \
--cert /etc/osbuild-composer/client-crt.pem \
https://localhost/api/image-builder-composer/v2/composes/"$COMPOSE_ID"/metadata | jq -r '.ostree_commit')
if [[ "${API_COMMIT_ID_V2}" != "${TAR_COMMIT_ID}" ]]; then
echo "Commit ID returned from API does not match Commit ID in archive 😠"
exit 1
fi
}
# Verify image blobs from s3
function verifyDisk() {
filename="$1"
greenprint "Verifying contents of ${filename}"
infofile="${filename}-info.json"
sudo /usr/libexec/osbuild-composer-test/image-info "${filename}" | tee "${infofile}" > /dev/null
# save image info to artifacts
cp -v "${infofile}" "${ARTIFACTS}/image-info.json"
# check compose request users in passwd
if ! jq .passwd "${infofile}" | grep -q "user1"; then
greenprint "❌ user1 not found in passwd file"
exit 1
fi
if ! jq .passwd "${infofile}" | grep -q "user2"; then
greenprint "❌ user2 not found in passwd file"
exit 1
fi
# check packages for postgresql
if ! jq .packages "${infofile}" | grep -q "postgresql"; then
greenprint "❌ postgresql not found in packages"
exit 1
fi
greenprint "${filename} image info verified"
}
# Verify s3 blobs
function verify() {
local S3_URL
S3_URL=$(echo "$UPLOAD_OPTIONS" | jq -r '.url')
greenprint "Verifying S3 object at ${S3_URL}"
# Tag the resource as a test file
local S3_FILENAME
S3_FILENAME=$(echo "${S3_URL}" | grep -oP '(?<=/)[^/]+(?=\?)')
# tag the object, also verifying that it exists in the bucket as expected
$AWS_CMD s3api put-object-tagging \
--bucket "${AWS_BUCKET}" \
--key "${S3_FILENAME}" \
--tagging '{"TagSet": [{ "Key": "gitlab-ci-test", "Value": "true" }]}'
greenprint "✅ Successfully tagged S3 object"
# Download the object using the Presigned URL and inspect
case ${IMAGE_TYPE} in
"$IMAGE_TYPE_EDGE_COMMIT")
curl "${S3_URL}" --output "${WORKDIR}/edge-commit.tar"
verifyEdgeCommit "${WORKDIR}/edge-commit.tar"
;;
"${IMAGE_TYPE_GUEST}")
curl "${S3_URL}" --output "${WORKDIR}/disk.qcow2"
verifyDisk "${WORKDIR}/disk.qcow2"
;;
"${IMAGE_TYPE_VSPHERE}")
curl "${S3_URL}" --output "${WORKDIR}/disk.vmdk"
verifyDisk "${WORKDIR}/disk.vmdk"
;;
*)
greenprint "No validation method for image type ${IMAGE_TYPE}"
;;
esac
greenprint "✅ Successfully verified S3 object"
}

test/cases/api/aws.sh Normal file

@@ -0,0 +1,156 @@
#!/usr/bin/bash
source /usr/libexec/tests/osbuild-composer/api/common/aws.sh
source /usr/libexec/tests/osbuild-composer/api/common/common.sh
function cleanup() {
# since this function can be called at any time, ensure that we don't expand unbound variables
AWS_CMD="${AWS_CMD:-}"
AWS_INSTANCE_ID="${AWS_INSTANCE_ID:-}"
AMI_IMAGE_ID="${AMI_IMAGE_ID:-}"
AWS_SNAPSHOT_ID="${AWS_SNAPSHOT_ID:-}"
if [ -n "$AWS_CMD" ]; then
$AWS_CMD ec2 terminate-instances --instance-ids "$AWS_INSTANCE_ID"
$AWS_CMD ec2 deregister-image --image-id "$AMI_IMAGE_ID"
$AWS_CMD ec2 delete-snapshot --snapshot-id "$AWS_SNAPSHOT_ID"
$AWS_CMD ec2 delete-key-pair --key-name "key-for-$AMI_IMAGE_ID"
fi
}
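# Write the compose request: resize /var, install extra packages, create two
# users and share the resulting AMI with another account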
function createReqFile() {
AWS_SNAPSHOT_NAME=${TEST_ID}
cat > "$REQUEST_FILE" << EOF
{
"distribution": "$DISTRO",
"customizations": {
"filesystem": [
{
"mountpoint": "/var",
"min_size": 262144000
}
],
"payload_repositories": [
{
"baseurl": "$PAYLOAD_REPO_URL"
}
],
"packages": [
"postgresql",
"dummy"
]${SUBSCRIPTION_BLOCK},
"users":[
{
"name": "user1",
"groups": ["wheel"],
"key": "$(cat "${WORKDIR}/usertest.pub")"
},
{
"name": "user2",
"key": "$(cat "${WORKDIR}/usertest.pub")"
}
]
},
"image_request": {
"architecture": "$ARCH",
"image_type": "${IMAGE_TYPE}",
"repositories": $(jq ".\"$ARCH\"" /usr/share/tests/osbuild-composer/repositories/"$DISTRO".json),
"upload_options": {
"region": "${AWS_REGION}",
"snapshot_name": "${AWS_SNAPSHOT_NAME}",
"share_with_accounts": ["${AWS_API_TEST_SHARE_ACCOUNT}"]
}
}
}
EOF
}
function checkUploadStatusOptions() {
local AMI
AMI=$(echo "$UPLOAD_OPTIONS" | jq -r '.ami')
local REGION
REGION=$(echo "$UPLOAD_OPTIONS" | jq -r '.region')
# An AWS ID consists of a resource identifier followed by a 17-character string
echo "$AMI" | grep -e 'ami-[[:alnum:]]\{17\}' -
test "$REGION" = "$AWS_REGION"
}
# Verify image in EC2 on AWS
function verify() {
$AWS_CMD ec2 describe-images \
--owners self \
--filters Name=name,Values="$AWS_SNAPSHOT_NAME" \
> "$WORKDIR/ami.json"
AMI_IMAGE_ID=$(jq -r '.Images[].ImageId' "$WORKDIR/ami.json")
AWS_SNAPSHOT_ID=$(jq -r '.Images[].BlockDeviceMappings[].Ebs.SnapshotId' "$WORKDIR/ami.json")
# Tag image and snapshot with "gitlab-ci-test" tag
$AWS_CMD ec2 create-tags \
--resources "${AWS_SNAPSHOT_ID}" "${AMI_IMAGE_ID}" \
--tags Key=gitlab-ci-test,Value=true
SHARE_OK=1
# Verify that the ec2 snapshot was shared
$AWS_CMD ec2 describe-snapshot-attribute --snapshot-id "$AWS_SNAPSHOT_ID" --attribute createVolumePermission > "$WORKDIR/snapshot-attributes.json"
SHARED_ID=$(jq -r '.CreateVolumePermissions[0].UserId' "$WORKDIR/snapshot-attributes.json")
if [ "$AWS_API_TEST_SHARE_ACCOUNT" != "$SHARED_ID" ]; then
SHARE_OK=0
fi
# Verify that the ec2 ami was shared
$AWS_CMD ec2 describe-image-attribute --image-id "$AMI_IMAGE_ID" --attribute launchPermission > "$WORKDIR/ami-attributes.json"
SHARED_ID=$(jq -r '.LaunchPermissions[0].UserId' "$WORKDIR/ami-attributes.json")
if [ "$AWS_API_TEST_SHARE_ACCOUNT" != "$SHARED_ID" ]; then
SHARE_OK=0
fi
if [ "$SHARE_OK" != 1 ]; then
echo "EC2 snapshot wasn't shared with the AWS_API_TEST_SHARE_ACCOUNT. 😢"
exit 1
fi
# Create key-pair
$AWS_CMD ec2 create-key-pair --key-name "key-for-$AMI_IMAGE_ID" --query 'KeyMaterial' --output text > keypair.pem
chmod 400 ./keypair.pem
# Create an instance based on the ami
$AWS_CMD ec2 run-instances --image-id "$AMI_IMAGE_ID" --count 1 --instance-type t2.micro --key-name "key-for-$AMI_IMAGE_ID" --tag-specifications 'ResourceType=instance,Tags=[{Key=gitlab-ci-test,Value=true}]' > "$WORKDIR/instances.json"
AWS_INSTANCE_ID=$(jq -r '.Instances[].InstanceId' "$WORKDIR/instances.json")
$AWS_CMD ec2 wait instance-running --instance-ids "$AWS_INSTANCE_ID"
$AWS_CMD ec2 describe-instances --instance-ids "$AWS_INSTANCE_ID" > "$WORKDIR/instances.json"
HOST=$(jq -r '.Reservations[].Instances[].PublicIpAddress' "$WORKDIR/instances.json")
echo "⏱ Waiting for AWS instance to respond to ssh"
_instanceWaitSSH "$HOST"
# Verify image
_ssh="ssh -oStrictHostKeyChecking=no -i ./keypair.pem $SSH_USER@$HOST"
_instanceCheck "$_ssh"
# Check access to user1 and user2
check_groups=$(ssh -oStrictHostKeyChecking=no -i "${WORKDIR}/usertest" "user1@$HOST" -t 'groups')
if [[ $check_groups =~ "wheel" ]]; then
echo "✔️ user1 has the group wheel"
else
echo 'user1 should have the group wheel 😢'
exit 1
fi
check_groups=$(ssh -oStrictHostKeyChecking=no -i "${WORKDIR}/usertest" "user2@$HOST" -t 'groups')
if [[ $check_groups =~ "wheel" ]]; then
echo 'user2 should not have the group wheel 😢'
exit 1
else
echo "✔️ user2 does not have the group wheel"
fi
}

test/cases/api/azure.sh Normal file

@@ -0,0 +1,176 @@
#!/usr/bin/bash
source /usr/libexec/tests/osbuild-composer/api/common/common.sh
# Check that needed variables are set to access Azure.
function checkEnv() {
printenv AZURE_TENANT_ID AZURE_SUBSCRIPTION_ID AZURE_RESOURCE_GROUP AZURE_LOCATION V2_AZURE_CLIENT_ID V2_AZURE_CLIENT_SECRET > /dev/null
}
function cleanup() {
# since this function can be called at any time, ensure that we don't expand unbound variables
AZURE_CMD="${AZURE_CMD:-}"
AZURE_IMAGE_NAME="${AZURE_IMAGE_NAME:-}"
AZURE_INSTANCE_NAME="${AZURE_INSTANCE_NAME:-}"
# do not run clean-up if the image name is not yet defined
if [[ -n "$AZURE_CMD" && -n "$AZURE_IMAGE_NAME" ]]; then
# Re-fetch the VM details in case the VM creation failed.
[ -f "$WORKDIR/vm_details.json" ] || $AZURE_CMD vm show --name "$AZURE_INSTANCE_NAME" --resource-group "$AZURE_RESOURCE_GROUP" --show-details > "$WORKDIR/vm_details.json"
# Get all the resource IDs
VM_ID=$(jq -r '.id' "$WORKDIR"/vm_details.json)
OSDISK_ID=$(jq -r '.storageProfile.osDisk.managedDisk.id' "$WORKDIR"/vm_details.json)
NIC_ID=$(jq -r '.networkProfile.networkInterfaces[0].id' "$WORKDIR"/vm_details.json)
$AZURE_CMD network nic show --ids "$NIC_ID" > "$WORKDIR"/nic_details.json
NSG_ID=$(jq -r '.networkSecurityGroup.id' "$WORKDIR"/nic_details.json)
PUBLICIP_ID=$(jq -r '.ipConfigurations[0].publicIpAddress.id' "$WORKDIR"/nic_details.json)
# Delete resources. Some resources must be removed in order:
# - Delete VM prior to any other resources
# - Delete NIC prior to NSG, public-ip
# Leave the Virtual Network and Storage Account in place because other tests in the same resource group reuse them
for id in "$VM_ID" "$OSDISK_ID" "$NIC_ID" "$NSG_ID" "$PUBLICIP_ID"; do
echo "Deleting $id..."
$AZURE_CMD resource delete --ids "$id"
done
# Delete the image after the VM has been deleted.
$AZURE_CMD image delete --resource-group "$AZURE_RESOURCE_GROUP" --name "$AZURE_IMAGE_NAME"
# find a storage account by its tag
AZURE_STORAGE_ACCOUNT=$($AZURE_CMD resource list --tag imageBuilderStorageAccount=location="$AZURE_LOCATION" | jq -r .[0].name)
AZURE_CONNECTION_STRING=$($AZURE_CMD storage account show-connection-string --name "$AZURE_STORAGE_ACCOUNT" | jq -r .connectionString)
$AZURE_CMD storage blob delete --container-name imagebuilder --name "$AZURE_IMAGE_NAME".vhd --account-name "$AZURE_STORAGE_ACCOUNT" --connection-string "$AZURE_CONNECTION_STRING"
fi
}
function installClient() {
if ! hash az; then
echo "Using 'azure-cli' from a container"
sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_IMAGE_CLOUD_TOOLS}
# directory mounted to the container, in which azure-cli stores the credentials after logging in
AZURE_CMD_CREDS_DIR="${WORKDIR}/azure-cli_credentials"
mkdir "${AZURE_CMD_CREDS_DIR}"
AZURE_CMD="sudo ${CONTAINER_RUNTIME} run --rm \
-v ${AZURE_CMD_CREDS_DIR}:/root/.azure:Z \
-v ${WORKDIR}:${WORKDIR}:Z \
${CONTAINER_IMAGE_CLOUD_TOOLS} az"
else
echo "Using pre-installed 'azure-cli' from the system"
AZURE_CMD="az"
fi
$AZURE_CMD version
}
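# Write the compose request: resize /var, install extra packages and upload
# the image to Azure under $AZURE_IMAGE_NAME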
function createReqFile() {
AZURE_IMAGE_NAME="image-$TEST_ID"
cat > "$REQUEST_FILE" << EOF
{
"distribution": "$DISTRO",
"customizations": {
"filesystem": [
{
"mountpoint": "/var",
"min_size": 262144000
}
],
"payload_repositories": [
{
"baseurl": "$PAYLOAD_REPO_URL"
}
],
"packages": [
"postgresql",
"dummy"
]${SUBSCRIPTION_BLOCK}
},
"image_request": {
"architecture": "$ARCH",
"image_type": "${IMAGE_TYPE}",
"repositories": $(jq ".\"$ARCH\"" /usr/share/tests/osbuild-composer/repositories/"$DISTRO".json),
"upload_options": {
"tenant_id": "${AZURE_TENANT_ID}",
"subscription_id": "${AZURE_SUBSCRIPTION_ID}",
"resource_group": "${AZURE_RESOURCE_GROUP}",
"location": "${AZURE_LOCATION}",
"image_name": "${AZURE_IMAGE_NAME}"
}
}
}
EOF
}
function checkUploadStatusOptions() {
local IMAGE_NAME
IMAGE_NAME=$(echo "$UPLOAD_OPTIONS" | jq -r '.image_name')
test "$IMAGE_NAME" = "$AZURE_IMAGE_NAME"
}
# Verify image in Azure
function verify() {
set +x
$AZURE_CMD login --service-principal --username "${V2_AZURE_CLIENT_ID}" --password "${V2_AZURE_CLIENT_SECRET}" --tenant "${AZURE_TENANT_ID}"
set -x
# verify that the image exists and tag it
$AZURE_CMD image show --resource-group "${AZURE_RESOURCE_GROUP}" --name "${AZURE_IMAGE_NAME}"
$AZURE_CMD image update --resource-group "${AZURE_RESOURCE_GROUP}" --name "${AZURE_IMAGE_NAME}" --tags gitlab-ci-test=true
# Verify that the image boots and has the customizations applied
# Create SSH keys to use
AZURE_SSH_KEY="$WORKDIR/id_azure"
ssh-keygen -t rsa-sha2-512 -f "$AZURE_SSH_KEY" -C "$SSH_USER" -N ""
# Create network resources with predictable names
$AZURE_CMD network nsg create --resource-group "$AZURE_RESOURCE_GROUP" --name "nsg-$TEST_ID" --location "$AZURE_LOCATION" --tags gitlab-ci-test=true
$AZURE_CMD network nsg rule create --resource-group "$AZURE_RESOURCE_GROUP" \
--nsg-name "nsg-$TEST_ID" \
--name SSH \
--priority 1001 \
--access Allow \
--protocol Tcp \
--destination-address-prefixes '*' \
--destination-port-ranges 22 \
--source-port-ranges '*' \
--source-address-prefixes '*'
$AZURE_CMD network vnet create --resource-group "$AZURE_RESOURCE_GROUP" \
--name "vnet-$TEST_ID" \
--subnet-name "snet-$TEST_ID" \
--location "$AZURE_LOCATION" \
--tags gitlab-ci-test=true
$AZURE_CMD network public-ip create --resource-group "$AZURE_RESOURCE_GROUP" --name "ip-$TEST_ID" --location "$AZURE_LOCATION" --tags gitlab-ci-test=true
$AZURE_CMD network nic create --resource-group "$AZURE_RESOURCE_GROUP" \
--name "iface-$TEST_ID" \
--subnet "snet-$TEST_ID" \
--vnet-name "vnet-$TEST_ID" \
--network-security-group "nsg-$TEST_ID" \
--public-ip-address "ip-$TEST_ID" \
--location "$AZURE_LOCATION" \
--tags gitlab-ci-test=true
# create the instance
AZURE_INSTANCE_NAME="vm-$TEST_ID"
$AZURE_CMD vm create --name "$AZURE_INSTANCE_NAME" \
--resource-group "$AZURE_RESOURCE_GROUP" \
--image "$AZURE_IMAGE_NAME" \
--size "Standard_B1s" \
--admin-username "$SSH_USER" \
--ssh-key-values "$AZURE_SSH_KEY.pub" \
--authentication-type "ssh" \
--location "$AZURE_LOCATION" \
--nics "iface-$TEST_ID" \
--os-disk-name "disk-$TEST_ID" \
--tags gitlab-ci-test=true
$AZURE_CMD vm show --name "$AZURE_INSTANCE_NAME" --resource-group "$AZURE_RESOURCE_GROUP" --show-details > "$WORKDIR/vm_details.json"
HOST=$(jq -r '.publicIps' "$WORKDIR/vm_details.json")
echo "⏱ Waiting for Azure instance to respond to ssh"
_instanceWaitSSH "$HOST"
# Verify image
_ssh="ssh -oStrictHostKeyChecking=no -i $AZURE_SSH_KEY $SSH_USER@$HOST"
_instanceCheck "$_ssh"
}

test/cases/api/common/aws.sh Normal file

@@ -0,0 +1,23 @@
#!/usr/bin/bash
# Check that needed variables are set to access AWS.
function checkEnv() {
printenv AWS_REGION AWS_BUCKET V2_AWS_ACCESS_KEY_ID V2_AWS_SECRET_ACCESS_KEY AWS_API_TEST_SHARE_ACCOUNT > /dev/null
}
function installClient() {
if ! hash aws; then
echo "Using 'awscli' from a container"
sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_IMAGE_CLOUD_TOOLS}
AWS_CMD="sudo ${CONTAINER_RUNTIME} run --rm \
-e AWS_ACCESS_KEY_ID=${V2_AWS_ACCESS_KEY_ID} \
-e AWS_SECRET_ACCESS_KEY=${V2_AWS_SECRET_ACCESS_KEY} \
-v ${WORKDIR}:${WORKDIR}:Z \
${CONTAINER_IMAGE_CLOUD_TOOLS} aws --region $AWS_REGION --output json --color on"
else
echo "Using pre-installed 'aws' from the system"
AWS_CMD="aws --region $AWS_REGION --output json --color on"
fi
$AWS_CMD --version
}

test/cases/api/common/common.sh Normal file

@@ -0,0 +1,46 @@
#!/usr/bin/bash
# Reusable function that waits for a given host to respond to SSH
function _instanceWaitSSH() {
local HOST="$1"
for LOOP_COUNTER in {0..30}; do
if ssh-keyscan "$HOST" > /dev/null 2>&1; then
echo "SSH is up!"
ssh-keyscan "$HOST" | sudo tee -a /root/.ssh/known_hosts
break
fi
echo "Retrying in 5 seconds... $LOOP_COUNTER"
sleep 5
done
}
function _instanceCheck() {
echo "✔️ Instance checking"
local _ssh="$1"
# Check that the requested packages (postgresql, dummy) are installed
$_ssh rpm -q postgresql dummy
# Verify subscription status. Check in a loop since the system may not be registered this early (RHEL only)
if [[ "$ID" == "rhel" ]]; then
set +eu
for LOOP_COUNTER in {1..10}; do
subscribe_org_id=$($_ssh sudo subscription-manager identity | grep 'org ID')
if [[ "$subscribe_org_id" == "org ID: $API_TEST_SUBSCRIPTION_ORG_ID" ]]; then
echo "System is subscribed."
break
else
echo "System is not subscribed. Retrying in 30 seconds...($LOOP_COUNTER/10)"
sleep 30
fi
done
set -eu
[[ "$subscribe_org_id" == "org ID: $API_TEST_SUBSCRIPTION_ORG_ID" ]]
# Unregister subscription
$_ssh sudo subscription-manager unregister
else
echo "Not RHEL OS. Skip subscription check."
fi
}

test/cases/api/gcp.sh Normal file

@@ -0,0 +1,162 @@
#!/usr/bin/bash
source /usr/libexec/tests/osbuild-composer/api/common/common.sh
# Check that needed variables are set to access GCP.
function checkEnv() {
printenv GOOGLE_APPLICATION_CREDENTIALS GCP_BUCKET GCP_REGION GCP_API_TEST_SHARE_ACCOUNT > /dev/null
}
function cleanup() {
# since this function can be called at any time, ensure that we don't expand unbound variables
GCP_CMD="${GCP_CMD:-}"
GCP_IMAGE_NAME="${GCP_IMAGE_NAME:-}"
GCP_INSTANCE_NAME="${GCP_INSTANCE_NAME:-}"
GCP_ZONE="${GCP_ZONE:-}"
if [ -n "$GCP_CMD" ]; then
$GCP_CMD compute instances delete --zone="$GCP_ZONE" "$GCP_INSTANCE_NAME"
$GCP_CMD compute images delete "$GCP_IMAGE_NAME"
fi
}
function installClient() {
if ! hash gcloud; then
echo "Using 'gcloud' from a container"
sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_IMAGE_CLOUD_TOOLS}
# directory mounted to the container, in which gcloud stores the credentials after logging in
GCP_CMD_CREDS_DIR="${WORKDIR}/gcloud_credentials"
mkdir "${GCP_CMD_CREDS_DIR}"
GCP_CMD="sudo ${CONTAINER_RUNTIME} run --rm \
-v ${GCP_CMD_CREDS_DIR}:/root/.config/gcloud:Z \
-v ${GOOGLE_APPLICATION_CREDENTIALS}:${GOOGLE_APPLICATION_CREDENTIALS}:Z \
-v ${WORKDIR}:${WORKDIR}:Z \
${CONTAINER_IMAGE_CLOUD_TOOLS} gcloud --format=json"
else
echo "Using pre-installed 'gcloud' from the system"
GCP_CMD="gcloud --format=json --quiet"
fi
$GCP_CMD --version
}
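# Write the compose request: resize /var, install extra packages, upload the
# image to GCP and share it with GCP_API_TEST_SHARE_ACCOUNT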
function createReqFile() {
# constraints on GCP resource IDs:
# - max 62 characters
# - must match the regex '[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}'
#
# use sha224sum to get a predictable, 56-character test ID without invalid characters
GCP_TEST_ID_HASH="$(echo -n "$TEST_ID" | sha224sum - | sed -E 's/([a-z0-9])\s+-/\1/')"
GCP_IMAGE_NAME="image-$GCP_TEST_ID_HASH"
cat > "$REQUEST_FILE" << EOF
{
"distribution": "$DISTRO",
"customizations": {
"filesystem": [
{
"mountpoint": "/var",
"min_size": 262144000
}
],
"payload_repositories": [
{
"baseurl": "$PAYLOAD_REPO_URL"
}
],
"packages": [
"postgresql",
"dummy"
]${SUBSCRIPTION_BLOCK}
},
"image_request": {
"architecture": "$ARCH",
"image_type": "${IMAGE_TYPE}",
"repositories": $(jq ".\"$ARCH\"" /usr/share/tests/osbuild-composer/repositories/"$DISTRO".json),
"upload_options": {
"bucket": "${GCP_BUCKET}",
"region": "${GCP_REGION}",
"image_name": "${GCP_IMAGE_NAME}",
"share_with_accounts": ["${GCP_API_TEST_SHARE_ACCOUNT}"]
}
}
}
EOF
}
function checkUploadStatusOptions() {
GCP_PROJECT=$(jq -r '.project_id' "$GOOGLE_APPLICATION_CREDENTIALS")
local IMAGE_NAME
IMAGE_NAME=$(echo "$UPLOAD_OPTIONS" | jq -r '.image_name')
local PROJECT_ID
PROJECT_ID=$(echo "$UPLOAD_OPTIONS" | jq -r '.project_id')
test "$IMAGE_NAME" = "$GCP_IMAGE_NAME"
test "$PROJECT_ID" = "$GCP_PROJECT"
}
# Verify image in Compute Engine on GCP
function verify() {
# Authenticate
$GCP_CMD auth activate-service-account --key-file "$GOOGLE_APPLICATION_CREDENTIALS"
# Extract and set the default project to be used for commands
GCP_PROJECT=$(jq -r '.project_id' "$GOOGLE_APPLICATION_CREDENTIALS")
$GCP_CMD config set project "$GCP_PROJECT"
# Add "gitlab-ci-test" label to the image
$GCP_CMD compute images add-labels "$GCP_IMAGE_NAME" --labels=gitlab-ci-test=true
# Verify that the image was shared
SHARE_OK=1
$GCP_CMD compute images get-iam-policy "$GCP_IMAGE_NAME" > "$WORKDIR/image-iam-policy.json"
SHARED_ACCOUNT=$(jq -r '.bindings[0].members[0]' "$WORKDIR/image-iam-policy.json")
SHARED_ROLE=$(jq -r '.bindings[0].role' "$WORKDIR/image-iam-policy.json")
if [ "$SHARED_ACCOUNT" != "$GCP_API_TEST_SHARE_ACCOUNT" ] || [ "$SHARED_ROLE" != "roles/compute.imageUser" ]; then
SHARE_OK=0
fi
if [ "$SHARE_OK" != 1 ]; then
echo "GCP image wasn't shared with the GCP_API_TEST_SHARE_ACCOUNT. 😢"
exit 1
fi
# Verify that the image boots and has the customizations applied
# Create SSH keys to use
GCP_SSH_KEY="$WORKDIR/id_google_compute_engine"
ssh-keygen -t rsa-sha2-512 -f "$GCP_SSH_KEY" -C "$SSH_USER" -N ""
GCP_SSH_METADATA_FILE="$WORKDIR/gcp-ssh-keys-metadata"
echo "${SSH_USER}:$(cat "$GCP_SSH_KEY".pub)" > "$GCP_SSH_METADATA_FILE"
# create the instance
# resource ID can have max 62 characters, the $GCP_TEST_ID_HASH contains 56 characters
GCP_INSTANCE_NAME="vm-$GCP_TEST_ID_HASH"
# Randomize the used GCP zone to prevent hitting "exhausted resources" error on each test re-run
# disable Shellcheck error as the suggested alternatives are less readable for this use case
# shellcheck disable=SC2207
local GCP_ZONES=($($GCP_CMD compute zones list --filter="region=$GCP_REGION" | jq '.[] | select(.status == "UP") | .name' | tr -d '"' | tr '\n' ' '))
GCP_ZONE=${GCP_ZONES[$((RANDOM % ${#GCP_ZONES[@]}))]}
$GCP_CMD compute instances create "$GCP_INSTANCE_NAME" \
--zone="$GCP_ZONE" \
--image-project="$GCP_PROJECT" \
--image="$GCP_IMAGE_NAME" \
--labels=gitlab-ci-test=true \
--metadata-from-file=ssh-keys="$GCP_SSH_METADATA_FILE"
HOST=$($GCP_CMD compute instances describe "$GCP_INSTANCE_NAME" --zone="$GCP_ZONE" --format='get(networkInterfaces[0].accessConfigs[0].natIP)')
echo "⏱ Waiting for GCP instance to respond to ssh"
_instanceWaitSSH "$HOST"
# Verify image
_ssh="ssh -oStrictHostKeyChecking=no -i $GCP_SSH_KEY $SSH_USER@$HOST"
_instanceCheck "$_ssh"
}