test/api: Add gcloud and generic s3 changes to split api tests

- Fixed shellcheck errors
- Moved checkEnv from common to individual tests
- Fixed package install section in spec file:
Globs which include a directory fail on el-like distros (sketch below).
- Use gcloud cli to ssh
- (re)Introduce generic s3 tests
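
For context, a minimal sketch of the glob problem the spec fix addresses (destination path hypothetical): GNU install(1) refuses directories matched by a glob, so a line like

    $ install -m 0755 -vp test/cases/* /tmp/dest/
    install: omitting directory 'test/cases/api'

starts failing once test/cases/ gains the api/ subdirectory. The spec now installs the *.sh globs and creates each subdirectory explicitly with install -vd, as the first hunk below shows.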
Sanne Raymaekers 2022-06-30 12:17:07 +02:00 committed by Achilleas Koutsou
parent cb7c0283a5
commit 339d69d2da
9 changed files with 678 additions and 82 deletions


@@ -212,13 +212,13 @@ install -m 0755 -vp tools/generic_s3_https_test.sh %{buildroot}%
install -m 0755 -vp tools/set-env-variables.sh %{buildroot}%{_libexecdir}/osbuild-composer-test/
install -m 0755 -vp tools/test-case-generators/generate-test-cases %{buildroot}%{_libexecdir}/osbuild-composer-test/
install -m 0755 -vd %{buildroot}%{_libexecdir}/tests/osbuild-composer
install -m 0755 -vp test/cases/* %{buildroot}%{_libexecdir}/tests/osbuild-composer/
install -m 0755 -vp test/cases/*.sh %{buildroot}%{_libexecdir}/tests/osbuild-composer/
install -m 0755 -vd %{buildroot}%{_libexecdir}/tests/osbuild-composer/api
install -m 0755 -vp test/cases/api/* %{buildroot}%{_libexecdir}/tests/osbuild-composer/api/
install -m 0755 -vp test/cases/api/*.sh %{buildroot}%{_libexecdir}/tests/osbuild-composer/api/
install -m 0755 -vd %{buildroot}%{_libexecdir}/tests/osbuild-composer/api/common
install -m 0755 -vp test/cases/api/common/* %{buildroot}%{_libexecdir}/tests/osbuild-composer/api/common/
install -m 0755 -vp test/cases/api/common/*.sh %{buildroot}%{_libexecdir}/tests/osbuild-composer/api/common/
install -m 0755 -vd %{buildroot}%{_datadir}/tests/osbuild-composer/ansible
install -m 0644 -vp test/data/ansible/* %{buildroot}%{_datadir}/tests/osbuild-composer/ansible/


@@ -25,15 +25,15 @@ CLOUD_PROVIDER_GENERIC_S3="generic.s3"
#
# Supported Image type names
#
IMAGE_TYPE_AWS="aws"
IMAGE_TYPE_AZURE="azure"
IMAGE_TYPE_EDGE_COMMIT="edge-commit"
IMAGE_TYPE_EDGE_CONTAINER="edge-container"
IMAGE_TYPE_EDGE_INSTALLER="edge-installer"
IMAGE_TYPE_GCP="gcp"
IMAGE_TYPE_IMAGE_INSTALLER="image-installer"
IMAGE_TYPE_GUEST="guest-image"
IMAGE_TYPE_VSPHERE="vsphere"
export IMAGE_TYPE_AWS="aws"
export IMAGE_TYPE_AZURE="azure"
export IMAGE_TYPE_EDGE_COMMIT="edge-commit"
export IMAGE_TYPE_EDGE_CONTAINER="edge-container"
export IMAGE_TYPE_EDGE_INSTALLER="edge-installer"
export IMAGE_TYPE_GCP="gcp"
export IMAGE_TYPE_IMAGE_INSTALLER="image-installer"
export IMAGE_TYPE_GUEST="guest-image"
export IMAGE_TYPE_VSPHERE="vsphere"
if (( $# > 2 )); then
echo "$0 does not support more than two arguments"
@@ -86,7 +86,7 @@ mkdir -p "${ARTIFACTS}"
source /usr/libexec/osbuild-composer-test/set-env-variables.sh
# Container image used for cloud provider CLI tools
CONTAINER_IMAGE_CLOUD_TOOLS="quay.io/osbuild/cloud-tools:latest"
export CONTAINER_IMAGE_CLOUD_TOOLS="quay.io/osbuild/cloud-tools:latest"
#
# Provision the software under test.
@@ -166,12 +166,18 @@ case $CLOUD_PROVIDER in
"$CLOUD_PROVIDER_AWS_S3")
source /usr/libexec/tests/osbuild-composer/api/aws.s3.sh
;;
"$CLOUD_PROVIDER_GENERIC_S3")
source /usr/libexec/tests/osbuild-composer/api/generic.s3.sh
;;
"$CLOUD_PROVIDER_GCP")
source /usr/libexec/tests/osbuild-composer/api/gcp.sh
;;
"$CLOUD_PROVIDER_AZURE")
source /usr/libexec/tests/osbuild-composer/api/azure.sh
;;
*)
echo "Unknown cloud provider: ${CLOUD_PROVIDER}"
exit 1
esac
# Verify that this script is running in the right environment.
@@ -218,8 +224,8 @@ trap cleanups EXIT
sudo dnf install -y rpm-build createrepo
DUMMYRPMDIR=$(mktemp -d)
DUMMYSPECFILE="$DUMMYRPMDIR/dummy.spec"
PAYLOAD_REPO_PORT="9999"
PAYLOAD_REPO_URL="http://localhost:9999"
export PAYLOAD_REPO_PORT="9999"
export PAYLOAD_REPO_URL="http://localhost:9999"
pushd "$DUMMYRPMDIR"
cat <<EOF > "$DUMMYSPECFILE"
@@ -275,6 +281,7 @@ curl \
REQUEST_FILE="${WORKDIR}/request.json"
ARCH=$(uname -m)
SSH_USER=
TEST_ID="$(uuidgen)"
# Generate a string, which can be used as a predictable resource name,
# especially when running the test in CI where we may need to clean up
@@ -283,10 +290,8 @@ CI="${CI:-false}"
if [[ "$CI" == true ]]; then
# in CI, imitate GenerateCIArtifactName() from internal/test/helpers.go
TEST_ID="$DISTRO_CODE-$ARCH-$CI_COMMIT_BRANCH-$CI_BUILD_ID"
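# e.g. TEST_ID="rhel-86-x86_64-main-123456" (illustrative, hypothetical values)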
else
# if not running in Jenkins, generate ID not relying on specific env variables
TEST_ID=$(uuidgen);
fi
export TEST_ID
if [[ "$ID" == "fedora" ]]; then
# fedora uses fedora for everything
@@ -298,12 +303,14 @@ else
# RHEL and centos use cloud-user for other clouds
SSH_USER="cloud-user"
fi
export SSH_USER
# This removes dot from VERSION_ID.
# ID == rhel && VERSION_ID == 8.6 => DISTRO == rhel-86
# ID == centos && VERSION_ID == 8 => DISTRO == centos-8
# ID == fedora && VERSION_ID == 35 => DISTRO == fedora-35
DISTRO="$ID-${VERSION_ID//./}"
export DISTRO="$ID-${VERSION_ID//./}"
SUBSCRIPTION_BLOCK=
# Only RHEL need subscription block.
if [[ "$ID" == "rhel" ]]; then
@@ -318,9 +325,8 @@ if [[ "$ID" == "rhel" ]]; then
}
EndOfMessage
)
else
SUBSCRIPTION_BLOCK=''
fi
export SUBSCRIPTION_BLOCK
# generate a temp key for user tests
ssh-keygen -t rsa-sha2-512 -f "${WORKDIR}/usertest" -C "usertest" -N ""
@@ -401,6 +407,9 @@ function waitForState() {
sleep 30
done
# export for use in subcases
export UPLOAD_OPTIONS
}
#
@@ -412,6 +421,7 @@ jq '.customizations.packages = [ "jesuisunpaquetquinexistepas" ]' "$REQUEST_FILE
sendCompose "$REQUEST_FILE2"
waitForState "failure"
# crashed/stopped/killed worker should result in a failed state
sendCompose "$REQUEST_FILE"
waitForState "building"


@@ -1,12 +1,21 @@
#!/usr/bin/bash
source /usr/libexec/tests/osbuild-composer/api/common/aws.sh
source /usr/libexec/tests/osbuild-composer/api/common/common.sh
source /usr/libexec/tests/osbuild-composer/api/common/s3.sh
# Check that needed variables are set to access AWS.
function checkEnv() {
printenv AWS_REGION AWS_BUCKET V2_AWS_ACCESS_KEY_ID V2_AWS_SECRET_ACCESS_KEY AWS_API_TEST_SHARE_ACCOUNT > /dev/null
if [ "${IMAGE_TYPE}" = "${IMAGE_TYPE_VSPHERE}" ]; then
printenv GOVMOMI_USERNAME GOVMOMI_PASSWORD GOVMOMI_URL GOVMOMI_CLUSTER GOVC_DATACENTER GOVMOMI_DATASTORE GOVMOMI_FOLDER GOVMOMI_NETWORK > /dev/null
fi
}
#
# Global var for ostree ref
#
OSTREE_REF="test/rhel/8/edge"
function cleanup() {
local S3_URL
S3_URL=$(echo "$UPLOAD_OPTIONS" | jq -r '.url')
@@ -28,46 +37,21 @@ function cleanup() {
}
function createReqFile() {
cat > "$REQUEST_FILE" << EOF
{
"distribution": "$DISTRO",
"customizations": {
"payload_repositories": [
{
"baseurl": "$PAYLOAD_REPO_URL"
}
],
"packages": [
"postgresql",
"dummy"
]${SUBSCRIPTION_BLOCK},
"users":[
{
"name": "user1",
"groups": ["wheel"],
"key": "$(cat "${WORKDIR}/usertest.pub")"
},
{
"name": "user2",
"key": "$(cat "${WORKDIR}/usertest.pub")"
}
]
},
"image_request": {
"architecture": "$ARCH",
"image_type": "${IMAGE_TYPE}",
"repositories": $(jq ".\"$ARCH\"" /usr/share/tests/osbuild-composer/repositories/"$DISTRO".json),
"ostree": {
"ref": "${OSTREE_REF}"
},
"upload_options": {
"region": "${AWS_REGION}"
}
}
}
EOF
case ${IMAGE_TYPE} in
"$IMAGE_TYPE_EDGE_COMMIT"|"$IMAGE_TYPE_EDGE_CONTAINER"|"$IMAGE_TYPE_EDGE_INSTALLER"|"$IMAGE_TYPE_IMAGE_INSTALLER")
createReqFileEdge
;;
"$IMAGE_TYPE_GUEST")
createReqFileGuest
;;
"$IMAGE_TYPE_VSPHERE")
createReqFileVSphere
;;
*)
echo "Unknown s3 image type for: ${IMAGE_TYPE}"
exit 1
esac
}
function checkUploadStatusOptions() {
local S3_URL
@@ -177,7 +161,7 @@ function verify() {
"${IMAGE_TYPE_VSPHERE}")
curl "${S3_URL}" --output "${WORKDIR}/disk.vmdk"
verifyDisk "${WORKDIR}/disk.vmdk"
verifyInVSphere "${WORKDIR}/disk.vmdk"
;;
*)
greenprint "No validation method for image type ${IMAGE_TYPE}"


@@ -3,6 +3,10 @@
source /usr/libexec/tests/osbuild-composer/api/common/aws.sh
source /usr/libexec/tests/osbuild-composer/api/common/common.sh
function checkEnv() {
printenv AWS_REGION AWS_BUCKET V2_AWS_ACCESS_KEY_ID V2_AWS_SECRET_ACCESS_KEY AWS_API_TEST_SHARE_ACCOUNT > /dev/null
}
function cleanup() {
# since this function can be called at any time, ensure that we don't expand unbound variables
AWS_CMD="${AWS_CMD:-}"


@@ -46,7 +46,7 @@ function cleanup() {
function installClient() {
if ! hash az; then
echo "Using 'azure-cli' from a container"
sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_IMAGE_CLOUD_TOOLS}
sudo "${CONTAINER_RUNTIME}" pull "${CONTAINER_IMAGE_CLOUD_TOOLS}"
# directory mounted to the container, in which azure-cli stores the credentials after logging in
AZURE_CMD_CREDS_DIR="${WORKDIR}/azure-cli_credentials"


@@ -1,14 +1,9 @@
#!/usr/bin/bash
# Check that needed variables are set to access AWS.
function checkEnv() {
printenv AWS_REGION AWS_BUCKET V2_AWS_ACCESS_KEY_ID V2_AWS_SECRET_ACCESS_KEY AWS_API_TEST_SHARE_ACCOUNT > /dev/null
}
function installClient() {
if ! hash aws; then
echo "Using 'awscli' from a container"
sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_IMAGE_CLOUD_TOOLS}
sudo "${CONTAINER_RUNTIME}" pull "${CONTAINER_IMAGE_CLOUD_TOOLS}"
AWS_CMD="sudo ${CONTAINER_RUNTIME} run --rm \
-e AWS_ACCESS_KEY_ID=${V2_AWS_ACCESS_KEY_ID} \
@@ -20,4 +15,19 @@ function installClient() {
AWS_CMD="aws --region $AWS_REGION --output json --color on"
fi
$AWS_CMD --version
if ! hash govc; then
greenprint "Installing govc"
pushd "${WORKDIR}" || exit 1
curl -Ls --retry 5 --output govc.gz \
https://github.com/vmware/govmomi/releases/download/v0.24.0/govc_linux_amd64.gz
gunzip -f govc.gz
GOVC_CMD="${WORKDIR}/govc"
chmod +x "${GOVC_CMD}"
popd || exit 1
else
echo "Using pre-installed 'govc' from the system"
GOVC_CMD="govc"
fi
$GOVC_CMD version
}

test/cases/api/common/s3.sh (new file, 435 additions)

@@ -0,0 +1,435 @@
#!/usr/bin/bash
function createReqFileEdge() {
cat > "$REQUEST_FILE" << EOF
{
"distribution": "$DISTRO",
"customizations": {
"payload_repositories": [
{
"baseurl": "$PAYLOAD_REPO_URL"
}
],
"packages": [
"postgresql",
"dummy"
],
"users":[
{
"name": "user1",
"groups": ["wheel"],
"key": "$(cat "${WORKDIR}/usertest.pub")"
},
{
"name": "user2",
"key": "$(cat "${WORKDIR}/usertest.pub")"
}
]
},
"image_request": {
"architecture": "$ARCH",
"image_type": "${IMAGE_TYPE}",
"repositories": $(jq ".\"$ARCH\"" /usr/share/tests/osbuild-composer/repositories/"$DISTRO".json),
"ostree": {
"ref": "${OSTREE_REF}"
},
"upload_options": {
"region": "${AWS_REGION}"
}
}
}
EOF
}
function createReqFileGuest() {
cat > "$REQUEST_FILE" << EOF
{
"distribution": "$DISTRO",
"customizations": {
"payload_repositories": [
{
"baseurl": "$PAYLOAD_REPO_URL"
}
],
"packages": [
"postgresql",
"dummy"
]${SUBSCRIPTION_BLOCK},
"users":[
{
"name": "user1",
"groups": ["wheel"],
"key": "$(cat "${WORKDIR}/usertest.pub")"
},
{
"name": "user2",
"key": "$(cat "${WORKDIR}/usertest.pub")"
}
]
},
"image_request": {
"architecture": "$ARCH",
"image_type": "${IMAGE_TYPE}",
"repositories": $(jq ".\"$ARCH\"" /usr/share/tests/osbuild-composer/repositories/"$DISTRO".json),
"upload_options": {
"region": "${AWS_REGION}"
}
}
}
EOF
}
# the VSphere test case does not create any additional users,
# since this is not supported by the service UI
function createReqFileVSphere() {
cat > "$REQUEST_FILE" << EOF
{
"distribution": "$DISTRO",
"customizations": {
"payload_repositories": [
{
"baseurl": "$PAYLOAD_REPO_URL"
}
],
"packages": [
"postgresql",
"dummy"
]${SUBSCRIPTION_BLOCK}
},
"image_request": {
"architecture": "$ARCH",
"image_type": "${IMAGE_TYPE}",
"repositories": $(jq ".\"$ARCH\"" /usr/share/tests/osbuild-composer/repositories/"$DISTRO".json),
"upload_options": {
"region": "${AWS_REGION}"
}
}
}
EOF
}
# Create a cloud-init user-data file
#
# Returns:
# - path to the user-data file
#
# Arguments:
# $1 - default username
# $2 - path to the SSH public key to set as authorized for the user
function createCIUserdata() {
local _user="$1"
local _ssh_pubkey_path="$2"
local _ci_userdata_dir
_ci_userdata_dir="$(mktemp -d -p "${WORKDIR}")"
local _ci_userdata_path="${_ci_userdata_dir}/user-data"
cat > "${_ci_userdata_path}" <<EOF
#cloud-config
users:
- name: "${_user}"
sudo: "ALL=(ALL) NOPASSWD:ALL"
ssh_authorized_keys:
- "$(cat "${_ssh_pubkey_path}")"
EOF
echo "${_ci_userdata_path}"
}
# Create a cloud-init meta-data file
#
# Returns:
# - path to the meta-data file
#
# Arguments:
# $1 - VM name
function createCIMetadata() {
local _vm_name="$1"
local _ci_metadata_dir
_ci_metadata_dir="$(mktemp -d -p "${WORKDIR}")"
local _ci_metadata_path="${_ci_metadata_dir}/meta-data"
cat > "${_ci_metadata_path}" <<EOF
instance-id: ${_vm_name}
local-hostname: ${_vm_name}
EOF
echo "${_ci_metadata_path}"
}
# Create an ISO with the provided cloud-init user-data and meta-data files
#
# Returns:
# - path to the created ISO file
#
# Arguments:
# $1 - path to the cloud-init user-data file
# $2 - path to the cloud-init meta-data file
function createCIUserdataISO() {
local _ci_userdata_path="$1"
local _ci_metadata_path="$2"
local _iso_path
_iso_path="$(mktemp -p "${WORKDIR}" --suffix .iso)"
mkisofs \
-input-charset "utf-8" \
-output "${_iso_path}" \
-volid "cidata" \
-joliet \
-rock \
-quiet \
-graft-points \
"${_ci_userdata_path}" \
"${_ci_metadata_path}"
echo "${_iso_path}"
}
# verify edge commit content
function verifyEdgeCommit() {
filename="$1"
greenprint "Verifying contents of ${filename}"
# extract tarball and save file list to artifacts directory
local COMMIT_DIR
COMMIT_DIR="${WORKDIR}/edge-commit"
mkdir -p "${COMMIT_DIR}"
tar xvf "${filename}" -C "${COMMIT_DIR}" > "${ARTIFACTS}/edge-commit-filelist.txt"
# Verify that the commit contains the ref we defined in the request
sudo dnf install -y ostree
local COMMIT_REF
COMMIT_REF=$(ostree refs --repo "${COMMIT_DIR}/repo")
if [[ "${COMMIT_REF}" != "${OSTREE_REF}" ]]; then
echo "Commit ref in archive does not match request 😠"
exit 1
fi
local TAR_COMMIT_ID
TAR_COMMIT_ID=$(ostree rev-parse --repo "${COMMIT_DIR}/repo" "${OSTREE_REF}")
API_COMMIT_ID_V2=$(curl \
--silent \
--show-error \
--cacert /etc/osbuild-composer/ca-crt.pem \
--key /etc/osbuild-composer/client-key.pem \
--cert /etc/osbuild-composer/client-crt.pem \
https://localhost/api/image-builder-composer/v2/composes/"$COMPOSE_ID"/metadata | jq -r '.ostree_commit')
if [[ "${API_COMMIT_ID_V2}" != "${TAR_COMMIT_ID}" ]]; then
echo "Commit ID returned from API does not match Commit ID in archive 😠"
exit 1
fi
}
# Verify image blobs from s3
function verifyDisk() {
filename="$1"
greenprint "Verifying contents of ${filename}"
infofile="${filename}-info.json"
sudo /usr/libexec/osbuild-composer-test/image-info "${filename}" | tee "${infofile}" > /dev/null
# save image info to artifacts
cp -v "${infofile}" "${ARTIFACTS}/image-info.json"
# check compose request users in passwd
if ! jq .passwd "${infofile}" | grep -q "user1"; then
greenprint "❌ user1 not found in passwd file"
exit 1
fi
if ! jq .passwd "${infofile}" | grep -q "user2"; then
greenprint "❌ user2 not found in passwd file"
exit 1
fi
# check packages for postgresql
if ! jq .packages "${infofile}" | grep -q "postgresql"; then
greenprint "❌ postgresql not found in packages"
exit 1
fi
greenprint "${filename} image info verified"
}
# Verify VMDK image in VSphere
function verifyInVSphere() {
local _filename="$1"
greenprint "Verifying VMDK image: ${_filename}"
# Create SSH keys to use
local _vsphere_ssh_key="${WORKDIR}/vsphere_ssh_key"
ssh-keygen -t rsa-sha2-512 -f "${_vsphere_ssh_key}" -C "${SSH_USER}" -N ""
VSPHERE_VM_NAME="osbuild-composer-vm-${TEST_ID}"
# create cloud-init ISO with the configuration
local _ci_userdata_path
_ci_userdata_path="$(createCIUserdata "${SSH_USER}" "${_vsphere_ssh_key}.pub")"
local _ci_metadata_path
_ci_metadata_path="$(createCIMetadata "${VSPHERE_VM_NAME}")"
greenprint "💿 Creating cloud-init user-data ISO"
local _ci_iso_path
_ci_iso_path="$(createCIUserdataISO "${_ci_userdata_path}" "${_ci_metadata_path}")"
VSPHERE_IMAGE_NAME="${VSPHERE_VM_NAME}.vmdk"
mv "${_filename}" "${WORKDIR}/${VSPHERE_IMAGE_NAME}"
# import the built VMDK image to VSphere
# import.vmdk seems to be creating the provided directory and
# if one with this name exists, it appends "_<number>" to the name
greenprint "💿 ⬆️ Importing the converted VMDK image to VSphere"
$GOVC_CMD import.vmdk \
-u "${GOVMOMI_USERNAME}:${GOVMOMI_PASSWORD}@${GOVMOMI_URL}" \
-k=true \
-dc="${GOVC_DATACENTER}" \
-ds="${GOVMOMI_DATASTORE}" \
"${WORKDIR}/${VSPHERE_IMAGE_NAME}" \
"${VSPHERE_VM_NAME}"
# create the VM, but don't start it
greenprint "🖥️ Creating VM in VSphere"
$GOVC_CMD vm.create \
-u "${GOVMOMI_USERNAME}:${GOVMOMI_PASSWORD}@${GOVMOMI_URL}" \
-k=true \
-dc="${GOVC_DATACENTER}" \
-pool="${GOVMOMI_CLUSTER}"/Resources \
-ds="${GOVMOMI_DATASTORE}" \
-folder="${GOVMOMI_FOLDER}" \
-net="${GOVMOMI_NETWORK}" \
-net.adapter=vmxnet3 \
-m=4096 -c=2 -g=rhel8_64Guest -firmware=bios \
-disk="${VSPHERE_VM_NAME}/${VSPHERE_IMAGE_NAME}" \
-disk.controller=ide \
-on=false \
"${VSPHERE_VM_NAME}"
# upload ISO, create CDROM device and insert the ISO in it
greenprint "💿 ⬆️ Uploading the cloud-init user-data ISO to VSphere"
VSPHERE_CIDATA_ISO_PATH="${VSPHERE_VM_NAME}/cidata.iso"
$GOVC_CMD datastore.upload \
-u "${GOVMOMI_USERNAME}:${GOVMOMI_PASSWORD}@${GOVMOMI_URL}" \
-k=true \
-dc="${GOVC_DATACENTER}" \
-ds="${GOVMOMI_DATASTORE}" \
"${_ci_iso_path}" \
"${VSPHERE_CIDATA_ISO_PATH}"
local _cdrom_device
greenprint "🖥️ + 💿 Adding a CD-ROM device to the VM"
_cdrom_device="$($GOVC_CMD device.cdrom.add \
-u "${GOVMOMI_USERNAME}:${GOVMOMI_PASSWORD}@${GOVMOMI_URL}" \
-k=true \
-dc="${GOVC_DATACENTER}" \
-vm "${VSPHERE_VM_NAME}")"
greenprint "💿 Inserting the cloud-init ISO into the CD-ROM device"
$GOVC_CMD device.cdrom.insert \
-u "${GOVMOMI_USERNAME}:${GOVMOMI_PASSWORD}@${GOVMOMI_URL}" \
-k=true \
-dc="${GOVC_DATACENTER}" \
-ds="${GOVMOMI_DATASTORE}" \
-vm "${VSPHERE_VM_NAME}" \
-device "${_cdrom_device}" \
"${VSPHERE_CIDATA_ISO_PATH}"
# start the VM
greenprint "🔌 Powering up the VSphere VM"
$GOVC_CMD vm.power \
-u "${GOVMOMI_USERNAME}:${GOVMOMI_PASSWORD}@${GOVMOMI_URL}" \
-k=true \
-dc="${GOVC_DATACENTER}" \
-on "${VSPHERE_VM_NAME}"
HOST=$($GOVC_CMD vm.ip \
-u "${GOVMOMI_USERNAME}:${GOVMOMI_PASSWORD}@${GOVMOMI_URL}" \
-k=true \
-dc="${GOVC_DATACENTER}" \
"${VSPHERE_VM_NAME}")
greenprint "⏱ Waiting for the VSphere VM to respond to ssh"
_instanceWaitSSH "${HOST}"
_ssh="ssh -oStrictHostKeyChecking=no -i ${_vsphere_ssh_key} $SSH_USER@$HOST"
_instanceCheck "${_ssh}"
greenprint "✅ Successfully verified VSphere image with cloud-init"
}


@@ -24,7 +24,7 @@ function cleanup() {
function installClient() {
if ! hash gcloud; then
echo "Using 'gcloud' from a container"
sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_IMAGE_CLOUD_TOOLS}
sudo "${CONTAINER_RUNTIME}" pull "${CONTAINER_IMAGE_CLOUD_TOOLS}"
# directory mounted to the container, in which gcloud stores the credentials after logging in
GCP_CMD_CREDS_DIR="${WORKDIR}/gcloud_credentials"
@@ -131,32 +131,37 @@ function verify() {
# Create SSH keys to use
GCP_SSH_KEY="$WORKDIR/id_google_compute_engine"
ssh-keygen -t rsa-sha2-512 -f "$GCP_SSH_KEY" -C "$SSH_USER" -N ""
GCP_SSH_METADATA_FILE="$WORKDIR/gcp-ssh-keys-metadata"
echo "${SSH_USER}:$(cat "$GCP_SSH_KEY".pub)" > "$GCP_SSH_METADATA_FILE"
# create the instance
# resource ID can have max 62 characters, the $GCP_TEST_ID_HASH contains 56 characters
GCP_INSTANCE_NAME="vm-$GCP_TEST_ID_HASH"
# Ensure that we use random GCP region with available 'IN_USE_ADDRESSES' quota
# We use the CI variable "GCP_REGION" as the base for expression to filter regions.
# It works best if the "GCP_REGION" is set to a storage multi-region, such as "us"
local GCP_COMPUTE_REGION
GCP_COMPUTE_REGION=$($GCP_CMD compute regions list --filter="name:$GCP_REGION* AND status=UP" | jq -r '.[] | select(.quotas[] as $quota | $quota.metric == "IN_USE_ADDRESSES" and $quota.limit > $quota.usage) | .name' | shuf -n1)
# Randomize the used GCP zone to prevent hitting "exhausted resources" error on each test re-run
# disable Shellcheck error as the suggested alternatives are less readable for this use case
# shellcheck disable=SC2207
local GCP_ZONES=($($GCP_CMD compute zones list --filter="region=$GCP_REGION" | jq '.[] | select(.status == "UP") | .name' | tr -d '"' | tr '\n' ' '))
GCP_ZONE=${GCP_ZONES[$((RANDOM % ${#GCP_ZONES[@]}))]}
GCP_ZONE=$($GCP_CMD compute zones list --filter="region=$GCP_COMPUTE_REGION AND status=UP" | jq -r '.[].name' | shuf -n1)
# Pick the smallest '^n\d-standard-\d$' machine type from those available in the zone
local GCP_MACHINE_TYPE
GCP_MACHINE_TYPE=$($GCP_CMD compute machine-types list --filter="zone=$GCP_ZONE AND name~^n\d-standard-\d$" | jq -r '.[].name' | sort | head -1)
$GCP_CMD compute instances create "$GCP_INSTANCE_NAME" \
--zone="$GCP_ZONE" \
--image-project="$GCP_PROJECT" \
--image="$GCP_IMAGE_NAME" \
--labels=gitlab-ci-test=true \
--metadata-from-file=ssh-keys="$GCP_SSH_METADATA_FILE"
--machine-type="$GCP_MACHINE_TYPE" \
--labels=gitlab-ci-test=true
HOST=$($GCP_CMD compute instances describe "$GCP_INSTANCE_NAME" --zone="$GCP_ZONE" --format='get(networkInterfaces[0].accessConfigs[0].natIP)')
echo "⏱ Waiting for GCP instance to respond to ssh"
_instanceWaitSSH "$HOST"
# Verify image
_ssh="ssh -oStrictHostKeyChecking=no -i $GCP_SSH_KEY $SSH_USER@$HOST"
_ssh="$GCP_CMD compute ssh --strict-host-key-checking=no --ssh-key-file=$GCP_SSH_KEY --zone=$GCP_ZONE --quiet $SSH_USER@$GCP_INSTANCE_NAME --"
_instanceCheck "$_ssh"
}


@@ -0,0 +1,148 @@
#!/usr/bin/bash
source /usr/libexec/tests/osbuild-composer/api/common/common.sh
source /usr/libexec/tests/osbuild-composer/api/common/s3.sh
function checkEnv() {
printenv AWS_REGION > /dev/null
}
# Global var for ostree ref
export OSTREE_REF="test/rhel/8/edge"
function cleanup() {
MINIO_CONTAINER_NAME="${MINIO_CONTAINER_NAME:-}"
if [ -n "${MINIO_CONTAINER_NAME}" ]; then
sudo "${CONTAINER_RUNTIME}" kill "${MINIO_CONTAINER_NAME}"
fi
}
function installClient() {
local CONTAINER_MINIO_SERVER="quay.io/minio/minio:latest"
MINIO_CONTAINER_NAME="minio-server"
MINIO_ENDPOINT="http://localhost:9000"
local MINIO_ROOT_USER="X29DU5Q6C5NKDQ8PLGVT"
local MINIO_ROOT_PASSWORD
MINIO_ROOT_PASSWORD=$(date +%s | sha256sum | base64 | head -c 32 ; echo)
MINIO_BUCKET="ci-test"
local MINIO_REGION="${AWS_REGION}"
local MINIO_CREDENTIALS_FILE="/etc/osbuild-worker/minio-creds"
sudo "${CONTAINER_RUNTIME}" run --rm -d \
--name ${MINIO_CONTAINER_NAME} \
-p 9000:9000 \
-e MINIO_BROWSER=off \
-e MINIO_ROOT_USER="${MINIO_ROOT_USER}" \
-e MINIO_ROOT_PASSWORD="${MINIO_ROOT_PASSWORD}" \
${CONTAINER_MINIO_SERVER} server /data
if ! hash aws; then
echo "Using 'awscli' from a container"
sudo "${CONTAINER_RUNTIME}" pull "${CONTAINER_IMAGE_CLOUD_TOOLS}"
AWS_CMD="sudo ${CONTAINER_RUNTIME} run --rm \
-e AWS_ACCESS_KEY_ID=${MINIO_ROOT_USER} \
-e AWS_SECRET_ACCESS_KEY=${MINIO_ROOT_PASSWORD} \
-v ${WORKDIR}:${WORKDIR}:Z \
--network host \
${CONTAINER_IMAGE_CLOUD_TOOLS} aws"
else
echo "Using pre-installed 'aws' from the system"
AWS_CMD="AWS_ACCESS_KEY_ID=${MINIO_ROOT_USER} \
AWS_SECRET_ACCESS_KEY=${MINIO_ROOT_PASSWORD} \
aws"
fi
AWS_CMD+=" --region $MINIO_REGION --output json --color on --endpoint-url $MINIO_ENDPOINT"
$AWS_CMD --version
# Configure the local server (retry until the service is up)
MINIO_CONFIGURE_RETRY=0
MINIO_CONFIGURE_MAX_RETRY=5
MINIO_RETRY_INTERVAL=15
until [ "${MINIO_CONFIGURE_RETRY}" -ge "${MINIO_CONFIGURE_MAX_RETRY}" ]
do
${AWS_CMD} s3 ls && break
MINIO_CONFIGURE_RETRY=$((MINIO_CONFIGURE_RETRY + 1))
echo "Retrying [${MINIO_CONFIGURE_RETRY}/${MINIO_CONFIGURE_MAX_RETRY}] in ${MINIO_RETRY_INTERVAL}(s) "
sleep ${MINIO_RETRY_INTERVAL}
done
if [ "${MINIO_CONFIGURE_RETRY}" -ge "${MINIO_CONFIGURE_MAX_RETRY}" ]; then
echo "Failed to communicate with the MinIO server after ${MINIO_CONFIGURE_MAX_RETRY} attempts!"
exit 1
fi
# Create the bucket
${AWS_CMD} s3 mb s3://${MINIO_BUCKET}
cat <<EOF | sudo tee "${MINIO_CREDENTIALS_FILE}"
[default]
aws_access_key_id = ${MINIO_ROOT_USER}
aws_secret_access_key = ${MINIO_ROOT_PASSWORD}
EOF
cat <<EOF | sudo tee "/etc/osbuild-worker/osbuild-worker.toml"
[generic_s3]
credentials = "${MINIO_CREDENTIALS_FILE}"
endpoint = "${MINIO_ENDPOINT}"
region = "${MINIO_REGION}"
bucket = "${MINIO_BUCKET}"
EOF
sudo systemctl restart "osbuild-worker@1"
}
# Unset AWS_REGION: when region == "" in the request, the worker falls back to the
# generic s3 implementation
function createReqFile() {
case ${IMAGE_TYPE} in
"$IMAGE_TYPE_EDGE_COMMIT"|"$IMAGE_TYPE_EDGE_CONTAINER"|"$IMAGE_TYPE_EDGE_INSTALLER"|"$IMAGE_TYPE_IMAGE_INSTALLER")
AWS_REGION='' createReqFileEdge
;;
"$IMAGE_TYPE_VSPHERE")
AWS_REGION='' createReqFileGuest
;;
"$IMAGE_TYPE_VSPHERE")
AWS_REGION='' createReqFileVSphere
;;
*)
echo "Unknown s3 image type for: ${IMAGE_TYPE}"
exit 1
esac
}
function checkUploadStatusOptions() {
local S3_URL
S3_URL=$(echo "$UPLOAD_OPTIONS" | jq -r '.url')
# S3 URL contains endpoint and bucket name
echo "$S3_URL" | grep -F "$MINIO_ENDPOINT" -
echo "$S3_URL" | grep -F "$MINIO_BUCKET" -
}
# Verify s3 blobs
function verify() {
local S3_URL
S3_URL=$(echo "$UPLOAD_OPTIONS" | jq -r '.url')
# Download the object using the Presigned URL and inspect
case ${IMAGE_TYPE} in
"$IMAGE_TYPE_EDGE_COMMIT")
curl "${S3_URL}" --output "${WORKDIR}/edge-commit.tar"
verifyEdgeCommit "${WORKDIR}/edge-commit.tar"
;;
"${IMAGE_TYPE_GUEST}")
curl "${S3_URL}" --output "${WORKDIR}/disk.qcow2"
verifyDisk "${WORKDIR}/disk.qcow2"
;;
"${IMAGE_TYPE_VSPHERE}")
curl "${S3_URL}" --output "${WORKDIR}/disk.vmdk"
verifyInVSphere "${WORKDIR}/disk.vmdk"
;;
*)
greenprint "No validation method for image type ${IMAGE_TYPE}"
;;
esac
greenprint "✅ Successfully verified S3 object"
}