Weldr: support GCP upload target

Add support for importing the GCE image into GCP using the Weldr API.
The credentials to use can be specified in the upload settings; the
worker then uses them to authenticate with GCP.

The GCP target credentials are passed to the Weldr API as the
base64-encoded content of the GCP credentials JSON file. The reason is
that the JSON file contains many values and its format may change in
the future. This way, the Weldr API does not depend on the format of
the credentials file in any way.
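
For illustration, a minimal sketch in Go of how a client could build
such upload settings; the credentials path and the setting values are
placeholders, not part of this change:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"os"
)

func main() {
	// Read the GCP service account credentials JSON file (placeholder path).
	// Equivalent to: base64 -w 0 /path/to/gcp-credentials.json
	creds, err := os.ReadFile("/path/to/gcp-credentials.json")
	if err != nil {
		panic(err)
	}

	// The Weldr API expects the raw file content encoded with standard base64.
	settings := map[string]interface{}{
		"bucket":      "example-bucket",
		"region":      "us-east1",
		"object":      "example-image",
		"credentials": base64.StdEncoding.EncodeToString(creds),
	}

	body, err := json.Marshal(settings)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}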

Add a new test case for the GCP upload via Weldr and run it in CI.

Signed-off-by: Tomas Hozza <thozza@redhat.com>

6 changed files with 398 additions and 12 deletions

@@ -367,7 +367,7 @@ Rebase OSTree:
           - rhos-01/centos-stream-8-x86_64-large
           - rhos-01/centos-stream-9-x86_64-large
 
-.integration:
+.integration_base:
   stage: test
   extends: .terraform
   rules:
@@ -376,17 +376,39 @@ Rebase OSTree:
   script:
     - schutzbot/deploy.sh
     - /usr/libexec/tests/osbuild-composer/${SCRIPT}
 
+.rhel_runners: &rhel_runners
+  RUNNER:
+    - aws/centos-stream-8-x86_64
+    - aws/rhel-8.5-ga-x86_64
+    - aws/rhel-8.6-nightly-x86_64
+    - aws/rhel-9.0-nightly-x86_64
+    - aws/centos-stream-9-x86_64
+  INTERNAL_NETWORK: ["true"]
+
+.integration_rhel:
+  extends: .integration_base
   parallel:
     matrix:
-      - RUNNER:
-          - aws/fedora-34-x86_64
-          - aws/fedora-35-x86_64
-          - aws/centos-stream-8-x86_64
-          - aws/rhel-8.5-ga-x86_64
-          - aws/rhel-8.6-nightly-x86_64
-          - aws/rhel-9.0-nightly-x86_64
-          - aws/centos-stream-9-x86_64
-        INTERNAL_NETWORK: ["true"]
+      - *rhel_runners
+
+.fedora_runners: &fedora_runners
+  RUNNER:
+    - aws/fedora-34-x86_64
+    - aws/fedora-35-x86_64
+
+.integration_fedora:
+  extends: .integration_base
+  parallel:
+    matrix:
+      - *fedora_runners
+
+.integration:
+  extends: .integration_base
+  parallel:
+    matrix:
+      - *fedora_runners
+      - *rhel_runners
 
 koji.sh:
   extends: .integration
@@ -403,6 +425,12 @@ azure.sh:
   variables:
     SCRIPT: azure.sh
 
+# The required GCE image type is not supported on Fedora
+gcp.sh:
+  extends: .integration_rhel
+  variables:
+    SCRIPT: gcp.sh
+
 vmware.sh:
   extends: .integration
   variables:


@@ -465,7 +465,7 @@ func (impl *OSBuildJobImpl) Run(job worker.Job) error {
 	case *target.GCPTargetOptions:
 		ctx := context.Background()
 
-		g, err := impl.getGCP(nil)
+		g, err := impl.getGCP(options.Credentials)
 		if err != nil {
 			osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidConfig, err.Error())
 			return nil
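
The new target options (below) document that credentials sent with the
job take precedence, and that the worker otherwise falls back to the
credentials from its own configuration. A minimal sketch of that
selection logic, assuming the worker configuration stores a path to a
credentials file; all names here are illustrative, not the actual
implementation:

package worker

import "os"

// selectGCPCredentials is a hypothetical helper illustrating the fallback
// described in the GCPTargetOptions comment.
func selectGCPCredentials(jobCreds []byte, workerCredsPath string) ([]byte, error) {
	// Prefer the credentials that arrived with the job's target options.
	if len(jobCreds) > 0 {
		return jobCreds, nil
	}
	// Otherwise fall back to the credentials file from the worker's
	// configuration, if one is set.
	if workerCredsPath != "" {
		return os.ReadFile(workerCredsPath)
	}
	// With no credentials at all, leave it to the GCP client library's
	// default authentication.
	return nil, nil
}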


@@ -6,7 +6,12 @@ type GCPTargetOptions struct {
 	Os                string   `json:"os"` // not exposed in cloudapi for now
 	Bucket            string   `json:"bucket"`
 	Object            string   `json:"object"`
-	ShareWithAccounts []string `json:"shareWithAccounts"`
+	ShareWithAccounts []string `json:"shareWithAccounts,omitempty"`
+
+	// If provided, these credentials are used by the worker to import the image
+	// to GCP. If not provided, the worker will try to authenticate using the
+	// credentials from worker's configuration.
+	Credentials []byte `json:"credentials,omitempty"`
 }
 
 func (GCPTargetOptions) isTargetOptions() {}
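
A detail worth noting: Credentials is a []byte, and Go's encoding/json
marshals byte slices as base64 strings, so the decoded credentials are
re-encoded transparently whenever the target options are serialized
again. A small self-contained demonstration, with the struct mirrored
down to the relevant fields:

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the relevant fields of GCPTargetOptions for the demonstration.
type gcpTargetOptions struct {
	Bucket      string `json:"bucket"`
	Object      string `json:"object"`
	Credentials []byte `json:"credentials,omitempty"`
}

func main() {
	opts := gcpTargetOptions{
		Bucket:      "example-bucket",
		Object:      "example-image.tar.gz",
		Credentials: []byte(`{"type": "service_account"}`),
	}

	out, err := json.Marshal(opts)
	if err != nil {
		panic(err)
	}
	// Prints the []byte field as a base64 string:
	// {"bucket":"example-bucket","object":"example-image.tar.gz","credentials":"eyJ0eXBlIjogInNlcnZpY2VfYWNjb3VudCJ9"}
	fmt.Println(string(out))
}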


@@ -1,12 +1,15 @@
 package weldr
 
 import (
+	"encoding/base64"
 	"encoding/json"
 	"errors"
+	"strings"
 	"time"
 
 	"github.com/osbuild/osbuild-composer/internal/common"
 	"github.com/osbuild/osbuild-composer/internal/distro"
+	"github.com/sirupsen/logrus"
 
 	"github.com/google/uuid"
 	"github.com/osbuild/osbuild-composer/internal/target"
@@ -55,6 +58,18 @@ type azureUploadSettings struct {
 
 func (azureUploadSettings) isUploadSettings() {}
 
+type gcpUploadSettings struct {
+	Filename string `json:"filename"`
+	Region   string `json:"region"`
+	Bucket   string `json:"bucket"`
+	Object   string `json:"object"`
+
+	// base64 encoded GCP credentials JSON file
+	Credentials string `json:"credentials,omitempty"`
+}
+
+func (gcpUploadSettings) isUploadSettings() {}
+
 type vmwareUploadSettings struct {
 	Host     string `json:"host"`
 	Username string `json:"username"`
@@ -113,6 +128,8 @@ func (u *uploadRequest) UnmarshalJSON(data []byte) error {
 		settings = new(awsUploadSettings)
 	case "aws.s3":
 		settings = new(awsS3UploadSettings)
+	case "gcp":
+		settings = new(gcpUploadSettings)
 	case "vmware":
 		settings = new(vmwareUploadSettings)
 	case "oci":
@@ -179,6 +196,16 @@ func targetsToUploadResponses(targets []*target.Target, state ComposeState) []up
 			// StorageAccount and StorageAccessKey are intentionally not included.
 		}
 		uploads = append(uploads, upload)
+	case *target.GCPTargetOptions:
+		upload.ProviderName = "gcp"
+		upload.Settings = &gcpUploadSettings{
+			Filename: options.Filename,
+			Region:   options.Region,
+			Bucket:   options.Bucket,
+			Object:   options.Object,
+			// Credentials are intentionally not included.
+		}
+		uploads = append(uploads, upload)
 	case *target.VMWareTargetOptions:
 		upload.ProviderName = "vmware"
 		upload.Settings = &vmwareUploadSettings{
@@ -255,6 +282,33 @@ func uploadRequestToTarget(u uploadRequest, imageType distro.ImageType) *target.
 			StorageAccessKey: options.StorageAccessKey,
 			Container:        options.Container,
 		}
+	case *gcpUploadSettings:
+		t.Name = "org.osbuild.gcp"
+
+		var gcpCredentials []byte
+		var err error
+		if options.Credentials != "" {
+			gcpCredentials, err = base64.StdEncoding.DecodeString(options.Credentials)
+			if err != nil {
+				panic(err)
+			}
+		}
+
+		// The uploaded image object name must have 'tar.gz' suffix to be imported
+		objectName := options.Object
+		if !strings.HasSuffix(objectName, ".tar.gz") {
+			objectName = objectName + ".tar.gz"
+			logrus.Infof("[GCP] object name must end with '.tar.gz', using %q as the object name", objectName)
+		}
+
+		t.Options = &target.GCPTargetOptions{
+			Filename:    imageType.Filename(),
+			Region:      options.Region,
+			Os:          imageType.Arch().Distro().Name(),
+			Bucket:      options.Bucket,
+			Object:      objectName,
+			Credentials: gcpCredentials,
+		}
 	case *vmwareUploadSettings:
 		t.Name = "org.osbuild.vmware"
 		t.Options = &target.VMWareTargetOptions{
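
The '.tar.gz' rule above is the part most likely to surprise API users:
the GCE import only accepts a gzipped tarball, so the object name is
normalized before the target is created. A standalone sketch of the
same rule, using a hypothetical helper name (the committed code inlines
this logic):

package main

import (
	"fmt"
	"strings"
)

// normalizeObjectName appends the ".tar.gz" suffix required by the GCE
// image import if the given object name does not already have it.
func normalizeObjectName(name string) string {
	if !strings.HasSuffix(name, ".tar.gz") {
		return name + ".tar.gz"
	}
	return name
}

func main() {
	fmt.Println(normalizeObjectName("my-image"))        // my-image.tar.gz
	fmt.Println(normalizeObjectName("my-image.tar.gz")) // my-image.tar.gz
}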


@@ -220,6 +220,14 @@ it uploads the image to Azure, boots it and tries to ssh into it.
 the *Access control (IAM)* section under the newly created resource group.
 Here, add the new application with the *Developer* role.
 
+### Setting up GCP upload tests
+
+The following environment variables are required
+
+- `GCP_BUCKET`
+- `GCP_REGION`
+- `GOOGLE_APPLICATION_CREDENTIALS`
+
 ### Setting up OpenStack upload tests
 
 The following environment variables are required
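
As an aside, a hedged sketch of a pre-flight check for the GCP
variables listed above; this snippet is illustrative only and not part
of the test suite:

package main

import (
	"fmt"
	"os"
)

func main() {
	// The three variables documented above must be set before gcp.sh runs.
	for _, name := range []string{"GCP_BUCKET", "GCP_REGION", "GOOGLE_APPLICATION_CREDENTIALS"} {
		if os.Getenv(name) == "" {
			fmt.Fprintf(os.Stderr, "%s is not set\n", name)
			os.Exit(1)
		}
	}
}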

test/cases/gcp.sh (new executable file)

@@ -0,0 +1,291 @@
#!/bin/bash
#
# Test osbuild-composer 'upload to gcp' functionality. To do so, create and
# push a blueprint with composer cli. Then, create an instance in gcp
# from the uploaded image. Finally, verify that the instance is running and
# that the package from blueprint was installed.
#
set -euo pipefail
source /usr/libexec/osbuild-composer-test/set-env-variables.sh
# Colorful output.
function greenprint {
echo -e "\033[1;32m[$(date -Isecond)] ${1}\033[0m"
}
function get_build_info() {
key="$1"
fname="$2"
if rpm -q --quiet weldr-client; then
key=".body${key}"
fi
jq -r "${key}" "${fname}"
}
# Container image used for cloud provider CLI tools
CONTAINER_IMAGE_CLOUD_TOOLS="quay.io/osbuild/cloud-tools:latest"
# Provision the software under test.
/usr/libexec/osbuild-composer-test/provision.sh
# Check available container runtime
if which podman 2>/dev/null >&2; then
CONTAINER_RUNTIME=podman
elif which docker 2>/dev/null >&2; then
CONTAINER_RUNTIME=docker
else
echo "No container runtime found, install podman or docker."
exit 2
fi
function cleanupGCP() {
# since this function can be called at any time, ensure that we don't expand unbound variables
GCP_CMD="${GCP_CMD:-}"
GCP_IMAGE_NAME="${GCP_IMAGE_NAME:-}"
GCP_INSTANCE_NAME="${GCP_INSTANCE_NAME:-}"
GCP_ZONE="${GCP_ZONE:-}"
if [ -n "$GCP_CMD" ]; then
$GCP_CMD compute instances delete --zone="$GCP_ZONE" "$GCP_INSTANCE_NAME"
$GCP_CMD compute images delete "$GCP_IMAGE_NAME"
fi
}
TEMPDIR=$(mktemp -d)
function cleanup() {
greenprint "🧼 Cleaning up"
cleanupGCP
sudo rm -rf "$TEMPDIR"
}
trap cleanup EXIT
# Generate a string that can be used as a predictable resource name,
# especially when running the test in CI, where we may need to clean up
# resources if the test unexpectedly fails or is canceled
CI="${CI:-false}"
if [[ "$CI" == true ]]; then
# in CI, imitate GenerateCIArtifactName() from internal/test/helpers.go
TEST_ID="$DISTRO_CODE-$ARCH-$CI_COMMIT_BRANCH-$CI_BUILD_ID"
else
# if not running in CI, generate an ID without relying on specific env variables
TEST_ID=$(uuidgen);
fi
# Jenkins sets WORKSPACE to the job workspace, but if this script runs
# outside of Jenkins, we can set up a temporary directory instead.
if [[ ${WORKSPACE:-empty} == empty ]]; then
WORKSPACE=$(mktemp -d)
fi
# Set up temporary files.
GCP_CONFIG=${TEMPDIR}/gcp.toml
BLUEPRINT_FILE=${TEMPDIR}/blueprint.toml
BLUEPRINT_NAME="test"
COMPOSE_START=${TEMPDIR}/compose-start-${TEST_ID}.json
COMPOSE_INFO=${TEMPDIR}/compose-info-${TEST_ID}.json
GCP_TEST_ID_HASH="$(echo -n "$TEST_ID" | sha224sum - | sed -E 's/([a-z0-9])\s+-/\1/')"
GCP_IMAGE_NAME="image-$GCP_TEST_ID_HASH"
SSH_USER="cloud-user"
# Need gcloud to talk to GCP
if ! hash gcloud; then
echo "Using 'gcloud' from a container"
sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_IMAGE_CLOUD_TOOLS}
# directory mounted to the container, in which gcloud stores the credentials after logging in
GCP_CMD_CREDS_DIR="${TEMPDIR}/gcloud_credentials"
mkdir "${GCP_CMD_CREDS_DIR}"
GCP_CMD="sudo ${CONTAINER_RUNTIME} run --rm \
-v ${GCP_CMD_CREDS_DIR}:/root/.config/gcloud:Z \
-v ${GOOGLE_APPLICATION_CREDENTIALS}:${GOOGLE_APPLICATION_CREDENTIALS}:Z \
-v ${TEMPDIR}:${TEMPDIR}:Z \
${CONTAINER_IMAGE_CLOUD_TOOLS} gcloud --format=json"
else
echo "Using pre-installed 'gcloud' from the system"
GCP_CMD="gcloud --format=json --quiet"
fi
$GCP_CMD --version
# Verify image in Compute Engine on GCP
function verifyInGCP() {
# Authenticate
$GCP_CMD auth activate-service-account --key-file "$GOOGLE_APPLICATION_CREDENTIALS"
# Extract and set the default project to be used for commands
GCP_PROJECT=$(jq -r '.project_id' "$GOOGLE_APPLICATION_CREDENTIALS")
$GCP_CMD config set project "$GCP_PROJECT"
# Add "gitlab-ci-test" label to the image
$GCP_CMD compute images add-labels "$GCP_IMAGE_NAME" --labels=gitlab-ci-test=true
# Verify that the image boots and has the customizations applied
# Create SSH keys to use
GCP_SSH_KEY="$TEMPDIR/id_google_compute_engine"
ssh-keygen -t rsa-sha2-512 -f "$GCP_SSH_KEY" -C "$SSH_USER" -N ""
# create the instance
# a resource ID can have at most 62 characters; $GCP_TEST_ID_HASH is 56 characters
GCP_INSTANCE_NAME="vm-$GCP_TEST_ID_HASH"
# Randomize the GCP zone to avoid hitting an "exhausted resources" error on test re-runs
# disable Shellcheck error as the suggested alternatives are less readable for this use case
# shellcheck disable=SC2207
local GCP_ZONES=($($GCP_CMD compute zones list --filter="region=$GCP_REGION" | jq '.[] | select(.status == "UP") | .name' | tr -d '"' | tr '\n' ' '))
GCP_ZONE=${GCP_ZONES[$((RANDOM % ${#GCP_ZONES[@]}))]}
$GCP_CMD compute instances create "$GCP_INSTANCE_NAME" \
--zone="$GCP_ZONE" \
--image-project="$GCP_PROJECT" \
--image="$GCP_IMAGE_NAME" \
--labels=gitlab-ci-test=true
HOST=$($GCP_CMD compute instances describe "$GCP_INSTANCE_NAME" --zone="$GCP_ZONE" --format='get(networkInterfaces[0].accessConfigs[0].natIP)')
echo "⏱ Waiting for GCP instance to respond to ssh"
_instanceWaitSSH "$HOST"
# Verify image
_ssh="$GCP_CMD compute ssh --strict-host-key-checking=no --ssh-key-file=$GCP_SSH_KEY --zone=$GCP_ZONE --quiet $SSH_USER@$GCP_INSTANCE_NAME --"
_instanceCheck "$_ssh"
}
# Wait for the instance to be available over SSH
function _instanceWaitSSH() {
local HOST="$1"
for LOOP_COUNTER in {0..30}; do
if ssh-keyscan "$HOST" > /dev/null 2>&1; then
echo "SSH is up!"
ssh-keyscan "$HOST" | sudo tee -a /root/.ssh/known_hosts
break
fi
echo "Retrying in 5 seconds... $LOOP_COUNTER"
sleep 5
done
}
# Check the instance
function _instanceCheck() {
echo "✔️ Instance checking"
local _ssh="$1"
# Check if unbound is installed
$_ssh rpm -q unbound
}
# Get the compose log.
get_compose_log () {
COMPOSE_ID=$1
LOG_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-gcp.log
# Download the logs.
sudo composer-cli compose log "$COMPOSE_ID" | tee "$LOG_FILE" > /dev/null
}
# Get the compose metadata.
get_compose_metadata () {
COMPOSE_ID=$1
METADATA_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-gcp.json
# Download the metadata.
sudo composer-cli compose metadata "$COMPOSE_ID" > /dev/null
# Find the tarball and extract it.
TARBALL=$(basename "$(find . -maxdepth 1 -type f -name "*-metadata.tar")")
sudo tar -xf "$TARBALL"
sudo rm -f "$TARBALL"
# Move the JSON file into place.
sudo cat "${COMPOSE_ID}".json | jq -M '.' | tee "$METADATA_FILE" > /dev/null
}
is_weldr_client_installed () {
if rpm --quiet -q weldr-client; then
echo true
else
echo false
fi
}
# Write a GCP TOML file
tee "$GCP_CONFIG" > /dev/null << EOF
provider = "gcp"
[settings]
bucket = "${GCP_BUCKET}"
region = "${GCP_REGION}"
object = "${GCP_IMAGE_NAME}"
credentials = "$(base64 -w 0 "${GOOGLE_APPLICATION_CREDENTIALS}")"
EOF
# Write a basic blueprint for our image.
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "${BLUEPRINT_NAME}"
description = "Testing blueprint"
version = "0.0.1"
[[packages]]
name = "unbound"
[customizations.services]
enabled = ["unbound"]
EOF
# Prepare the blueprint for the compose.
greenprint "📋 Preparing blueprint"
sudo composer-cli blueprints push "$BLUEPRINT_FILE"
sudo composer-cli blueprints depsolve "$BLUEPRINT_NAME"
# Get worker unit file so we can watch the journal.
WORKER_UNIT=$(sudo systemctl list-units | grep -o -E "osbuild.*worker.*\.service")
sudo journalctl -af -n 1 -u "${WORKER_UNIT}" &
WORKER_JOURNAL_PID=$!
# Stop watching the worker journal when exiting.
trap 'sudo pkill -P ${WORKER_JOURNAL_PID}' EXIT
# Start the compose and upload to GCP.
greenprint "🚀 Starting compose"
sudo composer-cli --json compose start "$BLUEPRINT_NAME" gce "$GCP_IMAGE_NAME" "$GCP_CONFIG" | tee "$COMPOSE_START"
COMPOSE_ID=$(get_build_info ".build_id" "$COMPOSE_START")
# Wait for the compose to finish.
greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}"
while true; do
sudo composer-cli --json compose info "${COMPOSE_ID}" | tee "$COMPOSE_INFO" > /dev/null
COMPOSE_STATUS=$(get_build_info ".queue_status" "$COMPOSE_INFO")
# Is the compose finished?
if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
break
fi
# Wait 30 seconds and try again.
sleep 30
done
# Capture the compose logs from osbuild.
greenprint "💬 Getting compose log and metadata"
get_compose_log "$COMPOSE_ID"
get_compose_metadata "$COMPOSE_ID"
# Kill the journal monitor immediately and remove the trap
sudo pkill -P ${WORKER_JOURNAL_PID}
trap - EXIT
# Did the compose finish with success?
if [[ $COMPOSE_STATUS != FINISHED ]]; then
echo "❌ Something went wrong with the compose. 😢"
exit 1
fi
verifyInGCP
# Also delete the compose so we don't run out of disk space
sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
greenprint "💚 Success"
exit 0