test: adapt to shellcheck v0.9.0

Sanne Raymaekers 2022-12-13 11:09:29 +01:00
parent 86c3036fe3
commit 07a8f3d5ea
9 changed files with 27 additions and 25 deletions

View file

@@ -168,7 +168,9 @@ jobs:
       with:
         ignore: vendor # We don't want to fix the code in vendored dependencies
       env:
-        SHELLCHECK_OPTS: -e SC1091 -e SC2002 # don't check /etc/os-release sourcing and allow useless cats to live inside our codebase
+        # don't check /etc/os-release sourcing, allow useless cats to live inside our codebase, and
+        # allow seemingly unreachable commands
+        SHELLCHECK_OPTS: -e SC1091 -e SC2002 -e SC2317

   rpmlint:
     name: "📦 RPMlint"

View file

@@ -109,7 +109,7 @@ fi

 # Start the db
 DB_CONTAINER_NAME="osbuild-composer-db"
-sudo ${CONTAINER_RUNTIME} run -d --name "${DB_CONTAINER_NAME}" \
+sudo "${CONTAINER_RUNTIME}" run -d --name "${DB_CONTAINER_NAME}" \
     --health-cmd "pg_isready -U postgres -d osbuildcomposer" --health-interval 2s \
     --health-timeout 2s --health-retries 10 \
     -e POSTGRES_USER=postgres \
@@ -119,7 +119,7 @@ sudo ${CONTAINER_RUNTIME} run -d --name "${DB_CONTAINER_NAME}" \
     quay.io/osbuild/postgres:13-alpine

 # Dump the logs once to have a little more output
-sudo ${CONTAINER_RUNTIME} logs osbuild-composer-db
+sudo "${CONTAINER_RUNTIME}" logs osbuild-composer-db

 # Initialize a module in a temp dir so we can get tern without introducing
 # vendoring inconsistency
@@ -194,7 +194,7 @@ function dump_db() {
     set +x
     # Save the result, including the manifest, for the job, straight from the db
-    sudo ${CONTAINER_RUNTIME} exec "${DB_CONTAINER_NAME}" psql -U postgres -d osbuildcomposer -c "SELECT result FROM jobs WHERE type='manifest-id-only'" \
+    sudo "${CONTAINER_RUNTIME}" exec "${DB_CONTAINER_NAME}" psql -U postgres -d osbuildcomposer -c "SELECT result FROM jobs WHERE type='manifest-id-only'" \
         | sudo tee "${ARTIFACTS}/build-result.txt"
     set -x
 }
@@ -209,8 +209,8 @@ function cleanups() {
     # dump the DB here to ensure that it gets dumped even if the test fails
     dump_db
-    sudo ${CONTAINER_RUNTIME} kill "${DB_CONTAINER_NAME}"
-    sudo ${CONTAINER_RUNTIME} rm "${DB_CONTAINER_NAME}"
+    sudo "${CONTAINER_RUNTIME}" kill "${DB_CONTAINER_NAME}"
+    sudo "${CONTAINER_RUNTIME}" rm "${DB_CONTAINER_NAME}"
     sudo rm -rf "$WORKDIR"
@@ -487,7 +487,7 @@ waitForState "building"
 sudo systemctl stop "osbuild-remote-worker@*"
 RETRIED=0
 for RETRY in {1..10}; do
-    ROWS=$(sudo ${CONTAINER_RUNTIME} exec "${DB_CONTAINER_NAME}" psql -U postgres -d osbuildcomposer -c \
+    ROWS=$(sudo "${CONTAINER_RUNTIME}" exec "${DB_CONTAINER_NAME}" psql -U postgres -d osbuildcomposer -c \
               "SELECT retries FROM jobs WHERE id = '$COMPOSE_ID' AND retries = 1")
     if grep -q "1 row" <<< "$ROWS"; then
         RETRIED=1
@@ -502,7 +502,7 @@ if [ "$RETRIED" != 1 ]; then
     exit 1
 fi

 # remove the job from the queue so the worker doesn't pick it up again
-sudo ${CONTAINER_RUNTIME} exec "${DB_CONTAINER_NAME}" psql -U postgres -d osbuildcomposer -c \
+sudo "${CONTAINER_RUNTIME}" exec "${DB_CONTAINER_NAME}" psql -U postgres -d osbuildcomposer -c \
     "DELETE FROM jobs WHERE id = '$COMPOSE_ID'"

 sudo systemctl start "osbuild-remote-worker@localhost:8700.service"
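Every hunk in this file makes the same fix: double-quoting ${CONTAINER_RUNTIME} to satisfy SC2086 ("Double quote to prevent globbing and word splitting"). A minimal sketch of the failure mode the quoting rules out, with hypothetical values not taken from this repo:

#!/bin/bash
# Hypothetical value: an unquoted expansion is split on whitespace and
# glob-expanded before the command runs.
WORKDIR="/tmp/osbuild test"    # a path containing a space

rm -rf ${WORKDIR}      # SC2086: removes "/tmp/osbuild" and "./test" -- two arguments
rm -rf "${WORKDIR}"    # removes exactly "/tmp/osbuild test" -- one argument

For a single-word value like "podman" the two forms behave identically, which is why the change is safe here; the quotes just make the single-argument intent explicit.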

View file

@@ -60,7 +60,7 @@ AMI_DATA=${TEMPDIR}/ami-data-${TEST_ID}.json
 # We need awscli to talk to AWS.
 if ! hash aws; then
     echo "Using 'awscli' from a container"
-    sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_IMAGE_CLOUD_TOOLS}
+    sudo "${CONTAINER_RUNTIME}" pull ${CONTAINER_IMAGE_CLOUD_TOOLS}
     AWS_CMD="sudo ${CONTAINER_RUNTIME} run --rm \
         -e AWS_ACCESS_KEY_ID=${V2_AWS_ACCESS_KEY_ID} \
@@ -206,7 +206,7 @@ fi
 CONTAINER_CLOUD_IMAGE_VAL="quay.io/cloudexperience/cloud-image-val-test:$TAG"

-sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_CLOUD_IMAGE_VAL}
+sudo "${CONTAINER_RUNTIME}" pull "${CONTAINER_CLOUD_IMAGE_VAL}"

 greenprint "Running cloud-image-val on generated image"
@@ -229,13 +229,13 @@ if [ "$ARCH" == "aarch64" ]; then
     sed -i s/t3.medium/a1.large/ "${TEMPDIR}/resource-file.json"
 fi

-sudo ${CONTAINER_RUNTIME} run \
+sudo "${CONTAINER_RUNTIME}" run \
     -a stdout -a stderr \
     -e AWS_ACCESS_KEY_ID="${V2_AWS_ACCESS_KEY_ID}" \
     -e AWS_SECRET_ACCESS_KEY="${V2_AWS_SECRET_ACCESS_KEY}" \
     -e AWS_REGION="${AWS_REGION}" \
     -v "${TEMPDIR}":/tmp:Z \
-    ${CONTAINER_CLOUD_IMAGE_VAL} \
+    "${CONTAINER_CLOUD_IMAGE_VAL}" \
     python cloud-image-val.py -r /tmp/resource-file.json -d -o /tmp/report.xml -m 'not pub' && RESULTS=1 || RESULTS=0

 mv "${TEMPDIR}"/report.html "${ARTIFACTS}"

View file

@@ -45,7 +45,7 @@ AWS_S3_PROVIDER_CONFIG=${TEMPDIR}/aws.toml
 # We need awscli to talk to AWS.
 if ! hash aws; then
     echo "Using 'awscli' from a container"
-    sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_IMAGE_CLOUD_TOOLS}
+    sudo "${CONTAINER_RUNTIME}" pull ${CONTAINER_IMAGE_CLOUD_TOOLS}
     AWS_CMD="sudo ${CONTAINER_RUNTIME} run --rm \
         -e AWS_ACCESS_KEY_ID=${V2_AWS_ACCESS_KEY_ID} \

View file

@@ -43,7 +43,7 @@ trap cleanup EXIT
 # Terraform needs azure-cli to talk to Azure.
 if ! hash az; then
     echo "Using 'azure-cli' from a container"
-    sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_IMAGE_CLOUD_TOOLS}
+    sudo "${CONTAINER_RUNTIME}" pull ${CONTAINER_IMAGE_CLOUD_TOOLS}

     # directory mounted to the container, in which azure-cli stores the credentials after logging in
     AZURE_CMD_CREDS_DIR="${TEMPDIR}/azure-cli_credentials"
@@ -206,7 +206,7 @@ fi
 CONTAINER_CLOUD_IMAGE_VAL="quay.io/cloudexperience/cloud-image-val-test:$TAG"

-sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_CLOUD_IMAGE_VAL}
+sudo "${CONTAINER_RUNTIME}" pull "${CONTAINER_CLOUD_IMAGE_VAL}"

 greenprint "Running cloud-image-val on generated image"
@@ -225,14 +225,14 @@ tee "${TEMPDIR}/resource-file.json" <<EOF
 }
 EOF

-sudo ${CONTAINER_RUNTIME} run \
+sudo "${CONTAINER_RUNTIME}" run \
     -a stdout -a stderr \
     -e ARM_CLIENT_ID="${V2_AZURE_CLIENT_ID}" \
     -e ARM_CLIENT_SECRET="${V2_AZURE_CLIENT_SECRET}" \
     -e ARM_SUBSCRIPTION_ID="${AZURE_SUBSCRIPTION_ID}" \
     -e ARM_TENANT_ID="${AZURE_TENANT_ID}" \
     -v "${TEMPDIR}":/tmp:Z \
-    ${CONTAINER_CLOUD_IMAGE_VAL} \
+    "${CONTAINER_CLOUD_IMAGE_VAL}" \
     python cloud-image-val.py -r /tmp/resource-file.json -d -o /tmp/report.xml -m 'not pub' && RESULTS=1 || RESULTS=0

 mv "${TEMPDIR}"/report.html "${ARTIFACTS}"

View file

@@ -77,7 +77,7 @@ SSH_USER="cloud-user"
 # Need gcloud to talk to GCP
 if ! hash gcloud; then
     echo "Using 'gcloud' from a container"
-    sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_IMAGE_CLOUD_TOOLS}
+    sudo "${CONTAINER_RUNTIME}" pull ${CONTAINER_IMAGE_CLOUD_TOOLS}

     # directory mounted to the container, in which gcloud stores the credentials after logging in
     GCP_CMD_CREDS_DIR="${TEMPDIR}/gcloud_credentials"

View file

@@ -47,7 +47,7 @@ MINIO_PROVIDER_CONFIG=${TEMPDIR}/minio.toml
 # We need MinIO Client to talk to the MinIO Server.
 if ! hash mc; then
     echo "Using 'mc' from a container"
-    sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_MINIO_CLIENT}
+    sudo "${CONTAINER_RUNTIME}" pull ${CONTAINER_MINIO_CLIENT}
     MC_CMD="sudo ${CONTAINER_RUNTIME} run --rm \
         -v ${MINIO_CONFIG_DIR}:${MINIO_CONFIG_DIR}:Z \

View file

@@ -78,7 +78,7 @@ fi
 CONTAINER_IMAGE_CLOUD_TOOLS="quay.io/osbuild/cloud-tools:latest"

 greenprint "Pulling and running composer container for this commit"
-sudo ${CONTAINER_RUNTIME} pull --creds "${V2_QUAY_USERNAME}":"${V2_QUAY_PASSWORD}" \
+sudo "${CONTAINER_RUNTIME}" pull --creds "${V2_QUAY_USERNAME}":"${V2_QUAY_PASSWORD}" \
     "quay.io/osbuild/osbuild-composer-ubi-pr:${CI_COMMIT_SHA}"

 cat <<EOF | sudo tee "/etc/osbuild-composer/osbuild-composer.toml"
@@ -93,7 +93,7 @@ EOF
 # The host entitlement doesn't get picked up by composer
 # see https://github.com/osbuild/osbuild-composer/issues/1845
-sudo ${CONTAINER_RUNTIME} run \
+sudo "${CONTAINER_RUNTIME}" run \
     --name=composer \
     -d \
     -v /etc/osbuild-composer:/etc/osbuild-composer:Z \
@@ -166,8 +166,8 @@ function cleanup() {
     set +eu
     cleanupAWSS3
-    sudo ${CONTAINER_RUNTIME} kill composer
-    sudo ${CONTAINER_RUNTIME} rm composer
+    sudo "${CONTAINER_RUNTIME}" kill composer
+    sudo "${CONTAINER_RUNTIME}" rm composer
     sudo rm -rf "$WORKDIR"
@@ -216,7 +216,7 @@ popd
 greenprint "Installing aws client tools"
 if ! hash aws; then
     echo "Using 'awscli' from a container"
-    sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_IMAGE_CLOUD_TOOLS}
+    sudo "${CONTAINER_RUNTIME}" pull "${CONTAINER_IMAGE_CLOUD_TOOLS}"
     AWS_CMD="sudo ${CONTAINER_RUNTIME} run --rm \
         -e AWS_ACCESS_KEY_ID=${V2_AWS_ACCESS_KEY_ID} \
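To reproduce the CI lint locally, shellcheck reads the same SHELLCHECK_OPTS environment variable the workflow sets. A hypothetical invocation, assuming shellcheck >= 0.9.0 on PATH (the repo's actual script paths are not shown in this diff; the -not -path filter mirrors the workflow's "ignore: vendor"):

export SHELLCHECK_OPTS="-e SC1091 -e SC2002 -e SC2317"
find . -name '*.sh' -not -path './vendor/*' -exec shellcheck {} +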

View file

@@ -29,4 +29,4 @@ sudo mv private.key public.crt "${CERTS_DIR}"
 popd

 # Test upload to HTTPS S3 server
-/usr/libexec/osbuild-composer-test/generic_s3_test.sh "${CERTS_DIR}" ${CA_CERT_NAME:-""}
+/usr/libexec/osbuild-composer-test/generic_s3_test.sh "${CERTS_DIR}" "${CA_CERT_NAME:-""}"
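The last file also quotes the ${CA_CERT_NAME:-""} default expansion. The quoting matters when the variable is set to something containing whitespace; a sketch with a hypothetical value:

#!/bin/bash
count_args() { echo "$# argument(s)"; }

CA_CERT_NAME="my ca.crt"            # hypothetical value containing a space
count_args ${CA_CERT_NAME:-""}      # "2 argument(s)": the value was word-split
count_args "${CA_CERT_NAME:-""}"    # "1 argument(s)": passed through intact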