ci: rotate secret names
Signed-off-by: Ondřej Budai <ondrej@budai.cz>
parent 65e429fc4a
commit 58423c262b
13 changed files with 45 additions and 45 deletions
@@ -30,8 +30,8 @@ type awsCredentials struct {
 // If none of the environment variables is set, it returns nil.
 // If some but not all environment variables are set, it returns an error.
 func GetAWSCredentialsFromEnv() (*awsCredentials, error) {
-	accessKeyId, akExists := os.LookupEnv("AWS_ACCESS_KEY_ID")
+	accessKeyId, akExists := os.LookupEnv("V2_AWS_ACCESS_KEY_ID")
-	secretAccessKey, sakExists := os.LookupEnv("AWS_SECRET_ACCESS_KEY")
+	secretAccessKey, sakExists := os.LookupEnv("V2_AWS_SECRET_ACCESS_KEY")
 	region, regionExists := os.LookupEnv("AWS_REGION")
 	bucket, bucketExists := os.LookupEnv("AWS_BUCKET")
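The rest of this function is outside the hunk; for context, here is a minimal Go sketch of the contract stated in the comments above (nil when none of the variables is set, an error when only some are), using the rotated V2_* names. The awsCredentials field set, the lowercase function name, and the error message are assumptions made purely for illustration, not the actual implementation.

package main

import (
	"fmt"
	"os"
)

// awsCredentials mirrors the struct named in the hunk header; the fields are guessed.
type awsCredentials struct {
	AccessKeyID     string
	SecretAccessKey string
	Region          string
	Bucket          string
}

// getAWSCredentialsFromEnv returns nil when no variable is set and an error
// when only a subset of them is set.
func getAWSCredentialsFromEnv() (*awsCredentials, error) {
	accessKeyID, akExists := os.LookupEnv("V2_AWS_ACCESS_KEY_ID")
	secretAccessKey, sakExists := os.LookupEnv("V2_AWS_SECRET_ACCESS_KEY")
	region, regionExists := os.LookupEnv("AWS_REGION")
	bucket, bucketExists := os.LookupEnv("AWS_BUCKET")

	// None of the variables is set: no credentials, but not an error either.
	if !akExists && !sakExists && !regionExists && !bucketExists {
		return nil, nil
	}
	// Some, but not all, variables are set: report the misconfiguration.
	if !akExists || !sakExists || !regionExists || !bucketExists {
		return nil, fmt.Errorf("only some of the AWS environment variables are set")
	}
	return &awsCredentials{accessKeyID, secretAccessKey, region, bucket}, nil
}

func main() {
	creds, err := getAWSCredentialsFromEnv()
	fmt.Println(creds, err)
}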
@@ -52,8 +52,8 @@ func GetAzureCredentialsFromEnv() (*azureCredentials, error) {
 	storageAccessKey, sakExists := os.LookupEnv("AZURE_STORAGE_ACCESS_KEY")
 	containerName, cExists := os.LookupEnv("AZURE_CONTAINER_NAME")
 	subscriptionId, siExists := os.LookupEnv("AZURE_SUBSCRIPTION_ID")
-	clientId, ciExists := os.LookupEnv("AZURE_CLIENT_ID")
+	clientId, ciExists := os.LookupEnv("V2_AZURE_CLIENT_ID")
-	clientSecret, csExists := os.LookupEnv("AZURE_CLIENT_SECRET")
+	clientSecret, csExists := os.LookupEnv("V2_AZURE_CLIENT_SECRET")
 	tenantId, tiExists := os.LookupEnv("AZURE_TENANT_ID")
 	location, lExists := os.LookupEnv("AZURE_LOCATION")
 	resourceGroup, rgExists := os.LookupEnv("AZURE_RESOURCE_GROUP")
@@ -20,5 +20,5 @@ podman \
 # Push to reuse later in the pipeline (see regression tests)
 BRANCH_NAME="${BRANCH_NAME:-${CI_COMMIT_BRANCH}}"
 podman push \
-    --creds "${QUAY_USERNAME}":"${QUAY_PASSWORD}" \
+    --creds "${V2_QUAY_USERNAME}":"${V2_QUAY_PASSWORD}" \
     "${IMAGE_NAME}:${IMAGE_TAG}"
@@ -17,7 +17,7 @@ gpgkey=https://packages.microsoft.com/keys/microsoft.asc" | sudo tee /etc/yum.re
 az version
 fi

-az login --service-principal --username "${AZURE_CLIENT_ID}" --password "${AZURE_CLIENT_SECRET}" --tenant "${AZURE_TENANT_ID}"
+az login --service-principal --username "${V2_AZURE_CLIENT_ID}" --password "${V2_AZURE_CLIENT_SECRET}" --tenant "${AZURE_TENANT_ID}"

 # List all resources from AZURE_RESOURCE_GROUP
 RESOURCE_LIST=$(az resource list -g "$AZURE_RESOURCE_GROUP")
@@ -169,8 +169,8 @@ it uploads the image to Azure, boots it and tries to ssh into it.
 - `AZURE_STORAGE_ACCESS_KEY`
 - `AZURE_CONTAINER_NAME`
 - `AZURE_SUBSCRIPTION_ID`
-- `AZURE_CLIENT_ID`
+- `V2_AZURE_CLIENT_ID`
-- `AZURE_CLIENT_SECRET`
+- `V2_AZURE_CLIENT_SECRET`
 - `AZURE_TENANT_ID`
 - `AZURE_LOCATION`
 - `AZURE_RESOURCE_GROUP`
@@ -207,10 +207,10 @@ it uploads the image to Azure, boots it and tries to ssh into it.

 When it’s created, open it. In the overview, you can see
 the Application (client) ID and the Directory (tenant) ID. These are your
-`AZURE_CLIENT_ID` and `AZURE_TENANT_ID`.
+`V2_AZURE_CLIENT_ID` and `AZURE_TENANT_ID`.

 Now, go to *Manage > Certificates & Secrets* under your new application
-and create a new client secret. This is your `AZURE_CLIENT_SECRET`.
+and create a new client secret. This is your `V2_AZURE_CLIENT_SECRET`.

 5) The last step is to give the new application access to the resource group.
 This step must be done by an Azure administrator (@larskarlitski): Go to
@@ -330,8 +330,8 @@ The following environment variables are required

 - `AWS_REGION`
 - `AWS_BUCKET`
-- `AWS_ACCESS_KEY_ID`
+- `V2_AWS_ACCESS_KEY_ID`
-- `AWS_SECRET_ACCESS_KEY`
+- `V2_AWS_SECRET_ACCESS_KEY`
 - `AWS_API_TEST_SHARE_ACCOUNT`

 To execute the AWS integration tests, complete steps from *Cloud API integration testing*
@@ -125,7 +125,7 @@ esac

 # Check that needed variables are set to access AWS.
 function checkEnvAWS() {
-printenv AWS_REGION AWS_BUCKET AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_API_TEST_SHARE_ACCOUNT > /dev/null
+printenv AWS_REGION AWS_BUCKET V2_AWS_ACCESS_KEY_ID V2_AWS_SECRET_ACCESS_KEY AWS_API_TEST_SHARE_ACCOUNT > /dev/null
 }

 # Check that needed variables are set to access GCP.
@@ -135,7 +135,7 @@ function checkEnvGCP() {

 # Check that needed variables are set to access Azure.
 function checkEnvAzure() {
-printenv AZURE_TENANT_ID AZURE_SUBSCRIPTION_ID AZURE_RESOURCE_GROUP AZURE_LOCATION AZURE_CLIENT_ID AZURE_CLIENT_SECRET > /dev/null
+printenv AZURE_TENANT_ID AZURE_SUBSCRIPTION_ID AZURE_RESOURCE_GROUP AZURE_LOCATION V2_AZURE_CLIENT_ID V2_AZURE_CLIENT_SECRET > /dev/null
 }

 # Check that needed variables are set to register to RHSM (RHEL only)
@@ -288,8 +288,8 @@ function installClientAWS() {
 sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_IMAGE_CLOUD_TOOLS}

 AWS_CMD="sudo ${CONTAINER_RUNTIME} run --rm \
-    -e AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
+    -e AWS_ACCESS_KEY_ID=${V2_AWS_ACCESS_KEY_ID} \
-    -e AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
+    -e AWS_SECRET_ACCESS_KEY=${V2_AWS_SECRET_ACCESS_KEY} \
     -v ${WORKDIR}:${WORKDIR}:Z \
     ${CONTAINER_IMAGE_CLOUD_TOOLS} aws --region $AWS_REGION --output json --color on"
 else
@@ -487,13 +487,13 @@ function createReqFileAWS() {
 "options": {
 "region": "${AWS_REGION}",
 "s3": {
-"access_key_id": "${AWS_ACCESS_KEY_ID}",
+"access_key_id": "${V2_AWS_ACCESS_KEY_ID}",
-"secret_access_key": "${AWS_SECRET_ACCESS_KEY}",
+"secret_access_key": "${V2_AWS_SECRET_ACCESS_KEY}",
 "bucket": "${AWS_BUCKET}"
 },
 "ec2": {
-"access_key_id": "${AWS_ACCESS_KEY_ID}",
+"access_key_id": "${V2_AWS_ACCESS_KEY_ID}",
-"secret_access_key": "${AWS_SECRET_ACCESS_KEY}",
+"secret_access_key": "${V2_AWS_SECRET_ACCESS_KEY}",
 "snapshot_name": "${AWS_SNAPSHOT_NAME}",
 "share_with_accounts": ["${AWS_API_TEST_SHARE_ACCOUNT}"]
 }
@@ -532,8 +532,8 @@ function createReqFileAWSS3() {
 "options": {
 "region": "${AWS_REGION}",
 "s3": {
-"access_key_id": "${AWS_ACCESS_KEY_ID}",
+"access_key_id": "${V2_AWS_ACCESS_KEY_ID}",
-"secret_access_key": "${AWS_SECRET_ACCESS_KEY}",
+"secret_access_key": "${V2_AWS_SECRET_ACCESS_KEY}",
 "bucket": "${AWS_BUCKET}"
 }
 }
@@ -1040,7 +1040,7 @@ function verifyInGCP() {
 # Verify image in Azure
 function verifyInAzure() {
 set +x
-$AZURE_CMD login --service-principal --username "${AZURE_CLIENT_ID}" --password "${AZURE_CLIENT_SECRET}" --tenant "${AZURE_TENANT_ID}"
+$AZURE_CMD login --service-principal --username "${V2_AZURE_CLIENT_ID}" --password "${V2_AZURE_CLIENT_SECRET}" --tenant "${AZURE_TENANT_ID}"
 set -x

 # verify that the image exists
@@ -90,8 +90,8 @@ credentials="$AWS_CREDS_FILE"
 EOF
 cat <<EOF | sudo tee "$AWS_CREDS_FILE"
 [default]
-aws_access_key_id = $AWS_ACCESS_KEY_ID
+aws_access_key_id = $V2_AWS_ACCESS_KEY_ID
-aws_secret_access_key = $AWS_SECRET_ACCESS_KEY
+aws_secret_access_key = $V2_AWS_SECRET_ACCESS_KEY
 EOF
 sudo systemctl restart osbuild-composer osbuild-composer-worker@*.service
 }
@@ -133,7 +133,7 @@ esac

 # Check that needed variables are set to access AWS.
 function checkEnvAWS() {
-printenv AWS_REGION AWS_BUCKET AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_API_TEST_SHARE_ACCOUNT > /dev/null
+printenv AWS_REGION AWS_BUCKET V2_AWS_ACCESS_KEY_ID V2_AWS_SECRET_ACCESS_KEY AWS_API_TEST_SHARE_ACCOUNT > /dev/null
 }

 # Check that needed variables are set to register to RHSM (RHEL only)
@@ -218,8 +218,8 @@ function installClientAWS() {
 sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_IMAGE_CLOUD_TOOLS}

 AWS_CMD="sudo ${CONTAINER_RUNTIME} run --rm \
-    -e AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
+    -e AWS_ACCESS_KEY_ID=${V2_AWS_ACCESS_KEY_ID} \
-    -e AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
+    -e AWS_SECRET_ACCESS_KEY=${V2_AWS_SECRET_ACCESS_KEY} \
     -v ${WORKDIR}:${WORKDIR}:Z \
     ${CONTAINER_IMAGE_CLOUD_TOOLS} aws --region $AWS_REGION --output json --color on"
 else
@@ -58,8 +58,8 @@ if ! hash aws; then
 sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_IMAGE_CLOUD_TOOLS}

 AWS_CMD="sudo ${CONTAINER_RUNTIME} run --rm \
-    -e AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
+    -e AWS_ACCESS_KEY_ID=${V2_AWS_ACCESS_KEY_ID} \
-    -e AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
+    -e AWS_SECRET_ACCESS_KEY=${V2_AWS_SECRET_ACCESS_KEY} \
     -v ${TEMPDIR}:${TEMPDIR}:Z \
     -v ${SSH_DATA_DIR}:${SSH_DATA_DIR}:Z \
     ${CONTAINER_IMAGE_CLOUD_TOOLS} aws --region $AWS_REGION --output json --color on"
@@ -129,8 +129,8 @@ tee "$AWS_CONFIG" > /dev/null << EOF
 provider = "aws"

 [settings]
-accessKeyID = "${AWS_ACCESS_KEY_ID}"
+accessKeyID = "${V2_AWS_ACCESS_KEY_ID}"
-secretAccessKey = "${AWS_SECRET_ACCESS_KEY}"
+secretAccessKey = "${V2_AWS_SECRET_ACCESS_KEY}"
 bucket = "${AWS_BUCKET}"
 region = "${AWS_REGION}"
 key = "${IMAGE_KEY}"
@@ -233,8 +233,8 @@ export TF_VAR_TEST_ID="$TEST_ID"
 # https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/image#argument-reference
 export TF_VAR_HYPER_V_GEN="${HYPER_V_GEN}"
 export BLOB_URL="https://$AZURE_STORAGE_ACCOUNT.blob.core.windows.net/$AZURE_CONTAINER_NAME/$IMAGE_KEY.vhd"
-export ARM_CLIENT_ID="$AZURE_CLIENT_ID" > /dev/null
+export ARM_CLIENT_ID="$V2_AZURE_CLIENT_ID" > /dev/null
-export ARM_CLIENT_SECRET="$AZURE_CLIENT_SECRET" > /dev/null
+export ARM_CLIENT_SECRET="$V2_AZURE_CLIENT_SECRET" > /dev/null
 export ARM_SUBSCRIPTION_ID="$AZURE_SUBSCRIPTION_ID" > /dev/null
 export ARM_TENANT_ID="$AZURE_TENANT_ID" > /dev/null
@@ -191,7 +191,7 @@ wait_for_ssh_up () {
 clean_up () {
 greenprint "🧼 Cleaning up"
 # Remove tag from quay.io repo
-skopeo delete --creds "${QUAY_USERNAME}:${QUAY_PASSWORD}" "${QUAY_REPO_URL}:${QUAY_REPO_TAG}"
+skopeo delete --creds "${V2_QUAY_USERNAME}:${V2_QUAY_PASSWORD}" "${QUAY_REPO_URL}:${QUAY_REPO_TAG}"

 # Clear vm
 if [[ $(sudo virsh domstate "${IMAGE_KEY}-uefi") == "running" ]]; then
@@ -304,9 +304,9 @@ sudo podman rmi -f -a
 # Deal with stage repo image
 greenprint "🗜 Pushing image to quay.io"
 IMAGE_FILENAME="${COMPOSE_ID}-${CONTAINER_FILENAME}"
-skopeo copy --dest-creds "${QUAY_USERNAME}:${QUAY_PASSWORD}" "oci-archive:${IMAGE_FILENAME}" "${QUAY_REPO_URL}:${QUAY_REPO_TAG}"
+skopeo copy --dest-creds "${V2_QUAY_USERNAME}:${V2_QUAY_PASSWORD}" "oci-archive:${IMAGE_FILENAME}" "${QUAY_REPO_URL}:${QUAY_REPO_TAG}"
 greenprint "Downloading image from quay.io"
-sudo podman login quay.io --username "${QUAY_USERNAME}" --password "${QUAY_PASSWORD}"
+sudo podman login quay.io --username "${V2_QUAY_USERNAME}" --password "${V2_QUAY_PASSWORD}"
 sudo podman pull "${QUAY_REPO_URL}:${QUAY_REPO_TAG}"
 sudo podman images
 greenprint "🗜 Running the image"
@@ -215,7 +215,7 @@ wait_for_ssh_up () {
 clean_up () {
 greenprint "🧼 Cleaning up"
 # Remove tag from quay.io repo
-skopeo delete --creds "${QUAY_USERNAME}:${QUAY_PASSWORD}" "${QUAY_REPO_URL}:${QUAY_REPO_TAG}"
+skopeo delete --creds "${V2_QUAY_USERNAME}:${V2_QUAY_PASSWORD}" "${QUAY_REPO_URL}:${QUAY_REPO_TAG}"

 # Clear vm
 if [[ $(sudo virsh domstate "${IMAGE_KEY}-uefi") == "running" ]]; then
@@ -329,7 +329,7 @@ sudo podman rmi -f -a
 # Deal with stage repo image
 greenprint "🗜 Pushing image to quay.io"
 IMAGE_FILENAME="${COMPOSE_ID}-${CONTAINER_FILENAME}"
-skopeo copy --dest-creds "${QUAY_USERNAME}:${QUAY_PASSWORD}" "oci-archive:${IMAGE_FILENAME}" "${QUAY_REPO_URL}:${QUAY_REPO_TAG}"
+skopeo copy --dest-creds "${V2_QUAY_USERNAME}:${V2_QUAY_PASSWORD}" "oci-archive:${IMAGE_FILENAME}" "${QUAY_REPO_URL}:${QUAY_REPO_TAG}"
 # Clear image file
 sudo rm -f "$IMAGE_FILENAME"
@@ -44,7 +44,7 @@ rpm -q "$WORKER_RPM"
 WELDR_DIR="$(mktemp -d)"
 WELDR_SOCK="$WELDR_DIR/api.socket"

-sudo podman pull --creds "${QUAY_USERNAME}":"${QUAY_PASSWORD}" \
+sudo podman pull --creds "${V2_QUAY_USERNAME}":"${V2_QUAY_PASSWORD}" \
 "quay.io/osbuild/osbuild-composer-ubi-pr:${CI_COMMIT_SHA}"

 # The host entitlement doesn't get picked up by composer
@@ -45,13 +45,13 @@ if [ -n "$GOOGLE_APPLICATION_CREDENTIALS" ]; then
 fi

 # if Azure credentials are defined in the env, create the credentials file
-AZURE_CLIENT_ID="${AZURE_CLIENT_ID:-}"
+V2_AZURE_CLIENT_ID="${V2_AZURE_CLIENT_ID:-}"
-AZURE_CLIENT_SECRET="${AZURE_CLIENT_SECRET:-}"
+V2_AZURE_CLIENT_SECRET="${V2_AZURE_CLIENT_SECRET:-}"
-if [[ -n "$AZURE_CLIENT_ID" && -n "$AZURE_CLIENT_SECRET" ]]; then
+if [[ -n "$V2_AZURE_CLIENT_ID" && -n "$V2_AZURE_CLIENT_SECRET" ]]; then
 set +x
 sudo tee /etc/osbuild-worker/azure-credentials.toml > /dev/null << EOF
-client_id = "$AZURE_CLIENT_ID"
+client_id = "$V2_AZURE_CLIENT_ID"
-client_secret = "$AZURE_CLIENT_SECRET"
+client_secret = "$V2_AZURE_CLIENT_SECRET"
 EOF
 sudo tee -a /etc/osbuild-worker/osbuild-worker.toml > /dev/null << EOF
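The worker's side of this handoff is not part of the diff; as a hedged sketch only, a Go program could read the azure-credentials.toml written above like this. The use of github.com/BurntSushi/toml and the struct layout are assumptions for illustration, not the actual osbuild-worker loader; the client_id/client_secret keys match the file generated by the script.

package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// azureCredentials maps the two keys written into azure-credentials.toml.
type azureCredentials struct {
	ClientID     string `toml:"client_id"`
	ClientSecret string `toml:"client_secret"`
}

func main() {
	var creds azureCredentials
	if _, err := toml.DecodeFile("/etc/osbuild-worker/azure-credentials.toml", &creds); err != nil {
		log.Fatalf("cannot read azure credentials: %v", err)
	}
	// Do not print the secret; only confirm which client ID was loaded.
	fmt.Println("loaded Azure client:", creds.ClientID)
}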