Upload to HTTPS S3 - Support self-signed certificates
API
---
Allow the user to pass the CA public certificate or skip the verification.

AWSCloud
--------
Restore the old version of newAwsFromCreds for access to AWS.
Create a new method newAwsFromCredsWithEndpoint for Generic S3 which sets the endpoint and optionally overrides the CA bundle or skips the SSL certificate verification.

jobimpl-osbuild
---------------
Update with the new parameters.

osbuild-upload-generic-s3
-------------------------
Add ca-bundle and skip-ssl-verification flags.

tests
-----
Split the tests into HTTP, HTTPS with a certificate, and HTTPS skipping the certificate check.
Create a new base test for S3 over HTTPS, covering both secure and insecure modes.
Move the generic S3 test to tools so it can be reused for secure and insecure connections.
All S3 tests now use the aws CLI tool.
Update the libvirt test to be able to download over HTTPS.
Update the RPM spec.
Kill the container with sudo.
This commit is contained in:
parent cd49c932a2
commit 8407c97d96
15 changed files with 331 additions and 38 deletions
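
The heart of the API change is the extended awscloud.NewForEndpoint signature visible in the hunks below. A minimal sketch of a caller, assuming the repository's internal import path and the Upload method shown later in this diff; the endpoint, credentials, and paths are illustrative:

package main

import (
	"log"

	"github.com/osbuild/osbuild-composer/internal/cloud/awscloud"
)

func main() {
	// Pass a CA bundle path to trust a self-signed server certificate,
	// or set skipSSLVerification to true to disable checks entirely.
	a, err := awscloud.NewForEndpoint(
		"https://localhost:9000", // endpoint
		"us-east-1",              // region
		"ACCESS_KEY", "SECRET_KEY",
		"",                   // sessionToken is optional
		"/etc/pki/s3/ca.crt", // caBundle ("" uses the system roots)
		false,                // skipSSLVerification
	)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := a.Upload("disk.qcow2", "ci-test", "disk.qcow2"); err != nil {
		log.Fatal(err)
	}
}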
@@ -510,10 +510,20 @@ libvirt.sh:
   variables:
     SCRIPT: libvirt.sh
 
-generic_s3.sh:
+generic_s3_http.sh:
   extends: .libvirt_integration
   variables:
-    SCRIPT: generic_s3.sh
+    SCRIPT: generic_s3_http.sh
+
+generic_s3_https_secure.sh:
+  extends: .libvirt_integration
+  variables:
+    SCRIPT: generic_s3_https_secure.sh
+
+generic_s3_https_insecure.sh:
+  extends: .libvirt_integration
+  variables:
+    SCRIPT: generic_s3_https_insecure.sh
 
 aws_s3.sh:
   extends: .libvirt_integration
@@ -15,6 +15,8 @@ func main() {
 	var sessionToken string
 	var region string
 	var endpoint string
+	var caBundle string
+	var skipSSLVerification bool
 	var bucketName string
 	var keyName string
 	var filename string
@@ -23,12 +25,14 @@ func main() {
 	flag.StringVar(&sessionToken, "session-token", "", "session token")
 	flag.StringVar(&region, "region", "", "target region")
 	flag.StringVar(&endpoint, "endpoint", "", "target endpoint")
+	flag.StringVar(&caBundle, "ca-bundle", "", "path to CA bundle for the S3 server")
+	flag.BoolVar(&skipSSLVerification, "skip-ssl-verification", false, "Skip the verification of the server SSL certificate")
 	flag.StringVar(&bucketName, "bucket", "", "target S3 bucket name")
 	flag.StringVar(&keyName, "key", "", "target S3 key name")
 	flag.StringVar(&filename, "image", "", "image file to upload")
 	flag.Parse()
 
-	a, err := awscloud.NewForEndpoint(endpoint, region, accessKeyID, secretAccessKey, sessionToken)
+	a, err := awscloud.NewForEndpoint(endpoint, region, accessKeyID, secretAccessKey, sessionToken, caBundle, skipSSLVerification)
 	if err != nil {
 		println(err.Error())
 		return
@@ -53,12 +53,12 @@ func (impl *OSBuildJobImpl) getAWS(region string, accessId string, secret string
 	}
 }
 
-func (impl *OSBuildJobImpl) getAWSForEndpoint(endpoint, region, accessId, secret, token string) (*awscloud.AWS, error) {
-	if accessId != "" && secret != "" {
-		return awscloud.NewForEndpoint(endpoint, region, accessId, secret, token)
+func (impl *OSBuildJobImpl) getAWSForEndpoint(options *target.GenericS3TargetOptions) (*awscloud.AWS, error) {
+	if options.AccessKeyID != "" && options.SecretAccessKey != "" {
+		return awscloud.NewForEndpoint(options.Endpoint, options.Region, options.AccessKeyID, options.SecretAccessKey, options.SessionToken, options.CABundle, options.SkipSSLVerification)
 	}
 	if impl.GenericS3Creds != "" {
-		return awscloud.NewForEndpointFromFile(impl.GenericS3Creds, endpoint, region)
+		return awscloud.NewForEndpointFromFile(impl.GenericS3Creds, options.Endpoint, options.Region, options.CABundle, options.SkipSSLVerification)
 	}
 	return nil, fmt.Errorf("no credentials found")
 }
@@ -437,7 +437,7 @@ func (impl *OSBuildJobImpl) Run(job worker.Job) error {
 			return nil
 		}
 	case *target.GenericS3TargetOptions:
-		a, err := impl.getAWSForEndpoint(options.Endpoint, options.Region, options.AccessKeyID, options.SecretAccessKey, options.SessionToken)
+		a, err := impl.getAWSForEndpoint(options)
 		if err != nil {
 			osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidConfig, err.Error())
 			return nil
@@ -1,7 +1,9 @@
 package awscloud
 
 import (
+	"crypto/tls"
 	"fmt"
+	"net/http"
 	"os"
 	"time"
@@ -22,14 +24,11 @@ type AWS struct {
 }
 
 // Create a new session from the credentials and the region and returns an *AWS object initialized with it.
-func newAwsFromCreds(creds *credentials.Credentials, region string, endpoint *string) (*AWS, error) {
+func newAwsFromCreds(creds *credentials.Credentials, region string) (*AWS, error) {
 	// Create a Session with a custom region
-	s3ForcePathStyle := endpoint != nil
 	sess, err := session.NewSession(&aws.Config{
-		Credentials:      creds,
-		Region:           aws.String(region),
-		Endpoint:         endpoint,
-		S3ForcePathStyle: &s3ForcePathStyle,
+		Credentials: creds,
+		Region:      aws.String(region),
 	})
 	if err != nil {
 		return nil, err
@@ -44,7 +43,7 @@ func newAwsFromCreds(creds *credentials.Credentials, region string, endpoint *st
 
 // Initialize a new AWS object from individual bits. SessionToken is optional
 func New(region string, accessKeyID string, accessKey string, sessionToken string) (*AWS, error) {
-	return newAwsFromCreds(credentials.NewStaticCredentials(accessKeyID, accessKey, sessionToken), region, nil)
+	return newAwsFromCreds(credentials.NewStaticCredentials(accessKeyID, accessKey, sessionToken), region)
 }
 
 // Initializes a new AWS object with the credentials info found at filename's location.
@@ -57,18 +56,60 @@ func New(region string, accessKeyID string, accessKey string, sessionToken strin
 // "AWS_SHARED_CREDENTIALS_FILE" env variable or will default to
 // $HOME/.aws/credentials.
 func NewFromFile(filename string, region string) (*AWS, error) {
-	return newAwsFromCreds(credentials.NewSharedCredentials(filename, "default"), region, nil)
+	return newAwsFromCreds(credentials.NewSharedCredentials(filename, "default"), region)
 }
 
 // Initialize a new AWS object from defaults.
 // Looks for env variables, shared credential file, and EC2 Instance Roles.
 func NewDefault(region string) (*AWS, error) {
-	return newAwsFromCreds(nil, region, nil)
+	return newAwsFromCreds(nil, region)
 }
 
+// Create a new session from the credentials and the region and returns an *AWS object initialized with it.
+func newAwsFromCredsWithEndpoint(creds *credentials.Credentials, region, endpoint, caBundle string, skipSSLVerification bool) (*AWS, error) {
+	// Create a Session with a custom region
+	s3ForcePathStyle := true
+	sessionOptions := session.Options{
+		Config: aws.Config{
+			Credentials:      creds,
+			Region:           aws.String(region),
+			Endpoint:         &endpoint,
+			S3ForcePathStyle: &s3ForcePathStyle,
+		},
+	}
+
+	if caBundle != "" {
+		caBundleReader, err := os.Open(caBundle)
+		if err != nil {
+			return nil, err
+		}
+		defer caBundleReader.Close()
+		sessionOptions.CustomCABundle = caBundleReader
+	}
+
+	if skipSSLVerification {
+		transport := http.DefaultTransport.(*http.Transport).Clone()
+		transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} // #nosec G402
+		sessionOptions.Config.HTTPClient = &http.Client{
+			Transport: transport,
+		}
+	}
+
+	sess, err := session.NewSessionWithOptions(sessionOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	return &AWS{
+		uploader: s3manager.NewUploader(sess),
+		ec2:      ec2.New(sess),
+		s3:       s3.New(sess),
+	}, nil
+}
+
 // Initialize a new AWS object targeting a specific endpoint from individual bits. SessionToken is optional
-func NewForEndpoint(endpoint, region string, accessKeyID string, accessKey string, sessionToken string) (*AWS, error) {
-	return newAwsFromCreds(credentials.NewStaticCredentials(accessKeyID, accessKey, sessionToken), region, &endpoint)
+func NewForEndpoint(endpoint, region, accessKeyID, accessKey, sessionToken, caBundle string, skipSSLVerification bool) (*AWS, error) {
+	return newAwsFromCredsWithEndpoint(credentials.NewStaticCredentials(accessKeyID, accessKey, sessionToken), region, endpoint, caBundle, skipSSLVerification)
 }
 
 // Initializes a new AWS object targeting a specific endpoint with the credentials info found at filename's location.
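
The new helper exposes two independent knobs: CustomCABundle extends the trusted CA set, while InsecureSkipVerify disables verification entirely. For reference, a minimal standard-library sketch of what those session options amount to on a plain HTTP client (the function and its names are illustrative, not part of the commit):

package awscloudexample

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net/http"
	"os"
)

// newHTTPClient shows the plain net/http equivalent of the two options
// above: a CA bundle extends the trusted pool, while skipSSLVerification
// disables certificate checks entirely.
func newHTTPClient(caBundle string, skipSSLVerification bool) (*http.Client, error) {
	transport := http.DefaultTransport.(*http.Transport).Clone()
	if skipSSLVerification {
		transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} // #nosec G402
	} else if caBundle != "" {
		pemBytes, err := os.ReadFile(caBundle)
		if err != nil {
			return nil, err
		}
		pool := x509.NewCertPool()
		if !pool.AppendCertsFromPEM(pemBytes) {
			return nil, fmt.Errorf("no certificates found in %s", caBundle)
		}
		transport.TLSClientConfig = &tls.Config{RootCAs: pool}
	}
	return &http.Client{Transport: transport}, nil
}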
@@ -80,8 +121,8 @@ func NewForEndpoint(endpoint, region string, accessKeyID string, accessKey strin
 // If filename is empty the underlying function will look for the
 // "AWS_SHARED_CREDENTIALS_FILE" env variable or will default to
 // $HOME/.aws/credentials.
-func NewForEndpointFromFile(filename string, endpoint, region string) (*AWS, error) {
-	return newAwsFromCreds(credentials.NewSharedCredentials(filename, "default"), region, &endpoint)
+func NewForEndpointFromFile(filename, endpoint, region, caBundle string, skipSSLVerification bool) (*AWS, error) {
+	return newAwsFromCredsWithEndpoint(credentials.NewSharedCredentials(filename, "default"), region, endpoint, caBundle, skipSSLVerification)
 }
 
 func (a *AWS) Upload(filename, bucket, key string) (*s3manager.UploadOutput, error) {
@@ -2,7 +2,9 @@ package target
 
 type GenericS3TargetOptions struct {
 	AWSS3TargetOptions
-	Endpoint string `json:"endpoint"`
+	Endpoint            string `json:"endpoint"`
+	CABundle            string `json:"ca_bundle"`
+	SkipSSLVerification bool   `json:"skip_ssl_verification"`
 }
 
 func (GenericS3TargetOptions) isTargetOptions() {}
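
The json tags above fix the wire format of the two new fields. A sketch using a trimmed, hypothetical mirror of the struct (the embedded AWSS3TargetOptions fields are left out for brevity):

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed, hypothetical mirror of target.GenericS3TargetOptions for
// illustration; the real struct embeds AWSS3TargetOptions.
type genericS3Options struct {
	Endpoint            string `json:"endpoint"`
	CABundle            string `json:"ca_bundle"`
	SkipSSLVerification bool   `json:"skip_ssl_verification"`
}

func main() {
	b, _ := json.MarshalIndent(genericS3Options{
		Endpoint: "https://localhost:9000",
		CABundle: "/var/lib/s3-certs/public.crt",
	}, "", "  ")
	fmt.Println(string(b))
	// Output:
	// {
	//   "endpoint": "https://localhost:9000",
	//   "ca_bundle": "/var/lib/s3-certs/public.crt",
	//   "skip_ssl_verification": false
	// }
}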
@@ -96,7 +96,9 @@ func (ociUploadSettings) isUploadSettings() {}
 
 type genericS3UploadSettings struct {
 	awsS3UploadSettings
-	Endpoint string `json:"endpoint"`
+	Endpoint            string `json:"endpoint"`
+	CABundle            string `json:"ca_bundle"`
+	SkipSSLVerification bool   `json:"skip_ssl_verification"`
 }
 
 func (genericS3UploadSettings) isUploadSettings() {}
@@ -234,7 +236,9 @@ func targetsToUploadResponses(targets []*target.Target, state ComposeState) []up
 				Key:    options.Key,
 				// AccessKeyID and SecretAccessKey are intentionally not included.
 			},
-			Endpoint: options.Endpoint,
+			Endpoint:            options.Endpoint,
+			CABundle:            options.CABundle,
+			SkipSSLVerification: options.SkipSSLVerification,
 		}
 		uploads = append(uploads, upload)
 	}
@@ -345,7 +349,9 @@ func uploadRequestToTarget(u uploadRequest, imageType distro.ImageType) *target.
 			Bucket: options.Bucket,
 			Key:    options.Key,
 		},
-		Endpoint: options.Endpoint,
+		Endpoint:            options.Endpoint,
+		CABundle:            options.CABundle,
+		SkipSSLVerification: options.SkipSSLVerification,
 	}
 }
 
@@ -229,6 +229,8 @@ install -m 0755 -vp tools/koji-compose.py %{buildroot}%
 install -m 0755 -vp tools/koji-compose-v2.py %{buildroot}%{_libexecdir}/osbuild-composer-test/
 install -m 0755 -vp tools/libvirt_test.sh %{buildroot}%{_libexecdir}/osbuild-composer-test/
 install -m 0755 -vp tools/s3_test.sh %{buildroot}%{_libexecdir}/osbuild-composer-test/
+install -m 0755 -vp tools/generic_s3_test.sh %{buildroot}%{_libexecdir}/osbuild-composer-test/
+install -m 0755 -vp tools/generic_s3_https_test.sh %{buildroot}%{_libexecdir}/osbuild-composer-test/
 install -m 0755 -vp tools/set-env-variables.sh %{buildroot}%{_libexecdir}/osbuild-composer-test/
 install -m 0755 -vp tools/test-case-generators/generate-test-cases %{buildroot}%{_libexecdir}/osbuild-composer-test/
 install -m 0755 -vd %{buildroot}%{_libexecdir}/tests/osbuild-composer
@@ -82,4 +82,4 @@ EOF
 
 IMAGE_OBJECT_KEY="${AWS_BUCKET}/${TEST_ID}-disk.qcow2"
 
-/usr/libexec/osbuild-composer-test/s3_test.sh "${TEST_ID}" "${AWS_S3_PROVIDER_CONFIG}" "${AWS_CMD} s3 ls ${IMAGE_OBJECT_KEY}" "${AWS_CMD} s3 presign ${IMAGE_OBJECT_KEY}" "${AWS_CMD} s3 rm s3://${IMAGE_OBJECT_KEY}"
+/usr/libexec/osbuild-composer-test/s3_test.sh "${TEST_ID}" "${AWS_S3_PROVIDER_CONFIG}" "${AWS_CMD} s3" "${IMAGE_OBJECT_KEY}"
test/cases/generic_s3_http.sh (new file, 5 lines)
@@ -0,0 +1,5 @@
+#!/bin/bash
+set -euo pipefail
+
+# Test upload to HTTP S3 server
+/usr/libexec/osbuild-composer-test/generic_s3_test.sh
test/cases/generic_s3_https_insecure.sh (new file, 5 lines)
@@ -0,0 +1,5 @@
+#!/bin/bash
+set -euo pipefail
+
+# Test upload to HTTPS S3 server without verifying the SSL certificate
+/usr/libexec/osbuild-composer-test/generic_s3_https_test.sh "no"
test/cases/generic_s3_https_secure.sh (new file, 5 lines)
@@ -0,0 +1,5 @@
+#!/bin/bash
+set -euo pipefail
+
+# Test upload to HTTPS S3 server, verifying the SSL certificate
+/usr/libexec/osbuild-composer-test/generic_s3_https_test.sh "yes"
tools/generic_s3_https_test.sh (new file, 32 lines)
@@ -0,0 +1,32 @@
+#!/bin/bash
+set -euo pipefail
+
+SECURE=${1}
+
+if [[ ${SECURE} == "yes" ]]; then
+    CA_CERT_NAME="public.crt"
+fi
+
+CERTGEN_VERSION="v1.2.0"
+
+TEMPDIR=$(mktemp -d)
+
+CERTS_DIR=/var/lib/s3-certs
+sudo rm -rf "${CERTS_DIR}" || true
+sudo mkdir "${CERTS_DIR}"
+
+function cleanup() {
+    sudo rm -rf "$TEMPDIR" || true
+    sudo rm -rf "$CERTS_DIR" || true
+}
+trap cleanup EXIT
+
+pushd "${TEMPDIR}"
+curl -L -o certgen "https://github.com/minio/certgen/releases/download/${CERTGEN_VERSION}/certgen-linux-amd64"
+chmod +x certgen
+./certgen -host localhost
+sudo mv private.key public.crt "${CERTS_DIR}"
+popd
+
+# Test upload to HTTPS S3 server
+/usr/libexec/osbuild-composer-test/generic_s3_test.sh "${CERTS_DIR}" ${CA_CERT_NAME:-""}
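
certgen writes a self-signed key pair (private.key and public.crt) for the given host. For reference, Go's standard library can produce an equivalent pair; this is a rough sketch, not a drop-in replacement for certgen:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"log"
	"math/big"
	"os"
	"time"
)

func main() {
	// Generate a key and a self-signed certificate valid for "localhost",
	// roughly what `certgen -host localhost` does.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	tmpl := x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "localhost"},
		DNSNames:              []string{"localhost"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(365 * 24 * time.Hour),
		KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		BasicConstraintsValid: true,
		IsCA:                  true, // a self-signed cert doubles as its own CA
	}
	der, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &key.PublicKey, key)
	if err != nil {
		log.Fatal(err)
	}
	keyDER, err := x509.MarshalECPrivateKey(key)
	if err != nil {
		log.Fatal(err)
	}
	// Write the same file names the test expects from certgen.
	certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})
	keyPEM := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: keyDER})
	if err := os.WriteFile("public.crt", certPEM, 0644); err != nil {
		log.Fatal(err)
	}
	if err := os.WriteFile("private.key", keyPEM, 0600); err != nil {
		log.Fatal(err)
	}
}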
tools/generic_s3_test.sh (new executable file, 173 lines)
@@ -0,0 +1,173 @@
+#!/bin/bash
+
+source /usr/libexec/osbuild-composer-test/set-env-variables.sh
+
+CERTS_DIR=${1:-""}
+CA_BUNDLE_FILENAME=${2:-""}
+
+ENDPOINT_SCHEME="http"
+if [ -n "${CERTS_DIR}" ]; then
+    ENDPOINT_SCHEME="https"
+fi
+
+CA_BUNDLE_PATH=""
+if [ -n "${CERTS_DIR}" ]; then
+    if [ -n "${CA_BUNDLE_FILENAME}" ]; then
+        CA_BUNDLE_PATH=$CERTS_DIR/$CA_BUNDLE_FILENAME
+    else
+        CA_BUNDLE_PATH="skip"
+    fi
+fi
+
+if [ "${NIGHTLY:=false}" == "true" ]; then
+    case "${ID}-${VERSION_ID}" in
+        "rhel-8.6" | "rhel-9.0")
+            echo "$0 is not enabled for ${ID}-${VERSION_ID} skipping..."
+            exit 0
+            ;;
+        *)
+            ;;
+    esac
+fi
+
+set -euo pipefail
+
+# Container images for MinIO Server
+CONTAINER_MINIO_SERVER="quay.io/minio/minio:latest"
+# Container image used for cloud provider CLI tools
+CONTAINER_IMAGE_CLOUD_TOOLS="quay.io/osbuild/cloud-tools:latest"
+
+# Provision the software under test.
+/usr/libexec/osbuild-composer-test/provision.sh
+
+# Check available container runtime
+if which podman 2>/dev/null >&2; then
+    CONTAINER_RUNTIME=podman
+elif which docker 2>/dev/null >&2; then
+    CONTAINER_RUNTIME=docker
+else
+    echo No container runtime found, install podman or docker.
+    exit 2
+fi
+
+TEMPDIR=$(mktemp -d)
+function cleanup() {
+    greenprint "== Script execution stopped or finished - Cleaning up =="
+    sudo rm -rf "$TEMPDIR"
+}
+trap cleanup EXIT
+
+# Generate a string, which can be used as a predictable resource name,
+# especially when running the test in CI where we may need to clean up
+# resources in case the test unexpectedly fails or is canceled
+CI="${CI:-false}"
+if [[ "$CI" == true ]]; then
+    # in CI, imitate GenerateCIArtifactName() from internal/test/helpers.go
+    TEST_ID="$DISTRO_CODE-$ARCH-$CI_COMMIT_BRANCH-$CI_BUILD_ID"
+else
+    # if not running in Jenkins, generate ID not relying on specific env variables
+    TEST_ID=$(uuidgen);
+fi
+
+# Set up temporary files.
+MINIO_PROVIDER_CONFIG=${TEMPDIR}/minio.toml
+MINIO_ENDPOINT="$ENDPOINT_SCHEME://localhost:9000"
+MINIO_ROOT_USER="X29DU5Q6C5NKDQ8PLGVT"
+MINIO_ROOT_PASSWORD=$(date +%s | sha256sum | base64 | head -c 32 ; echo)
+MINIO_BUCKET="ci-test"
+MINIO_REGION="us-east-1"
+
+# We need awscli to talk to the S3 Server.
+if ! hash aws; then
+    echo "Using 'awscli' from a container"
+    sudo ${CONTAINER_RUNTIME} pull "${CONTAINER_IMAGE_CLOUD_TOOLS}"
+
+    AWS_CMD="sudo ${CONTAINER_RUNTIME} run --rm \
+        --network=host \
+        -e AWS_ACCESS_KEY_ID=${MINIO_ROOT_USER} \
+        -e AWS_SECRET_ACCESS_KEY=${MINIO_ROOT_PASSWORD}"
+
+    if [ -n "${CA_BUNDLE_PATH}" ] && [ "${CA_BUNDLE_PATH}" != "skip" ]; then
+        AWS_CMD="${AWS_CMD} -v ${CA_BUNDLE_PATH}:${CA_BUNDLE_PATH}:z"
+    fi
+
+    AWS_CMD="${AWS_CMD} ${CONTAINER_IMAGE_CLOUD_TOOLS}"
+else
+    echo "Using pre-installed 'aws' from the system"
+fi
+AWS_CMD="${AWS_CMD} aws --region $MINIO_REGION --endpoint-url $MINIO_ENDPOINT"
+if [ -n "${CA_BUNDLE_PATH}" ]; then
+    if [ "${CA_BUNDLE_PATH}" == "skip" ]; then
+        AWS_CMD="${AWS_CMD} --no-verify-ssl"
+    else
+        AWS_CMD="${AWS_CMD} --ca-bundle $CA_BUNDLE_PATH"
+    fi
+fi
+$AWS_CMD --version
+S3_CMD="${AWS_CMD} s3"
+
+# Write an AWS TOML file
+tee "$MINIO_PROVIDER_CONFIG" > /dev/null << EOF
+provider = "generic.s3"
+
+[settings]
+endpoint = "${MINIO_ENDPOINT}"
+accessKeyID = "${MINIO_ROOT_USER}"
+secretAccessKey = "${MINIO_ROOT_PASSWORD}"
+bucket = "${MINIO_BUCKET}"
+region = "${MINIO_REGION}"
+key = "${TEST_ID}"
+EOF
+if [ -n "${CA_BUNDLE_PATH}" ]; then
+    if [ "${CA_BUNDLE_PATH}" == "skip" ]; then
+        echo "skip_ssl_verification = true" >> "$MINIO_PROVIDER_CONFIG"
+    else
+        echo "ca_bundle = \"${CA_BUNDLE_PATH}\"" >> "$MINIO_PROVIDER_CONFIG"
+    fi
+fi
+
+# Start the MinIO Server
+MINIO_CONTAINER_NAME="minio-server"
+if [ -z "${CERTS_DIR}" ]; then
+    sudo ${CONTAINER_RUNTIME} run --rm -d \
+        --name ${MINIO_CONTAINER_NAME} \
+        -p 9000:9000 \
+        -e MINIO_BROWSER=off \
+        -e MINIO_ROOT_USER="${MINIO_ROOT_USER}" \
+        -e MINIO_ROOT_PASSWORD="${MINIO_ROOT_PASSWORD}" \
+        ${CONTAINER_MINIO_SERVER} server /data
+else
+    sudo ${CONTAINER_RUNTIME} run --rm -d \
+        --name ${MINIO_CONTAINER_NAME} \
+        -p 9000:9000 \
+        -e MINIO_BROWSER=off \
+        -e MINIO_ROOT_USER="${MINIO_ROOT_USER}" \
+        -e MINIO_ROOT_PASSWORD="${MINIO_ROOT_PASSWORD}" \
+        -v "${CERTS_DIR}":/root/.minio/certs:z \
+        ${CONTAINER_MINIO_SERVER} server /data
+fi
+# Kill the server once we're done
+trap 'sudo ${CONTAINER_RUNTIME} kill ${MINIO_CONTAINER_NAME}' EXIT
+
+# Configure the local server (retry until the service is up)
+MINIO_CONFIGURE_RETRY=0
+MINIO_CONFIGURE_MAX_RETRY=5
+MINIO_RETRY_INTERVAL=15
+until [ "${MINIO_CONFIGURE_RETRY}" -ge "${MINIO_CONFIGURE_MAX_RETRY}" ]
+do
+    ${S3_CMD} ls && break
+    MINIO_CONFIGURE_RETRY=$((MINIO_CONFIGURE_RETRY + 1))
+    echo "Retrying [${MINIO_CONFIGURE_RETRY}/${MINIO_CONFIGURE_MAX_RETRY}] in ${MINIO_RETRY_INTERVAL}(s) "
+    sleep ${MINIO_RETRY_INTERVAL}
+done
+
+if [ "${MINIO_CONFIGURE_RETRY}" -ge "${MINIO_CONFIGURE_MAX_RETRY}" ]; then
+    echo "Failed to communicate with the MinIO server after ${MINIO_CONFIGURE_MAX_RETRY} attempts!"
+    exit 1
+fi
+
+# Create the bucket
+${S3_CMD} mb s3://${MINIO_BUCKET}
+
+IMAGE_OBJECT_KEY="${MINIO_BUCKET}/${TEST_ID}-disk.qcow2"
+/usr/libexec/osbuild-composer-test/s3_test.sh "${TEST_ID}" "${MINIO_PROVIDER_CONFIG}" "${S3_CMD}" "${IMAGE_OBJECT_KEY}" "${CA_BUNDLE_PATH}"
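
The heredoc above is the provider configuration the worker consumes. A sketch of reading it back in Go, assuming the github.com/BurntSushi/toml package and a hypothetical mirror struct whose tags follow the keys in the heredoc:

package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// Hypothetical mirror of the generic.s3 provider config written by the
// test above, including the two new TLS-related settings.
type providerConfig struct {
	Provider string `toml:"provider"`
	Settings struct {
		Endpoint            string `toml:"endpoint"`
		AccessKeyID         string `toml:"accessKeyID"`
		SecretAccessKey     string `toml:"secretAccessKey"`
		Bucket              string `toml:"bucket"`
		Region              string `toml:"region"`
		Key                 string `toml:"key"`
		CABundle            string `toml:"ca_bundle"`
		SkipSSLVerification bool   `toml:"skip_ssl_verification"`
	} `toml:"settings"`
}

func main() {
	var cfg providerConfig
	if _, err := toml.DecodeFile("minio.toml", &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s endpoint=%s skip_ssl_verification=%v\n",
		cfg.Provider, cfg.Settings.Endpoint, cfg.Settings.SkipSSLVerification)
}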
@@ -2,7 +2,7 @@
 set -euo pipefail
 
 #
-# tests that guest images are buildable using composer-cli and and verifies
+# tests that guest images are buildable using composer-cli and verifies
 # they boot with cloud-init using libvirt
 #
@@ -19,6 +19,8 @@ IMAGE_TYPE=${1:-qcow2}
 BOOT_TYPE=${2:-bios}
 # Take the image from the url passed to the script or build it by default if nothing
 LIBVIRT_IMAGE_URL=${3:-""}
+# When downloading the image, if provided, use this CA bundle, or skip verification
+LIBVIRT_IMAGE_URL_CA_BUNDLE=${4:-""}
 
 # Select the file extension based on the image that we are building.
 IMAGE_EXTENSION=$IMAGE_TYPE
@@ -214,7 +216,15 @@ EOF
 else
     pushd "${BIG_TEMP_DIR}"
     LIBVIRT_IMAGE_PATH=/var/lib/libvirt/images/${IMAGE_KEY}.${IMAGE_EXTENSION}
-    sudo curl -o "${LIBVIRT_IMAGE_PATH}" "${LIBVIRT_IMAGE_URL}"
+    if [ -n "${LIBVIRT_IMAGE_URL_CA_BUNDLE}" ]; then
+        if [ "${LIBVIRT_IMAGE_URL_CA_BUNDLE}" == "skip" ]; then
+            sudo curl -o "${LIBVIRT_IMAGE_PATH}" -k "${LIBVIRT_IMAGE_URL}"
+        else
+            sudo curl -o "${LIBVIRT_IMAGE_PATH}" --cacert "${LIBVIRT_IMAGE_URL_CA_BUNDLE}" "${LIBVIRT_IMAGE_URL}"
+        fi
+    else
+        sudo curl -o "${LIBVIRT_IMAGE_PATH}" "${LIBVIRT_IMAGE_URL}"
+    fi
     popd
 fi
 
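
The three curl variants above reduce to one client decision: trust a specific CA (--cacert), skip verification (-k), or use the system roots. A standard-library sketch of the same download step, assuming a client built with the newHTTPClient helper sketched earlier; downloadImage and its arguments are illustrative:

package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

// downloadImage mirrors the curl fallback above: the caller passes an
// *http.Client already configured for the desired trust model (custom
// CA bundle, skipped verification, or system roots).
func downloadImage(client *http.Client, url, dest string) error {
	resp, err := client.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %s for %s", resp.Status, url)
	}

	out, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer out.Close()
	_, err = io.Copy(out, resp.Body)
	return err
}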
@@ -5,9 +5,9 @@ source /usr/libexec/osbuild-composer-test/set-env-variables.sh
 
 TEST_ID=${1}
 S3_PROVIDER_CONFIG_FILE=${2}
-S3_CHECK_CMD=${3}
-S3_GET_URL_CMD=${4}
-S3_DELETE_CMD=${5:-""}
+S3_CMD=${3}
+IMAGE_OBJECT_KEY=${4}
+S3_CA_BUNDLE=${5:-""}
 
 # Colorful output.
 function greenprint {
@@ -126,20 +126,18 @@ sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
 
 # Find the image that we made in the AWS Bucket
 greenprint "🔍 Search for created image"
-if ! bash -c "${S3_CHECK_CMD}"; then
+if ! bash -c "${S3_CMD} ls ${IMAGE_OBJECT_KEY}"; then
     echo "Failed to find the image in the S3 Bucket"
     exit 1
 fi
 
 function removeImageFromS3() {
-    bash -c "${S3_DELETE_CMD}"
+    bash -c "${S3_CMD} rm s3://${IMAGE_OBJECT_KEY}"
 }
-if [ -n "${S3_DELETE_CMD}" ]; then
-    trap removeImageFromS3 EXIT
-fi
+trap removeImageFromS3 EXIT
 
 # Generate a URL for the image
-QCOW2_IMAGE_URL=$(bash -c "${S3_GET_URL_CMD}")
+QCOW2_IMAGE_URL=$(bash -c "${S3_CMD} presign ${IMAGE_OBJECT_KEY}")
 
 # Run the image on KVM
-/usr/libexec/osbuild-composer-test/libvirt_test.sh qcow2 bios "${QCOW2_IMAGE_URL}"
+/usr/libexec/osbuild-composer-test/libvirt_test.sh qcow2 bios "${QCOW2_IMAGE_URL}" "${S3_CA_BUNDLE}"
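
s3_test.sh now receives a single S3 command prefix and derives the ls, presign, and rm invocations from it. The presign step above has a direct SDK analogue; a sketch with aws-sdk-go v1, where the bucket and key names are illustrative:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")})
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(sess)
	// Build a GetObject request and presign it for 15 minutes, the SDK
	// equivalent of `aws s3 presign <bucket>/<key>`.
	req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String("ci-test"),
		Key:    aws.String("disk.qcow2"),
	})
	url, err := req.Presign(15 * time.Minute)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(url)
}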