diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index a241031b4..7cc9cd86a 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -450,6 +450,7 @@ cross-distro.sh:
       - edge-commit
      - gcp
       - vsphere
+      - edge-commit
 
 generic.s3 API:
   stage: test
diff --git a/cmd/osbuild-worker/jobimpl-osbuild.go b/cmd/osbuild-worker/jobimpl-osbuild.go
index 7b797e11f..71e72755b 100644
--- a/cmd/osbuild-worker/jobimpl-osbuild.go
+++ b/cmd/osbuild-worker/jobimpl-osbuild.go
@@ -29,15 +29,24 @@ import (
 	"github.com/osbuild/osbuild-composer/internal/worker/clienterrors"
 )
 
+type S3Configuration struct {
+	Creds               string
+	Endpoint            string
+	Region              string
+	Bucket              string
+	CABundle            string
+	SkipSSLVerification bool
+}
+
 type OSBuildJobImpl struct {
-	Store          string
-	Output         string
-	KojiServers    map[string]koji.GSSAPICredentials
-	GCPCreds       string
-	AzureCreds     *azure.Credentials
-	AWSCreds       string
-	AWSBucket      string
-	GenericS3Creds string
+	Store       string
+	Output      string
+	KojiServers map[string]koji.GSSAPICredentials
+	GCPCreds    string
+	AzureCreds  *azure.Credentials
+	AWSCreds    string
+	AWSBucket   string
+	S3Config    S3Configuration
 }
 
 // Returns an *awscloud.AWS object with the credentials of the request. If they
@@ -53,16 +62,68 @@ func (impl *OSBuildJobImpl) getAWS(region string, accessId string, secret string
 	}
 }
 
-func (impl *OSBuildJobImpl) getAWSForEndpoint(options *target.GenericS3TargetOptions) (*awscloud.AWS, error) {
+func (impl *OSBuildJobImpl) getAWSForS3TargetFromOptions(options *target.AWSS3TargetOptions) (*awscloud.AWS, error) {
 	if options.AccessKeyID != "" && options.SecretAccessKey != "" {
 		return awscloud.NewForEndpoint(options.Endpoint, options.Region, options.AccessKeyID, options.SecretAccessKey, options.SessionToken, options.CABundle, options.SkipSSLVerification)
 	}
-	if impl.GenericS3Creds != "" {
-		return awscloud.NewForEndpointFromFile(impl.GenericS3Creds, options.Endpoint, options.Region, options.CABundle, options.SkipSSLVerification)
+	if impl.S3Config.Creds != "" {
+		return awscloud.NewForEndpointFromFile(impl.S3Config.Creds, options.Endpoint, options.Region, options.CABundle, options.SkipSSLVerification)
 	}
 	return nil, fmt.Errorf("no credentials found")
 }
 
+func (impl *OSBuildJobImpl) getAWSForS3TargetFromConfig() (*awscloud.AWS, string, error) {
+	err := impl.verifyS3TargetConfiguration()
+	if err != nil {
+		return nil, "", err
+	}
+	aws, err := awscloud.NewForEndpointFromFile(impl.S3Config.Creds, impl.S3Config.Endpoint, impl.S3Config.Region, impl.S3Config.CABundle, impl.S3Config.SkipSSLVerification)
+	return aws, impl.S3Config.Bucket, err
+}
+
+func (impl *OSBuildJobImpl) verifyS3TargetConfiguration() error {
+	if impl.S3Config.Endpoint == "" {
+		return fmt.Errorf("no default endpoint for S3 was set")
+	}
+
+	if impl.S3Config.Region == "" {
+		return fmt.Errorf("no default region for S3 was set")
+	}
+
+	if impl.S3Config.Bucket == "" {
+		return fmt.Errorf("no default bucket for S3 was set")
+	}
+
+	if impl.S3Config.Creds == "" {
+		return fmt.Errorf("no default credentials for S3 were set")
+	}
+
+	return nil
+}
+
+func (impl *OSBuildJobImpl) getAWSForS3Target(options *target.AWSS3TargetOptions) (*awscloud.AWS, string, error) {
+	var aws *awscloud.AWS
+	var err error
+
+	bucket := options.Bucket
+
+	// Endpoint == "" && Region != "" => AWS (Weldr and Composer)
+	if options.Endpoint == "" && options.Region != "" {
+		aws, err = impl.getAWS(options.Region, options.AccessKeyID, options.SecretAccessKey, options.SessionToken)
+		if impl.AWSBucket != "" {
+			bucket = impl.AWSBucket
+		}
+	} else if options.Endpoint != "" && options.Region != "" { // Endpoint != "" && Region != "" => Generic S3 Weldr API
+		aws, err = impl.getAWSForS3TargetFromOptions(options)
+	} else if options.Endpoint == "" && options.Region == "" { // Endpoint == "" && Region == "" => Generic S3 Composer API
+		aws, bucket, err = impl.getAWSForS3TargetFromConfig()
+	} else {
+		err = fmt.Errorf("s3 server configuration is incomplete")
+	}
+
+	return aws, bucket, err
+}
+
 // getGCP returns an *gcp.GCP object using credentials based on the following
 // predefined preference:
 //
@@ -113,7 +174,7 @@ func validateResult(result *worker.OSBuildJobResult, jobID string) {
 	result.Success = true
 }
 
-func uploadToS3(a *awscloud.AWS, outputDirectory, exportPath, bucket, key, filename string, osbuildJobResult *worker.OSBuildJobResult, genericS3 bool, streamOptimized bool, streamOptimizedPath string) (err error) {
+func uploadToS3(a *awscloud.AWS, outputDirectory, exportPath, bucket, key, filename string, osbuildJobResult *worker.OSBuildJobResult, streamOptimized bool, streamOptimizedPath string) (err error) {
 	imagePath := path.Join(outputDirectory, exportPath, filename)
 
 	// TODO: delete the stream-optimized handling after "some" time (kept for backward compatibility)
@@ -159,13 +220,7 @@ func uploadToS3(a *awscloud.AWS, outputDirectory, exportPath, bucket, key, filen
 		return
 	}
 
-	var targetResult *target.TargetResult
-	if genericS3 {
-		targetResult = target.NewGenericS3TargetResult(&target.GenericS3TargetResultOptions{URL: url})
-	} else {
-		targetResult = target.NewAWSS3TargetResult(&target.AWSS3TargetResultOptions{URL: url})
-	}
-	osbuildJobResult.TargetResults = append(osbuildJobResult.TargetResults, targetResult)
+	osbuildJobResult.TargetResults = append(osbuildJobResult.TargetResults, target.NewAWSS3TargetResult(&target.AWSS3TargetResultOptions{URL: url}))
 
 	osbuildJobResult.Success = true
 	osbuildJobResult.UploadStatus = "success"
@@ -421,29 +476,13 @@ func (impl *OSBuildJobImpl) Run(job worker.Job) error {
 			osbuildJobResult.Success = true
 			osbuildJobResult.UploadStatus = "success"
 		case *target.AWSS3TargetOptions:
-			a, err := impl.getAWS(options.Region, options.AccessKeyID, options.SecretAccessKey, options.SessionToken)
+			a, bucket, err := impl.getAWSForS3Target(options)
 			if err != nil {
 				osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidConfig, err.Error())
 				return nil
 			}
 
-			bucket := options.Bucket
-			if impl.AWSBucket != "" {
-				bucket = impl.AWSBucket
-			}
-
-			err = uploadToS3(a, outputDirectory, exportPath, bucket, options.Key, options.Filename, osbuildJobResult, false, args.StreamOptimized, streamOptimizedPath)
-			if err != nil {
-				return nil
-			}
-		case *target.GenericS3TargetOptions:
-			a, err := impl.getAWSForEndpoint(options)
-			if err != nil {
-				osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidConfig, err.Error())
-				return nil
-			}
-
-			err = uploadToS3(a, outputDirectory, exportPath, options.Bucket, options.Key, options.Filename, osbuildJobResult, true, args.StreamOptimized, streamOptimizedPath)
+			err = uploadToS3(a, outputDirectory, exportPath, bucket, options.Key, options.Filename, osbuildJobResult, args.StreamOptimized, streamOptimizedPath)
 			if err != nil {
 				return nil
 			}
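For review convenience, the credential dispatch that getAWSForS3Target introduces can be summarized as a truth table over (Endpoint, Region). The sketch below is illustrative only, not part of the patch; the function name and the returned strings are invented for the example:

    package main

    import "fmt"

    // s3Route mirrors the (Endpoint, Region) dispatch in getAWSForS3Target.
    func s3Route(endpoint, region string) string {
        switch {
        case endpoint == "" && region != "":
            return "AWS proper: getAWS() with the request credentials"
        case endpoint != "" && region != "":
            return "generic S3, Weldr API: endpoint taken from the target options"
        case endpoint == "" && region == "":
            return "generic S3, Composer API: endpoint taken from the worker config"
        default: // endpoint set but no region
            return "error: s3 server configuration is incomplete"
        }
    }

    func main() {
        fmt.Println(s3Route("", "us-east-1"))
        fmt.Println(s3Route("http://localhost:9000", "us-east-1"))
        fmt.Println(s3Route("", ""))
        fmt.Println(s3Route("http://localhost:9000", ""))
    }
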
diff --git a/cmd/osbuild-worker/main.go b/cmd/osbuild-worker/main.go
index be2d528d4..a3e029f90 100644
--- a/cmd/osbuild-worker/main.go
+++ b/cmd/osbuild-worker/main.go
@@ -214,7 +214,12 @@ func main() {
 			Bucket      string `toml:"bucket"`
 		} `toml:"aws"`
 		GenericS3 *struct {
-			Credentials string `toml:"credentials"`
+			Credentials         string `toml:"credentials"`
+			Endpoint            string `toml:"endpoint"`
+			Region              string `toml:"region"`
+			Bucket              string `toml:"bucket"`
+			CABundle            string `toml:"ca_bundle"`
+			SkipSSLVerification bool   `toml:"skip_ssl_verification"`
 		} `toml:"generic_s3"`
 		Authentication *struct {
 			OAuthURL string `toml:"oauth_url"`
@@ -392,8 +397,18 @@ func main() {
 	}
 
 	var genericS3Credentials = ""
+	var genericS3Endpoint = ""
+	var genericS3Region = ""
+	var genericS3Bucket = ""
+	var genericS3CABundle = ""
+	var genericS3SkipSSLVerification = false
 	if config.GenericS3 != nil {
 		genericS3Credentials = config.GenericS3.Credentials
+		genericS3Endpoint = config.GenericS3.Endpoint
+		genericS3Region = config.GenericS3.Region
+		genericS3Bucket = config.GenericS3.Bucket
+		genericS3CABundle = config.GenericS3.CABundle
+		genericS3SkipSSLVerification = config.GenericS3.SkipSSLVerification
 	}
 
 	// depsolve jobs can be done during other jobs
@@ -434,14 +449,21 @@ func main() {
 	// non-depsolve job
 	jobImpls := map[string]JobImplementation{
 		"osbuild": &OSBuildJobImpl{
-			Store:          store,
-			Output:         output,
-			KojiServers:    kojiServers,
-			GCPCreds:       gcpCredentials,
-			AzureCreds:     azureCredentials,
-			AWSCreds:       awsCredentials,
-			AWSBucket:      awsBucket,
-			GenericS3Creds: genericS3Credentials,
+			Store:       store,
+			Output:      output,
+			KojiServers: kojiServers,
+			GCPCreds:    gcpCredentials,
+			AzureCreds:  azureCredentials,
+			AWSCreds:    awsCredentials,
+			AWSBucket:   awsBucket,
+			S3Config: S3Configuration{
+				Creds:               genericS3Credentials,
+				Endpoint:            genericS3Endpoint,
+				Region:              genericS3Region,
+				Bucket:              genericS3Bucket,
+				CABundle:            genericS3CABundle,
+				SkipSSLVerification: genericS3SkipSSLVerification,
+			},
 		},
 		"osbuild-koji": &OSBuildKojiJobImpl{
 			Store: store,
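The new keys land in the worker's TOML config (/etc/osbuild-worker/osbuild-worker.toml). A minimal decoding sketch, assuming the worker keeps using the BurntSushi/toml decoder it already imports; the endpoint, bucket, and credentials path shown are placeholders, not defaults:

    package main

    import (
        "fmt"
        "log"

        "github.com/BurntSushi/toml"
    )

    // Placeholder config exercising the new [generic_s3] keys.
    const workerConfig = `
    [generic_s3]
    credentials = "/etc/osbuild-worker/s3-credentials"
    endpoint = "http://localhost:9000"
    region = "us-east-1"
    bucket = "ci-test"
    ca_bundle = ""
    skip_ssl_verification = false
    `

    type config struct {
        GenericS3 *struct {
            Credentials         string `toml:"credentials"`
            Endpoint            string `toml:"endpoint"`
            Region              string `toml:"region"`
            Bucket              string `toml:"bucket"`
            CABundle            string `toml:"ca_bundle"`
            SkipSSLVerification bool   `toml:"skip_ssl_verification"`
        } `toml:"generic_s3"`
    }

    func main() {
        var c config
        if _, err := toml.Decode(workerConfig, &c); err != nil {
            log.Fatal(err)
        }
        if c.GenericS3 != nil { // the section is optional, exactly as in main()
            fmt.Println(c.GenericS3.Endpoint, c.GenericS3.Bucket)
        }
    }
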
diff --git a/internal/target/aws_target.go b/internal/target/aws_target.go
index 72973882e..756e0f073 100644
--- a/internal/target/aws_target.go
+++ b/internal/target/aws_target.go
@@ -29,13 +29,16 @@ func NewAWSTargetResult(options *AWSTargetResultOptions) *TargetResult {
 }
 
 type AWSS3TargetOptions struct {
-	Filename        string `json:"filename"`
-	Region          string `json:"region"`
-	AccessKeyID     string `json:"accessKeyID"`
-	SecretAccessKey string `json:"secretAccessKey"`
-	SessionToken    string `json:"sessionToken"`
-	Bucket          string `json:"bucket"`
-	Key             string `json:"key"`
+	Filename            string `json:"filename"`
+	Region              string `json:"region"`
+	AccessKeyID         string `json:"accessKeyID"`
+	SecretAccessKey     string `json:"secretAccessKey"`
+	SessionToken        string `json:"sessionToken"`
+	Bucket              string `json:"bucket"`
+	Key                 string `json:"key"`
+	Endpoint            string `json:"endpoint"`
+	CABundle            string `json:"ca_bundle"`
+	SkipSSLVerification bool   `json:"skip_ssl_verification"`
 }
 
 func (AWSS3TargetOptions) isTargetOptions() {}
diff --git a/internal/target/generic_s3_target.go b/internal/target/generic_s3_target.go
deleted file mode 100644
index f95be6293..000000000
--- a/internal/target/generic_s3_target.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package target
-
-type GenericS3TargetOptions struct {
-	AWSS3TargetOptions
-	Endpoint            string `json:"endpoint"`
-	CABundle            string `json:"ca_bundle"`
-	SkipSSLVerification bool   `json:"skip_ssl_verification"`
-}
-
-func (GenericS3TargetOptions) isTargetOptions() {}
-
-func NewGenericS3Target(options *GenericS3TargetOptions) *Target {
-	return newTarget("org.osbuild.generic.s3", options)
-}
-
-type GenericS3TargetResultOptions AWSS3TargetResultOptions
-
-func (GenericS3TargetResultOptions) isTargetResultOptions() {}
-
-func NewGenericS3TargetResult(options *GenericS3TargetResultOptions) *TargetResult {
-	return newTargetResult("org.osbuild.generic.s3", options)
-}
diff --git a/internal/target/target.go b/internal/target/target.go
index ace383c8a..fccc4c696 100644
--- a/internal/target/target.go
+++ b/internal/target/target.go
@@ -83,8 +83,6 @@ func UnmarshalTargetOptions(targetName string, rawOptions json.RawMessage) (Targ
 		options = new(VMWareTargetOptions)
 	case "org.osbuild.oci":
 		options = new(OCITargetOptions)
-	case "org.osbuild.generic.s3":
-		options = new(GenericS3TargetOptions)
 	default:
 		return nil, errors.New("unexpected target name")
 	}
diff --git a/internal/target/targetresult.go b/internal/target/targetresult.go
index e3b29277c..f1f143968 100644
--- a/internal/target/targetresult.go
+++ b/internal/target/targetresult.go
@@ -55,8 +55,6 @@ func UnmarshalTargetResultOptions(trName string, rawOptions json.RawMessage) (Ta
 		options = new(AzureImageTargetResultOptions)
 	case "org.osbuild.oci":
 		options = new(OCITargetResultOptions)
-	case "org.osbuild.generic.s3":
-		options = new(GenericS3TargetResultOptions)
 	default:
 		return nil, fmt.Errorf("unexpected target result name: %s", trName)
 	}
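With GenericS3TargetOptions gone, a payload that previously accompanied an org.osbuild.generic.s3 target decodes into the single unified options type. A self-contained sketch; the struct is a trimmed local copy of the one in aws_target.go above, and the JSON values are invented:

    package main

    import (
        "encoding/json"
        "fmt"
        "log"
    )

    // Trimmed copy of the unified options struct, tags as in aws_target.go.
    type AWSS3TargetOptions struct {
        Filename            string `json:"filename"`
        Region              string `json:"region"`
        Bucket              string `json:"bucket"`
        Key                 string `json:"key"`
        Endpoint            string `json:"endpoint"`
        CABundle            string `json:"ca_bundle"`
        SkipSSLVerification bool   `json:"skip_ssl_verification"`
    }

    func main() {
        // Generic-S3-style options (note the endpoint) now unmarshal into
        // the same type that plain AWS S3 targets use.
        raw := []byte(`{"filename":"disk.qcow2","bucket":"ci-test","key":"obj","endpoint":"http://localhost:9000"}`)
        var opts AWSS3TargetOptions
        if err := json.Unmarshal(raw, &opts); err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%+v\n", opts)
    }
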
diff --git a/internal/weldr/upload.go b/internal/weldr/upload.go
index d5e194554..9faeb54ee 100644
--- a/internal/weldr/upload.go
+++ b/internal/weldr/upload.go
@@ -40,12 +40,15 @@ type awsUploadSettings struct {
 func (awsUploadSettings) isUploadSettings() {}
 
 type awsS3UploadSettings struct {
-	Region          string `json:"region"`
-	AccessKeyID     string `json:"accessKeyID,omitempty"`
-	SecretAccessKey string `json:"secretAccessKey,omitempty"`
-	SessionToken    string `json:"sessionToken,omitempty"`
-	Bucket          string `json:"bucket"`
-	Key             string `json:"key"`
+	Region              string `json:"region"`
+	AccessKeyID         string `json:"accessKeyID,omitempty"`
+	SecretAccessKey     string `json:"secretAccessKey,omitempty"`
+	SessionToken        string `json:"sessionToken,omitempty"`
+	Bucket              string `json:"bucket"`
+	Key                 string `json:"key"`
+	Endpoint            string `json:"endpoint"`
+	CABundle            string `json:"ca_bundle"`
+	SkipSSLVerification bool   `json:"skip_ssl_verification"`
 }
 
 func (awsS3UploadSettings) isUploadSettings() {}
@@ -94,15 +97,6 @@ type ociUploadSettings struct {
 
 func (ociUploadSettings) isUploadSettings() {}
 
-type genericS3UploadSettings struct {
-	awsS3UploadSettings
-	Endpoint            string `json:"endpoint"`
-	CABundle            string `json:"ca_bundle"`
-	SkipSSLVerification bool   `json:"skip_ssl_verification"`
-}
-
-func (genericS3UploadSettings) isUploadSettings() {}
-
 type uploadRequest struct {
 	Provider  string          `json:"provider"`
 	ImageName string          `json:"image_name"`
@@ -137,7 +131,9 @@ func (u *uploadRequest) UnmarshalJSON(data []byte) error {
 	case "oci":
 		settings = new(ociUploadSettings)
 	case "generic.s3":
-		settings = new(genericS3UploadSettings)
+		// While the API still accepts the provider type "generic.s3", the
+		// request is handled in the same way as one with provider type "aws.s3".
+		settings = new(awsS3UploadSettings)
 	default:
 		return errors.New("unexpected provider name")
 	}
@@ -227,20 +223,6 @@ func targetsToUploadResponses(targets []*target.Target, state ComposeState) []up
 			// AccessKeyID and SecretAccessKey are intentionally not included.
 		}
 		uploads = append(uploads, upload)
-	case *target.GenericS3TargetOptions:
-		upload.ProviderName = "generic.s3"
-		upload.Settings = &genericS3UploadSettings{
-			awsS3UploadSettings: awsS3UploadSettings{
-				Region: options.Region,
-				Bucket: options.Bucket,
-				Key:    options.Key,
-				// AccessKeyID and SecretAccessKey are intentionally not included.
-			},
-			Endpoint:            options.Endpoint,
-			CABundle:            options.CABundle,
-			SkipSSLVerification: options.SkipSSLVerification,
-		}
-		uploads = append(uploads, upload)
 	}
 }
@@ -270,13 +252,16 @@ func uploadRequestToTarget(u uploadRequest, imageType distro.ImageType) *target.
 	case *awsS3UploadSettings:
 		t.Name = "org.osbuild.aws.s3"
 		t.Options = &target.AWSS3TargetOptions{
-			Filename:        imageType.Filename(),
-			Region:          options.Region,
-			AccessKeyID:     options.AccessKeyID,
-			SecretAccessKey: options.SecretAccessKey,
-			SessionToken:    options.SessionToken,
-			Bucket:          options.Bucket,
-			Key:             options.Key,
+			Filename:            imageType.Filename(),
+			Region:              options.Region,
+			AccessKeyID:         options.AccessKeyID,
+			SecretAccessKey:     options.SecretAccessKey,
+			SessionToken:        options.SessionToken,
+			Bucket:              options.Bucket,
+			Key:                 options.Key,
+			Endpoint:            options.Endpoint,
+			CABundle:            options.CABundle,
+			SkipSSLVerification: options.SkipSSLVerification,
 		}
 	case *azureUploadSettings:
 		t.Name = "org.osbuild.azure"
@@ -337,22 +322,6 @@ func uploadRequestToTarget(u uploadRequest, imageType distro.ImageType) *target.
 			Namespace:   options.Namespace,
 			Compartment: options.Compartment,
 		}
-	case *genericS3UploadSettings:
-		t.Name = "org.osbuild.generic.s3"
-		t.Options = &target.GenericS3TargetOptions{
-			AWSS3TargetOptions: target.AWSS3TargetOptions{
-				Filename:        imageType.Filename(),
-				Region:          options.Region,
-				AccessKeyID:     options.AccessKeyID,
-				SecretAccessKey: options.SecretAccessKey,
-				SessionToken:    options.SessionToken,
-				Bucket:          options.Bucket,
-				Key:             options.Key,
-			},
-			Endpoint:            options.Endpoint,
-			CABundle:            options.CABundle,
-			SkipSSLVerification: options.SkipSSLVerification,
-		}
 	}
 
 	return &t
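On the Weldr side the same settings type now backs both providers. A sketch of how a "generic.s3" upload body decodes after this change; the struct below is a trimmed copy of awsS3UploadSettings and the endpoint value is a placeholder:

    package main

    import (
        "encoding/json"
        "fmt"
        "log"
    )

    // Trimmed copy of awsS3UploadSettings; tags as in internal/weldr/upload.go.
    type awsS3UploadSettings struct {
        Region              string `json:"region"`
        Bucket              string `json:"bucket"`
        Key                 string `json:"key"`
        Endpoint            string `json:"endpoint"`
        SkipSSLVerification bool   `json:"skip_ssl_verification"`
    }

    func main() {
        // A Weldr upload request with provider "generic.s3" now decodes into
        // the same settings type as one with provider "aws.s3".
        body := []byte(`{
            "region": "us-east-1",
            "bucket": "ci-test",
            "key": "image.qcow2",
            "endpoint": "http://localhost:9000",
            "skip_ssl_verification": true
        }`)
        var settings awsS3UploadSettings
        if err := json.Unmarshal(body, &settings); err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%+v\n", settings)
    }
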
diff --git a/test/cases/api.sh b/test/cases/api.sh
index 7f04e6b91..5102a0e60 100755
--- a/test/cases/api.sh
+++ b/test/cases/api.sh
@@ -12,9 +12,36 @@
 # from a run on a remote continuous integration system.
 #
 
-if (( $# != 1 )); then
-    echo "$0 requires exactly one argument"
-    echo "Please specify an image type to build"
+#
+# Cloud provider / target names
+#
+
+CLOUD_PROVIDER_AWS="aws"
+CLOUD_PROVIDER_GCP="gcp"
+CLOUD_PROVIDER_AZURE="azure"
+CLOUD_PROVIDER_AWS_S3="aws.s3"
+CLOUD_PROVIDER_GENERIC_S3="generic.s3"
+
+#
+# Supported Image type names
+#
+IMAGE_TYPE_AWS="aws"
+IMAGE_TYPE_AZURE="azure"
+IMAGE_TYPE_EDGE_COMMIT="edge-commit"
+IMAGE_TYPE_EDGE_CONTAINER="edge-container"
+IMAGE_TYPE_EDGE_INSTALLER="edge-installer"
+IMAGE_TYPE_GCP="gcp"
+IMAGE_TYPE_IMAGE_INSTALLER="image-installer"
+IMAGE_TYPE_GUEST="guest-image"
+IMAGE_TYPE_VSPHERE="vsphere"
+
+if (( $# > 2 )); then
+    echo "$0 does not support more than two arguments"
+    exit 1
+fi
+
+if (( $# == 0 )); then
+    echo "$0 requires that you set the image type to build"
     exit 1
 fi
 
@@ -22,6 +49,32 @@ set -euxo pipefail
 
 IMAGE_TYPE="$1"
 
+# select cloud provider based on image type
+#
+# the supported image types are listed in the api spec (internal/cloudapi/v2/openapi.v2.yml)
+case ${IMAGE_TYPE} in
+    "$IMAGE_TYPE_AWS")
+        CLOUD_PROVIDER="${CLOUD_PROVIDER_AWS}"
+        ;;
+    "$IMAGE_TYPE_AZURE")
+        CLOUD_PROVIDER="${CLOUD_PROVIDER_AZURE}"
+        ;;
+    "$IMAGE_TYPE_GCP")
+        CLOUD_PROVIDER="${CLOUD_PROVIDER_GCP}"
+        ;;
+    "$IMAGE_TYPE_EDGE_COMMIT"|"$IMAGE_TYPE_EDGE_CONTAINER"|"$IMAGE_TYPE_EDGE_INSTALLER"|"$IMAGE_TYPE_IMAGE_INSTALLER"|"$IMAGE_TYPE_GUEST"|"$IMAGE_TYPE_VSPHERE")
+        # blobby image types: upload to s3 and provide download link
+        CLOUD_PROVIDER="${2:-$CLOUD_PROVIDER_AWS_S3}"
+        if [ "${CLOUD_PROVIDER}" != "${CLOUD_PROVIDER_AWS_S3}" ] && [ "${CLOUD_PROVIDER}" != "${CLOUD_PROVIDER_GENERIC_S3}" ]; then
+            echo "${IMAGE_TYPE} can only be uploaded to either ${CLOUD_PROVIDER_AWS_S3} or ${CLOUD_PROVIDER_GENERIC_S3}"
+            exit 1
+        fi
+        ;;
+    *)
+        echo "Unknown image type: ${IMAGE_TYPE}"
+        exit 1
+esac
+
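The selection rule above reduces to fixed mappings for the cloud-native image types, plus an optional second argument (defaulting to aws.s3) for the "blobby" types that only produce a downloadable object. A Go rendering of the same rule, for illustration only; the script itself stays authoritative:

    package main

    import "fmt"

    // selectProvider mirrors the case statement in test/cases/api.sh.
    func selectProvider(imageType, override string) (string, error) {
        switch imageType {
        case "aws":
            return "aws", nil
        case "azure":
            return "azure", nil
        case "gcp":
            return "gcp", nil
        case "edge-commit", "edge-container", "edge-installer",
            "image-installer", "guest-image", "vsphere":
            if override == "" {
                override = "aws.s3" // the default for blobby types
            }
            if override != "aws.s3" && override != "generic.s3" {
                return "", fmt.Errorf("%s can only be uploaded to aws.s3 or generic.s3", imageType)
            }
            return override, nil
        }
        return "", fmt.Errorf("unknown image type: %s", imageType)
    }

    func main() {
        p, _ := selectProvider("edge-commit", "generic.s3")
        fmt.Println(p) // generic.s3
    }
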
 # Colorful timestamped output.
 function greenprint {
     echo -e "\033[1;32m[$(date -Isecond)] ${1}\033[0m"
 }
@@ -96,51 +149,6 @@ EOF
 
 sudo systemctl restart osbuild-composer
 
-#
-# Cloud provider / target names
-#
-
-CLOUD_PROVIDER_AWS="aws"
-CLOUD_PROVIDER_GCP="gcp"
-CLOUD_PROVIDER_AZURE="azure"
-CLOUD_PROVIDER_AWS_S3="aws.s3"
-
-#
-# Supported Image type names
-#
-IMAGE_TYPE_AWS="aws"
-IMAGE_TYPE_AZURE="azure"
-IMAGE_TYPE_EDGE_COMMIT="edge-commit"
-IMAGE_TYPE_EDGE_CONTAINER="edge-container"
-IMAGE_TYPE_EDGE_INSTALLER="edge-installer"
-IMAGE_TYPE_GCP="gcp"
-IMAGE_TYPE_IMAGE_INSTALLER="image-installer"
-IMAGE_TYPE_GUEST="guest-image"
-IMAGE_TYPE_VSPHERE="vsphere"
-
-# select cloud provider based on image type
-#
-# the supported image types are listed in the api spec (internal/cloudapi/v2/openapi.v2.yml)
-
-case ${IMAGE_TYPE} in
-    "$IMAGE_TYPE_AWS")
-        CLOUD_PROVIDER="${CLOUD_PROVIDER_AWS}"
-        ;;
-    "$IMAGE_TYPE_AZURE")
-        CLOUD_PROVIDER="${CLOUD_PROVIDER_AZURE}"
-        ;;
-    "$IMAGE_TYPE_GCP")
-        CLOUD_PROVIDER="${CLOUD_PROVIDER_GCP}"
-        ;;
-    "$IMAGE_TYPE_EDGE_COMMIT"|"$IMAGE_TYPE_EDGE_CONTAINER"|"$IMAGE_TYPE_EDGE_INSTALLER"|"$IMAGE_TYPE_IMAGE_INSTALLER"|"$IMAGE_TYPE_GUEST"|"$IMAGE_TYPE_VSPHERE")
-        # blobby image types: upload to s3 and provide download link
-        CLOUD_PROVIDER="${CLOUD_PROVIDER_AWS_S3}"
-        ;;
-    *)
-        echo "Unknown image type: ${IMAGE_TYPE}"
-        exit 1
-esac
-
 greenprint "Using Cloud Provider / Target ${CLOUD_PROVIDER} for Image Type ${IMAGE_TYPE}"
 
 #
@@ -305,6 +313,13 @@ function cleanupVSphere() {
         "${VSPHERE_VM_NAME}"
 }
 
+function cleanupGenericS3() {
+    MINIO_CONTAINER_NAME="${MINIO_CONTAINER_NAME:-}"
+    if [ -n "${MINIO_CONTAINER_NAME}" ]; then
+        sudo ${CONTAINER_RUNTIME} kill "${MINIO_CONTAINER_NAME}"
+    fi
+}
+
 function dump_db() {
     # Disable -x for these commands to avoid printing the whole result and manifest into the log
     set +x
@@ -337,6 +352,10 @@ function cleanup() {
         "$CLOUD_PROVIDER_AZURE")
             cleanupAzure
            ;;
+        "$CLOUD_PROVIDER_GENERIC_S3")
+            cleanupGenericS3
+            [[ "${IMAGE_TYPE}" == "${IMAGE_TYPE_VSPHERE}" ]] && cleanupVSphere
+            ;;
     esac
 
     # dump the DB here to ensure that it gets dumped even if the test fails
@@ -472,6 +491,81 @@ function installClientVSphere() {
     $GOVC_CMD version
 }
 
+function installGenericS3() {
+    local CONTAINER_MINIO_SERVER="quay.io/minio/minio:latest"
+    MINIO_CONTAINER_NAME="minio-server"
+    MINIO_ENDPOINT="http://localhost:9000"
+    local MINIO_ROOT_USER="X29DU5Q6C5NKDQ8PLGVT"
+    local MINIO_ROOT_PASSWORD
+    MINIO_ROOT_PASSWORD=$(date +%s | sha256sum | base64 | head -c 32 ; echo)
+    MINIO_BUCKET="ci-test"
+    local MINIO_REGION="us-east-1"
+    local MINIO_CREDENTIALS_FILE="/etc/osbuild-worker/minio-creds"
+
+    sudo ${CONTAINER_RUNTIME} run --rm -d \
+        --name ${MINIO_CONTAINER_NAME} \
+        -p 9000:9000 \
+        -e MINIO_BROWSER=off \
+        -e MINIO_ROOT_USER="${MINIO_ROOT_USER}" \
+        -e MINIO_ROOT_PASSWORD="${MINIO_ROOT_PASSWORD}" \
+        ${CONTAINER_MINIO_SERVER} server /data
+
+    if ! hash aws; then
+        echo "Using 'awscli' from a container"
+        sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_IMAGE_CLOUD_TOOLS}
+
+        AWS_CMD="sudo ${CONTAINER_RUNTIME} run --rm \
+            -e AWS_ACCESS_KEY_ID=${MINIO_ROOT_USER} \
+            -e AWS_SECRET_ACCESS_KEY=${MINIO_ROOT_PASSWORD} \
+            -v ${WORKDIR}:${WORKDIR}:Z \
+            --network host \
+            ${CONTAINER_IMAGE_CLOUD_TOOLS} aws"
+    else
+        echo "Using pre-installed 'aws' from the system"
+        AWS_CMD="AWS_ACCESS_KEY_ID=${MINIO_ROOT_USER} \
+            AWS_SECRET_ACCESS_KEY=${MINIO_ROOT_PASSWORD} \
+            aws"
+    fi
+    AWS_CMD+=" --region $MINIO_REGION --output json --color on --endpoint-url $MINIO_ENDPOINT"
+    $AWS_CMD --version
+
+    # Configure the local server (retry until the service is up)
+    MINIO_CONFIGURE_RETRY=0
+    MINIO_CONFIGURE_MAX_RETRY=5
+    MINIO_RETRY_INTERVAL=15
+    until [ "${MINIO_CONFIGURE_RETRY}" -ge "${MINIO_CONFIGURE_MAX_RETRY}" ]
+    do
+        ${AWS_CMD} s3 ls && break
+        MINIO_CONFIGURE_RETRY=$((MINIO_CONFIGURE_RETRY + 1))
+        echo "Retrying [${MINIO_CONFIGURE_RETRY}/${MINIO_CONFIGURE_MAX_RETRY}] in ${MINIO_RETRY_INTERVAL}s"
+        sleep ${MINIO_RETRY_INTERVAL}
+    done
+
+    if [ "${MINIO_CONFIGURE_RETRY}" -ge "${MINIO_CONFIGURE_MAX_RETRY}" ]; then
+        echo "Failed to communicate with the MinIO server after ${MINIO_CONFIGURE_MAX_RETRY} attempts!"
+        exit 1
+    fi
+
+    # Create the bucket
+    ${AWS_CMD} s3 mb s3://${MINIO_BUCKET}
+
+    cat <<EOF | sudo tee "${MINIO_CREDENTIALS_FILE}"
+[default]
+aws_access_key_id = ${MINIO_ROOT_USER}
+aws_secret_access_key = ${MINIO_ROOT_PASSWORD}
+EOF
+
+    # Configure the worker to upload to the local MinIO server by default
+    cat <<EOF | sudo tee /etc/osbuild-worker/osbuild-worker.toml
+[generic_s3]
+credentials = "${MINIO_CREDENTIALS_FILE}"
+endpoint = "${MINIO_ENDPOINT}"
+region = "${MINIO_REGION}"
+bucket = "${MINIO_BUCKET}"
+EOF
+
+    sudo systemctl restart "osbuild-worker@*"
+}
+
@@ ... @@
-function createReqFileAWSS3() {
+function createReqFileS3() {
+    local IMAGE_REQUEST_REGION=${1:-""}
     cat > "$REQUEST_FILE" << EOF
 {
   "distribution": "$DISTRO",
@@ -641,7 +740,7 @@ function createReqFileAWSS3() {
       "ref": "${OSTREE_REF}"
     },
     "upload_options": {
-      "region": "${AWS_REGION}"
+      "region": "${IMAGE_REQUEST_REGION}"
     }
   }
 }
 EOF
 
 # the VSphere test case does not create any additional users,
 # since this is not supported by the service UI
-function createReqFileAWSS3VSphere() {
+function createReqFileS3VSphere() {
+    local IMAGE_REQUEST_REGION=${1:-""}
     cat > "$REQUEST_FILE" << EOF
 {
   "distribution": "$DISTRO",
@@ -670,7 +770,7 @@ function createReqFileAWSS3VSphere() {
     "image_type": "${IMAGE_TYPE}",
     "repositories": $(jq ".\"$ARCH\"" /usr/share/tests/osbuild-composer/repositories/"$DISTRO".json),
     "upload_options": {
-      "region": "${AWS_REGION}"
+      "region": "${IMAGE_REQUEST_REGION}"
     }
   }
 }
@@ -767,9 +867,9 @@ case $CLOUD_PROVIDER in
         ;;
     "$CLOUD_PROVIDER_AWS_S3")
         if [[ "${IMAGE_TYPE}" == "${IMAGE_TYPE_VSPHERE}" ]]; then
-            createReqFileAWSS3VSphere
+            createReqFileS3VSphere "${AWS_REGION}"
         else
-            createReqFileAWSS3
+            createReqFileS3 "${AWS_REGION}"
         fi
         ;;
     "$CLOUD_PROVIDER_GCP")
         createReqFileGCP
         ;;
@@ -778,6 +878,13 @@ case $CLOUD_PROVIDER in
     "$CLOUD_PROVIDER_AZURE")
         createReqFileAzure
         ;;
+    "$CLOUD_PROVIDER_GENERIC_S3")
+        if [[ "${IMAGE_TYPE}" == "${IMAGE_TYPE_VSPHERE}" ]]; then
+            createReqFileS3VSphere
+        else
+            createReqFileS3
+        fi
+        ;;
 esac
 
 #
@@ -879,7 +986,11 @@ waitForState
 SUBS_COMPOSES="$(collectMetrics)"
 
 test "$UPLOAD_STATUS" = "success"
-test "$UPLOAD_TYPE" = "$CLOUD_PROVIDER"
+EXPECTED_UPLOAD_TYPE="$CLOUD_PROVIDER"
+if [ "${CLOUD_PROVIDER}" == "${CLOUD_PROVIDER_GENERIC_S3}" ]; then
+    EXPECTED_UPLOAD_TYPE="${CLOUD_PROVIDER_AWS_S3}"
+fi
+test "$UPLOAD_TYPE" = "$EXPECTED_UPLOAD_TYPE"
 test $((INIT_COMPOSES+1)) = "$SUBS_COMPOSES"
 
 #
@@ -925,6 +1036,15 @@ function checkUploadStatusOptionsAzure() {
     test "$IMAGE_NAME" = "$AZURE_IMAGE_NAME"
 }
 
+function checkUploadStatusOptionsGenericS3() {
+    local S3_URL
+    S3_URL=$(echo "$UPLOAD_OPTIONS" | jq -r '.url')
+
+    # S3 URL contains endpoint and bucket name
+    echo "$S3_URL" | grep -F "$MINIO_ENDPOINT" -
+    echo "$S3_URL" | grep -F "$MINIO_BUCKET" -
+}
+
 case $CLOUD_PROVIDER in
     "$CLOUD_PROVIDER_AWS")
         checkUploadStatusOptionsAWS
         ;;
@@ -938,6 +1058,9 @@ case $CLOUD_PROVIDER in
     "$CLOUD_PROVIDER_AZURE")
         checkUploadStatusOptionsAzure
         ;;
+    "$CLOUD_PROVIDER_GENERIC_S3")
+        checkUploadStatusOptionsGenericS3
+        ;;
 esac
 
 #
@@ -1313,7 +1436,8 @@ function verifyInVSphere() {
 }
 
 # Verify s3 blobs
-function verifyInAWSS3() {
+function verifyInS3() {
+    local BUCKET_NAME=${1}
     local S3_URL
     S3_URL=$(echo "$UPLOAD_OPTIONS" | jq -r '.url')
     greenprint "Verifying S3 object at ${S3_URL}"
@@ -1324,7 +1448,7 @@ function verifyInAWSS3() {
 
     # tag the object, also verifying that it exists in the bucket as expected
     $AWS_CMD s3api put-object-tagging \
-        --bucket "${AWS_BUCKET}" \
+        --bucket "${BUCKET_NAME}" \
         --key "${S3_FILENAME}" \
         --tagging '{"TagSet": [{ "Key": "gitlab-ci-test", "Value": "true" }]}'
 
@@ -1481,7 +1605,7 @@ case $CLOUD_PROVIDER in
         verifyInAWS
         ;;
     "$CLOUD_PROVIDER_AWS_S3")
-        verifyInAWSS3
+        verifyInS3 "${AWS_BUCKET}"
         ;;
     "$CLOUD_PROVIDER_GCP")
         verifyInGCP
@@ -1489,6 +1613,9 @@ case $CLOUD_PROVIDER in
     "$CLOUD_PROVIDER_AZURE")
         verifyInAzure
         ;;
+    "$CLOUD_PROVIDER_GENERIC_S3")
+        verifyInS3 "${MINIO_BUCKET}"
+        ;;
 esac
 
 # Verify selected package (postgresql) is included in package list
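End to end, the generic.s3 path is exercised with an invocation of the form api.sh edge-commit generic.s3 (the image type plus the new optional provider argument), and the final check mirrors checkUploadStatusOptionsGenericS3 above: the returned upload URL must embed both the MinIO endpoint and the bucket. A small Go sketch of that check; the URL shape is assumed for illustration, not taken from the service:

    package main

    import (
        "fmt"
        "os"
        "strings"
    )

    func main() {
        // Assumed shape of a URL for an object uploaded to the test MinIO server.
        url := "http://localhost:9000/ci-test/composer-api-0000.qcow2"
        for _, want := range []string{"http://localhost:9000", "ci-test"} {
            if !strings.Contains(url, want) {
                fmt.Println("upload URL is missing:", want)
                os.Exit(1)
            }
        }
        fmt.Println("upload URL contains the expected endpoint and bucket")
    }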