Support Generic S3 upload in Composer API

Use case
--------
If the Endpoint is not set but the Region is - upload to AWS S3
If both the Endpoint and the Region are set - upload to the Generic S3 service via the Weldr API
If neither the Endpoint nor the Region is set - upload to the Generic S3 service via the Composer API (connection parameters are taken from the worker configuration), as sketched below
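
Condensed from getAWSForS3Target in the jobimpl-osbuild diff below (aws, bucket and err are declared in the enclosing function), the dispatch amounts to:

// Sketch of the Endpoint/Region dispatch; the full function is in the diff below
if options.Endpoint == "" && options.Region != "" {
	// AWS S3 (Weldr and Composer)
	aws, err = impl.getAWS(options.Region, options.AccessKeyID, options.SecretAccessKey, options.SessionToken)
} else if options.Endpoint != "" && options.Region != "" {
	// Generic S3 via the Weldr API: connection parameters come in the request
	aws, err = impl.getAWSForS3TargetFromOptions(options)
} else if options.Endpoint == "" && options.Region == "" {
	// Generic S3 via the Composer API: connection parameters come from the worker configuration
	aws, bucket, err = impl.getAWSForS3TargetFromConfig()
} else {
	err = fmt.Errorf("s3 server configuration is incomplete")
}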

jobimpl-osbuild
---------------
Add configuration fields for Generic S3 upload
Support S3 upload requests coming from Weldr or Composer API to either AWS or Generic S3
The Weldr API for Generic S3 requires that all connection parameters except the credentials be passed in the API call
The Composer API for Generic S3 requires that all connection parameters be taken from the worker configuration
Adjust to the consolidation in Target and UploadOptions

Target and UploadOptions
------------------------
Add the fields that were specific to the Generic S3 structures to the AWS S3 one
Remove the structures for Generic S3 and always use the AWS S3 ones

Worker Main
-----------
Add Endpoint, Region, Bucket, CABundle and SkipSSLVerification to the configuration structure
Pass the values on to the Server; an example [generic_s3] configuration is shown below
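
With these fields in place, a worker can be pointed at a Generic S3 service purely through /etc/osbuild-worker/osbuild-worker.toml; the api.sh test below writes essentially this file (values illustrative):

[generic_s3]
credentials = "/etc/osbuild-worker/minio-creds"
endpoint = "http://localhost:9000"
region = "us-east-1"
bucket = "ci-test"
# optional TLS handling for self-hosted services
ca_bundle = "/etc/osbuild-worker/s3-ca.pem"
skip_ssl_verification = false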

Weldr API
---------
Keep the generic.s3 provider name to maintain API compatibility, but unmarshal the settings into awsS3UploadSettings; see the example request below
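
For illustration, a Weldr upload request taking this path could look like the following (field names come from awsS3UploadSettings in the diff below; the values and exact request envelope are illustrative):

{
  "provider": "generic.s3",
  "image_name": "edge-commit-minio",
  "settings": {
    "endpoint": "http://localhost:9000",
    "region": "us-east-1",
    "bucket": "ci-test",
    "key": "my-image-key",
    "accessKeyID": "<access-key>",
    "secretAccessKey": "<secret-key>",
    "skip_ssl_verification": false
  }
}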

tests - api.sh
--------------
Allow the caller to specify either AWS or Generic S3 upload targets for specific image types; see the usage example below
Implement the pieces required for testing upload to a Generic S3 service
In some cases generalize the AWS S3 functions for reuse
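
Invocation then looks like this (paths and required environment elided; the second argument is optional and defaults to aws.s3):

# blobby image types upload to AWS S3 by default
./api.sh edge-commit
# request the Generic S3 (MinIO-backed) target instead
./api.sh edge-commit generic.s3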

GitLab CI
---------
Add test case for api.sh tests with edge-commit and generic S3
Ygal Blum 2022-05-24 09:25:29 +03:00
parent 335c597452
commit feb357e538
9 changed files with 325 additions and 190 deletions


@@ -450,6 +450,7 @@ cross-distro.sh:
       - edge-commit
       - gcp
       - vsphere
+      - edge-commit generic.s3
 API:
   stage: test


@@ -29,15 +29,24 @@ import (
 	"github.com/osbuild/osbuild-composer/internal/worker/clienterrors"
 )
 
+type S3Configuration struct {
+	Creds               string
+	Endpoint            string
+	Region              string
+	Bucket              string
+	CABundle            string
+	SkipSSLVerification bool
+}
+
 type OSBuildJobImpl struct {
 	Store       string
 	Output      string
 	KojiServers map[string]koji.GSSAPICredentials
 	GCPCreds    string
 	AzureCreds  *azure.Credentials
 	AWSCreds    string
 	AWSBucket   string
-	GenericS3Creds string
+	S3Config    S3Configuration
 }
 
 // Returns an *awscloud.AWS object with the credentials of the request. If they
@@ -53,16 +62,68 @@ func (impl *OSBuildJobImpl) getAWS(region string, accessId string, secret string
 	}
 }
 
-func (impl *OSBuildJobImpl) getAWSForEndpoint(options *target.GenericS3TargetOptions) (*awscloud.AWS, error) {
+func (impl *OSBuildJobImpl) getAWSForS3TargetFromOptions(options *target.AWSS3TargetOptions) (*awscloud.AWS, error) {
 	if options.AccessKeyID != "" && options.SecretAccessKey != "" {
 		return awscloud.NewForEndpoint(options.Endpoint, options.Region, options.AccessKeyID, options.SecretAccessKey, options.SessionToken, options.CABundle, options.SkipSSLVerification)
 	}
-	if impl.GenericS3Creds != "" {
-		return awscloud.NewForEndpointFromFile(impl.GenericS3Creds, options.Endpoint, options.Region, options.CABundle, options.SkipSSLVerification)
+	if impl.S3Config.Creds != "" {
+		return awscloud.NewForEndpointFromFile(impl.S3Config.Creds, options.Endpoint, options.Region, options.CABundle, options.SkipSSLVerification)
 	}
 	return nil, fmt.Errorf("no credentials found")
 }
 
+func (impl *OSBuildJobImpl) getAWSForS3TargetFromConfig() (*awscloud.AWS, string, error) {
+	err := impl.verifyS3TargetConfiguration()
+	if err != nil {
+		return nil, "", err
+	}
+	aws, err := awscloud.NewForEndpointFromFile(impl.S3Config.Creds, impl.S3Config.Endpoint, impl.S3Config.Region, impl.S3Config.CABundle, impl.S3Config.SkipSSLVerification)
+	return aws, impl.S3Config.Bucket, err
+}
+
+func (impl *OSBuildJobImpl) verifyS3TargetConfiguration() error {
+	if impl.S3Config.Endpoint == "" {
+		return fmt.Errorf("no default endpoint for S3 was set")
+	}
+	if impl.S3Config.Region == "" {
+		return fmt.Errorf("no default region for S3 was set")
+	}
+	if impl.S3Config.Bucket == "" {
+		return fmt.Errorf("no default bucket for S3 was set")
+	}
+	if impl.S3Config.Creds == "" {
+		return fmt.Errorf("no default credentials for S3 was set")
+	}
+	return nil
+}
+
+func (impl *OSBuildJobImpl) getAWSForS3Target(options *target.AWSS3TargetOptions) (*awscloud.AWS, string, error) {
+	var aws *awscloud.AWS = nil
+	var err error
+
+	bucket := options.Bucket
+
+	// Endpoint == "" && Region != "" => AWS (Weldr and Composer)
+	if options.Endpoint == "" && options.Region != "" {
+		aws, err = impl.getAWS(options.Region, options.AccessKeyID, options.SecretAccessKey, options.SessionToken)
+		if impl.AWSBucket != "" {
+			bucket = impl.AWSBucket
+		}
+	} else if options.Endpoint != "" && options.Region != "" { // Endpoint != "" && Region != "" => Generic S3 Weldr API
+		aws, err = impl.getAWSForS3TargetFromOptions(options)
+	} else if options.Endpoint == "" && options.Region == "" { // Endpoint == "" && Region == "" => Generic S3 Composer API
+		aws, bucket, err = impl.getAWSForS3TargetFromConfig()
+	} else {
+		err = fmt.Errorf("s3 server configuration is incomplete")
+	}
+
+	return aws, bucket, err
+}
+
 // getGCP returns an *gcp.GCP object using credentials based on the following
 // predefined preference:
 //
@@ -113,7 +174,7 @@ func validateResult(result *worker.OSBuildJobResult, jobID string) {
 	result.Success = true
 }
 
-func uploadToS3(a *awscloud.AWS, outputDirectory, exportPath, bucket, key, filename string, osbuildJobResult *worker.OSBuildJobResult, genericS3 bool, streamOptimized bool, streamOptimizedPath string) (err error) {
+func uploadToS3(a *awscloud.AWS, outputDirectory, exportPath, bucket, key, filename string, osbuildJobResult *worker.OSBuildJobResult, streamOptimized bool, streamOptimizedPath string) (err error) {
 	imagePath := path.Join(outputDirectory, exportPath, filename)
 
 	// TODO: delete the stream-optimized handling after "some" time (kept for backward compatibility)
@@ -159,13 +220,7 @@ func uploadToS3(a *awscloud.AWS, outputDirectory, exportPath, bucket, key, filen
 		return
 	}
 
-	var targetResult *target.TargetResult
-	if genericS3 {
-		targetResult = target.NewGenericS3TargetResult(&target.GenericS3TargetResultOptions{URL: url})
-	} else {
-		targetResult = target.NewAWSS3TargetResult(&target.AWSS3TargetResultOptions{URL: url})
-	}
-	osbuildJobResult.TargetResults = append(osbuildJobResult.TargetResults, targetResult)
+	osbuildJobResult.TargetResults = append(osbuildJobResult.TargetResults, target.NewAWSS3TargetResult(&target.AWSS3TargetResultOptions{URL: url}))
 
 	osbuildJobResult.Success = true
 	osbuildJobResult.UploadStatus = "success"
@@ -421,29 +476,13 @@ func (impl *OSBuildJobImpl) Run(job worker.Job) error {
 		osbuildJobResult.Success = true
 		osbuildJobResult.UploadStatus = "success"
 	case *target.AWSS3TargetOptions:
-		a, err := impl.getAWS(options.Region, options.AccessKeyID, options.SecretAccessKey, options.SessionToken)
+		a, bucket, err := impl.getAWSForS3Target(options)
 		if err != nil {
 			osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidConfig, err.Error())
 			return nil
 		}
-		bucket := options.Bucket
-		if impl.AWSBucket != "" {
-			bucket = impl.AWSBucket
-		}
-		err = uploadToS3(a, outputDirectory, exportPath, bucket, options.Key, options.Filename, osbuildJobResult, false, args.StreamOptimized, streamOptimizedPath)
-		if err != nil {
-			return nil
-		}
-	case *target.GenericS3TargetOptions:
-		a, err := impl.getAWSForEndpoint(options)
-		if err != nil {
-			osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidConfig, err.Error())
-			return nil
-		}
-		err = uploadToS3(a, outputDirectory, exportPath, options.Bucket, options.Key, options.Filename, osbuildJobResult, true, args.StreamOptimized, streamOptimizedPath)
+		err = uploadToS3(a, outputDirectory, exportPath, bucket, options.Key, options.Filename, osbuildJobResult, args.StreamOptimized, streamOptimizedPath)
 		if err != nil {
 			return nil
 		}


@@ -214,7 +214,12 @@ func main() {
 			Bucket      string `toml:"bucket"`
 		} `toml:"aws"`
 		GenericS3 *struct {
 			Credentials string `toml:"credentials"`
+			Endpoint            string `toml:"endpoint"`
+			Region              string `toml:"region"`
+			Bucket              string `toml:"bucket"`
+			CABundle            string `toml:"ca_bundle"`
+			SkipSSLVerification bool   `toml:"skip_ssl_verification"`
 		} `toml:"generic_s3"`
 		Authentication *struct {
 			OAuthURL string `toml:"oauth_url"`
@@ -392,8 +397,18 @@ func main() {
 	}
 
 	var genericS3Credentials = ""
+	var genericS3Endpoint = ""
+	var genericS3Region = ""
+	var genericS3Bucket = ""
+	var genericS3CABundle = ""
+	var genericS3SkipSSLVerification = false
 	if config.GenericS3 != nil {
 		genericS3Credentials = config.GenericS3.Credentials
+		genericS3Endpoint = config.GenericS3.Endpoint
+		genericS3Region = config.GenericS3.Region
+		genericS3Bucket = config.GenericS3.Bucket
+		genericS3CABundle = config.GenericS3.CABundle
+		genericS3SkipSSLVerification = config.GenericS3.SkipSSLVerification
 	}
 
 	// depsolve jobs can be done during other jobs
@@ -434,14 +449,21 @@
 	// non-depsolve job
 	jobImpls := map[string]JobImplementation{
 		"osbuild": &OSBuildJobImpl{
 			Store:       store,
 			Output:      output,
 			KojiServers: kojiServers,
 			GCPCreds:    gcpCredentials,
 			AzureCreds:  azureCredentials,
 			AWSCreds:    awsCredentials,
 			AWSBucket:   awsBucket,
-			GenericS3Creds: genericS3Credentials,
+			S3Config: S3Configuration{
+				Creds:               genericS3Credentials,
+				Endpoint:            genericS3Endpoint,
+				Region:              genericS3Region,
+				Bucket:              genericS3Bucket,
+				CABundle:            genericS3CABundle,
+				SkipSSLVerification: genericS3SkipSSLVerification,
+			},
 		},
 		"osbuild-koji": &OSBuildKojiJobImpl{
 			Store: store,


@@ -29,13 +29,16 @@ func NewAWSTargetResult(options *AWSTargetResultOptions) *TargetResult {
 }
 
 type AWSS3TargetOptions struct {
 	Filename        string `json:"filename"`
 	Region          string `json:"region"`
 	AccessKeyID     string `json:"accessKeyID"`
 	SecretAccessKey string `json:"secretAccessKey"`
 	SessionToken    string `json:"sessionToken"`
 	Bucket          string `json:"bucket"`
 	Key             string `json:"key"`
+	Endpoint            string `json:"endpoint"`
+	CABundle            string `json:"ca_bundle"`
+	SkipSSLVerification bool   `json:"skip_ssl_verification"`
 }
 
 func (AWSS3TargetOptions) isTargetOptions() {}


@@ -1,22 +0,0 @@
-package target
-
-type GenericS3TargetOptions struct {
-	AWSS3TargetOptions
-
-	Endpoint            string `json:"endpoint"`
-	CABundle            string `json:"ca_bundle"`
-	SkipSSLVerification bool   `json:"skip_ssl_verification"`
-}
-
-func (GenericS3TargetOptions) isTargetOptions() {}
-
-func NewGenericS3Target(options *GenericS3TargetOptions) *Target {
-	return newTarget("org.osbuild.generic.s3", options)
-}
-
-type GenericS3TargetResultOptions AWSS3TargetResultOptions
-
-func (GenericS3TargetResultOptions) isTargetResultOptions() {}
-
-func NewGenericS3TargetResult(options *GenericS3TargetResultOptions) *TargetResult {
-	return newTargetResult("org.osbuild.generic.s3", options)
-}


@@ -83,8 +83,6 @@ func UnmarshalTargetOptions(targetName string, rawOptions json.RawMessage) (Targ
 		options = new(VMWareTargetOptions)
 	case "org.osbuild.oci":
 		options = new(OCITargetOptions)
-	case "org.osbuild.generic.s3":
-		options = new(GenericS3TargetOptions)
 	default:
 		return nil, errors.New("unexpected target name")
 	}


@@ -55,8 +55,6 @@ func UnmarshalTargetResultOptions(trName string, rawOptions json.RawMessage) (Ta
 		options = new(AzureImageTargetResultOptions)
 	case "org.osbuild.oci":
 		options = new(OCITargetResultOptions)
-	case "org.osbuild.generic.s3":
-		options = new(GenericS3TargetResultOptions)
 	default:
 		return nil, fmt.Errorf("unexpected target result name: %s", trName)
 	}


@@ -40,12 +40,15 @@ type awsUploadSettings struct {
 func (awsUploadSettings) isUploadSettings() {}
 
 type awsS3UploadSettings struct {
 	Region          string `json:"region"`
 	AccessKeyID     string `json:"accessKeyID,omitempty"`
 	SecretAccessKey string `json:"secretAccessKey,omitempty"`
 	SessionToken    string `json:"sessionToken,omitempty"`
 	Bucket          string `json:"bucket"`
 	Key             string `json:"key"`
+	Endpoint            string `json:"endpoint"`
+	CABundle            string `json:"ca_bundle"`
+	SkipSSLVerification bool   `json:"skip_ssl_verification"`
 }
 
 func (awsS3UploadSettings) isUploadSettings() {}
@@ -94,15 +97,6 @@ type ociUploadSettings struct {
 func (ociUploadSettings) isUploadSettings() {}
 
-type genericS3UploadSettings struct {
-	awsS3UploadSettings
-	Endpoint            string `json:"endpoint"`
-	CABundle            string `json:"ca_bundle"`
-	SkipSSLVerification bool   `json:"skip_ssl_verification"`
-}
-
-func (genericS3UploadSettings) isUploadSettings() {}
-
 type uploadRequest struct {
 	Provider  string `json:"provider"`
 	ImageName string `json:"image_name"`
@@ -137,7 +131,9 @@ func (u *uploadRequest) UnmarshalJSON(data []byte) error {
 	case "oci":
 		settings = new(ociUploadSettings)
 	case "generic.s3":
-		settings = new(genericS3UploadSettings)
+		// While the API still accepts provider type "generic.s3", the request is handled
+		// in the same way as for a request with provider type "aws.s3"
+		settings = new(awsS3UploadSettings)
 	default:
 		return errors.New("unexpected provider name")
 	}
@@ -227,20 +223,6 @@ func targetsToUploadResponses(targets []*target.Target, state ComposeState) []up
 				// AccessKeyID and SecretAccessKey are intentionally not included.
 			}
 			uploads = append(uploads, upload)
-		case *target.GenericS3TargetOptions:
-			upload.ProviderName = "generic.s3"
-			upload.Settings = &genericS3UploadSettings{
-				awsS3UploadSettings: awsS3UploadSettings{
-					Region: options.Region,
-					Bucket: options.Bucket,
-					Key:    options.Key,
-					// AccessKeyID and SecretAccessKey are intentionally not included.
-				},
-				Endpoint:            options.Endpoint,
-				CABundle:            options.CABundle,
-				SkipSSLVerification: options.SkipSSLVerification,
-			}
-			uploads = append(uploads, upload)
 		}
 	}
@@ -270,13 +252,16 @@ func uploadRequestToTarget(u uploadRequest, imageType distro.ImageType) *target.
 	case *awsS3UploadSettings:
 		t.Name = "org.osbuild.aws.s3"
 		t.Options = &target.AWSS3TargetOptions{
 			Filename:        imageType.Filename(),
 			Region:          options.Region,
 			AccessKeyID:     options.AccessKeyID,
 			SecretAccessKey: options.SecretAccessKey,
 			SessionToken:    options.SessionToken,
 			Bucket:          options.Bucket,
 			Key:             options.Key,
+			Endpoint:            options.Endpoint,
+			CABundle:            options.CABundle,
+			SkipSSLVerification: options.SkipSSLVerification,
 		}
 	case *azureUploadSettings:
 		t.Name = "org.osbuild.azure"
@@ -337,22 +322,6 @@ func uploadRequestToTarget(u uploadRequest, imageType distro.ImageType) *target.
 			Namespace:   options.Namespace,
 			Compartment: options.Compartment,
 		}
-	case *genericS3UploadSettings:
-		t.Name = "org.osbuild.generic.s3"
-		t.Options = &target.GenericS3TargetOptions{
-			AWSS3TargetOptions: target.AWSS3TargetOptions{
-				Filename:        imageType.Filename(),
-				Region:          options.Region,
-				AccessKeyID:     options.AccessKeyID,
-				SecretAccessKey: options.SecretAccessKey,
-				SessionToken:    options.SessionToken,
-				Bucket:          options.Bucket,
-				Key:             options.Key,
-			},
-			Endpoint:            options.Endpoint,
-			CABundle:            options.CABundle,
-			SkipSSLVerification: options.SkipSSLVerification,
-		}
 	}
 
 	return &t


@@ -12,9 +12,36 @@
 # from a run on a remote continuous integration system.
 #
 
-if (( $# != 1 )); then
-    echo "$0 requires exactly one argument"
-    echo "Please specify an image type to build"
+#
+# Cloud provider / target names
+#
+CLOUD_PROVIDER_AWS="aws"
+CLOUD_PROVIDER_GCP="gcp"
+CLOUD_PROVIDER_AZURE="azure"
+CLOUD_PROVIDER_AWS_S3="aws.s3"
+CLOUD_PROVIDER_GENERIC_S3="generic.s3"
+
+#
+# Supported Image type names
+#
+IMAGE_TYPE_AWS="aws"
+IMAGE_TYPE_AZURE="azure"
+IMAGE_TYPE_EDGE_COMMIT="edge-commit"
+IMAGE_TYPE_EDGE_CONTAINER="edge-container"
+IMAGE_TYPE_EDGE_INSTALLER="edge-installer"
+IMAGE_TYPE_GCP="gcp"
+IMAGE_TYPE_IMAGE_INSTALLER="image-installer"
+IMAGE_TYPE_GUEST="guest-image"
+IMAGE_TYPE_VSPHERE="vsphere"
+
+if (( $# > 2 )); then
+    echo "$0 does not support more than two arguments"
+    exit 1
+fi
+
+if (( $# == 0 )); then
+    echo "$0 requires that you set the image type to build"
     exit 1
 fi
@@ -22,6 +49,32 @@ set -euxo pipefail
 
 IMAGE_TYPE="$1"
 
+# select cloud provider based on image type
+#
+# the supported image types are listed in the api spec (internal/cloudapi/v2/openapi.v2.yml)
+case ${IMAGE_TYPE} in
+    "$IMAGE_TYPE_AWS")
+        CLOUD_PROVIDER="${CLOUD_PROVIDER_AWS}"
+        ;;
+    "$IMAGE_TYPE_AZURE")
+        CLOUD_PROVIDER="${CLOUD_PROVIDER_AZURE}"
+        ;;
+    "$IMAGE_TYPE_GCP")
+        CLOUD_PROVIDER="${CLOUD_PROVIDER_GCP}"
+        ;;
+    "$IMAGE_TYPE_EDGE_COMMIT"|"$IMAGE_TYPE_EDGE_CONTAINER"|"$IMAGE_TYPE_EDGE_INSTALLER"|"$IMAGE_TYPE_IMAGE_INSTALLER"|"$IMAGE_TYPE_GUEST"|"$IMAGE_TYPE_VSPHERE")
+        # blobby image types: upload to s3 and provide download link
+        CLOUD_PROVIDER="${2:-$CLOUD_PROVIDER_AWS_S3}"
+        if [ "${CLOUD_PROVIDER}" != "${CLOUD_PROVIDER_AWS_S3}" ] && [ "${CLOUD_PROVIDER}" != "${CLOUD_PROVIDER_GENERIC_S3}" ]; then
+            echo "${IMAGE_TYPE} can only be uploaded to either ${CLOUD_PROVIDER_AWS_S3} or ${CLOUD_PROVIDER_GENERIC_S3}"
+            exit 1
+        fi
+        ;;
+    *)
+        echo "Unknown image type: ${IMAGE_TYPE}"
+        exit 1
+esac
+
 # Colorful timestamped output.
 function greenprint {
     echo -e "\033[1;32m[$(date -Isecond)] ${1}\033[0m"
@@ -96,51 +149,6 @@ EOF
 
 sudo systemctl restart osbuild-composer
 
-#
-# Cloud provider / target names
-#
-CLOUD_PROVIDER_AWS="aws"
-CLOUD_PROVIDER_GCP="gcp"
-CLOUD_PROVIDER_AZURE="azure"
-CLOUD_PROVIDER_AWS_S3="aws.s3"
-
-#
-# Supported Image type names
-#
-IMAGE_TYPE_AWS="aws"
-IMAGE_TYPE_AZURE="azure"
-IMAGE_TYPE_EDGE_COMMIT="edge-commit"
-IMAGE_TYPE_EDGE_CONTAINER="edge-container"
-IMAGE_TYPE_EDGE_INSTALLER="edge-installer"
-IMAGE_TYPE_GCP="gcp"
-IMAGE_TYPE_IMAGE_INSTALLER="image-installer"
-IMAGE_TYPE_GUEST="guest-image"
-IMAGE_TYPE_VSPHERE="vsphere"
-
-# select cloud provider based on image type
-#
-# the supported image types are listed in the api spec (internal/cloudapi/v2/openapi.v2.yml)
-case ${IMAGE_TYPE} in
-    "$IMAGE_TYPE_AWS")
-        CLOUD_PROVIDER="${CLOUD_PROVIDER_AWS}"
-        ;;
-    "$IMAGE_TYPE_AZURE")
-        CLOUD_PROVIDER="${CLOUD_PROVIDER_AZURE}"
-        ;;
-    "$IMAGE_TYPE_GCP")
-        CLOUD_PROVIDER="${CLOUD_PROVIDER_GCP}"
-        ;;
-    "$IMAGE_TYPE_EDGE_COMMIT"|"$IMAGE_TYPE_EDGE_CONTAINER"|"$IMAGE_TYPE_EDGE_INSTALLER"|"$IMAGE_TYPE_IMAGE_INSTALLER"|"$IMAGE_TYPE_GUEST"|"$IMAGE_TYPE_VSPHERE")
-        # blobby image types: upload to s3 and provide download link
-        CLOUD_PROVIDER="${CLOUD_PROVIDER_AWS_S3}"
-        ;;
-    *)
-        echo "Unknown image type: ${IMAGE_TYPE}"
-        exit 1
-esac
-
 greenprint "Using Cloud Provider / Target ${CLOUD_PROVIDER} for Image Type ${IMAGE_TYPE}"
 
 #
@@ -305,6 +313,13 @@ function cleanupVSphere() {
         "${VSPHERE_VM_NAME}"
 }
 
+function cleanupGenericS3() {
+    MINIO_CONTAINER_NAME="${MINIO_CONTAINER_NAME:-}"
+    if [ -n "${MINIO_CONTAINER_NAME}" ]; then
+        sudo ${CONTAINER_RUNTIME} kill "${MINIO_CONTAINER_NAME}"
+    fi
+}
+
 function dump_db() {
     # Disable -x for these commands to avoid printing the whole result and manifest into the log
     set +x
@@ -337,6 +352,10 @@ function cleanup() {
         "$CLOUD_PROVIDER_AZURE")
             cleanupAzure
             ;;
+        "$CLOUD_PROVIDER_GENERIC_S3")
+            cleanupGenericS3
+            [[ "${IMAGE_TYPE}" == "${IMAGE_TYPE_VSPHERE}" ]] && cleanupVSphere
+            ;;
     esac
 
     # dump the DB here to ensure that it gets dumped even if the test fails
@@ -472,6 +491,81 @@ function installClientVSphere() {
     $GOVC_CMD version
 }
 
+function installGenericS3() {
+    local CONTAINER_MINIO_SERVER="quay.io/minio/minio:latest"
+    MINIO_CONTAINER_NAME="minio-server"
+    MINIO_ENDPOINT="http://localhost:9000"
+    local MINIO_ROOT_USER="X29DU5Q6C5NKDQ8PLGVT"
+    local MINIO_ROOT_PASSWORD
+    MINIO_ROOT_PASSWORD=$(date +%s | sha256sum | base64 | head -c 32 ; echo)
+    MINIO_BUCKET="ci-test"
+    local MINIO_REGION="us-east-1"
+    local MINIO_CREDENTIALS_FILE="/etc/osbuild-worker/minio-creds"
+
+    sudo ${CONTAINER_RUNTIME} run --rm -d \
+        --name ${MINIO_CONTAINER_NAME} \
+        -p 9000:9000 \
+        -e MINIO_BROWSER=off \
+        -e MINIO_ROOT_USER="${MINIO_ROOT_USER}" \
+        -e MINIO_ROOT_PASSWORD="${MINIO_ROOT_PASSWORD}" \
+        ${CONTAINER_MINIO_SERVER} server /data
+
+    if ! hash aws; then
+        echo "Using 'awscli' from a container"
+        sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_IMAGE_CLOUD_TOOLS}
+
+        AWS_CMD="sudo ${CONTAINER_RUNTIME} run --rm \
+            -e AWS_ACCESS_KEY_ID=${MINIO_ROOT_USER} \
+            -e AWS_SECRET_ACCESS_KEY=${MINIO_ROOT_PASSWORD} \
+            -v ${WORKDIR}:${WORKDIR}:Z \
+            --network host \
+            ${CONTAINER_IMAGE_CLOUD_TOOLS} aws"
+    else
+        echo "Using pre-installed 'aws' from the system"
+        AWS_CMD="AWS_ACCESS_KEY_ID=${MINIO_ROOT_USER} \
+            AWS_SECRET_ACCESS_KEY=${MINIO_ROOT_PASSWORD} \
+            aws"
+    fi
+    AWS_CMD+=" --region $MINIO_REGION --output json --color on --endpoint-url $MINIO_ENDPOINT"
+    $AWS_CMD --version
+
+    # Configure the local server (retry until the service is up)
+    MINIO_CONFIGURE_RETRY=0
+    MINIO_CONFIGURE_MAX_RETRY=5
+    MINIO_RETRY_INTERVAL=15
+    until [ "${MINIO_CONFIGURE_RETRY}" -ge "${MINIO_CONFIGURE_MAX_RETRY}" ]
+    do
+        ${AWS_CMD} s3 ls && break
+        MINIO_CONFIGURE_RETRY=$((MINIO_CONFIGURE_RETRY + 1))
+        echo "Retrying [${MINIO_CONFIGURE_RETRY}/${MINIO_CONFIGURE_MAX_RETRY}] in ${MINIO_RETRY_INTERVAL}(s) "
+        sleep ${MINIO_RETRY_INTERVAL}
+    done
+
+    if [ "${MINIO_CONFIGURE_RETRY}" -ge "${MINIO_CONFIGURE_MAX_RETRY}" ]; then
+        echo "Failed to communicate with the MinIO server after ${MINIO_CONFIGURE_MAX_RETRY} attempts!"
+        exit 1
+    fi
+
+    # Create the bucket
+    ${AWS_CMD} s3 mb s3://${MINIO_BUCKET}
+
+    cat <<EOF | sudo tee "${MINIO_CREDENTIALS_FILE}"
+[default]
+aws_access_key_id = ${MINIO_ROOT_USER}
+aws_secret_access_key = ${MINIO_ROOT_PASSWORD}
+EOF
+
+    cat <<EOF | sudo tee "/etc/osbuild-worker/osbuild-worker.toml"
+[generic_s3]
+credentials = "${MINIO_CREDENTIALS_FILE}"
+endpoint = "${MINIO_ENDPOINT}"
+region = "${MINIO_REGION}"
+bucket = "${MINIO_BUCKET}"
+EOF
+
+    sudo systemctl restart "osbuild-worker@1"
+}
+
 case $CLOUD_PROVIDER in
     "$CLOUD_PROVIDER_AWS" | "$CLOUD_PROVIDER_AWS_S3")
         installClientAWS
@@ -483,6 +577,10 @@ case $CLOUD_PROVIDER in
     "$CLOUD_PROVIDER_AZURE")
         installClientAzure
        ;;
+    "$CLOUD_PROVIDER_GENERIC_S3")
+        installGenericS3
+        [[ "${IMAGE_TYPE}" == "${IMAGE_TYPE_VSPHERE}" ]] && installClientVSphere
+        ;;
 esac
 
 #
@@ -607,7 +705,8 @@ EOF
 #
 OSTREE_REF="test/rhel/8/edge"
 
-function createReqFileAWSS3() {
+function createReqFileS3() {
+    local IMAGE_REQUEST_REGION=${1:-""}
     cat > "$REQUEST_FILE" << EOF
 {
   "distribution": "$DISTRO",
@@ -641,7 +740,7 @@ function createReqFileAWSS3() {
         "ref": "${OSTREE_REF}"
       },
       "upload_options": {
-        "region": "${AWS_REGION}"
+        "region": "${IMAGE_REQUEST_REGION}"
      }
    }
 }
@@ -650,7 +749,8 @@ EOF
 
 # the VSphere test case does not create any additional users,
 # since this is not supported by the service UI
-function createReqFileAWSS3VSphere() {
+function createReqFileS3VSphere() {
+    local IMAGE_REQUEST_REGION=${1:-""}
     cat > "$REQUEST_FILE" << EOF
 {
   "distribution": "$DISTRO",
@@ -670,7 +770,7 @@ function createReqFileAWSS3VSphere() {
     "image_type": "${IMAGE_TYPE}",
     "repositories": $(jq ".\"$ARCH\"" /usr/share/tests/osbuild-composer/repositories/"$DISTRO".json),
     "upload_options": {
-      "region": "${AWS_REGION}"
+      "region": "${IMAGE_REQUEST_REGION}"
    }
  }
 }
@@ -767,9 +867,9 @@ case $CLOUD_PROVIDER in
         ;;
     "$CLOUD_PROVIDER_AWS_S3")
         if [[ "${IMAGE_TYPE}" == "${IMAGE_TYPE_VSPHERE}" ]]; then
-            createReqFileAWSS3VSphere
+            createReqFileS3VSphere "${AWS_REGION}"
         else
-            createReqFileAWSS3
+            createReqFileS3 "${AWS_REGION}"
         fi
         ;;
     "$CLOUD_PROVIDER_GCP")
@@ -778,6 +878,13 @@ case $CLOUD_PROVIDER in
     "$CLOUD_PROVIDER_AZURE")
         createReqFileAzure
         ;;
+    "$CLOUD_PROVIDER_GENERIC_S3")
+        if [[ "${IMAGE_TYPE}" == "${IMAGE_TYPE_VSPHERE}" ]]; then
+            createReqFileS3VSphere
+        else
+            createReqFileS3
+        fi
+        ;;
 esac
 
 #
@@ -879,7 +986,11 @@ waitForState
 SUBS_COMPOSES="$(collectMetrics)"
 
 test "$UPLOAD_STATUS" = "success"
-test "$UPLOAD_TYPE" = "$CLOUD_PROVIDER"
+EXPECTED_UPLOAD_TYPE="$CLOUD_PROVIDER"
+if [ "${CLOUD_PROVIDER}" == "${CLOUD_PROVIDER_GENERIC_S3}" ]; then
+    EXPECTED_UPLOAD_TYPE="${CLOUD_PROVIDER_AWS_S3}"
+fi
+test "$UPLOAD_TYPE" = "$EXPECTED_UPLOAD_TYPE"
 test $((INIT_COMPOSES+1)) = "$SUBS_COMPOSES"
 
 #
@@ -925,6 +1036,15 @@ function checkUploadStatusOptionsAzure() {
     test "$IMAGE_NAME" = "$AZURE_IMAGE_NAME"
 }
 
+function checkUploadStatusOptionsGenericS3() {
+    local S3_URL
+    S3_URL=$(echo "$UPLOAD_OPTIONS" | jq -r '.url')
+
+    # S3 URL contains endpoint and bucket name
+    echo "$S3_URL" | grep -F "$MINIO_ENDPOINT" -
+    echo "$S3_URL" | grep -F "$MINIO_BUCKET" -
+}
+
 case $CLOUD_PROVIDER in
     "$CLOUD_PROVIDER_AWS")
         checkUploadStatusOptionsAWS
@@ -938,6 +1058,9 @@ case $CLOUD_PROVIDER in
     "$CLOUD_PROVIDER_AZURE")
         checkUploadStatusOptionsAzure
         ;;
+    "$CLOUD_PROVIDER_GENERIC_S3")
+        checkUploadStatusOptionsGenericS3
+        ;;
 esac
 
 #
@@ -1313,7 +1436,8 @@ function verifyInVSphere() {
 }
 
 # Verify s3 blobs
-function verifyInAWSS3() {
+function verifyInS3() {
+    local BUCKET_NAME=${1}
     local S3_URL
     S3_URL=$(echo "$UPLOAD_OPTIONS" | jq -r '.url')
     greenprint "Verifying S3 object at ${S3_URL}"
@@ -1324,7 +1448,7 @@
     # tag the object, also verifying that it exists in the bucket as expected
     $AWS_CMD s3api put-object-tagging \
-        --bucket "${AWS_BUCKET}" \
+        --bucket "${BUCKET_NAME}" \
         --key "${S3_FILENAME}" \
         --tagging '{"TagSet": [{ "Key": "gitlab-ci-test", "Value": "true" }]}'
@@ -1481,7 +1605,7 @@ case $CLOUD_PROVIDER in
         verifyInAWS
         ;;
     "$CLOUD_PROVIDER_AWS_S3")
-        verifyInAWSS3
+        verifyInS3 "${AWS_BUCKET}"
         ;;
     "$CLOUD_PROVIDER_GCP")
         verifyInGCP
@@ -1489,6 +1613,9 @@ case $CLOUD_PROVIDER in
     "$CLOUD_PROVIDER_AZURE")
         verifyInAzure
         ;;
+    "$CLOUD_PROVIDER_GENERIC_S3")
+        verifyInS3 "${MINIO_BUCKET}"
+        ;;
 esac
 
 # Verify selected package (postgresql) is included in package list