OSBuild - add support for generic S3 services

jobimpl-osbuild
---------------
Add GenericS3Creds to the OSBuildJobImpl struct
Add a method that creates an AWS client with a custom endpoint for generic S3 (with its own credentials file)
Move the S3 upload and result handling into a separate method (along with the special VMDK handling)
Adjust the AWS S3 case to use the new method
Implement a new case for uploading to a generic S3 service (see the sketch below)
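
For orientation, the new generic S3 path in the worker boils down to roughly the following (condensed from the diff further down; error handling and result bookkeeping are trimmed, so treat it as a sketch rather than the exact code):

    // Use per-job credentials if the target carries them, otherwise fall back to
    // the worker-level GenericS3Creds file, then reuse the shared upload helper.
    a, err := impl.getAWSForEndpoint(options.Endpoint, options.Region,
        options.AccessKeyID, options.SecretAccessKey, options.SessionToken)
    if err != nil {
        return nil // reported as ErrorInvalidConfig in the real implementation
    }
    err = uploadToS3(a, outputDirectory, exportPath, options.Bucket, options.Key,
        options.Filename, osbuildJobResult, true /* genericS3 */, args.StreamOptimized, streamOptimizedPath)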

awscloud
--------
Add wrapper methods for endpoint support
Set the endpoint on the AWS session
Set S3ForcePathStyle to true if an endpoint was set (see the session sketch below)
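
The effect on session creation is small; condensed from the newAwsFromCreds change in the diff below (a fragment, not a drop-in function):

    // A nil endpoint keeps the default AWS endpoints; a non-nil endpoint points the
    // SDK at an S3-compatible service and forces path-style bucket addressing.
    s3ForcePathStyle := endpoint != nil
    sess, err := session.NewSession(&aws.Config{
        Credentials:      creds,
        Region:           aws.String(region),
        Endpoint:         endpoint,
        S3ForcePathStyle: &s3ForcePathStyle,
    })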

Target
------
Define the new GenericS3Target target type and its options
Handle unmarshaling of the generic S3 target options and target result (example below)
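
For illustration, constructing the new target looks like this (type and constructor names are from the diff; the concrete values are made up):

    // GenericS3TargetOptions embeds the AWS S3 options and adds an endpoint.
    t := target.NewGenericS3Target(&target.GenericS3TargetOptions{
        AWSS3TargetOptions: target.AWSS3TargetOptions{
            Filename: "disk.qcow2",
            Region:   "us-east-1",
            Bucket:   "ci-test",
            Key:      "my-image",
        },
        Endpoint: "http://localhost:9000",
    })
    // t.Name is "org.osbuild.generic.s3"; UnmarshalTargetOptions and
    // UnmarshalTargetResultOptions now recognize that name as well.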

Weldr
-----
Add support for uploading only to AWS S3 (new aws.s3 provider)
Define new settings structures for AWS S3 and generic S3 (the latter embedding the AWS S3 one)
Handle unmarshaling of the new providers' upload settings (sketched below)
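
For reference, the settings accepted by the new generic.s3 provider map onto a flat structure like this (field names mirror the json tags in the diff; this standalone mirror and its flattened layout are illustrative, the real code embeds the AWS S3 settings struct):

    // Illustrative mirror of the generic.s3 upload settings, not the real weldr type.
    type genericS3Settings struct {
        Region          string `json:"region"`
        AccessKeyID     string `json:"accessKeyID,omitempty"`
        SecretAccessKey string `json:"secretAccessKey,omitempty"`
        SessionToken    string `json:"sessionToken,omitempty"`
        Bucket          string `json:"bucket"`
        Key             string `json:"key"`
        Endpoint        string `json:"endpoint"`
    }

The weldr API picks this settings type when a request's provider is "generic.s3" and translates it into an org.osbuild.generic.s3 target; "aws.s3" behaves the same way, minus the endpoint.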

main
----
Add a section to the worker's main config for the generic S3 service credentials
If provided, pass the credentials file name to the osbuild job implementation (see the config sketch below)
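
A minimal sketch of how such a section could be consumed, assuming a BurntSushi-style TOML decoder and an /etc/osbuild-worker path (both are assumptions for illustration, not the worker's actual wiring):

    package main

    import (
        "fmt"

        "github.com/BurntSushi/toml"
    )

    // Illustrative mirror of the new config section from the diff.
    type workerConfig struct {
        GenericS3 *struct {
            Credentials string `toml:"credentials"`
        } `toml:"generic_s3"`
    }

    func main() {
        // Hypothetical excerpt of the worker configuration file.
        blob := `
    [generic_s3]
    credentials = "/etc/osbuild-worker/generic-s3-credentials"
    `
        var cfg workerConfig
        if _, err := toml.Decode(blob, &cfg); err != nil {
            panic(err)
        }
        if cfg.GenericS3 != nil {
            fmt.Println("generic S3 credentials file:", cfg.GenericS3.Credentials)
        }
    }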

Upload Utility
--------------
Add upload-generic-s3 utility

Makefile
--------
Do not fail if the bin directory already exists

Tests
-----
Add test cases for both AWS and a generic S3 server
Add a generic s3_test.sh script used by both test cases and add it to the tests RPM spec
Adjust the libvirt test case script to support already-built images
GitLab CI: extend the libvirt test configuration to include the two new tests
Author: Ygal Blum, 2022-03-28 15:34:30 +03:00 (committed by Tomáš Hozza)
Parent: 01880a76a2
Commit: bee14bf392
15 changed files with 684 additions and 137 deletions

@@ -442,7 +442,7 @@ API:
RUNNER:
- aws/fedora-35-x86_64
libvirt:
.libvirt_integration:
stage: test
extends: .terraform/openstack
rules:
@@ -450,7 +450,7 @@ libvirt:
- !reference [.nightly_rules, rules]
script:
- schutzbot/deploy.sh
- /usr/libexec/tests/osbuild-composer/libvirt.sh
- /usr/libexec/tests/osbuild-composer/${SCRIPT}
parallel:
matrix:
- RUNNER:
@@ -460,6 +460,21 @@ libvirt:
- rhos-01/rhel-9.0-nightly-x86_64
- rhos-01/centos-stream-9-x86_64
libvirt.sh:
extends: .libvirt_integration
variables:
SCRIPT: libvirt.sh
generic_s3.sh:
extends: .libvirt_integration
variables:
SCRIPT: generic_s3.sh
aws_s3.sh:
extends: .libvirt_integration
variables:
SCRIPT: aws_s3.sh
RHEL 9 on 8:
stage: test
extends: .terraform

@@ -108,7 +108,7 @@ man: $(MANPAGES_TROFF)
.PHONY: build
build:
- mkdir bin
- mkdir -p bin
go build -o bin/osbuild-composer ./cmd/osbuild-composer/
go build -o bin/osbuild-worker ./cmd/osbuild-worker/
go build -o bin/osbuild-pipeline ./cmd/osbuild-pipeline/
@@ -116,6 +116,7 @@ build:
go build -o bin/osbuild-upload-aws ./cmd/osbuild-upload-aws/
go build -o bin/osbuild-upload-gcp ./cmd/osbuild-upload-gcp/
go build -o bin/osbuild-upload-oci ./cmd/osbuild-upload-oci/
go build -o bin/osbuild-upload-generic-s3 ./cmd/osbuild-upload-generic-s3/
go build -o bin/osbuild-mock-openid-provider ./cmd/osbuild-mock-openid-provider
go build -o bin/osbuild-service-maintenance ./cmd/osbuild-service-maintenance
go test -c -tags=integration -o bin/osbuild-composer-cli-tests ./cmd/osbuild-composer-cli-tests/main_test.go

@@ -0,0 +1,44 @@
package main
import (
"flag"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/osbuild/osbuild-composer/internal/cloud/awscloud"
)
func main() {
var accessKeyID string
var secretAccessKey string
var sessionToken string
var region string
var endpoint string
var bucketName string
var keyName string
var filename string
flag.StringVar(&accessKeyID, "access-key-id", "", "access key ID")
flag.StringVar(&secretAccessKey, "secret-access-key", "", "secret access key")
flag.StringVar(&sessionToken, "session-token", "", "session token")
flag.StringVar(&region, "region", "", "target region")
flag.StringVar(&endpoint, "endpoint", "", "target endpoint")
flag.StringVar(&bucketName, "bucket", "", "target S3 bucket name")
flag.StringVar(&keyName, "key", "", "target S3 key name")
flag.StringVar(&filename, "image", "", "image file to upload")
flag.Parse()
a, err := awscloud.NewForEndpoint(endpoint, region, accessKeyID, secretAccessKey, sessionToken)
if err != nil {
println(err.Error())
return
}
uploadOutput, err := a.Upload(filename, bucketName, keyName)
if err != nil {
println(err.Error())
return
}
fmt.Printf("file uploaded to %s\n", aws.StringValue(&uploadOutput.Location))
}

@@ -30,13 +30,14 @@ import (
)
type OSBuildJobImpl struct {
Store string
Output string
KojiServers map[string]koji.GSSAPICredentials
GCPCreds []byte
AzureCreds *azure.Credentials
AWSCreds string
AWSBucket string
Store string
Output string
KojiServers map[string]koji.GSSAPICredentials
GCPCreds []byte
AzureCreds *azure.Credentials
AWSCreds string
AWSBucket string
GenericS3Creds string
}
// Returns an *awscloud.AWS object with the credentials of the request. If they
@@ -52,6 +53,16 @@ func (impl *OSBuildJobImpl) getAWS(region string, accessId string, secret string
}
}
func (impl *OSBuildJobImpl) getAWSForEndpoint(endpoint, region, accessId, secret, token string) (*awscloud.AWS, error) {
if accessId != "" && secret != "" {
return awscloud.NewForEndpoint(endpoint, region, accessId, secret, token)
}
if impl.GenericS3Creds != "" {
return awscloud.NewForEndpointFromFile(impl.GenericS3Creds, endpoint, region)
}
return nil, fmt.Errorf("no credentials found")
}
func validateResult(result *worker.OSBuildJobResult, jobID string) {
logWithId := logrus.WithField("jobId", jobID)
if result.JobError != nil {
@@ -71,6 +82,65 @@ func validateResult(result *worker.OSBuildJobResult, jobID string) {
result.Success = true
}
func uploadToS3(a *awscloud.AWS, outputDirectory, exportPath, bucket, key, filename string, osbuildJobResult *worker.OSBuildJobResult, genericS3 bool, streamOptimized bool, streamOptimizedPath string) (err error) {
imagePath := path.Join(outputDirectory, exportPath, filename)
// *** SPECIAL VMDK HANDLING START ***
// Upload the VMDK image as stream-optimized.
// The VMDK conversion is applied only when the job was submitted by Weldr API,
// therefore we need to do the conversion here explicitly if it was not done.
if streamOptimized {
// If the streamOptimizedPath is empty, the conversion was not done
if streamOptimizedPath == "" {
var f *os.File
f, err = vmware.OpenAsStreamOptimizedVmdk(imagePath)
if err != nil {
osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidConfig, err.Error())
return nil
}
streamOptimizedPath = f.Name()
f.Close()
}
// Replace the original file by the stream-optimized one
err = os.Rename(streamOptimizedPath, imagePath)
if err != nil {
osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidConfig, err.Error())
return nil
}
}
// *** SPECIAL VMDK HANDLING END ***
if key == "" {
key = uuid.New().String()
}
key += "-" + filename
_, err = a.Upload(imagePath, bucket, key)
if err != nil {
osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorUploadingImage, err.Error())
return
}
url, err := a.S3ObjectPresignedURL(bucket, key)
if err != nil {
osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorUploadingImage, err.Error())
return
}
var targetResult *target.TargetResult
if genericS3 {
targetResult = target.NewGenericS3TargetResult(&target.GenericS3TargetResultOptions{URL: url})
} else {
targetResult = target.NewAWSS3TargetResult(&target.AWSS3TargetResultOptions{URL: url})
}
osbuildJobResult.TargetResults = append(osbuildJobResult.TargetResults, targetResult)
osbuildJobResult.Success = true
osbuildJobResult.UploadStatus = "success"
return
}
func (impl *OSBuildJobImpl) Run(job worker.Job) error {
logWithId := logrus.WithField("jobId", job.Id().String())
// Initialize variable needed for reporting back to osbuild-composer.
@@ -314,59 +384,26 @@ func (impl *OSBuildJobImpl) Run(job worker.Job) error {
return nil
}
key := options.Key
if key == "" {
key = uuid.New().String()
}
key += "-" + options.Filename
bucket := options.Bucket
if impl.AWSBucket != "" {
bucket = impl.AWSBucket
}
imagePath := path.Join(outputDirectory, exportPath, options.Filename)
// *** SPECIAL VMDK HANDLING START ***
// Upload the VMDK image as stream-optimized.
// The VMDK conversion is applied only when the job was submitted by Weldr API,
// therefore we need to do the conversion here explicitly if it was not done.
if args.StreamOptimized {
// If the streamOptimizedPath is empty, the conversion was not done
if streamOptimizedPath == "" {
var f *os.File
f, err = vmware.OpenAsStreamOptimizedVmdk(imagePath)
if err != nil {
osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidConfig, err.Error())
return nil
}
streamOptimizedPath = f.Name()
f.Close()
}
// Replace the original file by the stream-optimized one
err = os.Rename(streamOptimizedPath, imagePath)
if err != nil {
osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidConfig, err.Error())
return nil
}
}
// *** SPECIAL VMDK HANDLING END ***
_, err = a.Upload(imagePath, bucket, key)
err = uploadToS3(a, outputDirectory, exportPath, bucket, options.Key, options.Filename, osbuildJobResult, false, args.StreamOptimized, streamOptimizedPath)
if err != nil {
osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorUploadingImage, err.Error())
return nil
}
url, err := a.S3ObjectPresignedURL(bucket, key)
case *target.GenericS3TargetOptions:
a, err := impl.getAWSForEndpoint(options.Endpoint, options.Region, options.AccessKeyID, options.SecretAccessKey, options.SessionToken)
if err != nil {
osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorUploadingImage, err.Error())
osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidConfig, err.Error())
return nil
}
osbuildJobResult.TargetResults = append(osbuildJobResult.TargetResults, target.NewAWSS3TargetResult(&target.AWSS3TargetResultOptions{URL: url}))
osbuildJobResult.Success = true
osbuildJobResult.UploadStatus = "success"
err = uploadToS3(a, outputDirectory, exportPath, options.Bucket, options.Key, options.Filename, osbuildJobResult, true, args.StreamOptimized, streamOptimizedPath)
if err != nil {
return nil
}
case *target.AzureTargetOptions:
azureStorageClient, err := azure.NewStorageClient(options.StorageAccount, options.StorageAccessKey)
if err != nil {

@@ -139,6 +139,9 @@ func main() {
Credentials string `toml:"credentials"`
Bucket string `toml:"bucket"`
} `toml:"aws"`
GenericS3 *struct {
Credentials string `toml:"credentials"`
} `toml:"generic_s3"`
Authentication *struct {
OAuthURL string `toml:"oauth_url"`
OfflineTokenPath string `toml:"offline_token"`
@@ -306,6 +309,11 @@ func main() {
awsBucket = config.AWS.Bucket
}
var genericS3Credentials = ""
if config.GenericS3 != nil {
genericS3Credentials = config.GenericS3.Credentials
}
// depsolve jobs can be done during other jobs
depsolveCtx, depsolveCtxCancel := context.WithCancel(context.Background())
defer depsolveCtxCancel()
@@ -340,13 +348,14 @@ func main() {
// non-depsolve job
jobImpls := map[string]JobImplementation{
"osbuild": &OSBuildJobImpl{
Store: store,
Output: output,
KojiServers: kojiServers,
GCPCreds: gcpCredentials,
AzureCreds: azureCredentials,
AWSCreds: awsCredentials,
AWSBucket: awsBucket,
Store: store,
Output: output,
KojiServers: kojiServers,
GCPCreds: gcpCredentials,
AzureCreds: azureCredentials,
AWSCreds: awsCredentials,
AWSBucket: awsBucket,
GenericS3Creds: genericS3Credentials,
},
"osbuild-koji": &OSBuildKojiJobImpl{
Store: store,

@@ -22,11 +22,14 @@ type AWS struct {
}
// Create a new session from the credentials and the region and returns an *AWS object initialized with it.
func newAwsFromCreds(creds *credentials.Credentials, region string) (*AWS, error) {
func newAwsFromCreds(creds *credentials.Credentials, region string, endpoint *string) (*AWS, error) {
// Create a Session with a custom region
s3ForcePathStyle := endpoint != nil
sess, err := session.NewSession(&aws.Config{
Credentials: creds,
Region: aws.String(region),
Credentials: creds,
Region: aws.String(region),
Endpoint: endpoint,
S3ForcePathStyle: &s3ForcePathStyle,
})
if err != nil {
return nil, err
@@ -41,7 +44,7 @@ func newAwsFromCreds(creds *credentials.Credentials, region string) (*AWS, error
// Initialize a new AWS object from individual bits. SessionToken is optional
func New(region string, accessKeyID string, accessKey string, sessionToken string) (*AWS, error) {
return newAwsFromCreds(credentials.NewStaticCredentials(accessKeyID, accessKey, sessionToken), region)
return newAwsFromCreds(credentials.NewStaticCredentials(accessKeyID, accessKey, sessionToken), region, nil)
}
// Initializes a new AWS object with the credentials info found at filename's location.
@@ -54,13 +57,31 @@ func New(region string, accessKeyID string, accessKey string, sessionToken strin
// "AWS_SHARED_CREDENTIALS_FILE" env variable or will default to
// $HOME/.aws/credentials.
func NewFromFile(filename string, region string) (*AWS, error) {
return newAwsFromCreds(credentials.NewSharedCredentials(filename, "default"), region)
return newAwsFromCreds(credentials.NewSharedCredentials(filename, "default"), region, nil)
}
// Initialize a new AWS object from defaults.
// Looks for env variables, shared credential file, and EC2 Instance Roles.
func NewDefault(region string) (*AWS, error) {
return newAwsFromCreds(nil, region)
return newAwsFromCreds(nil, region, nil)
}
// Initialize a new AWS object targeting a specific endpoint from individual bits. SessionToken is optional
func NewForEndpoint(endpoint, region string, accessKeyID string, accessKey string, sessionToken string) (*AWS, error) {
return newAwsFromCreds(credentials.NewStaticCredentials(accessKeyID, accessKey, sessionToken), region, &endpoint)
}
// Initializes a new AWS object targeting a specific endpoint with the credentials info found at filename's location.
// The credential files should match the AWS format, such as:
// [default]
// aws_access_key_id = secretString1
// aws_secret_access_key = secretString2
//
// If filename is empty the underlying function will look for the
// "AWS_SHARED_CREDENTIALS_FILE" env variable or will default to
// $HOME/.aws/credentials.
func NewForEndpointFromFile(filename string, endpoint, region string) (*AWS, error) {
return newAwsFromCreds(credentials.NewSharedCredentials(filename, "default"), region, &endpoint)
}
func (a *AWS) Upload(filename, bucket, key string) (*s3manager.UploadOutput, error) {

@@ -0,0 +1,20 @@
package target
type GenericS3TargetOptions struct {
AWSS3TargetOptions
Endpoint string `json:"endpoint"`
}
func (GenericS3TargetOptions) isTargetOptions() {}
func NewGenericS3Target(options *GenericS3TargetOptions) *Target {
return newTarget("org.osbuild.generic.s3", options)
}
type GenericS3TargetResultOptions AWSS3TargetResultOptions
func (GenericS3TargetResultOptions) isTargetResultOptions() {}
func NewGenericS3TargetResult(options *GenericS3TargetResultOptions) *TargetResult {
return newTargetResult("org.osbuild.generic.s3", options)
}

@@ -83,6 +83,8 @@ func UnmarshalTargetOptions(targetName string, rawOptions json.RawMessage) (Targ
options = new(VMWareTargetOptions)
case "org.osbuild.oci":
options = new(OCITargetOptions)
case "org.osbuild.generic.s3":
options = new(GenericS3TargetOptions)
default:
return nil, errors.New("unexpected target name")
}

@@ -55,8 +55,10 @@ func UnmarshalTargetResultOptions(trName string, rawOptions json.RawMessage) (Ta
options = new(AzureImageTargetResultOptions)
case "org.osbuild.oci":
options = new(OCITargetResultOptions)
case "org.osbuild.generic.s3":
options = new(GenericS3TargetResultOptions)
default:
return nil, fmt.Errorf("Unexpected target result name: %s", trName)
return nil, fmt.Errorf("unexpected target result name: %s", trName)
}
err := json.Unmarshal(rawOptions, options)

@@ -36,6 +36,17 @@ type awsUploadSettings struct {
func (awsUploadSettings) isUploadSettings() {}
type awsS3UploadSettings struct {
Region string `json:"region"`
AccessKeyID string `json:"accessKeyID,omitempty"`
SecretAccessKey string `json:"secretAccessKey,omitempty"`
SessionToken string `json:"sessionToken,omitempty"`
Bucket string `json:"bucket"`
Key string `json:"key"`
}
func (awsS3UploadSettings) isUploadSettings() {}
type azureUploadSettings struct {
StorageAccount string `json:"storageAccount,omitempty"`
StorageAccessKey string `json:"storageAccessKey,omitempty"`
@@ -68,6 +79,13 @@ type ociUploadSettings struct {
func (ociUploadSettings) isUploadSettings() {}
type genericS3UploadSettings struct {
awsS3UploadSettings
Endpoint string `json:"endpoint"`
}
func (genericS3UploadSettings) isUploadSettings() {}
type uploadRequest struct {
Provider string `json:"provider"`
ImageName string `json:"image_name"`
@@ -93,10 +111,14 @@ func (u *uploadRequest) UnmarshalJSON(data []byte) error {
settings = new(azureUploadSettings)
case "aws":
settings = new(awsUploadSettings)
case "aws.s3":
settings = new(awsS3UploadSettings)
case "vmware":
settings = new(vmwareUploadSettings)
case "oci":
settings = new(ociUploadSettings)
case "generic.s3":
settings = new(genericS3UploadSettings)
default:
return errors.New("unexpected provider name")
}
@@ -167,6 +189,27 @@ func targetsToUploadResponses(targets []*target.Target, state ComposeState) []up
// Username and Password are intentionally not included.
}
uploads = append(uploads, upload)
case *target.AWSS3TargetOptions:
upload.ProviderName = "aws.s3"
upload.Settings = &awsS3UploadSettings{
Region: options.Region,
Bucket: options.Bucket,
Key: options.Key,
// AccessKeyID and SecretAccessKey are intentionally not included.
}
uploads = append(uploads, upload)
case *target.GenericS3TargetOptions:
upload.ProviderName = "generic.s3"
upload.Settings = &genericS3UploadSettings{
awsS3UploadSettings: awsS3UploadSettings{
Region: options.Region,
Bucket: options.Bucket,
Key: options.Key,
// AccessKeyID and SecretAccessKey are intentionally not included.
},
Endpoint: options.Endpoint,
}
uploads = append(uploads, upload)
}
}
@@ -193,6 +236,17 @@ func uploadRequestToTarget(u uploadRequest, imageType distro.ImageType) *target.
Bucket: options.Bucket,
Key: options.Key,
}
case *awsS3UploadSettings:
t.Name = "org.osbuild.aws.s3"
t.Options = &target.AWSS3TargetOptions{
Filename: imageType.Filename(),
Region: options.Region,
AccessKeyID: options.AccessKeyID,
SecretAccessKey: options.SecretAccessKey,
SessionToken: options.SessionToken,
Bucket: options.Bucket,
Key: options.Key,
}
case *azureUploadSettings:
t.Name = "org.osbuild.azure"
t.Options = &target.AzureTargetOptions{
@@ -225,6 +279,20 @@ func uploadRequestToTarget(u uploadRequest, imageType distro.ImageType) *target.
Namespace: options.Namespace,
Compartment: options.Compartment,
}
case *genericS3UploadSettings:
t.Name = "org.osbuild.generic.s3"
t.Options = &target.GenericS3TargetOptions{
AWSS3TargetOptions: target.AWSS3TargetOptions{
Filename: imageType.Filename(),
Region: options.Region,
AccessKeyID: options.AccessKeyID,
SecretAccessKey: options.SecretAccessKey,
SessionToken: options.SessionToken,
Bucket: options.Bucket,
Key: options.Key,
},
Endpoint: options.Endpoint,
}
}
return &t

@@ -228,6 +228,7 @@ install -m 0755 -vp tools/run-koji-container.sh %{buildroot}%
install -m 0755 -vp tools/koji-compose.py %{buildroot}%{_libexecdir}/osbuild-composer-test/
install -m 0755 -vp tools/koji-compose-v2.py %{buildroot}%{_libexecdir}/osbuild-composer-test/
install -m 0755 -vp tools/libvirt_test.sh %{buildroot}%{_libexecdir}/osbuild-composer-test/
install -m 0755 -vp tools/s3_test.sh %{buildroot}%{_libexecdir}/osbuild-composer-test/
install -m 0755 -vp tools/set-env-variables.sh %{buildroot}%{_libexecdir}/osbuild-composer-test/
install -m 0755 -vp tools/test-case-generators/generate-test-cases %{buildroot}%{_libexecdir}/osbuild-composer-test/
install -m 0755 -vd %{buildroot}%{_libexecdir}/tests/osbuild-composer

test/cases/aws_s3.sh (new executable file, 72 lines)
@@ -0,0 +1,72 @@
#!/bin/bash
set -euo pipefail
source /usr/libexec/osbuild-composer-test/set-env-variables.sh
# Container image used for cloud provider CLI tools
CONTAINER_IMAGE_CLOUD_TOOLS="quay.io/osbuild/cloud-tools:latest"
# Provision the software under test.
/usr/libexec/osbuild-composer-test/provision.sh
# Check available container runtime
if which podman 2>/dev/null >&2; then
CONTAINER_RUNTIME=podman
elif which docker 2>/dev/null >&2; then
CONTAINER_RUNTIME=docker
else
echo No container runtime found, install podman or docker.
exit 2
fi
TEMPDIR=$(mktemp -d)
function cleanup() {
sudo rm -rf "$TEMPDIR"
}
trap cleanup EXIT
# Generate a string, which can be used as a predictable resource name,
# especially when running the test in CI where we may need to clean up
# resources in case the test unexpectedly fails or is canceled
CI="${CI:-false}"
if [[ "$CI" == true ]]; then
# in CI, imitate GenerateCIArtifactName() from internal/test/helpers.go
TEST_ID="$DISTRO_CODE-$ARCH-$CI_COMMIT_BRANCH-$CI_BUILD_ID"
else
# if not running in Jenkins, generate ID not relying on specific env variables
TEST_ID=$(uuidgen);
fi
# Set up temporary files.
AWS_S3_PROVIDER_CONFIG=${TEMPDIR}/aws.toml
# We need awscli to talk to AWS.
if ! hash aws; then
echo "Using 'awscli' from a container"
sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_IMAGE_CLOUD_TOOLS}
AWS_CMD="sudo ${CONTAINER_RUNTIME} run --rm \
-e AWS_ACCESS_KEY_ID=${V2_AWS_ACCESS_KEY_ID} \
-e AWS_SECRET_ACCESS_KEY=${V2_AWS_SECRET_ACCESS_KEY} \
${CONTAINER_IMAGE_CLOUD_TOOLS} aws --region $AWS_REGION"
else
echo "Using pre-installed 'aws' from the system"
AWS_CMD="aws --region $AWS_REGION"
fi
$AWS_CMD --version
# Write an AWS TOML file
tee "$AWS_S3_PROVIDER_CONFIG" > /dev/null << EOF
provider = "aws.s3"
[settings]
accessKeyID = "${V2_AWS_ACCESS_KEY_ID}"
secretAccessKey = "${V2_AWS_SECRET_ACCESS_KEY}"
bucket = "${AWS_BUCKET}"
region = "${AWS_REGION}"
key = "${TEST_ID}"
EOF
IMAGE_OBJECT_KEY="${AWS_BUCKET}/${TEST_ID}-disk.qcow2"
/usr/libexec/osbuild-composer-test/s3_test.sh "${TEST_ID}" "${AWS_S3_PROVIDER_CONFIG}" "${AWS_CMD} s3 ls ${IMAGE_OBJECT_KEY}" "${AWS_CMD} s3 presign ${IMAGE_OBJECT_KEY}" "${AWS_CMD} s3 rm s3://${IMAGE_OBJECT_KEY}"

test/cases/generic_s3.sh (new executable file, 98 lines)
@@ -0,0 +1,98 @@
#!/bin/bash
set -euo pipefail
source /usr/libexec/osbuild-composer-test/set-env-variables.sh
# Container images for MinIO Server and Client
CONTAINER_MINIO_CLIENT="quay.io/minio/mc:latest"
CONTAINER_MINIO_SERVER="quay.io/minio/minio:latest"
# Provision the software under test.
/usr/libexec/osbuild-composer-test/provision.sh
# Check available container runtime
if which podman 2>/dev/null >&2; then
CONTAINER_RUNTIME=podman
elif which docker 2>/dev/null >&2; then
CONTAINER_RUNTIME=docker
else
echo No container runtime found, install podman or docker.
exit 2
fi
TEMPDIR=$(mktemp -d)
function cleanup() {
sudo rm -rf "$TEMPDIR"
}
trap cleanup EXIT
# Generate a string, which can be used as a predictable resource name,
# especially when running the test in CI where we may need to clean up
# resources in case the test unexpectedly fails or is canceled
CI="${CI:-false}"
if [[ "$CI" == true ]]; then
# in CI, imitate GenerateCIArtifactName() from internal/test/helpers.go
TEST_ID="$DISTRO_CODE-$ARCH-$CI_COMMIT_BRANCH-$CI_BUILD_ID"
else
# if not running in Jenkins, generate ID not relying on specific env variables
TEST_ID=$(uuidgen);
fi
# Set up temporary files.
MINIO_CONFIG_DIR=${TEMPDIR}/minio-config
MINIO_PROVIDER_CONFIG=${TEMPDIR}/minio.toml
# We need MinIO Client to talk to the MinIO Server.
if ! hash mc; then
echo "Using 'mc' from a container"
sudo ${CONTAINER_RUNTIME} pull ${CONTAINER_MINIO_CLIENT}
MC_CMD="sudo ${CONTAINER_RUNTIME} run --rm \
-v ${MINIO_CONFIG_DIR}:${MINIO_CONFIG_DIR}:Z \
--network=host \
${CONTAINER_MINIO_CLIENT} --config-dir=${MINIO_CONFIG_DIR}"
else
echo "Using pre-installed 'mc' from the system"
MC_CMD="mc --config-dir=${MINIO_CONFIG_DIR}"
fi
mkdir "${MINIO_CONFIG_DIR}"
$MC_CMD --version
MINIO_CONTAINER_NAME="minio-server"
MINIO_ENDPOINT="http://localhost:9000"
MINIO_ROOT_USER="X29DU5Q6C5NKDQ8PLGVT"
MINIO_ROOT_PASSWORD=$(date +%s | sha256sum | base64 | head -c 32 ; echo)
MINIO_SERVER_ALIAS=local
MINIO_BUCKET="ci-test"
MINIO_REGION="us-east-1"
# Write an AWS TOML file
tee "$MINIO_PROVIDER_CONFIG" > /dev/null << EOF
provider = "generic.s3"
[settings]
endpoint = "${MINIO_ENDPOINT}"
accessKeyID = "${MINIO_ROOT_USER}"
secretAccessKey = "${MINIO_ROOT_PASSWORD}"
bucket = "${MINIO_BUCKET}"
region = "${MINIO_REGION}"
key = "${TEST_ID}"
EOF
# Start the MinIO Server
${CONTAINER_RUNTIME} run --rm -d \
--name ${MINIO_CONTAINER_NAME} \
-p 9000:9000 \
-e MINIO_BROWSER=off \
-e MINIO_ROOT_USER="${MINIO_ROOT_USER}" \
-e MINIO_ROOT_PASSWORD="${MINIO_ROOT_PASSWORD}" \
${CONTAINER_MINIO_SERVER} server /data
# Kill the server once we're done
trap '${CONTAINER_RUNTIME} kill ${MINIO_CONTAINER_NAME}' EXIT
# Configure the local server
${MC_CMD} alias set ${MINIO_SERVER_ALIAS} ${MINIO_ENDPOINT} ${MINIO_ROOT_USER} "${MINIO_ROOT_PASSWORD}"
# Create the bucket
${MC_CMD} mb ${MINIO_SERVER_ALIAS}/${MINIO_BUCKET}
IMAGE_OBJECT_KEY="${MINIO_SERVER_ALIAS}/${MINIO_BUCKET}/${TEST_ID}-disk.qcow2"
/usr/libexec/osbuild-composer-test/s3_test.sh "${TEST_ID}" "${MINIO_PROVIDER_CONFIG}" "${MC_CMD} ls ${IMAGE_OBJECT_KEY}" "${MC_CMD} --json share download ${IMAGE_OBJECT_KEY} | jq .share | tr -d '\"'"

@@ -17,6 +17,8 @@ IMAGE_TYPE=${1:-qcow2}
# Take the boot type passed to the script or use BIOS by default if nothing
# was passed.
BOOT_TYPE=${2:-bios}
# Take the image from the URL passed to the script, or build it by default if nothing was passed
LIBVIRT_IMAGE_URL=${3:-""}
# Select the file extension based on the image that we are building.
IMAGE_EXTENSION=$IMAGE_TYPE
@@ -131,82 +133,90 @@ get_compose_metadata () {
sudo cat "${COMPOSE_ID}".json | jq -M '.' | tee "$METADATA_FILE" > /dev/null
}
# Write a basic blueprint for our image.
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "bp"
description = "A base system"
version = "0.0.1"
EOF
# Prepare the blueprint for the compose.
greenprint "📋 Preparing blueprint"
sudo composer-cli blueprints push "$BLUEPRINT_FILE"
sudo composer-cli blueprints depsolve bp
# Get worker unit file so we can watch the journal.
WORKER_UNIT=$(sudo systemctl list-units | grep -o -E "osbuild.*worker.*\.service")
sudo journalctl -af -n 1 -u "${WORKER_UNIT}" &
WORKER_JOURNAL_PID=$!
# Stop watching the worker journal when exiting.
trap 'sudo pkill -P ${WORKER_JOURNAL_PID}' EXIT
# Start the compose
greenprint "🚀 Starting compose"
sudo composer-cli --json compose start bp "$IMAGE_TYPE" | tee "$COMPOSE_START"
if rpm -q --quiet weldr-client; then
COMPOSE_ID=$(jq -r '.body.build_id' "$COMPOSE_START")
else
COMPOSE_ID=$(jq -r '.build_id' "$COMPOSE_START")
fi
# Wait for the compose to finish.
greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}"
while true; do
sudo composer-cli --json compose info "${COMPOSE_ID}" | tee "$COMPOSE_INFO" > /dev/null
if rpm -q --quiet weldr-client; then
COMPOSE_STATUS=$(jq -r '.body.queue_status' "$COMPOSE_INFO")
else
COMPOSE_STATUS=$(jq -r '.queue_status' "$COMPOSE_INFO")
fi
# Is the compose finished?
if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
break
fi
# Wait 30 seconds and try again.
sleep 5
done
# Capture the compose logs from osbuild.
greenprint "💬 Getting compose log and metadata"
get_compose_log "$COMPOSE_ID"
get_compose_metadata "$COMPOSE_ID"
# Kill the journal monitor immediately and remove the trap
sudo pkill -P ${WORKER_JOURNAL_PID}
trap - EXIT
# Did the compose finish with success?
if [[ $COMPOSE_STATUS != FINISHED ]]; then
echo "Something went wrong with the compose. 😢"
exit 1
fi
# Download the image.
greenprint "📥 Downloading the image"
# Current $PWD is inside /tmp, there may not be enough space for an image.
# Let's use a bigger temporary directory for this operation.
BIG_TEMP_DIR=/var/lib/osbuild-composer-tests
sudo rm -rf "${BIG_TEMP_DIR}" || true
sudo mkdir "${BIG_TEMP_DIR}"
pushd "${BIG_TEMP_DIR}"
sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null
IMAGE_FILENAME=$(basename "$(find . -maxdepth 1 -type f -name "*.${IMAGE_EXTENSION}")")
if [ -z "${LIBVIRT_IMAGE_URL}" ]; then
# Write a basic blueprint for our image.
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "bp"
description = "A base system"
version = "0.0.1"
EOF
# Prepare the blueprint for the compose.
greenprint "📋 Preparing blueprint"
sudo composer-cli blueprints push "$BLUEPRINT_FILE"
sudo composer-cli blueprints depsolve bp
# Get worker unit file so we can watch the journal.
WORKER_UNIT=$(sudo systemctl list-units | grep -o -E "osbuild.*worker.*\.service")
sudo journalctl -af -n 1 -u "${WORKER_UNIT}" &
WORKER_JOURNAL_PID=$!
# Stop watching the worker journal when exiting.
trap 'sudo pkill -P ${WORKER_JOURNAL_PID}' EXIT
# Start the compose
greenprint "🚀 Starting compose"
sudo composer-cli --json compose start bp "$IMAGE_TYPE" | tee "$COMPOSE_START"
if rpm -q --quiet weldr-client; then
COMPOSE_ID=$(jq -r '.body.build_id' "$COMPOSE_START")
else
COMPOSE_ID=$(jq -r '.build_id' "$COMPOSE_START")
fi
# Wait for the compose to finish.
greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}"
while true; do
sudo composer-cli --json compose info "${COMPOSE_ID}" | tee "$COMPOSE_INFO" > /dev/null
if rpm -q --quiet weldr-client; then
COMPOSE_STATUS=$(jq -r '.body.queue_status' "$COMPOSE_INFO")
else
COMPOSE_STATUS=$(jq -r '.queue_status' "$COMPOSE_INFO")
fi
# Is the compose finished?
if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
break
fi
# Wait 30 seconds and try again.
sleep 5
done
# Capture the compose logs from osbuild.
greenprint "💬 Getting compose log and metadata"
get_compose_log "$COMPOSE_ID"
get_compose_metadata "$COMPOSE_ID"
# Kill the journal monitor immediately and remove the trap
sudo pkill -P ${WORKER_JOURNAL_PID}
trap - EXIT
# Did the compose finish with success?
if [[ $COMPOSE_STATUS != FINISHED ]]; then
echo "Something went wrong with the compose. 😢"
exit 1
fi
# Download the image.
greenprint "📥 Downloading the image"
pushd "${BIG_TEMP_DIR}"
sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null
IMAGE_FILENAME=$(basename "$(find . -maxdepth 1 -type f -name "*.${IMAGE_EXTENSION}")")
LIBVIRT_IMAGE_PATH=/var/lib/libvirt/images/${IMAGE_KEY}.${IMAGE_EXTENSION}
sudo mv "$IMAGE_FILENAME" "$LIBVIRT_IMAGE_PATH"
popd
else
pushd "${BIG_TEMP_DIR}"
LIBVIRT_IMAGE_PATH=/var/lib/libvirt/images/${IMAGE_KEY}.${IMAGE_EXTENSION}
sudo mv "$IMAGE_FILENAME" "$LIBVIRT_IMAGE_PATH"
popd
sudo curl -o "${LIBVIRT_IMAGE_PATH}" "${LIBVIRT_IMAGE_URL}"
popd
fi
# Prepare cloud-init data.
CLOUD_INIT_DIR=$(mktemp -d)
@@ -323,8 +333,10 @@ else
fi
sudo rm -f "$LIBVIRT_IMAGE_PATH" $CLOUD_INIT_PATH
# Also delete the compose so we don't run out of disk space
sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
if [ -z "${LIBVIRT_IMAGE_URL}" ]; then
# Also delete the compose so we don't run out of disk space
sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
fi
# Use the return code of the smoke test to determine if we passed or failed.
if [[ $RESULTS == 1 ]]; then

tools/s3_test.sh (new executable file, 145 lines)
@@ -0,0 +1,145 @@
#!/bin/bash
set -euo pipefail
source /usr/libexec/osbuild-composer-test/set-env-variables.sh
TEST_ID=${1}
S3_PROVIDER_CONFIG_FILE=${2}
S3_CHECK_CMD=${3}
S3_GET_URL_CMD=${4}
S3_DELETE_CMD=${5:-""}
# Colorful output.
function greenprint {
echo -e "\033[1;32m[$(date -Isecond)] ${1}\033[0m"
}
function get_build_info() {
key="$1"
fname="$2"
if rpm -q --quiet weldr-client; then
key=".body${key}"
fi
jq -r "${key}" "${fname}"
}
TEMPDIR=$(mktemp -d)
function cleanup() {
sudo rm -rf "$TEMPDIR"
}
trap cleanup EXIT
# Jenkins sets WORKSPACE to the job workspace, but if this script runs
# outside of Jenkins, we can set up a temporary directory instead.
if [[ ${WORKSPACE:-empty} == empty ]]; then
WORKSPACE=$(mktemp -d)
fi
# Set up temporary files.
BLUEPRINT_FILE=${TEMPDIR}/blueprint.toml
BLUEPRINT_NAME=empty
COMPOSE_START=${TEMPDIR}/compose-start-${TEST_ID}.json
COMPOSE_INFO=${TEMPDIR}/compose-info-${TEST_ID}.json
# Get the compose log.
get_compose_log () {
COMPOSE_ID=$1
LOG_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-aws.log
# Download the logs.
sudo composer-cli compose log "$COMPOSE_ID" | tee "$LOG_FILE" > /dev/null
}
# Get the compose metadata.
get_compose_metadata () {
COMPOSE_ID=$1
METADATA_FILE=${WORKSPACE}/osbuild-${ID}-${VERSION_ID}-aws.json
# Download the metadata.
sudo composer-cli compose metadata "$COMPOSE_ID" > /dev/null
# Find the tarball and extract it.
TARBALL=$(basename "$(find . -maxdepth 1 -type f -name "*-metadata.tar")")
sudo tar -xf "$TARBALL"
sudo rm -f "$TARBALL"
# Move the JSON file into place.
sudo cat "${COMPOSE_ID}".json | jq -M '.' | tee "$METADATA_FILE" > /dev/null
}
# Write a basic blueprint for our image.
tee "$BLUEPRINT_FILE" > /dev/null << EOF
name = "${BLUEPRINT_NAME}"
description = "A base system with bash"
version = "0.0.1"
EOF
# Prepare the blueprint for the compose.
greenprint "📋 Preparing blueprint"
sudo composer-cli blueprints push "$BLUEPRINT_FILE"
sudo composer-cli blueprints depsolve ${BLUEPRINT_NAME}
# Get worker unit file so we can watch the journal.
WORKER_UNIT=$(sudo systemctl list-units | grep -o -E "osbuild.*worker.*\.service")
sudo journalctl -af -n 1 -u "${WORKER_UNIT}" &
WORKER_JOURNAL_PID=$!
# Stop watching the worker journal when exiting.
trap 'sudo pkill -P ${WORKER_JOURNAL_PID}' EXIT
# Start the compose and upload to AWS.
greenprint "🚀 Starting compose"
sudo composer-cli --json compose start ${BLUEPRINT_NAME} qcow2 "$TEST_ID" "$S3_PROVIDER_CONFIG_FILE" | tee "$COMPOSE_START"
COMPOSE_ID=$(get_build_info ".build_id" "$COMPOSE_START")
# Wait for the compose to finish.
greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}"
while true; do
sudo composer-cli --json compose info "${COMPOSE_ID}" | tee "$COMPOSE_INFO" > /dev/null
COMPOSE_STATUS=$(get_build_info ".queue_status" "$COMPOSE_INFO")
# Is the compose finished?
if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
break
fi
# Wait 30 seconds and try again.
sleep 30
done
# Capture the compose logs from osbuild.
greenprint "💬 Getting compose log and metadata"
get_compose_log "$COMPOSE_ID"
get_compose_metadata "$COMPOSE_ID"
# Kill the journal monitor immediately and remove the trap
sudo pkill -P ${WORKER_JOURNAL_PID}
trap - EXIT
# Did the compose finish with success?
if [[ $COMPOSE_STATUS != FINISHED ]]; then
echo "Something went wrong with the compose. 😢"
exit 1
fi
# Delete the compose so we don't run out of disk space
sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null
# Find the image that we made in the AWS Bucket
greenprint "🔍 Search for created image"
if ! bash -c "${S3_CHECK_CMD}"; then
echo "Failed to find the image in the S3 Bucket"
exit 1
fi
function removeImageFromS3() {
bash -c "${S3_DELETE_CMD}"
}
if [ -n "${S3_DELETE_CMD}" ]; then
trap removeImageFromS3 EXIT
fi
# Generate a URL for the image
QCOW2_IMAGE_URL=$(bash -c "${S3_GET_URL_CMD}")
# Run the image on KVM
/usr/libexec/osbuild-composer-test/libvirt_test.sh qcow2 bios "${QCOW2_IMAGE_URL}"