Add support for importing the GCE image into GCP using the Weldr API. The credentials to be used can be specified in the upload settings and will then be used by the worker to authenticate with GCP. The GCP target credentials are passed to the Weldr API as the base64-encoded content of the GCP credentials JSON file. The reason is that the JSON file contains many values and its format could change in the future; this way, the Weldr API does not depend on the format of the credentials file in any way. Add a new test case for the GCP upload via Weldr and run it in CI. Signed-off-by: Tomas Hozza <thozza@redhat.com>
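For illustration, a minimal sketch of how a client could prepare the credentials value for the upload settings under this scheme; the helper name and the file path are hypothetical, the only requirement stated above is that the raw JSON file content is base64-encoded before being passed to the Weldr API:

	package main

	import (
		"encoding/base64"
		"fmt"
		"io/ioutil"
	)

	// encodeGCPCredentials reads a GCP service account JSON key file and
	// returns its content base64-encoded, ready to be placed into the
	// upload settings of a Weldr compose request.
	func encodeGCPCredentials(path string) (string, error) {
		data, err := ioutil.ReadFile(path) // raw JSON key file content
		if err != nil {
			return "", err
		}
		return base64.StdEncoding.EncodeToString(data), nil
	}

	func main() {
		// Hypothetical path to a service account key file.
		encoded, err := encodeGCPCredentials("/etc/osbuild-worker/gcp-credentials.json")
		if err != nil {
			panic(err)
		}
		fmt.Println(encoded)
	}

Decoding is the mirror image: the decoded JSON bytes ultimately reach the worker as the credentials parameter of getGCP() in the file below.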
package main

import (
	"context"
	"crypto/rand"
	"fmt"
	"io/ioutil"
	"log"
	"math"
	"math/big"
	"os"
	"path"
	"strings"

	"github.com/osbuild/osbuild-composer/internal/upload/oci"

	"github.com/google/uuid"
	"github.com/sirupsen/logrus"

	"github.com/osbuild/osbuild-composer/internal/cloud/awscloud"
	"github.com/osbuild/osbuild-composer/internal/cloud/gcp"
	"github.com/osbuild/osbuild-composer/internal/common"
	osbuild "github.com/osbuild/osbuild-composer/internal/osbuild2"
	"github.com/osbuild/osbuild-composer/internal/target"
	"github.com/osbuild/osbuild-composer/internal/upload/azure"
	"github.com/osbuild/osbuild-composer/internal/upload/koji"
	"github.com/osbuild/osbuild-composer/internal/upload/vmware"
	"github.com/osbuild/osbuild-composer/internal/worker"
	"github.com/osbuild/osbuild-composer/internal/worker/clienterrors"
)

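// OSBuildJobImpl implements the worker's osbuild job and holds the relevant
// worker configuration: the osbuild store, the output directory and the
// credentials configured for the individual upload targets.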
type OSBuildJobImpl struct {
	Store          string
	Output         string
	KojiServers    map[string]koji.GSSAPICredentials
	GCPCreds       string
	AzureCreds     *azure.Credentials
	AWSCreds       string
	AWSBucket      string
	GenericS3Creds string
}

// getAWS returns an *awscloud.AWS object using the credentials from the
// request. If they are not provided, it falls back to the credentials from
// the worker configuration. If neither is available, awscloud.NewDefault()
// is used.
func (impl *OSBuildJobImpl) getAWS(region string, accessId string, secret string, token string) (*awscloud.AWS, error) {
	if accessId != "" && secret != "" {
		return awscloud.New(region, accessId, secret, token)
	} else if impl.AWSCreds != "" {
		return awscloud.NewFromFile(impl.AWSCreds, region)
	} else {
		return awscloud.NewDefault(region)
	}
}

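// getAWSForEndpoint returns an *awscloud.AWS object for a generic S3-compatible
// endpoint. Credentials from the request take precedence; otherwise the
// generic S3 credentials from the worker configuration are used. It is an
// error if neither is available.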
func (impl *OSBuildJobImpl) getAWSForEndpoint(endpoint, region, accessId, secret, token string) (*awscloud.AWS, error) {
	if accessId != "" && secret != "" {
		return awscloud.NewForEndpoint(endpoint, region, accessId, secret, token)
	}
	if impl.GenericS3Creds != "" {
		return awscloud.NewForEndpointFromFile(impl.GenericS3Creds, endpoint, region)
	}
	return nil, fmt.Errorf("no credentials found")
}

// getGCP returns a *gcp.GCP object using credentials based on the following
// order of preference:
//
// 1. If the provided `credentials` parameter is not `nil`, it is used to
//    authenticate with GCP.
//
// 2. If a path to a GCP credentials file was provided in the worker's
//    configuration, it is used to authenticate with GCP.
//
// 3. Use Application Default Credentials from the Google library, which tries
//    to automatically find a way to authenticate using the following options:
//
//    3a. If the `GOOGLE_APPLICATION_CREDENTIALS` environment variable is set,
//        it tries to load and use credentials from the file pointed to by the
//        variable.
//
//    3b. It tries to authenticate using the service account attached to the
//        resource which is running the code (e.g. a Google Compute Engine VM).
func (impl *OSBuildJobImpl) getGCP(credentials []byte) (*gcp.GCP, error) {
	if credentials != nil {
		logrus.Info("[GCP] 🔑 using credentials provided with the job request")
		return gcp.New(credentials)
	} else if impl.GCPCreds != "" {
		logrus.Info("[GCP] 🔑 using credentials from the worker configuration")
		return gcp.NewFromFile(impl.GCPCreds)
	} else {
		logrus.Info("[GCP] 🔑 using Application Default Credentials via Google library")
		return gcp.New(nil)
	}
}

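// validateResult inspects the job result and normalizes it: a result that
// carries a JobError, or whose osbuild output is missing or unsuccessful, is
// logged and marked as failed; otherwise the result is marked as a success.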
func validateResult(result *worker.OSBuildJobResult, jobID string) {
	logWithId := logrus.WithField("jobId", jobID)
	if result.JobError != nil {
		logWithId.Errorf("osbuild job failed: %s", result.JobError.Reason)
		return
	}
	// If the job failed but the JobError is nil, we still need to handle
	// this as an error.
	if result.OSBuildOutput == nil || !result.OSBuildOutput.Success {
		reason := "osbuild job was unsuccessful"
		logWithId.Errorf("osbuild job failed: %s", reason)
		result.JobError = clienterrors.WorkerClientError(clienterrors.ErrorBuildJob, reason)
		return
	}
	logWithId.Infof("osbuild job succeeded")
	result.Success = true
}

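// uploadToS3 uploads the image found under outputDirectory/exportPath/filename
// to the given bucket and key (a random UUID-based key is generated when none
// is provided) and records a presigned URL for the object in the job result.
// When streamOptimized is set, the image is converted to a stream-optimized
// VMDK first, unless the conversion already happened (streamOptimizedPath is
// non-empty). Errors are reported through osbuildJobResult.JobError.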
func uploadToS3(a *awscloud.AWS, outputDirectory, exportPath, bucket, key, filename string, osbuildJobResult *worker.OSBuildJobResult, genericS3 bool, streamOptimized bool, streamOptimizedPath string) (err error) {
	imagePath := path.Join(outputDirectory, exportPath, filename)

	// *** SPECIAL VMDK HANDLING START ***
	// Upload the VMDK image as stream-optimized.
	// The VMDK conversion is applied only when the job was submitted by the
	// Weldr API, therefore we need to do the conversion here explicitly if it
	// has not been done yet.
	if streamOptimized {
		// If streamOptimizedPath is empty, the conversion was not done
		if streamOptimizedPath == "" {
			var f *os.File
			f, err = vmware.OpenAsStreamOptimizedVmdk(imagePath)
			if err != nil {
				osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidConfig, err.Error())
				return nil
			}
			streamOptimizedPath = f.Name()
			f.Close()
		}
		// Replace the original file with the stream-optimized one
		err = os.Rename(streamOptimizedPath, imagePath)
		if err != nil {
			osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidConfig, err.Error())
			return nil
		}
	}
	// *** SPECIAL VMDK HANDLING END ***

	if key == "" {
		key = uuid.New().String()
	}
	key += "-" + filename

	_, err = a.Upload(imagePath, bucket, key)
	if err != nil {
		osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorUploadingImage, err.Error())
		return
	}

	url, err := a.S3ObjectPresignedURL(bucket, key)
	if err != nil {
		osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorUploadingImage, err.Error())
		return
	}

	var targetResult *target.TargetResult
	if genericS3 {
		targetResult = target.NewGenericS3TargetResult(&target.GenericS3TargetResultOptions{URL: url})
	} else {
		targetResult = target.NewAWSS3TargetResult(&target.AWSS3TargetResultOptions{URL: url})
	}
	osbuildJobResult.TargetResults = append(osbuildJobResult.TargetResults, targetResult)

	osbuildJobResult.Success = true
	osbuildJobResult.UploadStatus = "success"

	return
}

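// Run executes a single osbuild job: it reads the job arguments (fetching the
// manifest from dynamic args if needed), runs osbuild, uploads the resulting
// image artifact (when an image name is set) and, depending on the job's
// target, imports the image into the respective cloud. The result is always
// reported back to osbuild-composer, even when the job fails.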
func (impl *OSBuildJobImpl) Run(job worker.Job) error {
	logWithId := logrus.WithField("jobId", job.Id().String())
	// Initialize the variable needed for reporting back to osbuild-composer.
	var osbuildJobResult *worker.OSBuildJobResult = &worker.OSBuildJobResult{
		Success: false,
		OSBuildOutput: &osbuild.Result{
			Success: false,
		},
		UploadStatus: "failure",
	}

	var outputDirectory string

	// In all cases it is necessary to report the result back to the osbuild-composer worker API.
	defer func() {
		validateResult(osbuildJobResult, job.Id().String())

		err := job.Update(osbuildJobResult)
		if err != nil {
			logWithId.Errorf("Error reporting job result: %v", err)
		}

		err = os.RemoveAll(outputDirectory)
		if err != nil {
			logWithId.Errorf("Error removing temporary output directory (%s): %v", outputDirectory, err)
		}
	}()

	outputDirectory, err := ioutil.TempDir(impl.Output, job.Id().String()+"-*")
	if err != nil {
		return fmt.Errorf("error creating temporary output directory: %v", err)
	}

	// Read the job specification
	var args worker.OSBuildJob
	err = job.Args(&args)
	if err != nil {
		return err
	}

	// In case the manifest is empty, try to get it from dynamic args
	if len(args.Manifest) == 0 {
		if job.NDynamicArgs() > 0 {
			var manifestJR worker.ManifestJobByIDResult
			err = job.DynamicArgs(0, &manifestJR)
			if err != nil {
				osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorParsingDynamicArgs, "Error parsing dynamic args")
				return err
			}

			// skip the job if the manifest generation failed
			if manifestJR.JobError != nil {
				osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorManifestDependency, "Manifest dependency failed")
				return nil
			}
			args.Manifest = manifestJR.Manifest
			if len(args.Manifest) == 0 {
				osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorEmptyManifest, "Received empty manifest")
				return nil
			}
		} else {
			osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorEmptyManifest, "Job has no manifest")
			return nil
		}
	}
	// copy pipeline info to the result
	osbuildJobResult.PipelineNames = args.PipelineNames

	// The specification allows multiple upload targets because it is an
	// array, but we don't support them. Return an error to osbuild-composer.
	if len(args.Targets) > 1 {
		logrus.Warnf("The job specification contains more than one upload target. This is not supported any more. " +
			"This might indicate a deployment of incompatible osbuild-worker and osbuild-composer versions.")
		return nil
	}

	exports := args.Exports
	if len(exports) == 0 {
		// job did not define exports, likely coming from an older version of composer
		// fall back to default "assembler"
		exports = []string{"assembler"}
	} else if len(exports) > 1 {
		// this worker only supports returning one (1) export
		return fmt.Errorf("at most one build artifact can be exported")
	}

	// Run osbuild and handle two kinds of errors
	osbuildJobResult.OSBuildOutput, err = RunOSBuild(args.Manifest, impl.Store, outputDirectory, exports, os.Stderr)
	// First handle the case when "running" osbuild failed
	if err != nil {
		osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorBuildJob, "osbuild build failed")
		return err
	}

	// Include pipeline stages output inside the worker's logs.
	// Order pipelines based on PipelineNames from the job.
	for _, pipelineName := range osbuildJobResult.PipelineNames.All() {
		pipelineLog, hasLog := osbuildJobResult.OSBuildOutput.Log[pipelineName]
		if !hasLog {
			// no pipeline output
			continue
		}
		logWithId.Infof("%s pipeline results:\n", pipelineName)
		for _, stageResult := range pipelineLog {
			if stageResult.Success {
				logWithId.Infof(" %s success", stageResult.Type)
			} else {
				logWithId.Infof(" %s failure:", stageResult.Type)
				stageOutput := strings.Split(stageResult.Output, "\n")
				for _, line := range stageOutput {
					logWithId.Infof(" %s", line)
				}
			}
		}
	}

	// Second handle the case when the build failed, but osbuild finished successfully
	if !osbuildJobResult.OSBuildOutput.Success {
		osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorBuildJob, "osbuild build failed")
		return nil
	}

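	// streamOptimizedPath holds the path of the stream-optimized VMDK copy of
	// the image, if one was created while uploading the build artifact; the
	// VMWare and S3 targets below reuse it instead of converting again.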
streamOptimizedPath := ""
|
|
|
|
// NOTE: Currently OSBuild supports multiple exports, but this isn't used
|
|
// by any of the image types and it can't be specified during the request.
|
|
// Use the first (and presumably only) export for the imagePath.
|
|
exportPath := exports[0]
|
|
if osbuildJobResult.OSBuildOutput.Success && args.ImageName != "" {
|
|
var f *os.File
|
|
imagePath := path.Join(outputDirectory, exportPath, args.ImageName)
|
|
if args.StreamOptimized {
|
|
f, err = vmware.OpenAsStreamOptimizedVmdk(imagePath)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
streamOptimizedPath = f.Name()
|
|
} else {
|
|
f, err = os.Open(imagePath)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
}
|
|
err = job.UploadArtifact(args.ImageName, f)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
f.Close()
|
|
}
|
|
|
|
if len(args.Targets) == 0 {
|
|
// There is no upload target, mark this job a success.
|
|
osbuildJobResult.Success = true
|
|
osbuildJobResult.UploadStatus = "success"
|
|
} else if len(args.Targets) == 1 {
|
|
switch options := args.Targets[0].Options.(type) {
|
|
case *target.VMWareTargetOptions:
|
|
credentials := vmware.Credentials{
|
|
Username: options.Username,
|
|
Password: options.Password,
|
|
Host: options.Host,
|
|
Cluster: options.Cluster,
|
|
Datacenter: options.Datacenter,
|
|
Datastore: options.Datastore,
|
|
}
|
|
|
|
tempDirectory, err := ioutil.TempDir(impl.Output, job.Id().String()+"-vmware-*")
|
|
if err != nil {
|
|
osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidConfig, err.Error())
|
|
return nil
|
|
}
|
|
|
|
defer func() {
|
|
err := os.RemoveAll(tempDirectory)
|
|
if err != nil {
|
|
logWithId.Errorf("Error removing temporary directory for vmware symlink(%s): %v", tempDirectory, err)
|
|
}
|
|
}()
|
|
|
|
// create a symlink so that uploaded image has the name specified by user
|
|
imageName := args.Targets[0].ImageName + ".vmdk"
|
|
imagePath := path.Join(tempDirectory, imageName)
|
|
err = os.Symlink(streamOptimizedPath, imagePath)
|
|
if err != nil {
|
|
osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidConfig, err.Error())
|
|
return nil
|
|
}
|
|
|
|
err = vmware.UploadImage(credentials, imagePath)
|
|
if err != nil {
|
|
osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorUploadingImage, err.Error())
|
|
return nil
|
|
}
|
|
|
|
osbuildJobResult.Success = true
|
|
osbuildJobResult.UploadStatus = "success"
|
|
case *target.AWSTargetOptions:
|
|
a, err := impl.getAWS(options.Region, options.AccessKeyID, options.SecretAccessKey, options.SessionToken)
|
|
if err != nil {
|
|
osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidConfig, err.Error())
|
|
return nil
|
|
}
|
|
|
|
key := options.Key
|
|
if key == "" {
|
|
key = uuid.New().String()
|
|
}
|
|
|
|
bucket := options.Bucket
|
|
if impl.AWSBucket != "" {
|
|
bucket = impl.AWSBucket
|
|
}
|
|
_, err = a.Upload(path.Join(outputDirectory, exportPath, options.Filename), bucket, key)
|
|
if err != nil {
|
|
osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorUploadingImage, err.Error())
|
|
return nil
|
|
}
|
|
|
|
ami, err := a.Register(args.Targets[0].ImageName, bucket, key, options.ShareWithAccounts, common.CurrentArch())
|
|
if err != nil {
|
|
osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorImportingImage, err.Error())
|
|
return nil
|
|
}
|
|
|
|
if ami == nil {
|
|
osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorImportingImage, "No ami returned")
|
|
return nil
|
|
}
|
|
|
|
osbuildJobResult.TargetResults = append(osbuildJobResult.TargetResults, target.NewAWSTargetResult(&target.AWSTargetResultOptions{
|
|
Ami: *ami,
|
|
Region: options.Region,
|
|
}))
|
|
|
|
osbuildJobResult.Success = true
|
|
osbuildJobResult.UploadStatus = "success"
|
|
case *target.AWSS3TargetOptions:
|
|
a, err := impl.getAWS(options.Region, options.AccessKeyID, options.SecretAccessKey, options.SessionToken)
|
|
if err != nil {
|
|
osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidConfig, err.Error())
|
|
return nil
|
|
}
|
|
|
|
bucket := options.Bucket
|
|
if impl.AWSBucket != "" {
|
|
bucket = impl.AWSBucket
|
|
}
|
|
|
|
err = uploadToS3(a, outputDirectory, exportPath, bucket, options.Key, options.Filename, osbuildJobResult, false, args.StreamOptimized, streamOptimizedPath)
|
|
if err != nil {
|
|
return nil
|
|
}
|
|
case *target.GenericS3TargetOptions:
|
|
a, err := impl.getAWSForEndpoint(options.Endpoint, options.Region, options.AccessKeyID, options.SecretAccessKey, options.SessionToken)
|
|
if err != nil {
|
|
osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidConfig, err.Error())
|
|
return nil
|
|
}
|
|
|
|
err = uploadToS3(a, outputDirectory, exportPath, options.Bucket, options.Key, options.Filename, osbuildJobResult, true, args.StreamOptimized, streamOptimizedPath)
|
|
if err != nil {
|
|
return nil
|
|
}
|
|
case *target.AzureTargetOptions:
|
|
azureStorageClient, err := azure.NewStorageClient(options.StorageAccount, options.StorageAccessKey)
|
|
if err != nil {
|
|
osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidConfig, err.Error())
|
|
return err
|
|
}
|
|
|
|
metadata := azure.BlobMetadata{
|
|
StorageAccount: options.StorageAccount,
|
|
ContainerName: options.Container,
|
|
BlobName: args.Targets[0].ImageName,
|
|
}
|
|
|
|
const azureMaxUploadGoroutines = 4
|
|
err = azureStorageClient.UploadPageBlob(
|
|
metadata,
|
|
path.Join(outputDirectory, exportPath, options.Filename),
|
|
azureMaxUploadGoroutines,
|
|
)
|
|
|
|
if err != nil {
|
|
osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorUploadingImage, err.Error())
|
|
return nil
|
|
}
|
|
|
|
osbuildJobResult.Success = true
|
|
osbuildJobResult.UploadStatus = "success"
|
|
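		// The GCP flow: upload the image to a Storage bucket, import it into
		// Compute Engine as a new image and then delete the uploaded object,
		// regardless of whether the import succeeded.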
		case *target.GCPTargetOptions:
			ctx := context.Background()

			g, err := impl.getGCP(options.Credentials)
			if err != nil {
				osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidConfig, err.Error())
				return nil
			}

			logWithId.Infof("[GCP] 🚀 Uploading image to: %s/%s", options.Bucket, options.Object)
			_, err = g.StorageObjectUpload(ctx, path.Join(outputDirectory, exportPath, options.Filename),
				options.Bucket, options.Object, map[string]string{gcp.MetadataKeyImageName: args.Targets[0].ImageName})
			if err != nil {
				osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorUploadingImage, err.Error())
				return nil
			}

			logWithId.Infof("[GCP] 📥 Importing image into Compute Engine as '%s'", args.Targets[0].ImageName)

			_, importErr := g.ComputeImageInsert(ctx, options.Bucket, options.Object, args.Targets[0].ImageName, []string{options.Region}, gcp.GuestOsFeaturesByDistro(options.Os))
			if importErr == nil {
				logWithId.Infof("[GCP] 🎉 Image import finished successfully")
			}

			// Clean up storage before checking for errors
			logWithId.Infof("[GCP] 🧹 Deleting uploaded image file: %s/%s", options.Bucket, options.Object)
			if err = g.StorageObjectDelete(ctx, options.Bucket, options.Object); err != nil {
				logWithId.Errorf("[GCP] Encountered error while deleting object: %v", err)
			}

			// check the error from ComputeImageInsert()
			if importErr != nil {
				osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorImportingImage, importErr.Error())
				return nil
			}
			logWithId.Infof("[GCP] 💿 Image URL: %s", g.ComputeImageURL(args.Targets[0].ImageName))

			if len(options.ShareWithAccounts) > 0 {
				logWithId.Infof("[GCP] 🔗 Sharing the image with: %+v", options.ShareWithAccounts)
				err = g.ComputeImageShare(ctx, args.Targets[0].ImageName, options.ShareWithAccounts)
				if err != nil {
					osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorSharingTarget, err.Error())
					return nil
				}
			}

			osbuildJobResult.TargetResults = append(osbuildJobResult.TargetResults, target.NewGCPTargetResult(&target.GCPTargetResultOptions{
				ImageName: args.Targets[0].ImageName,
				ProjectID: g.GetProjectID(),
			}))

			osbuildJobResult.Success = true
			osbuildJobResult.UploadStatus = "success"
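		// The Azure image flow: find (or create) a storage account tagged for
		// image builder use in the target location, upload the image as a
		// page blob into an "imagebuilder" container and register it as an
		// image.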
		case *target.AzureImageTargetOptions:
			ctx := context.Background()

			if impl.AzureCreds == nil {
				osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorSharingTarget, "osbuild job has org.osbuild.azure.image target but this worker doesn't have azure credentials")
				return nil
			}

			c, err := azure.NewClient(*impl.AzureCreds, options.TenantID)
			if err != nil {
				osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidTargetConfig, err.Error())
				return nil
			}
			logWithId.Info("[Azure] 🔑 Logged in Azure")

			storageAccountTag := azure.Tag{
				Name:  "imageBuilderStorageAccount",
				Value: fmt.Sprintf("location=%s", options.Location),
			}

			storageAccount, err := c.GetResourceNameByTag(
				ctx,
				options.SubscriptionID,
				options.ResourceGroup,
				storageAccountTag,
			)
			if err != nil {
				osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidTargetConfig, fmt.Sprintf("searching for a storage account failed: %v", err))
				return nil
			}

			if storageAccount == "" {
				logWithId.Info("[Azure] 📦 Creating a new storage account")
				const storageAccountPrefix = "ib"
				storageAccount = azure.RandomStorageAccountName(storageAccountPrefix)

				err := c.CreateStorageAccount(
					ctx,
					options.SubscriptionID,
					options.ResourceGroup,
					storageAccount,
					options.Location,
					storageAccountTag,
				)
				if err != nil {
					osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidTargetConfig, fmt.Sprintf("creating a new storage account failed: %v", err))
					return nil
				}
			}

			logWithId.Info("[Azure] 🔑📦 Retrieving a storage account key")
			storageAccessKey, err := c.GetStorageAccountKey(
				ctx,
				options.SubscriptionID,
				options.ResourceGroup,
				storageAccount,
			)
			if err != nil {
				osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidTargetConfig, fmt.Sprintf("retrieving the storage account key failed: %v", err))
				return nil
			}

			azureStorageClient, err := azure.NewStorageClient(storageAccount, storageAccessKey)
			if err != nil {
				osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidTargetConfig, fmt.Sprintf("creating the storage client failed: %v", err))
				return nil
			}

			storageContainer := "imagebuilder"

			logWithId.Info("[Azure] 📦 Ensuring that we have a storage container")
			err = azureStorageClient.CreateStorageContainerIfNotExist(ctx, storageAccount, storageContainer)
			if err != nil {
				osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidTargetConfig, fmt.Sprintf("cannot create a storage container: %v", err))
				return nil
			}

			blobName := args.Targets[0].ImageName
			if !strings.HasSuffix(blobName, ".vhd") {
				blobName += ".vhd"
			}

			logWithId.Info("[Azure] ⬆ Uploading the image")
			err = azureStorageClient.UploadPageBlob(
				azure.BlobMetadata{
					StorageAccount: storageAccount,
					ContainerName:  storageContainer,
					BlobName:       blobName,
				},
				path.Join(outputDirectory, exportPath, options.Filename),
				azure.DefaultUploadThreads,
			)
			if err != nil {
				osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorUploadingImage, fmt.Sprintf("uploading the image failed: %v", err))
				return nil
			}

			logWithId.Info("[Azure] 📝 Registering the image")
			err = c.RegisterImage(
				ctx,
				options.SubscriptionID,
				options.ResourceGroup,
				storageAccount,
				storageContainer,
				blobName,
				args.Targets[0].ImageName,
				options.Location,
			)
			if err != nil {
				osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorImportingImage, fmt.Sprintf("registering the image failed: %v", err))
				return nil
			}

			logWithId.Info("[Azure] 🎉 Image uploaded and registered!")

			osbuildJobResult.TargetResults = append(osbuildJobResult.TargetResults, target.NewAzureImageTargetResult(&target.AzureImageTargetResultOptions{
				ImageName: args.Targets[0].ImageName,
			}))

			osbuildJobResult.Success = true
			osbuildJobResult.UploadStatus = "success"
		case *target.OCITargetOptions:
			// create an ociClient uploader with a valid storage client
			var ociClient oci.Client
			ociClient, err = oci.NewClient(&oci.ClientParams{
				User:        options.User,
				Region:      options.Region,
				Tenancy:     options.Tenancy,
				Fingerprint: options.Fingerprint,
				PrivateKey:  options.PrivateKey,
			})
			if err != nil {
				osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidConfig, err.Error())
				return nil
			}
			log.Print("[OCI] 🔑 Logged in OCI")
			log.Print("[OCI] ⬆ Uploading the image")
			file, err := os.Open(path.Join(outputDirectory, exportPath, options.FileName))
			if err != nil {
				osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidConfig, err.Error())
				return nil
			}
			defer file.Close()
			i, _ := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
			imageID, err := ociClient.Upload(
				fmt.Sprintf("osbuild-upload-%d", i),
				options.Bucket,
				options.Namespace,
				file,
				options.Compartment,
				args.Targets[0].ImageName,
			)
			if err != nil {
				osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidConfig, err.Error())
				return nil
			}
			log.Print("[OCI] 🎉 Image uploaded and registered!")

			osbuildJobResult.TargetResults = append(
				osbuildJobResult.TargetResults,
				target.NewOCITargetResult(&target.OCITargetResultOptions{ImageID: imageID}),
			)
			osbuildJobResult.Success = true
			osbuildJobResult.UploadStatus = "success"
		default:
			osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidTarget, fmt.Sprintf("invalid target type: %s", args.Targets[0].Name))
			return nil
		}
	}

	return nil
}
|