Refactor Azure upload code to a separate package

As part of this, also move the AWS upload code to the same directory.
This commit is contained in:
Martin Sehnoutka 2019-11-27 09:07:30 +01:00 committed by Tom Gundersen
parent 7b54f5cfdc
commit 1135e9fe01
7 changed files with 131 additions and 111 deletions

View file

@ -5,7 +5,7 @@ import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/osbuild/osbuild-composer/internal/awsupload"
"github.com/osbuild/osbuild-composer/internal/upload/awsupload"
)
func main() {

View file

@ -1,116 +1,14 @@
package main
import (
"bufio"
"bytes"
"context"
"flag"
"fmt"
"io"
"log"
"net/url"
"os"
"path"
"sync"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/osbuild/osbuild-composer/internal/upload/azure"
)
// handleErrors terminates the process on any fatal error. One storage error
// is treated as benign: "container already exists" (HTTP 409), which simply
// means the target container was created earlier. A nil err is a no-op.
func handleErrors(err error) {
	if err == nil {
		return
	}
	// Service-specific errors carry a ServiceCode we can compare against
	// the azblob.ServiceCodeXxx constants.
	if serr, ok := err.(azblob.StorageError); ok && serr.ServiceCode() == azblob.ServiceCodeContainerAlreadyExists {
		// This error is not fatal
		fmt.Println("Received 409. Container already exists")
		return
	}
	// All other errors cause the program to exit.
	fmt.Println(err)
	os.Exit(1)
}
// azureCredentials holds the shared-key credentials used to authenticate
// against the Azure Blob service.
type azureCredentials struct {
	storageAccount   string // Azure storage account name
	storageAccessKey string // shared access key for that account
}
// azureImageMetadata describes where the uploaded image blob is stored.
type azureImageMetadata struct {
	containerName string // blob container the image is uploaded into
	imageName     string // blob name the image is stored under
}
// azureUploadImage uploads fileName to an Azure page blob (page blobs are
// required for VM images) in the container described by metadata. The file is
// read in chunks of azblob.PageBlobMaxUploadPagesBytes and at most `threads`
// chunks are uploaded in parallel. Any error aborts the process via
// handleErrors or log.Fatal.
func azureUploadImage(credentials azureCredentials, metadata azureImageMetadata, fileName string, threads int) {
	// Create a default request pipeline using your storage account name and account key.
	credential, err := azblob.NewSharedKeyCredential(credentials.storageAccount, credentials.storageAccessKey)
	handleErrors(err)
	p := azblob.NewPipeline(credential, azblob.PipelineOptions{})

	// get storage account blob service URL endpoint.
	URL, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/%s", credentials.storageAccount, metadata.containerName))

	// Create a ContainerURL object that wraps the container URL and a request
	// pipeline to make requests.
	containerURL := azblob.NewContainerURL(*URL, p)

	// Use a never-expiring context for all requests.
	ctx := context.Background()

	// Open the image file for reading.
	imageFile, err := os.Open(fileName)
	handleErrors(err)
	// FIX: the file was never closed; release it when the upload returns.
	defer imageFile.Close()

	// Stat image to get the file size.
	stat, err := imageFile.Stat()
	handleErrors(err)

	// Create page blob URL. Page blob is required for VM images.
	blobURL := containerURL.NewPageBlobURL(metadata.imageName)
	_, err = blobURL.Create(ctx, stat.Size(), 0, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
	handleErrors(err)

	// This channel simulates the behavior of a semaphore and bounds the
	// number of parallel upload goroutines.
	var semaphore = make(chan int, threads)
	var counter int64 = 0

	// Buffered reader to speed up the upload.
	reader := bufio.NewReader(imageFile)
	imageSize := stat.Size()

	// Run the upload: read sequentially, upload chunks concurrently.
	run := true
	var wg sync.WaitGroup
	for run {
		buffer := make([]byte, azblob.PageBlobMaxUploadPagesBytes)
		n, err := reader.Read(buffer)
		if err != nil {
			if err == io.EOF {
				run = false
			} else {
				panic(err)
			}
		}
		if n == 0 {
			break
		}
		wg.Add(1)
		semaphore <- 1
		go func(counter int64, buffer []byte, n int) {
			defer wg.Done()
			// FIX: use a goroutine-local error variable. The original
			// assigned to the enclosing function's shared `err`, which is a
			// data race between upload goroutines and the reader loop.
			_, uploadErr := blobURL.UploadPages(ctx, counter*azblob.PageBlobMaxUploadPagesBytes, bytes.NewReader(buffer[:n]), azblob.PageBlobAccessConditions{}, nil)
			if uploadErr != nil {
				log.Fatal(uploadErr)
			}
			<-semaphore
		}(counter, buffer, n)
		// FIX: typo in the progress message ("bytest" -> "bytes").
		fmt.Printf("\rProgress: uploading bytes %d-%d from %d bytes", counter*azblob.PageBlobMaxUploadPagesBytes, counter*azblob.PageBlobMaxUploadPagesBytes+int64(n), imageSize)
		counter++
	}
	wg.Wait()
}
func checkStringNotEmpty(variable string, errorMessage string) {
if variable == "" {
log.Fatal(errorMessage)
@ -137,12 +35,12 @@ func main() {
fmt.Println("Image to upload is:", fileName)
azureUploadImage(azureCredentials{
storageAccount: storageAccount,
storageAccessKey: storageAccessKey,
}, azureImageMetadata{
imageName: path.Base(fileName),
containerName: containerName,
azure.UploadImage(azure.Credentials{
StorageAccount: storageAccount,
StorageAccessKey: storageAccessKey,
}, azure.ImageMetadata{
ImageName: path.Base(fileName),
ContainerName: containerName,
}, fileName, threads)
}