osbuild-service-maintenance: Clean up expired images
parent 742e0e6616
commit c43ad2b22a

23 changed files with 899 additions and 32 deletions

Makefile | 1
@@ -116,6 +116,7 @@ build:
     go build -o bin/osbuild-upload-aws ./cmd/osbuild-upload-aws/
     go build -o bin/osbuild-upload-gcp ./cmd/osbuild-upload-gcp/
     go build -o bin/osbuild-mock-openid-provider ./cmd/osbuild-mock-openid-provider
+    go build -o bin/osbuild-service-maintenance ./cmd/osbuild-service-maintenance
     go test -c -tags=integration -o bin/osbuild-composer-cli-tests ./cmd/osbuild-composer-cli-tests/main_test.go
     go test -c -tags=integration -o bin/osbuild-weldr-tests ./internal/client/
     go test -c -tags=integration -o bin/osbuild-dnf-json-tests ./cmd/osbuild-dnf-json-tests/main_test.go

@@ -6,18 +6,21 @@ import (
     "context"
     "fmt"
     "testing"
+    "time"

+    "github.com/google/uuid"
     "github.com/jackc/pgx/v4"
+    "github.com/stretchr/testify/require"

     "github.com/osbuild/osbuild-composer/internal/jobqueue"
     "github.com/osbuild/osbuild-composer/internal/jobqueue/dbjobqueue"
     "github.com/osbuild/osbuild-composer/internal/jobqueue/jobqueuetest"
 )

-func TestJobQueueInterface(t *testing.T) {
-    jobqueuetest.TestJobQueue(t, func() (jobqueue.JobQueue, func(), error) {
-        url := "postgres://postgres:foobar@localhost:5432/osbuildcomposer"
+const url = "postgres://postgres:foobar@localhost:5432/osbuildcomposer"
+
+func TestJobQueueInterface(t *testing.T) {
+    makeJobQueue := func() (jobqueue.JobQueue, func(), error) {
         // clear db before each run
         conn, err := pgx.Connect(context.Background(), url)
         if err != nil {
@@ -43,5 +46,176 @@ func TestJobQueueInterface(t *testing.T) {
             q.Close()
         }
         return q, stop, nil
-    })
-}
+    }
+
+    jobqueuetest.TestJobQueue(t, makeJobQueue)
+
+    wrap := func(f func(t *testing.T, q *dbjobqueue.DBJobQueue)) func(*testing.T) {
+        q, stop, err := makeJobQueue()
+        require.NoError(t, err)
+        return func(t *testing.T) {
+            defer stop() // use defer because f() might call testing.T.FailNow()
+            dbq, ok := q.(*dbjobqueue.DBJobQueue)
+            require.True(t, ok)
+            f(t, dbq)
+        }
+    }
+
+    t.Run("maintenance-query-jobs-before", wrap(testJobsUptoByType))
+    t.Run("maintenance-delete-job-and-dependencies", wrap(testDeleteJobAndDependencies))
+}
+
+func setFinishedAt(t *testing.T, q *dbjobqueue.DBJobQueue, id uuid.UUID, finished time.Time) {
+    conn, err := pgx.Connect(context.Background(), url)
+    require.NoError(t, err)
+    defer conn.Close(context.Background())
+
+    started := finished.Add(-time.Second)
+    queued := started.Add(-time.Second)
+
+    _, err = conn.Exec(context.Background(), "UPDATE jobs SET queued_at = $1, started_at = $2, finished_at = $3 WHERE id = $4", queued, started, finished, id)
+    require.NoError(t, err)
+}
+
+func testJobsUptoByType(t *testing.T, q *dbjobqueue.DBJobQueue) {
+    date80 := time.Date(1980, time.January, 1, 0, 0, 0, 0, time.UTC)
+    date85 := time.Date(1985, time.January, 1, 0, 0, 0, 0, time.UTC)
+    date90 := time.Date(1990, time.January, 1, 0, 0, 0, 0, time.UTC)
+
+    id80, err := q.Enqueue("octopus", nil, nil)
+    require.NoError(t, err)
+    require.NotEqual(t, uuid.Nil, id80)
+    _, _, _, _, _, err = q.Dequeue(context.Background(), []string{"octopus"})
+    require.NoError(t, err)
+    err = q.FinishJob(id80, nil)
+    require.NoError(t, err)
+    setFinishedAt(t, q, id80, date80)
+
+    id85, err := q.Enqueue("octopus", nil, nil)
+    require.NoError(t, err)
+    require.NotEqual(t, uuid.Nil, id85)
+    _, _, _, _, _, err = q.Dequeue(context.Background(), []string{"octopus"})
+    require.NoError(t, err)
+    err = q.FinishJob(id85, nil)
+    require.NoError(t, err)
+    setFinishedAt(t, q, id85, date85)
+
+    ids, err := q.JobsUptoByType([]string{"octopus"}, date85)
+    require.NoError(t, err)
+    require.ElementsMatch(t, []uuid.UUID{id80}, ids["octopus"])
+
+    ids, err = q.JobsUptoByType([]string{"octopus"}, date90)
+    require.NoError(t, err)
+    require.ElementsMatch(t, []uuid.UUID{id80, id85}, ids["octopus"])
+}
+
+func testDeleteJobAndDependencies(t *testing.T, q *dbjobqueue.DBJobQueue) {
+    // id1 -> id2 -> id3
+    id1, err := q.Enqueue("octopus", nil, nil)
+    require.NoError(t, err)
+    require.NotEqual(t, uuid.Nil, id1)
+    id2, err := q.Enqueue("octopus", nil, []uuid.UUID{id1})
+    require.NoError(t, err)
+    require.NotEqual(t, uuid.Nil, id2)
+    id3, err := q.Enqueue("octopus", nil, []uuid.UUID{id2})
+    require.NoError(t, err)
+    require.NotEqual(t, uuid.Nil, id3)
+
+    c1, err := q.Enqueue("octopus", nil, nil)
+    require.NoError(t, err)
+    require.NotEqual(t, uuid.Nil, c1)
+    c2, err := q.Enqueue("octopus", nil, []uuid.UUID{c1})
+    require.NoError(t, err)
+    require.NotEqual(t, uuid.Nil, c2)
+    c3, err := q.Enqueue("octopus", nil, []uuid.UUID{c2})
+    require.NoError(t, err)
+    require.NotEqual(t, uuid.Nil, c3)
+    controls := []uuid.UUID{c1, c2, c3}
+
+    _, _, _, err = q.Job(c1)
+    require.NoError(t, err)
+
+    require.NoError(t, q.DeleteJobIncludingDependencies(id3))
+    for _, id := range []uuid.UUID{id1, id2, id3} {
+        _, _, _, err = q.Job(id)
+        require.ErrorIs(t, err, jobqueue.ErrNotExist)
+    }
+
+    // controls should still exist
+    for _, c := range controls {
+        _, _, _, err = q.Job(c)
+        require.NoError(t, err)
+    }
+
+    // id1 -> id2 -> id4 && id3 -> id4
+    id1, err = q.Enqueue("octopus", nil, nil)
+    require.NoError(t, err)
+    require.NotEqual(t, uuid.Nil, id1)
+    id2, err = q.Enqueue("octopus", nil, []uuid.UUID{id1})
+    require.NoError(t, err)
+    require.NotEqual(t, uuid.Nil, id2)
+    id3, err = q.Enqueue("octopus", nil, nil)
+    require.NoError(t, err)
+    require.NotEqual(t, uuid.Nil, id3)
+    id4, err := q.Enqueue("octopus", nil, []uuid.UUID{id2, id3})
+    require.NoError(t, err)
+    require.NotEqual(t, uuid.Nil, id4)
+
+    require.NoError(t, q.DeleteJobIncludingDependencies(id4))
+    for _, id := range []uuid.UUID{id1, id2, id3, id4} {
+        _, _, _, err = q.Job(id)
+        require.ErrorIs(t, err, jobqueue.ErrNotExist)
+    }
+
+    // controls should still exist
+    for _, c := range controls {
+        _, _, _, err = q.Job(c)
+        require.NoError(t, err)
+    }
+
+    // id1 has 2 dependants, and the maintenance queries currently do not account for this
+    // situation as it does not occur in the service. This should be changed once we allow
+    // multiple build jobs per depsolve job, and the depsolve job should only be removed once
+    // all the build jobs have been dealt with.
+    id1, err = q.Enqueue("octopus", nil, nil)
+    require.NoError(t, err)
+    require.NotEqual(t, uuid.Nil, id1)
+    id2a, err := q.Enqueue("octopus", nil, []uuid.UUID{id1})
+    require.NoError(t, err)
+    require.NotEqual(t, uuid.Nil, id2a)
+    id2b, err := q.Enqueue("octopus", nil, []uuid.UUID{id1})
+    require.NoError(t, err)
+    require.NotEqual(t, uuid.Nil, id2b)
+    id3, err = q.Enqueue("octopus", nil, []uuid.UUID{id2a})
+    require.NoError(t, err)
+    require.NotEqual(t, uuid.Nil, id3)
+
+    require.NoError(t, q.DeleteJobIncludingDependencies(id3))
+    for _, id := range []uuid.UUID{id1, id2a, id3} {
+        _, _, _, err = q.Job(id)
+        require.ErrorIs(t, err, jobqueue.ErrNotExist)
+    }
+
+    // id2b still exists
+    _, _, _, err = q.Job(id2b)
+    require.NoError(t, err)
+
+    // id2b can still be deleted with its dependencies missing
+    require.NoError(t, q.DeleteJobIncludingDependencies(id2b))
+    _, _, _, err = q.Job(id2b)
+    require.ErrorIs(t, err, jobqueue.ErrNotExist)
+
+    // controls should still exist
+    for _, c := range controls {
+        _, _, _, err = q.Job(c)
+        require.NoError(t, err)
+    }
+
+    require.NoError(t, q.DeleteJobIncludingDependencies(uuid.Nil))
+    // controls should still exist
+    for _, c := range controls {
+        _, _, _, err = q.Job(c)
+        require.NoError(t, err)
+    }
+}

cmd/osbuild-service-maintenance/aws.go | 72 (new file)
@@ -0,0 +1,72 @@
package main

import (
    "context"
    "sync"
    "time"

    "github.com/sirupsen/logrus"
    "golang.org/x/sync/semaphore"

    "github.com/osbuild/osbuild-composer/internal/cloud/awscloud"
)

func AWSCleanup(maxConcurrentRequests int, dryRun bool, accessKeyID, accessKey, region string, cutoff time.Time) error {
    a, err := awscloud.New(region, accessKeyID, accessKey, "")
    if err != nil {
        return err
    }

    var wg sync.WaitGroup
    sem := semaphore.NewWeighted(int64(maxConcurrentRequests))
    images, err := a.DescribeImagesByTag("Name", "composer-api-*")
    if err != nil {
        return err
    }

    for index, image := range images {
        // TODO are these actual concerns?
        if image.ImageId == nil {
            logrus.Infof("ImageId is nil %v", image)
            continue
        }
        if image.CreationDate == nil {
            logrus.Infof("Image %v has nil creationdate", *image.ImageId)
            continue
        }

        created, err := time.Parse(time.RFC3339, *image.CreationDate)
        if err != nil {
            logrus.Infof("Unable to parse date %s for image %s", *image.CreationDate, *image.ImageId)
            continue
        }

        if !created.Before(cutoff) {
            continue
        }

        if dryRun {
            logrus.Infof("Dry run, aws image %s in region %s, with creation date %s would be removed", *image.ImageId, region, *image.CreationDate)
            continue
        }

        if err = sem.Acquire(context.Background(), 1); err != nil {
            logrus.Errorf("Error acquiring semaphore: %v", err)
            continue
        }
        wg.Add(1)

        go func(i int) {
            defer sem.Release(1)
            defer wg.Done()

            err := a.RemoveSnapshotAndDeregisterImage(images[i])
            if err != nil {
                logrus.Errorf("Cleanup for image %s in region %s failed", *images[i].ImageId, region)
            }
        }(index)
    }

    wg.Wait()
    return nil
}
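
AWSCleanup above shows the commit's bounded fan-out: a weighted semaphore caps in-flight AWS requests while a WaitGroup drains them before returning. The same pattern, reduced to a standalone sketch (doWork is a hypothetical stand-in for the cloud call, maxInFlight an arbitrary limit):

package main

import (
    "context"
    "fmt"
    "sync"

    "golang.org/x/sync/semaphore"
)

// doWork stands in for a slow remote call such as image deregistration.
func doWork(i int) { fmt.Println("item", i) }

func main() {
    const maxInFlight = 4
    sem := semaphore.NewWeighted(maxInFlight)
    var wg sync.WaitGroup

    for i := 0; i < 20; i++ {
        // Acquire blocks once maxInFlight goroutines are running,
        // so the loop itself applies backpressure.
        if err := sem.Acquire(context.Background(), 1); err != nil {
            break // Acquire only fails if the context is canceled
        }
        wg.Add(1)
        go func(i int) {
            defer sem.Release(1)
            defer wg.Done()
            doWork(i)
        }(i)
    }
    wg.Wait()
}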

cmd/osbuild-service-maintenance/config.go | 55 (new file)
@@ -0,0 +1,55 @@
package main

import (
    "fmt"
    "os"
    "reflect"
)

// Do not write this config to logs or stdout, it contains secrets!
type Config struct {
    DryRun                 string `env:"DRY_RUN"`
    MaxConcurrentRequests  string `env:"MAX_CONCURRENT_REQUESTS"`
    PGHost                 string `env:"PGHOST"`
    PGPort                 string `env:"PGPORT"`
    PGDatabase             string `env:"PGDATABASE"`
    PGUser                 string `env:"PGUSER"`
    PGPassword             string `env:"PGPASSWORD"`
    PGSSLMode              string `env:"PGSSLMODE"`
    GoogleApplicationCreds string `env:"GOOGLE_APPLICATION_CREDENTIALS"`
    AWSAccessKeyID         string `env:"AWS_ACCESS_KEY_ID"`
    AWSSecretAccessKey     string `env:"AWS_SECRET_ACCESS_KEY"`
}

// *string means the value is not required
// string means the value is required and should have a default value
func LoadConfigFromEnv(intf interface{}) error {
    t := reflect.TypeOf(intf).Elem()
    v := reflect.ValueOf(intf).Elem()

    for i := 0; i < v.NumField(); i++ {
        fieldT := t.Field(i)
        fieldV := v.Field(i)
        key, ok := fieldT.Tag.Lookup("env")
        if !ok {
            return fmt.Errorf("No env tag in config field")
        }

        confV, ok := os.LookupEnv(key)
        kind := fieldV.Kind()
        if ok {
            switch kind {
            case reflect.Ptr:
                if fieldT.Type.Elem().Kind() != reflect.String {
                    return fmt.Errorf("Unsupported type")
                }
                fieldV.Set(reflect.ValueOf(&confV))
            case reflect.String:
                fieldV.SetString(confV)
            default:
                return fmt.Errorf("Unsupported type")
            }
        }
    }
    return nil
}
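
A quick sketch of how this reflection-based loader is driven, written as a throwaway main alongside config.go; the two environment values are invented for the illustration:

package main

import (
    "fmt"
    "os"
)

func main() {
    // Hypothetical values; the deployed service gets these from its environment.
    os.Setenv("DRY_RUN", "true")
    os.Setenv("MAX_CONCURRENT_REQUESTS", "10")

    var conf Config
    if err := LoadConfigFromEnv(&conf); err != nil {
        panic(err)
    }
    // Fields whose variable is unset keep their zero value "".
    fmt.Println(conf.DryRun, conf.MaxConcurrentRequests)
}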

cmd/osbuild-service-maintenance/gcp.go | 66 (new file)
@@ -0,0 +1,66 @@
package main

import (
    "context"
    "fmt"
    "sync"
    "time"

    "github.com/sirupsen/logrus"
    "golang.org/x/sync/semaphore"
    "google.golang.org/api/compute/v1"

    "github.com/osbuild/osbuild-composer/internal/cloud/gcp"
)

func GCPCleanup(maxConcurrentRequests int, dryRun bool, cutoff time.Time) error {
    g, err := gcp.New(nil)
    if err != nil {
        return err
    }

    sem := semaphore.NewWeighted(int64(maxConcurrentRequests))
    var wg sync.WaitGroup
    removeImageOlderThan := func(images *compute.ImageList) error {
        for _, image := range images.Items {
            created, err := time.Parse(time.RFC3339, image.CreationTimestamp)
            if err != nil {
                logrus.Errorf("Unable to parse image %s(%d)'s creation timestamp: %v", image.Name, image.Id, err)
                continue
            }

            if !created.Before(cutoff) {
                continue
            }

            if dryRun {
                logrus.Infof("Dry run, gcp image %s(%d), with creation date %v would be removed", image.Name, image.Id, created)
                continue
            }

            if err = sem.Acquire(context.Background(), 1); err != nil {
                logrus.Errorf("Error acquiring semaphore: %v", err)
                continue
            }
            wg.Add(1)

            go func(id string) {
                defer sem.Release(1)
                defer wg.Done()

                err = g.ComputeImageDelete(context.Background(), id)
                if err != nil {
                    logrus.Errorf("Error deleting image %s created at %v", id, created)
                }
            }(fmt.Sprintf("%d", image.Id))
        }
        return nil
    }

    err = g.ComputeExecuteFunctionForImages(context.Background(), removeImageOlderThan)
    if err != nil {
        return err
    }
    wg.Wait()
    return nil
}
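
Note the inversion of control: GCPCleanup hands removeImageOlderThan to ComputeExecuteFunctionForImages, which calls it once per page of list results, so deletions can start before the full listing has been fetched. A toy sketch of that page-callback shape (page and listPages are invented stand-ins for compute.ImageList and the Pages iterator):

package main

import "fmt"

// page and listPages mimic the shape of compute.ImageList and the
// Pages(ctx, f) iterator: the callback runs once per page, not per item.
type page struct{ Items []string }

func listPages(f func(*page) error) error {
    for _, p := range []*page{{Items: []string{"a", "b"}}, {Items: []string{"c"}}} {
        if err := f(p); err != nil {
            return err // a callback error stops the iteration
        }
    }
    return nil
}

func main() {
    err := listPages(func(p *page) error {
        for _, item := range p.Items {
            fmt.Println("processing", item)
        }
        return nil
    })
    fmt.Println("done, err =", err)
}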

cmd/osbuild-service-maintenance/main.go | 108 (new file)
@@ -0,0 +1,108 @@
package main

import (
    "fmt"
    "strconv"
    "sync"
    "time"

    "github.com/sirupsen/logrus"

    "github.com/osbuild/osbuild-composer/internal/jobqueue/dbjobqueue"
)

func main() {
    logrus.SetReportCaller(true)

    archs := []string{"x86_64"}
    jobType := "osbuild"
    // 14 days
    cutoff := time.Now().Add(-(time.Hour * 24 * 14))
    logrus.Infof("Cutoff date: %v", cutoff)

    var conf Config
    err := LoadConfigFromEnv(&conf)
    if err != nil {
        panic(err)
    }
    maxCReqs, err := strconv.Atoi(conf.MaxConcurrentRequests)
    if err != nil {
        panic(err)
    }
    dryRun, err := strconv.ParseBool(conf.DryRun)
    if err != nil {
        panic(err)
    }

    if dryRun {
        logrus.Info("Dry run, no state will be changed")
    }

    dbURL := fmt.Sprintf("postgres://%s:%s@%s:%s/%s?sslmode=%s",
        conf.PGUser,
        conf.PGPassword,
        conf.PGHost,
        conf.PGPort,
        conf.PGDatabase,
        conf.PGSSLMode,
    )
    jobs, err := dbjobqueue.New(dbURL)
    if err != nil {
        panic(err)
    }

    var wg sync.WaitGroup
    wg.Add(1)
    go func() {
        defer wg.Done()
        logrus.Info("Cleaning up AWS")
        err := AWSCleanup(maxCReqs, dryRun, conf.AWSAccessKeyID, conf.AWSSecretAccessKey, "us-east-1", cutoff)
        if err != nil {
            logrus.Errorf("AWS cleanup failed: %v", err)
        }
    }()

    wg.Add(1)
    go func() {
        defer wg.Done()
        logrus.Info("Cleaning up GCP")
        if conf.GoogleApplicationCreds == "" {
            logrus.Error("GCP credentials not specified")
            return
        }
        err = GCPCleanup(maxCReqs, dryRun, cutoff)
        if err != nil {
            logrus.Errorf("GCP Cleanup failed: %v", err)
        }
    }()

    wg.Wait()
    logrus.Info("🦀🦀🦀 cloud cleanup done 🦀🦀🦀")

    var jobTypes []string
    for _, a := range archs {
        jobTypes = append(jobTypes, fmt.Sprintf("%s:%s", jobType, a))
    }

    jobsByType, err := jobs.JobsUptoByType(jobTypes, cutoff)
    if err != nil {
        logrus.Errorf("Error querying jobs: %v", err)
        return
    }

    for k, v := range jobsByType {
        logrus.Infof("Deleting jobs and their dependencies of type %v", k)
        if dryRun {
            logrus.Infof("Dry run, skipping deletion of jobs: %v", v)
            continue
        }

        for _, jobId := range v {
            err = jobs.DeleteJobIncludingDependencies(jobId)
            if err != nil {
                logrus.Errorf("Error deleting job: %v", jobId)
            }
        }
    }
    logrus.Info("🦀🦀🦀 dbqueue cleanup done 🦀🦀🦀")
}
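
The two cloud passes run as goroutines that only log their failures, so an AWS error cannot abort the GCP pass or the queue cleanup that follows. If the caller did need the errors back, x/sync's errgroup (from the same module as the vendored semaphore) would be the conventional shape; a sketch of that alternative, not what the commit does:

package main

import (
    "fmt"

    "golang.org/x/sync/errgroup"
)

func main() {
    var g errgroup.Group
    g.Go(func() error { return nil })                     // stands in for the AWS pass
    g.Go(func() error { return fmt.Errorf("gcp: boom") }) // stands in for the GCP pass
    // Wait blocks until both finish and returns the first non-nil error.
    if err := g.Wait(); err != nil {
        fmt.Println("cleanup failed:", err)
    }
}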

@@ -6,7 +6,7 @@ import (

     "github.com/aws/aws-sdk-go/aws"

-    "github.com/osbuild/osbuild-composer/internal/upload/awsupload"
+    "github.com/osbuild/osbuild-composer/internal/cloud/awscloud"
 )

 func main() {
@@ -32,7 +32,7 @@ func main() {
     flag.StringVar(&arch, "arch", "", "arch (x86_64 or aarch64)")
     flag.Parse()

-    a, err := awsupload.New(region, accessKeyID, secretAccessKey, sessionToken)
+    a, err := awscloud.New(region, accessKeyID, secretAccessKey, sessionToken)
     if err != nil {
         println(err.Error())
         return

@@ -11,11 +11,11 @@ import (
     "github.com/google/uuid"

+    "github.com/osbuild/osbuild-composer/internal/cloud/awscloud"
     "github.com/osbuild/osbuild-composer/internal/cloud/gcp"
     "github.com/osbuild/osbuild-composer/internal/common"
     osbuild "github.com/osbuild/osbuild-composer/internal/osbuild2"
     "github.com/osbuild/osbuild-composer/internal/target"
-    "github.com/osbuild/osbuild-composer/internal/upload/awsupload"
     "github.com/osbuild/osbuild-composer/internal/upload/azure"
     "github.com/osbuild/osbuild-composer/internal/upload/koji"
     "github.com/osbuild/osbuild-composer/internal/upload/vmware"

@@ -37,14 +37,14 @@ func appendTargetError(res *worker.OSBuildJobResult, err error) {
     res.TargetErrors = append(res.TargetErrors, errStr)
 }

-// Returns an *awsupload.AWS object with the credentials of the request. If they
+// Returns an *awscloud.AWS object with the credentials of the request. If they
 // are not accessible, then try to use the one obtained in the worker
 // configuration.
-func (impl *OSBuildJobImpl) getAWS(region string, accessId string, secret string, token string) (*awsupload.AWS, error) {
+func (impl *OSBuildJobImpl) getAWS(region string, accessId string, secret string, token string) (*awscloud.AWS, error) {
     if accessId != "" && secret != "" {
-        return awsupload.New(region, accessId, secret, token)
+        return awscloud.New(region, accessId, secret, token)
     } else {
-        return awsupload.NewFromFile(impl.AWSCreds, region)
+        return awscloud.NewFromFile(impl.AWSCreds, region)
     }
 }

distribution/Dockerfile-ubi-maintenance | 11 (new file)
@@ -0,0 +1,11 @@
FROM registry.access.redhat.com/ubi8/go-toolset:latest AS builder
COPY . .
ENV GOFLAGS=-mod=vendor
RUN go install ./cmd/osbuild-service-maintenance/

FROM registry.access.redhat.com/ubi8/ubi-minimal:latest
RUN mkdir -p "/usr/libexec/osbuild-composer"
RUN mkdir -p "/etc/osbuild-composer/"
COPY --from=builder /opt/app-root/src/go/bin/osbuild-service-maintenance /usr/libexec/osbuild-composer/osbuild-service-maintenance
ENTRYPOINT ["/usr/libexec/osbuild-composer/osbuild-service-maintenance"]

go.mod | 1
@@ -47,6 +47,7 @@ require (
     github.com/vmware/govmomi v0.26.1
     golang.org/x/mod v0.5.0 // indirect
     golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1
+    golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
     golang.org/x/sys v0.0.0-20210917161153-d61c044b1678
     google.golang.org/api v0.58.0
     google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0

go.sum | 1
@@ -813,6 +813,7 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

@@ -14,8 +14,8 @@ import (
     "github.com/aws/aws-sdk-go/aws/session"
     "github.com/aws/aws-sdk-go/service/ec2"

+    "github.com/osbuild/osbuild-composer/internal/cloud/awscloud"
     "github.com/osbuild/osbuild-composer/internal/common"
-    "github.com/osbuild/osbuild-composer/internal/upload/awsupload"
 )

 type awsCredentials struct {
@@ -91,7 +91,7 @@ func wrapErrorf(innerError error, format string, a ...interface{}) error {
 // The s3 key is never returned - the same thing is done in osbuild-composer,
 // the user has no way of getting the s3 key.
 func UploadImageToAWS(c *awsCredentials, imagePath string, imageName string) error {
-    uploader, err := awsupload.New(c.Region, c.AccessKeyId, c.SecretAccessKey, c.sessionToken)
+    uploader, err := awscloud.New(c.Region, c.AccessKeyId, c.SecretAccessKey, c.sessionToken)
     if err != nil {
         return fmt.Errorf("cannot create aws uploader: %v", err)
     }

@@ -1,4 +1,4 @@
-package awsupload
+package awscloud

 import (
     "fmt"
@@ -301,6 +301,56 @@ func (a *AWS) Register(name, bucket, key string, shareWith []string, rpmArch str
     return registerOutput.ImageId, nil
 }

+func (a *AWS) RemoveSnapshotAndDeregisterImage(image *ec2.Image) error {
+    if image == nil {
+        return fmt.Errorf("image is nil")
+    }
+
+    var snapshots []*string
+    for _, bdm := range image.BlockDeviceMappings {
+        snapshots = append(snapshots, bdm.Ebs.SnapshotId)
+    }
+
+    _, err := a.ec2.DeregisterImage(
+        &ec2.DeregisterImageInput{
+            ImageId: image.ImageId,
+        },
+    )
+    if err != nil {
+        return err
+    }
+
+    for _, s := range snapshots {
+        _, err = a.ec2.DeleteSnapshot(
+            &ec2.DeleteSnapshotInput{
+                SnapshotId: s,
+            },
+        )
+        if err != nil {
+            // TODO return err?
+            log.Println("Unable to remove snapshot", s)
+        }
+    }
+    return err
+}
+
+// For service maintenance, images are discovered by the "Name:composer-api-*" tag filter. Currently
+// all image names in the service are generated, so they're guaranteed to be unique as well. If
+// users are ever allowed to name their images, an extra tag should be added.
+func (a *AWS) DescribeImagesByTag(tagKey, tagValue string) ([]*ec2.Image, error) {
+    imgs, err := a.ec2.DescribeImages(
+        &ec2.DescribeImagesInput{
+            Filters: []*ec2.Filter{
+                {
+                    Name:   aws.String(fmt.Sprintf("tag:%s", tagKey)),
+                    Values: []*string{aws.String(tagValue)},
+                },
+            },
+        },
+    )
+    return imgs.Images, err
+}
+
 func (a *AWS) S3ObjectPresignedURL(bucket, objectKey string) (string, error) {
     log.Printf("[AWS] 📋 Generating Presigned URL for S3 object %s/%s", bucket, objectKey)
     req, _ := a.s3.GetObjectRequest(&s3.GetObjectInput{

@@ -265,17 +265,31 @@ func (g *GCP) ComputeImageShare(ctx context.Context, imageName string, shareWith
 //
 // Uses:
 // - Compute Engine API
-func (g *GCP) ComputeImageDelete(ctx context.Context, image string) error {
+func (g *GCP) ComputeImageDelete(ctx context.Context, resourceId string) error {
     computeService, err := compute.NewService(ctx, option.WithCredentials(g.creds))
     if err != nil {
         return fmt.Errorf("failed to get Compute Engine client: %v", err)
     }

-    _, err = computeService.Images.Delete(g.creds.ProjectID, image).Context(ctx).Do()
+    _, err = computeService.Images.Delete(g.creds.ProjectID, resourceId).Context(ctx).Do()

     return err
 }

+// ComputeExecuteFunctionForImages will pass all the compute images in the account to a function,
+// which is able to iterate over the images. Useful if something needs to be executed for each image.
+// Uses:
+// - Compute Engine API
+func (g *GCP) ComputeExecuteFunctionForImages(ctx context.Context, f func(*compute.ImageList) error) error {
+    computeService, err := compute.NewService(ctx, option.WithCredentials(g.creds))
+    if err != nil {
+        return fmt.Errorf("failed to get Compute Engine client: %v", err)
+    }
+
+    imagesService := compute.NewImagesService(computeService)
+    return imagesService.List(g.creds.ProjectID).Pages(ctx, f)
+}
+
 // ComputeInstanceDelete deletes a Compute Engine instance with the given name and
 // running in the given zone. If the instance existed and was successfully deleted,
 // no error is returned.

@@ -263,6 +263,10 @@ func (h *apiHandlers) PostCompose(ctx echo.Context) error {
         return HTTPError(ErrorJSONUnMarshallingError)
     }

+    // For service maintenance, images are discovered by the "Name:composer-api-*"
+    // tag filter. Currently all image names in the service are generated, so they're
+    // guaranteed to be unique as well. If users are ever allowed to name their images,
+    // an extra tag should be added.
     key := fmt.Sprintf("composer-api-%s", uuid.New().String())
     t := target.NewAWSTarget(&target.AWSTargetOptions{
         Filename: imageType.Filename(),

@@ -96,27 +96,50 @@
     sqlDeleteHeartbeat = `
     DELETE FROM heartbeats
     WHERE id = $1`
+
+    // Maintenance queries
+    sqlQueryJobsUptoByType = `
+    SELECT array_agg(id), type
+    FROM jobs
+    WHERE type = ANY($1) AND finished_at < $2
+    GROUP BY type`
+    sqlQueryDepedenciesRecursively = `
+    WITH RECURSIVE dependencies(d) AS (
+        SELECT dependency_id
+        FROM job_dependencies
+        WHERE job_id = $1
+        UNION ALL
+        SELECT dependency_id
+        FROM dependencies, job_dependencies
+        WHERE job_dependencies.job_id = d )
+    SELECT * FROM dependencies`
+    sqlDeleteJobDependencies = `
+    DELETE FROM job_dependencies
+    WHERE dependency_id = ANY($1)`
+    sqlDeleteJobs = `
+    DELETE FROM jobs
+    WHERE id = ANY($1)`
 )

-type dbJobQueue struct {
+type DBJobQueue struct {
     pool *pgxpool.Pool
 }

-// Create a new dbJobQueue object for `url`.
-func New(url string) (*dbJobQueue, error) {
+// Create a new DBJobQueue object for `url`.
+func New(url string) (*DBJobQueue, error) {
     pool, err := pgxpool.Connect(context.Background(), url)
     if err != nil {
         return nil, fmt.Errorf("error establishing connection: %v", err)
     }

-    return &dbJobQueue{pool}, nil
+    return &DBJobQueue{pool}, nil
 }

-func (q *dbJobQueue) Close() {
+func (q *DBJobQueue) Close() {
     q.pool.Close()
 }

-func (q *dbJobQueue) Enqueue(jobType string, args interface{}, dependencies []uuid.UUID) (uuid.UUID, error) {
+func (q *DBJobQueue) Enqueue(jobType string, args interface{}, dependencies []uuid.UUID) (uuid.UUID, error) {
     conn, err := q.pool.Acquire(context.Background())
     if err != nil {
         return uuid.Nil, fmt.Errorf("error connecting to database: %v", err)

@@ -162,7 +185,7 @@ func (q *dbJobQueue) Enqueue(jobType string, args interface{}, dependencies []uu
     return id, nil
 }

-func (q *dbJobQueue) Dequeue(ctx context.Context, jobTypes []string) (uuid.UUID, uuid.UUID, []uuid.UUID, string, json.RawMessage, error) {
+func (q *DBJobQueue) Dequeue(ctx context.Context, jobTypes []string) (uuid.UUID, uuid.UUID, []uuid.UUID, string, json.RawMessage, error) {
     // Return early if the context is already canceled.
     if err := ctx.Err(); err != nil {
         return uuid.Nil, uuid.Nil, nil, "", nil, jobqueue.ErrDequeueTimeout

@@ -221,7 +244,7 @@ func (q *dbJobQueue) Dequeue(ctx context.Context, jobTypes []string) (uuid.UUID,
     return id, token, dependencies, jobType, args, nil
 }
-func (q *dbJobQueue) DequeueByID(ctx context.Context, id uuid.UUID) (uuid.UUID, []uuid.UUID, string, json.RawMessage, error) {
+func (q *DBJobQueue) DequeueByID(ctx context.Context, id uuid.UUID) (uuid.UUID, []uuid.UUID, string, json.RawMessage, error) {
     // Return early if the context is already canceled.
     if err := ctx.Err(); err != nil {
         return uuid.Nil, nil, "", nil, jobqueue.ErrDequeueTimeout

@@ -260,7 +283,7 @@ func (q *dbJobQueue) DequeueByID(ctx context.Context, id uuid.UUID) (uuid.UUID,
     return token, dependencies, jobType, args, nil
 }

-func (q *dbJobQueue) FinishJob(id uuid.UUID, result interface{}) error {
+func (q *DBJobQueue) FinishJob(id uuid.UUID, result interface{}) error {
     conn, err := q.pool.Acquire(context.Background())
     if err != nil {
         return fmt.Errorf("error connecting to database: %v", err)

@@ -327,7 +350,7 @@ func (q *dbJobQueue) FinishJob(id uuid.UUID, result interface{}) error {
     return nil
 }

-func (q *dbJobQueue) CancelJob(id uuid.UUID) error {
+func (q *DBJobQueue) CancelJob(id uuid.UUID) error {
     conn, err := q.pool.Acquire(context.Background())
     if err != nil {
         return fmt.Errorf("error connecting to database: %v", err)

@@ -348,7 +371,7 @@ func (q *dbJobQueue) CancelJob(id uuid.UUID) error {
     return nil
 }

-func (q *dbJobQueue) JobStatus(id uuid.UUID) (result json.RawMessage, queued, started, finished time.Time, canceled bool, deps []uuid.UUID, err error) {
+func (q *DBJobQueue) JobStatus(id uuid.UUID) (result json.RawMessage, queued, started, finished time.Time, canceled bool, deps []uuid.UUID, err error) {
     conn, err := q.pool.Acquire(context.Background())
     if err != nil {
         return

@@ -380,7 +403,7 @@ func (q *dbJobQueue) JobStatus(id uuid.UUID) (result json.RawMessage, queued, st
 }

 // Job returns all the parameters that define a job (everything provided during Enqueue).
-func (q *dbJobQueue) Job(id uuid.UUID) (jobType string, args json.RawMessage, dependencies []uuid.UUID, err error) {
+func (q *DBJobQueue) Job(id uuid.UUID) (jobType string, args json.RawMessage, dependencies []uuid.UUID, err error) {
     conn, err := q.pool.Acquire(context.Background())
     if err != nil {
         return

@@ -400,7 +423,7 @@ func (q *dbJobQueue) Job(id uuid.UUID) (jobType string, args json.RawMessage, de
 }

 // Find job by token, this will return an error if the job hasn't been dequeued
-func (q *dbJobQueue) IdFromToken(token uuid.UUID) (id uuid.UUID, err error) {
+func (q *DBJobQueue) IdFromToken(token uuid.UUID) (id uuid.UUID, err error) {
     conn, err := q.pool.Acquire(context.Background())
     if err != nil {
         return uuid.Nil, fmt.Errorf("error establishing connection: %v", err)

@@ -418,7 +441,7 @@ func (q *dbJobQueue) IdFromToken(token uuid.UUID) (id uuid.UUID, err error) {
 }

 // Get a list of tokens which haven't been updated in the specified time frame
-func (q *dbJobQueue) Heartbeats(olderThan time.Duration) (tokens []uuid.UUID) {
+func (q *DBJobQueue) Heartbeats(olderThan time.Duration) (tokens []uuid.UUID) {
     conn, err := q.pool.Acquire(context.Background())
     if err != nil {
         return

@@ -449,7 +472,7 @@ func (q *dbJobQueue) Heartbeats(olderThan time.Duration) (tokens []uuid.UUID) {
 }

 // Reset the last heartbeat time to time.Now()
-func (q *dbJobQueue) RefreshHeartbeat(token uuid.UUID) {
+func (q *DBJobQueue) RefreshHeartbeat(token uuid.UUID) {
     conn, err := q.pool.Acquire(context.Background())
     if err != nil {
         return

@@ -465,7 +488,7 @@ func (q *dbJobQueue) RefreshHeartbeat(token uuid.UUID) {
     }
 }

-func (q *dbJobQueue) jobDependencies(ctx context.Context, conn *pgxpool.Conn, id uuid.UUID) ([]uuid.UUID, error) {
+func (q *DBJobQueue) jobDependencies(ctx context.Context, conn *pgxpool.Conn, id uuid.UUID) ([]uuid.UUID, error) {
     rows, err := conn.Query(ctx, sqlQueryDependencies, id)
     if err != nil {
         return nil, err

@@ -488,3 +511,91 @@ func (q *dbJobQueue) jobDependencies(ctx context.Context, conn *pgxpool.Conn, id

     return dependencies, nil
 }
+
+// return map id -> jobtype ?
+func (q *DBJobQueue) JobsUptoByType(jobTypes []string, upto time.Time) (result map[string][]uuid.UUID, err error) {
+    result = make(map[string][]uuid.UUID)
+
+    conn, err := q.pool.Acquire(context.Background())
+    if err != nil {
+        err = fmt.Errorf("error connecting to database: %v", err)
+        return
+    }
+    defer conn.Release()
+
+    rows, err := conn.Query(context.Background(), sqlQueryJobsUptoByType, jobTypes, upto)
+    if err != nil {
+        return
+    }
+    defer rows.Close()
+
+    for rows.Next() {
+        var ids []uuid.UUID
+        var jt string
+        err = rows.Scan(&ids, &jt)
+        if err != nil {
+            return
+        }
+
+        result[jt] = ids
+    }
+    err = rows.Err()
+    return
+}
+
+// Deletes single job and dependencies (recursively)
+func (q *DBJobQueue) DeleteJobIncludingDependencies(jobId uuid.UUID) error {
+    conn, err := q.pool.Acquire(context.Background())
+    if err != nil {
+        return fmt.Errorf("error connecting to database: %v", err)
+    }
+    defer conn.Release()
+
+    tx, err := conn.Begin(context.Background())
+    if err != nil {
+        return fmt.Errorf("error starting database transaction: %v", err)
+    }
+    defer func() {
+        err := tx.Rollback(context.Background())
+        if err != nil && !errors.As(err, &pgx.ErrTxClosed) {
+            logrus.Error("error rolling back enqueue transaction: ", err)
+        }
+    }()
+
+    rows, err := conn.Query(context.Background(), sqlQueryDepedenciesRecursively, jobId)
+    if err != nil {
+        return fmt.Errorf("error querying the job's dependencies: %v", err)
+    }
+
+    var dependencies []uuid.UUID
+    for rows.Next() {
+        var dep uuid.UUID
+        err = rows.Scan(&dep)
+        if err != nil {
+            return err
+        }
+
+        dependencies = append(dependencies, dep)
+    }
+
+    depTag, err := conn.Exec(context.Background(), sqlDeleteJobDependencies, dependencies)
+    if err != nil {
+        return fmt.Errorf("Error removing from dependencies recursively for job %v: %v", jobId, err)
+    }
+
+    jobAndDependencies := append(dependencies, jobId)
+    jobsTag, err := conn.Exec(context.Background(), sqlDeleteJobs, jobAndDependencies)
+    if err != nil {
+        return fmt.Errorf("Error removing from jobs recursively for job %v: %v", jobId, err)
+    }
+
+    err = tx.Commit(context.Background())
+    if err != nil {
+        return fmt.Errorf("unable to commit database transaction: %v", err)
+    }
+
+    logrus.Infof("Removed %d rows from dependencies for job %v", depTag.RowsAffected(), jobId)
+    logrus.Infof("Removed %d rows from jobs for job %v, this includes dependencies", jobsTag.RowsAffected(), jobId)
+    return nil
+}
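
What sqlQueryDepedenciesRecursively computes can be sanity-checked in memory: the CTE's base case selects the deleted job's direct dependencies, and the recursive term keeps following dependency edges until none remain. A sketch with made-up IDs, mirroring the id1 -> id2 -> id3 chain from the tests:

package main

import "fmt"

// walkDependencies is an in-memory analogue of the recursive CTE:
// the base case is the direct dependencies of start, the recursive
// case follows dependency edges from everything found so far.
func walkDependencies(edges map[string][]string, start string) []string {
    var out []string
    frontier := edges[start] // base case
    for len(frontier) > 0 {
        var next []string
        for _, d := range frontier {
            out = append(out, d)
            next = append(next, edges[d]...) // recursive case
        }
        frontier = next
    }
    return out
}

func main() {
    // id3 depends on id2, which depends on id1, as in the tests.
    edges := map[string][]string{"id3": {"id2"}, "id2": {"id1"}}
    fmt.Println(walkDependencies(edges, "id3")) // [id2 id1]
}

DeleteJobIncludingDependencies then issues two deletes inside one transaction: the collected IDs are removed from job_dependencies first, and the job plus all of its collected dependencies from jobs.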

@@ -16,3 +16,8 @@ docker --config="$DOCKER_CONF" login -u="$QUAY_USER" -p="$QUAY_TOKEN" quay.io
 docker --config="$DOCKER_CONF" build -f distribution/Dockerfile-ubi -t "${IMAGE_NAME}:${IMAGE_TAG}" .
 docker --config="$DOCKER_CONF" push "${IMAGE_NAME}:${IMAGE_TAG}"

+# Maintenance image
+IMAGE_NAME="quay.io/app-sre/composer-maintenance"
+IMAGE_TAG=$(git rev-parse --short=7 HEAD)
+docker --config="$DOCKER_CONF" build -f distribution/Dockerfile-ubi-maintenance -t "${IMAGE_NAME}:${IMAGE_TAG}" .
+docker --config="$DOCKER_CONF" push "${IMAGE_NAME}:${IMAGE_TAG}"

vendor/golang.org/x/sync/AUTHORS | 3 (new file, vendored)
@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

vendor/golang.org/x/sync/CONTRIBUTORS | 3 (new file, vendored)
@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

vendor/golang.org/x/sync/LICENSE | 27 (new file, vendored)
@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/golang.org/x/sync/PATENTS | 22 (new file, vendored)
@@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)

"This implementation" means the copyrightable works distributed by
Google as part of the Go project.

Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

vendor/golang.org/x/sync/semaphore/semaphore.go | 136 (new file, vendored)
@@ -0,0 +1,136 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package semaphore provides a weighted semaphore implementation.
package semaphore // import "golang.org/x/sync/semaphore"

import (
    "container/list"
    "context"
    "sync"
)

type waiter struct {
    n     int64
    ready chan<- struct{} // Closed when semaphore acquired.
}

// NewWeighted creates a new weighted semaphore with the given
// maximum combined weight for concurrent access.
func NewWeighted(n int64) *Weighted {
    w := &Weighted{size: n}
    return w
}

// Weighted provides a way to bound concurrent access to a resource.
// The callers can request access with a given weight.
type Weighted struct {
    size    int64
    cur     int64
    mu      sync.Mutex
    waiters list.List
}

// Acquire acquires the semaphore with a weight of n, blocking until resources
// are available or ctx is done. On success, returns nil. On failure, returns
// ctx.Err() and leaves the semaphore unchanged.
//
// If ctx is already done, Acquire may still succeed without blocking.
func (s *Weighted) Acquire(ctx context.Context, n int64) error {
    s.mu.Lock()
    if s.size-s.cur >= n && s.waiters.Len() == 0 {
        s.cur += n
        s.mu.Unlock()
        return nil
    }

    if n > s.size {
        // Don't make other Acquire calls block on one that's doomed to fail.
        s.mu.Unlock()
        <-ctx.Done()
        return ctx.Err()
    }

    ready := make(chan struct{})
    w := waiter{n: n, ready: ready}
    elem := s.waiters.PushBack(w)
    s.mu.Unlock()

    select {
    case <-ctx.Done():
        err := ctx.Err()
        s.mu.Lock()
        select {
        case <-ready:
            // Acquired the semaphore after we were canceled. Rather than trying to
            // fix up the queue, just pretend we didn't notice the cancelation.
            err = nil
        default:
            isFront := s.waiters.Front() == elem
            s.waiters.Remove(elem)
            // If we're at the front and there're extra tokens left, notify other waiters.
            if isFront && s.size > s.cur {
                s.notifyWaiters()
            }
        }
        s.mu.Unlock()
        return err

    case <-ready:
        return nil
    }
}

// TryAcquire acquires the semaphore with a weight of n without blocking.
// On success, returns true. On failure, returns false and leaves the semaphore unchanged.
func (s *Weighted) TryAcquire(n int64) bool {
    s.mu.Lock()
    success := s.size-s.cur >= n && s.waiters.Len() == 0
    if success {
        s.cur += n
    }
    s.mu.Unlock()
    return success
}

// Release releases the semaphore with a weight of n.
func (s *Weighted) Release(n int64) {
    s.mu.Lock()
    s.cur -= n
    if s.cur < 0 {
        s.mu.Unlock()
        panic("semaphore: released more than held")
    }
    s.notifyWaiters()
    s.mu.Unlock()
}

func (s *Weighted) notifyWaiters() {
    for {
        next := s.waiters.Front()
        if next == nil {
            break // No more waiters blocked.
        }

        w := next.Value.(waiter)
        if s.size-s.cur < w.n {
            // Not enough tokens for the next waiter. We could keep going (to try to
            // find a waiter with a smaller request), but under load that could cause
            // starvation for large requests; instead, we leave all remaining waiters
            // blocked.
            //
            // Consider a semaphore used as a read-write lock, with N tokens, N
            // readers, and one writer. Each reader can Acquire(1) to obtain a read
            // lock. The writer can Acquire(N) to obtain a write lock, excluding all
            // of the readers. If we allow the readers to jump ahead in the queue,
            // the writer will starve — there is always one token available for every
            // reader.
            break
        }

        s.cur += w.n
        s.waiters.Remove(next)
        close(w.ready)
    }
}
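
The comment in notifyWaiters motivates the FIFO policy with a read-write-lock analogy; spelled out as a hedged usage sketch, readers each hold one of the n tokens while a writer takes all n at once:

package main

import (
    "context"
    "fmt"

    "golang.org/x/sync/semaphore"
)

func main() {
    const n = 3
    sem := semaphore.NewWeighted(n)
    ctx := context.Background()

    // A reader holds one of the n tokens.
    _ = sem.Acquire(ctx, 1)
    fmt.Println("reading")
    sem.Release(1)

    // A writer takes all n tokens, excluding every reader;
    // FIFO queueing keeps later readers from starving it.
    _ = sem.Acquire(ctx, n)
    fmt.Println("writing exclusively")
    sem.Release(n)
}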

vendor/modules.txt | 3 (vendored)
@@ -416,6 +416,9 @@ golang.org/x/oauth2/google/internal/externalaccount
 golang.org/x/oauth2/internal
 golang.org/x/oauth2/jws
 golang.org/x/oauth2/jwt
+# golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
+## explicit
+golang.org/x/sync/semaphore
 # golang.org/x/sys v0.0.0-20210917161153-d61c044b1678
 ## explicit
 golang.org/x/sys/internal/unsafeheader