cloudapi: V2
V2 is compliant with api.openshift.com design guidelines. Errors are predefined, have codes, and are queryable. All requests have an operationId set: a unique identifier which is sortable by time. This is added to the response in case of an error. All returned objects have the href, id, and kind field set.
This commit is contained in:
parent
19eb65e9fd
commit
5a9d8c792b
28 changed files with 4877 additions and 585 deletions
|
|
@ -103,7 +103,7 @@ func (c *Composer) InitWeldr(repoPaths []string, weldrListener net.Listener,
|
|||
}
|
||||
|
||||
func (c *Composer) InitAPI(cert, key string, enableJWT bool, l net.Listener) error {
|
||||
c.api = cloudapi.NewServer(c.workers, c.rpm, c.distros)
|
||||
c.api = cloudapi.NewServer(c.logger, c.workers, c.rpm, c.distros)
|
||||
c.koji = kojiapi.NewServer(c.logger, c.workers, c.rpm, c.distros)
|
||||
|
||||
clientAuth := tls.RequireAndVerifyClientCert
|
||||
|
|
@ -210,6 +210,7 @@ func (c *Composer) Start() error {
|
|||
if c.apiListener != nil {
|
||||
go func() {
|
||||
const apiRoute = "/api/composer/v1"
|
||||
const apiRouteV2 = "/api/composer/v2"
|
||||
const kojiRoute = "/api/composer-koji/v1"
|
||||
|
||||
mux := http.NewServeMux()
|
||||
|
|
@ -217,7 +218,8 @@ func (c *Composer) Start() error {
|
|||
// Add a "/" here, because http.ServeMux expects the
|
||||
// trailing slash for rooted subtrees, whereas the
|
||||
// handler functions don't.
|
||||
mux.Handle(apiRoute+"/", c.api.Handler(apiRoute))
|
||||
mux.Handle(apiRoute+"/", c.api.V1(apiRoute))
|
||||
mux.Handle(apiRouteV2+"/", c.api.V2(apiRouteV2))
|
||||
mux.Handle(kojiRoute+"/", c.koji.Handler(kojiRoute))
|
||||
mux.Handle("/metrics", promhttp.Handler().(http.HandlerFunc))
|
||||
|
||||
|
|
|
|||
1
go.mod
1
go.mod
|
|
@ -40,6 +40,7 @@ require (
|
|||
github.com/prometheus/client_golang v1.11.0
|
||||
github.com/prometheus/common v0.30.0 // indirect
|
||||
github.com/prometheus/procfs v0.7.3 // indirect
|
||||
github.com/segmentio/ksuid v1.0.4
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/ubccr/kerby v0.0.0-20170626144437-201a958fc453
|
||||
github.com/vmware/govmomi v0.26.1
|
||||
|
|
|
|||
2
go.sum
2
go.sum
|
|
@ -614,6 +614,8 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb
|
|||
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=
|
||||
github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||
github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||
github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
|
||||
|
|
|
|||
|
|
@ -9,6 +9,8 @@ import (
|
|||
|
||||
"github.com/openshift-online/ocm-sdk-go/authentication"
|
||||
"github.com/openshift-online/ocm-sdk-go/logging"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/common"
|
||||
)
|
||||
|
||||
// When using this handler for auth, it should be run as high up as possible.
|
||||
|
|
@ -54,6 +56,12 @@ func BuildJWTAuthHandler(keysURL, caFile, aclFile string, exclude []string, next
|
|||
for _, e := range exclude {
|
||||
builder = builder.Public(e)
|
||||
}
|
||||
|
||||
// In case authentication fails, attach an OperationID
|
||||
builder = builder.OperationID(func(r *http.Request) string {
|
||||
return common.GenerateOperationID()
|
||||
})
|
||||
|
||||
handler, err = builder.Next(next).Build()
|
||||
return
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,600 +1,34 @@
|
|||
//go:generate go run github.com/deepmap/oapi-codegen/cmd/oapi-codegen --package=cloudapi --generate types,spec,client,server -o openapi.gen.go openapi.yml
|
||||
|
||||
package cloudapi
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"log"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/labstack/echo/v4"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/blueprint"
|
||||
"github.com/osbuild/osbuild-composer/internal/distro"
|
||||
"github.com/osbuild/osbuild-composer/internal/distroregistry"
|
||||
"github.com/osbuild/osbuild-composer/internal/osbuild1"
|
||||
"github.com/osbuild/osbuild-composer/internal/ostree"
|
||||
"github.com/osbuild/osbuild-composer/internal/prometheus"
|
||||
"github.com/osbuild/osbuild-composer/internal/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/target"
|
||||
"github.com/osbuild/osbuild-composer/internal/worker"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/cloudapi/v1"
|
||||
"github.com/osbuild/osbuild-composer/internal/cloudapi/v2"
|
||||
)
|
||||
|
||||
// Server represents the state of the cloud Server
|
||||
type Server struct {
|
||||
workers *worker.Server
|
||||
rpmMetadata rpmmd.RPMMD
|
||||
distros *distroregistry.Registry
|
||||
v1 *v1.Server
|
||||
v2 *v2.Server
|
||||
}
|
||||
|
||||
type apiHandlers struct {
|
||||
server *Server
|
||||
}
|
||||
|
||||
type binder struct{}
|
||||
|
||||
// NewServer creates a new cloud server
|
||||
func NewServer(workers *worker.Server, rpmMetadata rpmmd.RPMMD, distros *distroregistry.Registry) *Server {
|
||||
func NewServer(logger *log.Logger, workers *worker.Server, rpmMetadata rpmmd.RPMMD, distros *distroregistry.Registry) *Server {
|
||||
server := &Server{
|
||||
workers: workers,
|
||||
rpmMetadata: rpmMetadata,
|
||||
distros: distros,
|
||||
v1: v1.NewServer(workers, rpmMetadata, distros),
|
||||
v2: v2.NewServer(logger, workers, rpmMetadata, distros),
|
||||
}
|
||||
return server
|
||||
}
|
||||
|
||||
// Create an http.Handler() for this server, that provides the composer API at
|
||||
// the given path.
|
||||
func (server *Server) Handler(path string) http.Handler {
|
||||
e := echo.New()
|
||||
e.Binder = binder{}
|
||||
|
||||
handler := apiHandlers{
|
||||
server: server,
|
||||
}
|
||||
RegisterHandlers(e.Group(path, server.IncRequests), &handler)
|
||||
|
||||
return e
|
||||
func (server *Server) V1(path string) http.Handler {
|
||||
return server.v1.Handler(path)
|
||||
}
|
||||
|
||||
func (b binder) Bind(i interface{}, ctx echo.Context) error {
|
||||
contentType := ctx.Request().Header["Content-Type"]
|
||||
if len(contentType) != 1 || contentType[0] != "application/json" {
|
||||
return echo.NewHTTPError(http.StatusUnsupportedMediaType, "Only 'application/json' content type is supported")
|
||||
}
|
||||
|
||||
err := json.NewDecoder(ctx.Request().Body).Decode(i)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Cannot parse request body: %s", err.Error()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) IncRequests(next echo.HandlerFunc) echo.HandlerFunc {
|
||||
return func(c echo.Context) error {
|
||||
prometheus.TotalRequests.Inc()
|
||||
if strings.HasSuffix(c.Path(), "/compose") {
|
||||
prometheus.ComposeRequests.Inc()
|
||||
}
|
||||
return next(c)
|
||||
}
|
||||
}
|
||||
|
||||
// Compose handles a new /compose POST request
|
||||
func (h *apiHandlers) Compose(ctx echo.Context) error {
|
||||
contentType := ctx.Request().Header["Content-Type"]
|
||||
if len(contentType) != 1 || contentType[0] != "application/json" {
|
||||
return echo.NewHTTPError(http.StatusUnsupportedMediaType, "Only 'application/json' content type is supported")
|
||||
}
|
||||
|
||||
var request ComposeRequest
|
||||
err := ctx.Bind(&request)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
distribution := h.server.distros.GetDistro(request.Distribution)
|
||||
if distribution == nil {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unsupported distribution: %s", request.Distribution))
|
||||
}
|
||||
|
||||
var bp = blueprint.Blueprint{}
|
||||
err = bp.Initialize()
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Unable to initialize blueprint")
|
||||
}
|
||||
if request.Customizations != nil && request.Customizations.Packages != nil {
|
||||
for _, p := range *request.Customizations.Packages {
|
||||
bp.Packages = append(bp.Packages, blueprint.Package{
|
||||
Name: p,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type imageRequest struct {
|
||||
manifest distro.Manifest
|
||||
arch string
|
||||
exports []string
|
||||
}
|
||||
imageRequests := make([]imageRequest, len(request.ImageRequests))
|
||||
var targets []*target.Target
|
||||
|
||||
// use the same seed for all images so we get the same IDs
|
||||
bigSeed, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("Cannot generate a manifest seed: %s", err.Error()))
|
||||
}
|
||||
manifestSeed := bigSeed.Int64()
|
||||
|
||||
for i, ir := range request.ImageRequests {
|
||||
arch, err := distribution.GetArch(ir.Architecture)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unsupported architecture '%s' for distribution '%s'", ir.Architecture, request.Distribution))
|
||||
}
|
||||
imageType, err := arch.GetImageType(ir.ImageType)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unsupported image type '%s' for %s/%s", ir.ImageType, ir.Architecture, request.Distribution))
|
||||
}
|
||||
repositories := make([]rpmmd.RepoConfig, len(ir.Repositories))
|
||||
for j, repo := range ir.Repositories {
|
||||
repositories[j].RHSM = repo.Rhsm
|
||||
|
||||
if repo.Baseurl != nil {
|
||||
repositories[j].BaseURL = *repo.Baseurl
|
||||
} else if repo.Mirrorlist != nil {
|
||||
repositories[j].MirrorList = *repo.Mirrorlist
|
||||
} else if repo.Metalink != nil {
|
||||
repositories[j].Metalink = *repo.Metalink
|
||||
} else {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, "Must specify baseurl, mirrorlist, or metalink")
|
||||
}
|
||||
}
|
||||
|
||||
packageSets := imageType.PackageSets(bp)
|
||||
pkgSpecSets := make(map[string][]rpmmd.PackageSpec)
|
||||
for name, packages := range packageSets {
|
||||
pkgs, _, err := h.server.rpmMetadata.Depsolve(packages, repositories, distribution.ModulePlatformID(), arch.Name(), distribution.Releasever())
|
||||
if err != nil {
|
||||
var error_type int
|
||||
switch err.(type) {
|
||||
// Known DNF errors falls under BadRequest
|
||||
case *rpmmd.DNFError:
|
||||
error_type = http.StatusBadRequest
|
||||
// All other kind of errors are internal server Errors.
|
||||
// (json marshalling issues for instance)
|
||||
case error:
|
||||
error_type = http.StatusInternalServerError
|
||||
}
|
||||
return echo.NewHTTPError(error_type, fmt.Sprintf("Failed to depsolve base packages for %s/%s/%s: %s", ir.ImageType, ir.Architecture, request.Distribution, err.Error()))
|
||||
}
|
||||
pkgSpecSets[name] = pkgs
|
||||
}
|
||||
|
||||
imageOptions := distro.ImageOptions{Size: imageType.Size(0)}
|
||||
if request.Customizations != nil && request.Customizations.Subscription != nil {
|
||||
imageOptions.Subscription = &distro.SubscriptionImageOptions{
|
||||
Organization: fmt.Sprintf("%d", request.Customizations.Subscription.Organization),
|
||||
ActivationKey: request.Customizations.Subscription.ActivationKey,
|
||||
ServerUrl: request.Customizations.Subscription.ServerUrl,
|
||||
BaseUrl: request.Customizations.Subscription.BaseUrl,
|
||||
Insights: request.Customizations.Subscription.Insights,
|
||||
}
|
||||
}
|
||||
|
||||
// set default ostree ref, if one not provided
|
||||
ostreeOptions := ir.Ostree
|
||||
if ostreeOptions == nil || ostreeOptions.Ref == nil {
|
||||
imageOptions.OSTree = distro.OSTreeImageOptions{Ref: imageType.OSTreeRef()}
|
||||
} else if !ostree.VerifyRef(*ostreeOptions.Ref) {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid OSTree ref: %s", *ostreeOptions.Ref))
|
||||
} else {
|
||||
imageOptions.OSTree = distro.OSTreeImageOptions{Ref: *ostreeOptions.Ref}
|
||||
}
|
||||
|
||||
var parent string
|
||||
if ostreeOptions != nil && ostreeOptions.Url != nil {
|
||||
imageOptions.OSTree.URL = *ostreeOptions.Url
|
||||
parent, err = ostree.ResolveRef(imageOptions.OSTree.URL, imageOptions.OSTree.Ref)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Error resolving OSTree repo %s: %s", imageOptions.OSTree.URL, err.Error()))
|
||||
}
|
||||
imageOptions.OSTree.Parent = parent
|
||||
}
|
||||
|
||||
// Set the blueprint customisation to take care of the user
|
||||
var blueprintCustoms *blueprint.Customizations
|
||||
if request.Customizations != nil && request.Customizations.Users != nil {
|
||||
var userCustomizations []blueprint.UserCustomization
|
||||
for _, user := range *request.Customizations.Users {
|
||||
var groups []string
|
||||
if user.Groups != nil {
|
||||
groups = *user.Groups
|
||||
} else {
|
||||
groups = nil
|
||||
}
|
||||
userCustomizations = append(userCustomizations,
|
||||
blueprint.UserCustomization{
|
||||
Name: user.Name,
|
||||
Key: user.Key,
|
||||
Groups: groups,
|
||||
},
|
||||
)
|
||||
}
|
||||
blueprintCustoms = &blueprint.Customizations{
|
||||
User: userCustomizations,
|
||||
}
|
||||
}
|
||||
|
||||
manifest, err := imageType.Manifest(blueprintCustoms, imageOptions, repositories, pkgSpecSets, manifestSeed)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Failed to get manifest for for %s/%s/%s: %s", ir.ImageType, ir.Architecture, request.Distribution, err.Error()))
|
||||
}
|
||||
|
||||
imageRequests[i].manifest = manifest
|
||||
imageRequests[i].arch = arch.Name()
|
||||
imageRequests[i].exports = imageType.Exports()
|
||||
|
||||
uploadRequest := ir.UploadRequest
|
||||
/* oneOf is not supported by the openapi generator so marshal and unmarshal the uploadrequest based on the type */
|
||||
if uploadRequest.Type == UploadTypes_aws {
|
||||
var sessionToken string
|
||||
var awsUploadOptions AWSUploadRequestOptions
|
||||
jsonUploadOptions, err := json.Marshal(uploadRequest.Options)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Unable to marshal aws upload request")
|
||||
}
|
||||
err = json.Unmarshal(jsonUploadOptions, &awsUploadOptions)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Unable to unmarshal aws upload request")
|
||||
}
|
||||
|
||||
var share []string
|
||||
if awsUploadOptions.Ec2.ShareWithAccounts != nil {
|
||||
share = *awsUploadOptions.Ec2.ShareWithAccounts
|
||||
}
|
||||
key := fmt.Sprintf("composer-api-%s", uuid.New().String())
|
||||
if awsUploadOptions.S3.SessionToken != nil {
|
||||
sessionToken = *awsUploadOptions.S3.SessionToken
|
||||
}
|
||||
t := target.NewAWSTarget(&target.AWSTargetOptions{
|
||||
Filename: imageType.Filename(),
|
||||
Region: awsUploadOptions.Region,
|
||||
AccessKeyID: awsUploadOptions.S3.AccessKeyId,
|
||||
SecretAccessKey: awsUploadOptions.S3.SecretAccessKey,
|
||||
SessionToken: sessionToken,
|
||||
Bucket: awsUploadOptions.S3.Bucket,
|
||||
Key: key,
|
||||
ShareWithAccounts: share,
|
||||
})
|
||||
if awsUploadOptions.Ec2.SnapshotName != nil {
|
||||
t.ImageName = *awsUploadOptions.Ec2.SnapshotName
|
||||
} else {
|
||||
t.ImageName = key
|
||||
}
|
||||
|
||||
targets = append(targets, t)
|
||||
} else if uploadRequest.Type == UploadTypes_aws_s3 {
|
||||
var awsS3UploadOptions AWSS3UploadRequestOptions
|
||||
var sessionToken string
|
||||
jsonUploadOptions, err := json.Marshal(uploadRequest.Options)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Unable to unmarshal aws upload request")
|
||||
}
|
||||
err = json.Unmarshal(jsonUploadOptions, &awsS3UploadOptions)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Unable to unmarshal aws upload request")
|
||||
}
|
||||
|
||||
key := fmt.Sprintf("composer-api-%s", uuid.New().String())
|
||||
if awsS3UploadOptions.S3.SessionToken != nil {
|
||||
sessionToken = *awsS3UploadOptions.S3.SessionToken
|
||||
}
|
||||
t := target.NewAWSS3Target(&target.AWSS3TargetOptions{
|
||||
Filename: imageType.Filename(),
|
||||
Region: awsS3UploadOptions.Region,
|
||||
AccessKeyID: awsS3UploadOptions.S3.AccessKeyId,
|
||||
SecretAccessKey: awsS3UploadOptions.S3.SecretAccessKey,
|
||||
SessionToken: sessionToken,
|
||||
Bucket: awsS3UploadOptions.S3.Bucket,
|
||||
Key: key,
|
||||
})
|
||||
t.ImageName = key
|
||||
|
||||
targets = append(targets, t)
|
||||
} else if uploadRequest.Type == UploadTypes_gcp {
|
||||
var gcpUploadOptions GCPUploadRequestOptions
|
||||
jsonUploadOptions, err := json.Marshal(uploadRequest.Options)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Unable to marshal gcp upload request")
|
||||
}
|
||||
err = json.Unmarshal(jsonUploadOptions, &gcpUploadOptions)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Unable to unmarshal gcp upload request")
|
||||
}
|
||||
|
||||
var share []string
|
||||
if gcpUploadOptions.ShareWithAccounts != nil {
|
||||
share = *gcpUploadOptions.ShareWithAccounts
|
||||
}
|
||||
var region string
|
||||
if gcpUploadOptions.Region != nil {
|
||||
region = *gcpUploadOptions.Region
|
||||
}
|
||||
object := fmt.Sprintf("composer-api-%s", uuid.New().String())
|
||||
t := target.NewGCPTarget(&target.GCPTargetOptions{
|
||||
Filename: imageType.Filename(),
|
||||
Region: region,
|
||||
Os: "", // not exposed in cloudapi for now
|
||||
Bucket: gcpUploadOptions.Bucket,
|
||||
Object: object,
|
||||
ShareWithAccounts: share,
|
||||
})
|
||||
// Import will fail if an image with this name already exists
|
||||
if gcpUploadOptions.ImageName != nil {
|
||||
t.ImageName = *gcpUploadOptions.ImageName
|
||||
} else {
|
||||
t.ImageName = object
|
||||
}
|
||||
|
||||
targets = append(targets, t)
|
||||
} else if uploadRequest.Type == UploadTypes_azure {
|
||||
var azureUploadOptions AzureUploadRequestOptions
|
||||
jsonUploadOptions, err := json.Marshal(uploadRequest.Options)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Unable to marshal azure upload request")
|
||||
}
|
||||
err = json.Unmarshal(jsonUploadOptions, &azureUploadOptions)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Unable to unmarshal azure upload request")
|
||||
}
|
||||
t := target.NewAzureImageTarget(&target.AzureImageTargetOptions{
|
||||
Filename: imageType.Filename(),
|
||||
TenantID: azureUploadOptions.TenantId,
|
||||
Location: azureUploadOptions.Location,
|
||||
SubscriptionID: azureUploadOptions.SubscriptionId,
|
||||
ResourceGroup: azureUploadOptions.ResourceGroup,
|
||||
})
|
||||
|
||||
if azureUploadOptions.ImageName != nil {
|
||||
t.ImageName = *azureUploadOptions.ImageName
|
||||
} else {
|
||||
// if ImageName wasn't given, generate a random one
|
||||
t.ImageName = fmt.Sprintf("composer-api-%s", uuid.New().String())
|
||||
}
|
||||
|
||||
targets = append(targets, t)
|
||||
} else {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, "Unknown upload request type, only 'aws', 'azure' and 'gcp' are supported")
|
||||
}
|
||||
}
|
||||
|
||||
var ir imageRequest
|
||||
if len(imageRequests) == 1 {
|
||||
// NOTE: the store currently does not support multi-image composes
|
||||
ir = imageRequests[0]
|
||||
} else {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, "Only single-image composes are currently supported")
|
||||
}
|
||||
|
||||
id, err := h.server.workers.EnqueueOSBuild(ir.arch, &worker.OSBuildJob{
|
||||
Manifest: ir.manifest,
|
||||
Targets: targets,
|
||||
Exports: ir.exports,
|
||||
})
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Failed to enqueue manifest")
|
||||
}
|
||||
|
||||
var response ComposeResult
|
||||
response.Id = id.String()
|
||||
|
||||
return ctx.JSON(http.StatusCreated, response)
|
||||
}
|
||||
|
||||
// ComposeStatus handles a /compose/{id} GET request
|
||||
func (h *apiHandlers) ComposeStatus(ctx echo.Context, id string) error {
|
||||
jobId, err := uuid.Parse(id)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter id: %s", err.Error()))
|
||||
}
|
||||
|
||||
var result worker.OSBuildJobResult
|
||||
status, _, err := h.server.workers.JobStatus(jobId, &result)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusNotFound, fmt.Sprintf("Job %s not found: %s", id, err.Error()))
|
||||
}
|
||||
|
||||
var us *UploadStatus
|
||||
if result.TargetResults != nil {
|
||||
// Only single upload target is allowed, therefore only a single upload target result is allowed as well
|
||||
if len(result.TargetResults) != 1 {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("Job %s returned more upload target results than allowed", id))
|
||||
}
|
||||
tr := *result.TargetResults[0]
|
||||
|
||||
var uploadType UploadTypes
|
||||
var uploadOptions interface{}
|
||||
|
||||
switch tr.Name {
|
||||
case "org.osbuild.aws":
|
||||
uploadType = UploadTypes_aws
|
||||
awsOptions := tr.Options.(*target.AWSTargetResultOptions)
|
||||
uploadOptions = AWSUploadStatus{
|
||||
Ami: awsOptions.Ami,
|
||||
Region: awsOptions.Region,
|
||||
}
|
||||
case "org.osbuild.aws.s3":
|
||||
uploadType = UploadTypes_aws_s3
|
||||
awsOptions := tr.Options.(*target.AWSS3TargetResultOptions)
|
||||
uploadOptions = AWSS3UploadStatus{
|
||||
Url: awsOptions.URL,
|
||||
}
|
||||
case "org.osbuild.gcp":
|
||||
uploadType = UploadTypes_gcp
|
||||
gcpOptions := tr.Options.(*target.GCPTargetResultOptions)
|
||||
uploadOptions = GCPUploadStatus{
|
||||
ImageName: gcpOptions.ImageName,
|
||||
ProjectId: gcpOptions.ProjectID,
|
||||
}
|
||||
case "org.osbuild.azure.image":
|
||||
uploadType = UploadTypes_azure
|
||||
gcpOptions := tr.Options.(*target.AzureImageTargetResultOptions)
|
||||
uploadOptions = AzureUploadStatus{
|
||||
ImageName: gcpOptions.ImageName,
|
||||
}
|
||||
default:
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("Job %s returned unknown upload target results %s", id, tr.Name))
|
||||
}
|
||||
|
||||
us = &UploadStatus{
|
||||
Status: result.UploadStatus,
|
||||
Type: uploadType,
|
||||
Options: uploadOptions,
|
||||
}
|
||||
}
|
||||
|
||||
response := ComposeStatus{
|
||||
ImageStatus: ImageStatus{
|
||||
Status: composeStatusFromJobStatus(status, &result),
|
||||
UploadStatus: us,
|
||||
},
|
||||
}
|
||||
return ctx.JSON(http.StatusOK, response)
|
||||
}
|
||||
|
||||
func composeStatusFromJobStatus(js *worker.JobStatus, result *worker.OSBuildJobResult) ImageStatusValue {
|
||||
if js.Canceled {
|
||||
return ImageStatusValue_failure
|
||||
}
|
||||
|
||||
if js.Started.IsZero() {
|
||||
return ImageStatusValue_pending
|
||||
}
|
||||
|
||||
if js.Finished.IsZero() {
|
||||
// TODO: handle also ImageStatusValue_uploading
|
||||
// TODO: handle also ImageStatusValue_registering
|
||||
return ImageStatusValue_building
|
||||
}
|
||||
|
||||
if result.Success {
|
||||
return ImageStatusValue_success
|
||||
}
|
||||
|
||||
return ImageStatusValue_failure
|
||||
}
|
||||
|
||||
// GetOpenapiJson handles a /openapi.json GET request
|
||||
func (h *apiHandlers) GetOpenapiJson(ctx echo.Context) error {
|
||||
spec, err := GetSwagger()
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Could not load openapi spec")
|
||||
}
|
||||
return ctx.JSON(http.StatusOK, spec)
|
||||
}
|
||||
|
||||
// GetVersion handles a /version GET request
|
||||
func (h *apiHandlers) GetVersion(ctx echo.Context) error {
|
||||
spec, err := GetSwagger()
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Could not load version")
|
||||
}
|
||||
version := Version{spec.Info.Version}
|
||||
return ctx.JSON(http.StatusOK, version)
|
||||
}
|
||||
|
||||
// ComposeMetadata handles a /compose/{id}/metadata GET request
|
||||
func (h *apiHandlers) ComposeMetadata(ctx echo.Context, id string) error {
|
||||
jobId, err := uuid.Parse(id)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter id: %s", err.Error()))
|
||||
}
|
||||
|
||||
var result worker.OSBuildJobResult
|
||||
status, _, err := h.server.workers.JobStatus(jobId, &result)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusNotFound, fmt.Sprintf("Job %s not found: %s", id, err.Error()))
|
||||
}
|
||||
|
||||
var job worker.OSBuildJob
|
||||
if _, _, _, err = h.server.workers.Job(jobId, &job); err != nil {
|
||||
return echo.NewHTTPError(http.StatusNotFound, fmt.Sprintf("Job %s not found: %s", id, err.Error()))
|
||||
}
|
||||
|
||||
if status.Finished.IsZero() {
|
||||
// job still running: empty response
|
||||
return ctx.JSON(200, ComposeMetadata{})
|
||||
}
|
||||
|
||||
manifestVer, err := job.Manifest.Version()
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("Failed to parse manifest version: %s", err.Error()))
|
||||
}
|
||||
|
||||
var rpms []rpmmd.RPM
|
||||
var ostreeCommitResult *osbuild1.StageResult
|
||||
var coreStages []osbuild1.StageResult
|
||||
switch manifestVer {
|
||||
case "1":
|
||||
coreStages = result.OSBuildOutput.Stages
|
||||
if assemblerResult := result.OSBuildOutput.Assembler; assemblerResult.Name == "org.osbuild.ostree.commit" {
|
||||
ostreeCommitResult = result.OSBuildOutput.Assembler
|
||||
}
|
||||
case "2":
|
||||
// v2 manifest results store all stage output in the main stages
|
||||
// here we filter out the build stages to collect only the RPMs for the
|
||||
// core stages
|
||||
// the filtering relies on two assumptions:
|
||||
// 1. the build pipeline is named "build"
|
||||
// 2. the stage results from v2 manifests when converted to v1 are
|
||||
// named by prefixing the pipeline name
|
||||
for _, stage := range result.OSBuildOutput.Stages {
|
||||
if !strings.HasPrefix(stage.Name, "build") {
|
||||
coreStages = append(coreStages, stage)
|
||||
}
|
||||
}
|
||||
// find the ostree.commit stage
|
||||
for idx, stage := range result.OSBuildOutput.Stages {
|
||||
if strings.HasSuffix(stage.Name, "org.osbuild.ostree.commit") {
|
||||
ostreeCommitResult = &result.OSBuildOutput.Stages[idx]
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("Unknown manifest version: %s", manifestVer))
|
||||
}
|
||||
|
||||
rpms = rpmmd.OSBuildStagesToRPMs(coreStages)
|
||||
|
||||
packages := make([]PackageMetadata, len(rpms))
|
||||
for idx, rpm := range rpms {
|
||||
packages[idx] = PackageMetadata{
|
||||
Type: rpm.Type,
|
||||
Name: rpm.Name,
|
||||
Version: rpm.Version,
|
||||
Release: rpm.Release,
|
||||
Epoch: rpm.Epoch,
|
||||
Arch: rpm.Arch,
|
||||
Sigmd5: rpm.Sigmd5,
|
||||
Signature: rpm.Signature,
|
||||
}
|
||||
}
|
||||
|
||||
resp := new(ComposeMetadata)
|
||||
resp.Packages = &packages
|
||||
|
||||
if ostreeCommitResult != nil && ostreeCommitResult.Metadata != nil {
|
||||
commitMetadata, ok := ostreeCommitResult.Metadata.(*osbuild1.OSTreeCommitStageMetadata)
|
||||
if !ok {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Failed to convert ostree commit stage metadata")
|
||||
}
|
||||
resp.OstreeCommit = &commitMetadata.Compose.OSTreeCommit
|
||||
}
|
||||
|
||||
return ctx.JSON(http.StatusOK, resp)
|
||||
func (server *Server) V2(path string) http.Handler {
|
||||
return server.v2.Handler(path)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
// Package cloudapi provides primitives to interact with the openapi HTTP API.
|
||||
// Package v1 provides primitives to interact with the openapi HTTP API.
|
||||
//
|
||||
// Code generated by github.com/deepmap/oapi-codegen DO NOT EDIT.
|
||||
package cloudapi
|
||||
package v1
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
591
internal/cloudapi/v1/v1.go
Normal file
591
internal/cloudapi/v1/v1.go
Normal file
|
|
@ -0,0 +1,591 @@
|
|||
//go:generate go run github.com/deepmap/oapi-codegen/cmd/oapi-codegen --package=v1 --generate types,spec,client,server -o openapi.v1.gen.go openapi.v1.yml
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/labstack/echo/v4"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/blueprint"
|
||||
"github.com/osbuild/osbuild-composer/internal/distro"
|
||||
"github.com/osbuild/osbuild-composer/internal/distroregistry"
|
||||
"github.com/osbuild/osbuild-composer/internal/osbuild1"
|
||||
"github.com/osbuild/osbuild-composer/internal/ostree"
|
||||
"github.com/osbuild/osbuild-composer/internal/prometheus"
|
||||
"github.com/osbuild/osbuild-composer/internal/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/target"
|
||||
"github.com/osbuild/osbuild-composer/internal/worker"
|
||||
)
|
||||
|
||||
// Server represents the state of the cloud Server
|
||||
type Server struct {
|
||||
workers *worker.Server
|
||||
rpmMetadata rpmmd.RPMMD
|
||||
distros *distroregistry.Registry
|
||||
}
|
||||
|
||||
type apiHandlers struct {
|
||||
server *Server
|
||||
}
|
||||
|
||||
type binder struct{}
|
||||
|
||||
// NewServer creates a new cloud server
|
||||
func NewServer(workers *worker.Server, rpmMetadata rpmmd.RPMMD, distros *distroregistry.Registry) *Server {
|
||||
server := &Server{
|
||||
workers: workers,
|
||||
rpmMetadata: rpmMetadata,
|
||||
distros: distros,
|
||||
}
|
||||
return server
|
||||
}
|
||||
|
||||
// Create an http.Handler() for this server, that provides the composer API at
|
||||
// the given path.
|
||||
func (server *Server) Handler(path string) http.Handler {
|
||||
e := echo.New()
|
||||
e.Binder = binder{}
|
||||
|
||||
handler := apiHandlers{
|
||||
server: server,
|
||||
}
|
||||
RegisterHandlers(e.Group(path, server.IncRequests), &handler)
|
||||
|
||||
return e
|
||||
}
|
||||
|
||||
func (b binder) Bind(i interface{}, ctx echo.Context) error {
|
||||
contentType := ctx.Request().Header["Content-Type"]
|
||||
if len(contentType) != 1 || contentType[0] != "application/json" {
|
||||
return echo.NewHTTPError(http.StatusUnsupportedMediaType, "Only 'application/json' content type is supported")
|
||||
}
|
||||
|
||||
err := json.NewDecoder(ctx.Request().Body).Decode(i)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Cannot parse request body: %v", err))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) IncRequests(next echo.HandlerFunc) echo.HandlerFunc {
|
||||
return func(c echo.Context) error {
|
||||
prometheus.TotalRequests.Inc()
|
||||
if strings.HasSuffix(c.Path(), "/compose") {
|
||||
prometheus.ComposeRequests.Inc()
|
||||
}
|
||||
return next(c)
|
||||
}
|
||||
}
|
||||
|
||||
// Compose handles a new /compose POST request
|
||||
func (h *apiHandlers) Compose(ctx echo.Context) error {
|
||||
contentType := ctx.Request().Header["Content-Type"]
|
||||
if len(contentType) != 1 || contentType[0] != "application/json" {
|
||||
return echo.NewHTTPError(http.StatusUnsupportedMediaType, "Only 'application/json' content type is supported")
|
||||
}
|
||||
|
||||
var request ComposeRequest
|
||||
err := ctx.Bind(&request)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
distribution := h.server.distros.GetDistro(request.Distribution)
|
||||
if distribution == nil {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, "Unsupported distribution: %s", request.Distribution)
|
||||
}
|
||||
|
||||
var bp = blueprint.Blueprint{}
|
||||
err = bp.Initialize()
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Unable to initialize blueprint")
|
||||
}
|
||||
if request.Customizations != nil && request.Customizations.Packages != nil {
|
||||
for _, p := range *request.Customizations.Packages {
|
||||
bp.Packages = append(bp.Packages, blueprint.Package{
|
||||
Name: p,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// imagerequest
|
||||
type imageRequest struct {
|
||||
manifest distro.Manifest
|
||||
arch string
|
||||
exports []string
|
||||
}
|
||||
imageRequests := make([]imageRequest, len(request.ImageRequests))
|
||||
var targets []*target.Target
|
||||
|
||||
// use the same seed for all images so we get the same IDs
|
||||
bigSeed, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
|
||||
if err != nil {
|
||||
panic("cannot generate a manifest seed: " + err.Error())
|
||||
}
|
||||
manifestSeed := bigSeed.Int64()
|
||||
|
||||
for i, ir := range request.ImageRequests {
|
||||
arch, err := distribution.GetArch(ir.Architecture)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, "Unsupported architecture '%s' for distribution '%s'", ir.Architecture, request.Distribution)
|
||||
}
|
||||
imageType, err := arch.GetImageType(ir.ImageType)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, "Unsupported image type '%s' for %s/%s", ir.ImageType, ir.Architecture, request.Distribution)
|
||||
}
|
||||
repositories := make([]rpmmd.RepoConfig, len(ir.Repositories))
|
||||
for j, repo := range ir.Repositories {
|
||||
repositories[j].RHSM = repo.Rhsm
|
||||
|
||||
if repo.Baseurl != nil {
|
||||
repositories[j].BaseURL = *repo.Baseurl
|
||||
} else if repo.Mirrorlist != nil {
|
||||
repositories[j].MirrorList = *repo.Mirrorlist
|
||||
} else if repo.Metalink != nil {
|
||||
repositories[j].Metalink = *repo.Metalink
|
||||
} else {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, "Must specify baseurl, mirrorlist, or metalink")
|
||||
}
|
||||
}
|
||||
|
||||
packageSets := imageType.PackageSets(bp)
|
||||
pkgSpecSets := make(map[string][]rpmmd.PackageSpec)
|
||||
for name, packages := range packageSets {
|
||||
pkgs, _, err := h.server.rpmMetadata.Depsolve(packages, repositories, distribution.ModulePlatformID(), arch.Name(), distribution.Releasever())
|
||||
if err != nil {
|
||||
var error_type int
|
||||
switch err.(type) {
|
||||
// Known DNF errors falls under BadRequest
|
||||
case *rpmmd.DNFError:
|
||||
error_type = http.StatusBadRequest
|
||||
// All other kind of errors are internal server Errors.
|
||||
// (json marshalling issues for instance)
|
||||
case error:
|
||||
error_type = http.StatusInternalServerError
|
||||
}
|
||||
return echo.NewHTTPError(error_type, "Failed to depsolve base packages for %s/%s/%s: %s", ir.ImageType, ir.Architecture, request.Distribution, err)
|
||||
}
|
||||
pkgSpecSets[name] = pkgs
|
||||
}
|
||||
|
||||
imageOptions := distro.ImageOptions{Size: imageType.Size(0)}
|
||||
if request.Customizations != nil && request.Customizations.Subscription != nil {
|
||||
imageOptions.Subscription = &distro.SubscriptionImageOptions{
|
||||
Organization: fmt.Sprintf("%d", request.Customizations.Subscription.Organization),
|
||||
ActivationKey: request.Customizations.Subscription.ActivationKey,
|
||||
ServerUrl: request.Customizations.Subscription.ServerUrl,
|
||||
BaseUrl: request.Customizations.Subscription.BaseUrl,
|
||||
Insights: request.Customizations.Subscription.Insights,
|
||||
}
|
||||
}
|
||||
|
||||
// set default ostree ref, if one not provided
|
||||
ostreeOptions := ir.Ostree
|
||||
if ostreeOptions == nil || ostreeOptions.Ref == nil {
|
||||
imageOptions.OSTree = distro.OSTreeImageOptions{Ref: imageType.OSTreeRef()}
|
||||
} else if !ostree.VerifyRef(*ostreeOptions.Ref) {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, "Invalid OSTree ref: %s", *ostreeOptions.Ref)
|
||||
} else {
|
||||
imageOptions.OSTree = distro.OSTreeImageOptions{Ref: *ostreeOptions.Ref}
|
||||
}
|
||||
|
||||
var parent string
|
||||
if ostreeOptions != nil && ostreeOptions.Url != nil {
|
||||
imageOptions.OSTree.URL = *ostreeOptions.Url
|
||||
parent, err = ostree.ResolveRef(imageOptions.OSTree.URL, imageOptions.OSTree.Ref)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, "Error resolving OSTree repo %s: %s", imageOptions.OSTree.URL, err)
|
||||
}
|
||||
imageOptions.OSTree.Parent = parent
|
||||
}
|
||||
|
||||
// Set the blueprint customisation to take care of the user
|
||||
var blueprintCustoms *blueprint.Customizations
|
||||
if request.Customizations != nil && request.Customizations.Users != nil {
|
||||
var userCustomizations []blueprint.UserCustomization
|
||||
for _, user := range *request.Customizations.Users {
|
||||
var groups []string
|
||||
if user.Groups != nil {
|
||||
groups = *user.Groups
|
||||
} else {
|
||||
groups = nil
|
||||
}
|
||||
userCustomizations = append(userCustomizations,
|
||||
blueprint.UserCustomization{
|
||||
Name: user.Name,
|
||||
Key: user.Key,
|
||||
Groups: groups,
|
||||
},
|
||||
)
|
||||
}
|
||||
blueprintCustoms = &blueprint.Customizations{
|
||||
User: userCustomizations,
|
||||
}
|
||||
}
|
||||
|
||||
manifest, err := imageType.Manifest(blueprintCustoms, imageOptions, repositories, pkgSpecSets, manifestSeed)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, "Failed to get manifest for for %s/%s/%s: %s", ir.ImageType, ir.Architecture, request.Distribution, err)
|
||||
}
|
||||
|
||||
imageRequests[i].manifest = manifest
|
||||
imageRequests[i].arch = arch.Name()
|
||||
imageRequests[i].exports = imageType.Exports()
|
||||
|
||||
uploadRequest := ir.UploadRequest
|
||||
/* oneOf is not supported by the openapi generator so marshal and unmarshal the uploadrequest based on the type */
|
||||
if uploadRequest.Type == UploadTypes_aws {
|
||||
var awsUploadOptions AWSUploadRequestOptions
|
||||
jsonUploadOptions, err := json.Marshal(uploadRequest.Options)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Unable to marshal aws upload request")
|
||||
}
|
||||
err = json.Unmarshal(jsonUploadOptions, &awsUploadOptions)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Unable to unmarshal aws upload request")
|
||||
}
|
||||
|
||||
var share []string
|
||||
if awsUploadOptions.Ec2.ShareWithAccounts != nil {
|
||||
share = *awsUploadOptions.Ec2.ShareWithAccounts
|
||||
}
|
||||
key := fmt.Sprintf("composer-api-%s", uuid.New().String())
|
||||
t := target.NewAWSTarget(&target.AWSTargetOptions{
|
||||
Filename: imageType.Filename(),
|
||||
Region: awsUploadOptions.Region,
|
||||
AccessKeyID: awsUploadOptions.S3.AccessKeyId,
|
||||
SecretAccessKey: awsUploadOptions.S3.SecretAccessKey,
|
||||
Bucket: awsUploadOptions.S3.Bucket,
|
||||
Key: key,
|
||||
ShareWithAccounts: share,
|
||||
})
|
||||
if awsUploadOptions.Ec2.SnapshotName != nil {
|
||||
t.ImageName = *awsUploadOptions.Ec2.SnapshotName
|
||||
} else {
|
||||
t.ImageName = key
|
||||
}
|
||||
|
||||
targets = append(targets, t)
|
||||
} else if uploadRequest.Type == UploadTypes_aws_s3 {
|
||||
var awsS3UploadOptions AWSS3UploadRequestOptions
|
||||
jsonUploadOptions, err := json.Marshal(uploadRequest.Options)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Unable to unmarshal aws upload request")
|
||||
}
|
||||
err = json.Unmarshal(jsonUploadOptions, &awsS3UploadOptions)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Unable to unmarshal aws upload request")
|
||||
}
|
||||
|
||||
key := fmt.Sprintf("composer-api-%s", uuid.New().String())
|
||||
t := target.NewAWSS3Target(&target.AWSS3TargetOptions{
|
||||
Filename: imageType.Filename(),
|
||||
Region: awsS3UploadOptions.Region,
|
||||
AccessKeyID: awsS3UploadOptions.S3.AccessKeyId,
|
||||
SecretAccessKey: awsS3UploadOptions.S3.SecretAccessKey,
|
||||
Bucket: awsS3UploadOptions.S3.Bucket,
|
||||
Key: key,
|
||||
})
|
||||
t.ImageName = key
|
||||
|
||||
targets = append(targets, t)
|
||||
} else if uploadRequest.Type == UploadTypes_gcp {
|
||||
var gcpUploadOptions GCPUploadRequestOptions
|
||||
jsonUploadOptions, err := json.Marshal(uploadRequest.Options)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Unable to marshal gcp upload request")
|
||||
}
|
||||
err = json.Unmarshal(jsonUploadOptions, &gcpUploadOptions)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Unable to unmarshal gcp upload request")
|
||||
}
|
||||
|
||||
var share []string
|
||||
if gcpUploadOptions.ShareWithAccounts != nil {
|
||||
share = *gcpUploadOptions.ShareWithAccounts
|
||||
}
|
||||
var region string
|
||||
if gcpUploadOptions.Region != nil {
|
||||
region = *gcpUploadOptions.Region
|
||||
}
|
||||
object := fmt.Sprintf("composer-api-%s", uuid.New().String())
|
||||
t := target.NewGCPTarget(&target.GCPTargetOptions{
|
||||
Filename: imageType.Filename(),
|
||||
Region: region,
|
||||
Os: "", // not exposed in cloudapi for now
|
||||
Bucket: gcpUploadOptions.Bucket,
|
||||
Object: object,
|
||||
ShareWithAccounts: share,
|
||||
})
|
||||
// Import will fail if an image with this name already exists
|
||||
if gcpUploadOptions.ImageName != nil {
|
||||
t.ImageName = *gcpUploadOptions.ImageName
|
||||
} else {
|
||||
t.ImageName = object
|
||||
}
|
||||
|
||||
targets = append(targets, t)
|
||||
} else if uploadRequest.Type == UploadTypes_azure {
|
||||
var azureUploadOptions AzureUploadRequestOptions
|
||||
jsonUploadOptions, err := json.Marshal(uploadRequest.Options)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Unable to marshal azure upload request")
|
||||
}
|
||||
err = json.Unmarshal(jsonUploadOptions, &azureUploadOptions)
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Unable to unmarshal azure upload request")
|
||||
}
|
||||
t := target.NewAzureImageTarget(&target.AzureImageTargetOptions{
|
||||
Filename: imageType.Filename(),
|
||||
TenantID: azureUploadOptions.TenantId,
|
||||
Location: azureUploadOptions.Location,
|
||||
SubscriptionID: azureUploadOptions.SubscriptionId,
|
||||
ResourceGroup: azureUploadOptions.ResourceGroup,
|
||||
})
|
||||
|
||||
if azureUploadOptions.ImageName != nil {
|
||||
t.ImageName = *azureUploadOptions.ImageName
|
||||
} else {
|
||||
// if ImageName wasn't given, generate a random one
|
||||
t.ImageName = fmt.Sprintf("composer-api-%s", uuid.New().String())
|
||||
}
|
||||
|
||||
targets = append(targets, t)
|
||||
} else {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, "Unknown upload request type, only 'aws', 'azure' and 'gcp' are supported")
|
||||
}
|
||||
}
|
||||
|
||||
var ir imageRequest
|
||||
if len(imageRequests) == 1 {
|
||||
// NOTE: the store currently does not support multi-image composes
|
||||
ir = imageRequests[0]
|
||||
} else {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, "Only single-image composes are currently supported")
|
||||
}
|
||||
|
||||
id, err := h.server.workers.EnqueueOSBuild(ir.arch, &worker.OSBuildJob{
|
||||
Manifest: ir.manifest,
|
||||
Targets: targets,
|
||||
Exports: ir.exports,
|
||||
})
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Failed to enqueue manifest")
|
||||
}
|
||||
|
||||
var response ComposeResult
|
||||
response.Id = id.String()
|
||||
|
||||
return ctx.JSON(http.StatusOK, response)
|
||||
}
|
||||
|
||||
// ComposeStatus handles a /compose/{id} GET request
//
// The compose id is the worker job id. The job status is mapped to an
// image status via composeStatusFromJobStatus; when the job carries a
// single upload target result, it is converted into the API's
// UploadStatus shape as well.
func (h *apiHandlers) ComposeStatus(ctx echo.Context, id string) error {
	jobId, err := uuid.Parse(id)
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, "Invalid format for parameter id: %s", err)
	}

	var result worker.OSBuildJobResult
	status, _, err := h.server.workers.JobStatus(jobId, &result)
	if err != nil {
		return echo.NewHTTPError(http.StatusNotFound, "Job %s not found: %s", id, err)
	}

	// us stays nil (and is omitted from the response) when the job has no
	// target results yet.
	var us *UploadStatus
	if result.TargetResults != nil {
		// Only single upload target is allowed, therefore only a single upload target result is allowed as well
		if len(result.TargetResults) != 1 {
			return echo.NewHTTPError(http.StatusInternalServerError, "Job %s returned more upload target results than allowed", id)
		}
		tr := *result.TargetResults[0]

		var uploadType UploadTypes
		var uploadOptions interface{}

		// Map the osbuild target name to the API upload type and convert
		// the target-specific result options into the API status options.
		// The type assertions are safe per target name; a mismatch would
		// indicate a malformed job result.
		switch tr.Name {
		case "org.osbuild.aws":
			uploadType = UploadTypes_aws
			awsOptions := tr.Options.(*target.AWSTargetResultOptions)
			uploadOptions = AWSUploadStatus{
				Ami:    awsOptions.Ami,
				Region: awsOptions.Region,
			}
		case "org.osbuild.aws.s3":
			uploadType = UploadTypes_aws_s3
			awsOptions := tr.Options.(*target.AWSS3TargetResultOptions)
			uploadOptions = AWSS3UploadStatus{
				Url: awsOptions.URL,
			}
		case "org.osbuild.gcp":
			uploadType = UploadTypes_gcp
			gcpOptions := tr.Options.(*target.GCPTargetResultOptions)
			uploadOptions = GCPUploadStatus{
				ImageName: gcpOptions.ImageName,
				ProjectId: gcpOptions.ProjectID,
			}
		case "org.osbuild.azure.image":
			uploadType = UploadTypes_azure
			gcpOptions := tr.Options.(*target.AzureImageTargetResultOptions)
			uploadOptions = AzureUploadStatus{
				ImageName: gcpOptions.ImageName,
			}
		default:
			return echo.NewHTTPError(http.StatusInternalServerError, "Job %s returned unknown upload target results %s", id, tr.Name)
		}

		us = &UploadStatus{
			Status:  result.UploadStatus,
			Type:    uploadType,
			Options: uploadOptions,
		}
	}

	response := ComposeStatus{
		ImageStatus: ImageStatus{
			Status:       composeStatusFromJobStatus(status, &result),
			UploadStatus: us,
		},
	}
	return ctx.JSON(http.StatusOK, response)
}
|
||||
|
||||
func composeStatusFromJobStatus(js *worker.JobStatus, result *worker.OSBuildJobResult) ImageStatusValue {
|
||||
if js.Canceled {
|
||||
return ImageStatusValue_failure
|
||||
}
|
||||
|
||||
if js.Started.IsZero() {
|
||||
return ImageStatusValue_pending
|
||||
}
|
||||
|
||||
if js.Finished.IsZero() {
|
||||
// TODO: handle also ImageStatusValue_uploading
|
||||
// TODO: handle also ImageStatusValue_registering
|
||||
return ImageStatusValue_building
|
||||
}
|
||||
|
||||
if result.Success {
|
||||
return ImageStatusValue_success
|
||||
}
|
||||
|
||||
return ImageStatusValue_failure
|
||||
}
|
||||
|
||||
// GetOpenapiJson handles a /openapi.json GET request
|
||||
func (h *apiHandlers) GetOpenapiJson(ctx echo.Context) error {
|
||||
spec, err := GetSwagger()
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Could not load openapi spec")
|
||||
}
|
||||
return ctx.JSON(http.StatusOK, spec)
|
||||
}
|
||||
|
||||
// GetVersion handles a /version GET request
|
||||
func (h *apiHandlers) GetVersion(ctx echo.Context) error {
|
||||
spec, err := GetSwagger()
|
||||
if err != nil {
|
||||
return echo.NewHTTPError(http.StatusInternalServerError, "Could not load version")
|
||||
}
|
||||
version := Version{spec.Info.Version}
|
||||
return ctx.JSON(http.StatusOK, version)
|
||||
}
|
||||
|
||||
// ComposeMetadata handles a /compose/{id}/metadata GET request
//
// For a finished compose it returns the list of packages recorded in the
// osbuild stage results and, when an ostree commit stage ran, the commit
// id. For a still-running compose it returns an empty object.
func (h *apiHandlers) ComposeMetadata(ctx echo.Context, id string) error {
	jobId, err := uuid.Parse(id)
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, "Invalid format for parameter id: %s", err)
	}

	var result worker.OSBuildJobResult
	status, _, err := h.server.workers.JobStatus(jobId, &result)
	if err != nil {
		return echo.NewHTTPError(http.StatusNotFound, "Job %s not found: %s", id, err)
	}

	// The job arguments are needed to determine the manifest version.
	var job worker.OSBuildJob
	if _, _, _, err = h.server.workers.Job(jobId, &job); err != nil {
		return echo.NewHTTPError(http.StatusNotFound, "Job %s not found: %s", id, err)
	}

	if status.Finished.IsZero() {
		// job still running: empty response
		return ctx.JSON(200, ComposeMetadata{})
	}

	manifestVer, err := job.Manifest.Version()
	if err != nil {
		panic("Failed to parse manifest version: " + err.Error())
	}

	// How results are laid out differs between manifest versions, so the
	// stages of interest are collected per version below.
	var rpms []rpmmd.RPM
	var ostreeCommitResult *osbuild1.StageResult
	var coreStages []osbuild1.StageResult
	switch manifestVer {
	case "1":
		coreStages = result.OSBuildOutput.Stages
		if assemblerResult := result.OSBuildOutput.Assembler; assemblerResult.Name == "org.osbuild.ostree.commit" {
			ostreeCommitResult = result.OSBuildOutput.Assembler
		}
	case "2":
		// v2 manifest results store all stage output in the main stages
		// here we filter out the build stages to collect only the RPMs for the
		// core stages
		// the filtering relies on two assumptions:
		// 1. the build pipeline is named "build"
		// 2. the stage results from v2 manifests when converted to v1 are
		// named by prefixing the pipeline name
		for _, stage := range result.OSBuildOutput.Stages {
			if !strings.HasPrefix(stage.Name, "build") {
				coreStages = append(coreStages, stage)
			}
		}
		// find the ostree.commit stage
		for idx, stage := range result.OSBuildOutput.Stages {
			if strings.HasSuffix(stage.Name, "org.osbuild.ostree.commit") {
				ostreeCommitResult = &result.OSBuildOutput.Stages[idx]
				break
			}
		}
	default:
		panic("Unknown manifest version: " + manifestVer)
	}

	rpms = rpmmd.OSBuildStagesToRPMs(coreStages)

	// Convert the rpmmd package records into the API's metadata shape.
	packages := make([]PackageMetadata, len(rpms))
	for idx, rpm := range rpms {
		packages[idx] = PackageMetadata{
			Type:      rpm.Type,
			Name:      rpm.Name,
			Version:   rpm.Version,
			Release:   rpm.Release,
			Epoch:     rpm.Epoch,
			Arch:      rpm.Arch,
			Sigmd5:    rpm.Sigmd5,
			Signature: rpm.Signature,
		}
	}

	resp := new(ComposeMetadata)
	resp.Packages = &packages

	// Include the ostree commit id when the commit stage produced metadata.
	if ostreeCommitResult != nil && ostreeCommitResult.Metadata != nil {
		commitMetadata, ok := ostreeCommitResult.Metadata.(*osbuild1.OSTreeCommitStageMetadata)
		if !ok {
			panic("Failed to convert ostree commit stage metadata")
		}
		resp.OstreeCommit = &commitMetadata.Compose.OSTreeCommit
	}

	return ctx.JSON(200, resp)
}
|
||||
251
internal/cloudapi/v2/errors.go
Normal file
251
internal/cloudapi/v2/errors.go
Normal file
|
|
@ -0,0 +1,251 @@
|
|||
package v2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/labstack/echo/v4"
|
||||
)
|
||||
|
||||
// Predefined service error codes. The numeric value is appended to
// ErrorCodePrefix to form the public code (e.g. COMPOSER-401) and to
// ErrorHREF to form the error's href.
const (
	ErrorCodePrefix = "COMPOSER-"
	ErrorHREF       = "/api/composer/v2/errors"

	// ocm-sdk sends ErrorUnauthenticated with id 401 & code COMPOSER-401
	ErrorUnauthenticated ServiceErrorCode = 401

	// Client-caused errors (4xx responses).
	ErrorUnauthorized            ServiceErrorCode = 2
	ErrorUnsupportedMediaType    ServiceErrorCode = 3
	ErrorUnsupportedDistribution ServiceErrorCode = 4
	ErrorUnsupportedArchitecture ServiceErrorCode = 5
	ErrorUnsupportedImageType    ServiceErrorCode = 6
	ErrorInvalidRepository       ServiceErrorCode = 7
	ErrorDNFError                ServiceErrorCode = 8
	ErrorInvalidOSTreeRef        ServiceErrorCode = 9
	ErrorInvalidOSTreeRepo       ServiceErrorCode = 10
	ErrorFailedToMakeManifest    ServiceErrorCode = 11
	ErrorInvalidUploadType       ServiceErrorCode = 12
	ErrorMultiImageCompose       ServiceErrorCode = 13
	ErrorInvalidComposeId        ServiceErrorCode = 14
	ErrorComposeNotFound         ServiceErrorCode = 15
	ErrorInvalidErrorId          ServiceErrorCode = 16
	ErrorErrorNotFound           ServiceErrorCode = 17
	ErrorInvalidPageParam        ServiceErrorCode = 18
	ErrorInvalidSizeParam        ServiceErrorCode = 19
	ErrorBodyDecodingError       ServiceErrorCode = 20
	ErrorResourceNotFound        ServiceErrorCode = 21
	ErrorMethodNotAllowed        ServiceErrorCode = 22
	ErrorNotAcceptable           ServiceErrorCode = 23

	// Internal errors, these are bugs
	ErrorFailedToInitializeBlueprint              ServiceErrorCode = 1000
	ErrorFailedToGenerateManifestSeed             ServiceErrorCode = 1001
	ErrorFailedToDepsolve                         ServiceErrorCode = 1002
	ErrorJSONMarshallingError                     ServiceErrorCode = 1003
	ErrorJSONUnMarshallingError                   ServiceErrorCode = 1004
	ErrorEnqueueingJob                            ServiceErrorCode = 1005
	ErrorSeveralUploadTargets                     ServiceErrorCode = 1006
	ErrorUnknownUploadTarget                      ServiceErrorCode = 1007
	ErrorFailedToLoadOpenAPISpec                  ServiceErrorCode = 1008
	ErrorFailedToParseManifestVersion             ServiceErrorCode = 1009
	ErrorUnknownManifestVersion                   ServiceErrorCode = 1010
	ErrorUnableToConvertOSTreeCommitStageMetadata ServiceErrorCode = 1011
	ErrorMalformedOSBuildJobResult                ServiceErrorCode = 1012

	// Errors contained within this file
	ErrorUnspecified          ServiceErrorCode = 10000
	ErrorNotHTTPError         ServiceErrorCode = 10001
	ErrorServiceErrorNotFound ServiceErrorCode = 10002
	ErrorMalformedOperationID ServiceErrorCode = 10003
)
|
||||
|
||||
// ServiceErrorCode is the numeric identifier of a predefined service error.
type ServiceErrorCode int

// serviceError ties a ServiceErrorCode to the HTTP status and
// human-readable reason returned to clients.
type serviceError struct {
	code       ServiceErrorCode
	httpStatus int
	reason     string
}

// serviceErrors is the table of all predefined errors (see getServiceErrors).
type serviceErrors []serviceError
|
||||
|
||||
// getServiceErrors returns the full table mapping each ServiceErrorCode to
// its HTTP status and reason. The table is rebuilt on every call.
func getServiceErrors() serviceErrors {
	return serviceErrors{
		serviceError{ErrorUnauthenticated, http.StatusUnauthorized, "Account authentication could not be verified"},
		serviceError{ErrorUnauthorized, http.StatusForbidden, "Account is unauthorized to perform this action"},
		serviceError{ErrorUnsupportedMediaType, http.StatusUnsupportedMediaType, "Only 'application/json' content is supported"},
		serviceError{ErrorUnsupportedDistribution, http.StatusBadRequest, "Unsupported distribution"},
		serviceError{ErrorUnsupportedArchitecture, http.StatusBadRequest, "Unsupported architecture"},
		serviceError{ErrorUnsupportedImageType, http.StatusBadRequest, "Unsupported image type"},
		serviceError{ErrorInvalidRepository, http.StatusBadRequest, "Must specify baseurl, mirrorlist, or metalink"},
		serviceError{ErrorDNFError, http.StatusBadRequest, "Failed to depsolve packages"},
		serviceError{ErrorInvalidOSTreeRef, http.StatusBadRequest, "Invalid OSTree ref"},
		serviceError{ErrorInvalidOSTreeRepo, http.StatusBadRequest, "Error resolving OSTree repo"},
		serviceError{ErrorFailedToMakeManifest, http.StatusBadRequest, "Failed to get manifest"},
		serviceError{ErrorInvalidUploadType, http.StatusBadRequest, "Unknown upload request type"},
		serviceError{ErrorMultiImageCompose, http.StatusBadRequest, "Only single-image composes are currently supported"},
		serviceError{ErrorInvalidComposeId, http.StatusBadRequest, "Invalid format for compose id"},
		serviceError{ErrorComposeNotFound, http.StatusNotFound, "Compose with given id not found"},
		serviceError{ErrorInvalidErrorId, http.StatusBadRequest, "Invalid format for error id, it should be an integer as a string"},
		serviceError{ErrorErrorNotFound, http.StatusNotFound, "Error with given id not found"},
		serviceError{ErrorInvalidPageParam, http.StatusBadRequest, "Invalid format for page param, it should be an integer as a string"},
		serviceError{ErrorInvalidSizeParam, http.StatusBadRequest, "Invalid format for size param, it should be an integer as a string"},
		serviceError{ErrorBodyDecodingError, http.StatusBadRequest, "Malformed json, unable to decode body"},
		serviceError{ErrorResourceNotFound, http.StatusNotFound, "Requested resource doesn't exist"},
		serviceError{ErrorMethodNotAllowed, http.StatusMethodNotAllowed, "Requested method isn't supported for resource"},
		serviceError{ErrorNotAcceptable, http.StatusNotAcceptable, "Only 'application/json' content is supported"},

		// Internal errors (bugs) all map to 500.
		serviceError{ErrorFailedToInitializeBlueprint, http.StatusInternalServerError, "Failed to initialize blueprint"},
		serviceError{ErrorFailedToGenerateManifestSeed, http.StatusInternalServerError, "Failed to generate manifest seed"},
		serviceError{ErrorFailedToDepsolve, http.StatusInternalServerError, "Failed to depsolve packages"},
		serviceError{ErrorJSONMarshallingError, http.StatusInternalServerError, "Failed to marshal struct"},
		serviceError{ErrorJSONUnMarshallingError, http.StatusInternalServerError, "Failed to unmarshal struct"},
		serviceError{ErrorEnqueueingJob, http.StatusInternalServerError, "Failed to enqueue job"},
		serviceError{ErrorSeveralUploadTargets, http.StatusInternalServerError, "Compose has more than one upload target"},
		serviceError{ErrorUnknownUploadTarget, http.StatusInternalServerError, "Compose has unknown upload target"},
		serviceError{ErrorFailedToLoadOpenAPISpec, http.StatusInternalServerError, "Unable to load openapi spec"},
		serviceError{ErrorFailedToParseManifestVersion, http.StatusInternalServerError, "Unable to parse manifest version"},
		serviceError{ErrorUnknownManifestVersion, http.StatusInternalServerError, "Unknown manifest version"},
		serviceError{ErrorUnableToConvertOSTreeCommitStageMetadata, http.StatusInternalServerError, "Unable to convert ostree commit stage metadata"},
		serviceError{ErrorMalformedOSBuildJobResult, http.StatusInternalServerError, "OSBuildJobResult does not have expected fields set"},

		// Errors about error handling itself.
		serviceError{ErrorUnspecified, http.StatusInternalServerError, "Unspecified internal error "},
		serviceError{ErrorNotHTTPError, http.StatusInternalServerError, "Error is not an instance of HTTPError"},
		serviceError{ErrorServiceErrorNotFound, http.StatusInternalServerError, "Error does not exist"},
		serviceError{ErrorMalformedOperationID, http.StatusInternalServerError, "OperationID is empty or is not a string"},
	}
}
|
||||
|
||||
func find(code ServiceErrorCode) *serviceError {
|
||||
for _, e := range getServiceErrors() {
|
||||
if e.code == code {
|
||||
return &e
|
||||
}
|
||||
}
|
||||
return &serviceError{ErrorServiceErrorNotFound, http.StatusInternalServerError, "Error does not exist"}
|
||||
}
|
||||
|
||||
// Make an echo compatible error out of a service error
|
||||
func HTTPError(code ServiceErrorCode) error {
|
||||
return HTTPErrorWithInternal(code, nil)
|
||||
}
|
||||
|
||||
// echo.HTTPError has a message interface{} field, which can be used to include the ServiceErrorCode
|
||||
func HTTPErrorWithInternal(code ServiceErrorCode, internalErr error) error {
|
||||
se := find(code)
|
||||
he := echo.NewHTTPError(se.httpStatus, se.code)
|
||||
if internalErr != nil {
|
||||
he.Internal = internalErr
|
||||
}
|
||||
return he
|
||||
}
|
||||
|
||||
// Convert a ServiceErrorCode into an Error as defined in openapi.v2.yml
// serviceError is optional, prevents multiple find() calls
func APIError(code ServiceErrorCode, serviceError *serviceError, c echo.Context) *Error {
	se := serviceError
	if se == nil {
		se = find(code)
	}

	// A missing or non-string operationID in the context is itself an
	// error; in that case the returned Error describes that problem
	// instead of the requested code.
	operationID, ok := c.Get("operationID").(string)
	if !ok || operationID == "" {
		se = find(ErrorMalformedOperationID)
	}

	return &Error{
		ObjectReference: ObjectReference{
			Href: fmt.Sprintf("%s/%d", ErrorHREF, se.code),
			Id:   fmt.Sprintf("%d", se.code),
			Kind: "Error",
		},
		Code:        fmt.Sprintf("%s%d", ErrorCodePrefix, se.code),
		OperationId: operationID, // set operation id from context
		Reason:      se.reason,
	}
}
|
||||
|
||||
// Helper to make the ErrorList as defined in openapi.v2.yml
|
||||
func APIErrorList(page int, pageSize int, c echo.Context) *ErrorList {
|
||||
list := &ErrorList{
|
||||
List: List{
|
||||
Kind: "ErrorList",
|
||||
Page: page,
|
||||
Size: 0,
|
||||
Total: len(getServiceErrors()),
|
||||
},
|
||||
Items: []Error{},
|
||||
}
|
||||
|
||||
if page < 0 || pageSize < 0 {
|
||||
return list
|
||||
}
|
||||
|
||||
min := func(a, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
errs := getServiceErrors()[min(page*pageSize, len(getServiceErrors())):min(((page+1)*pageSize), len(getServiceErrors()))]
|
||||
for _, e := range errs {
|
||||
list.Items = append(list.Items, *APIError(e.code, &e, c))
|
||||
}
|
||||
list.Size = len(list.Items)
|
||||
return list
|
||||
}
|
||||
|
||||
func apiErrorFromEchoError(echoError *echo.HTTPError) ServiceErrorCode {
|
||||
switch echoError.Code {
|
||||
case http.StatusNotFound:
|
||||
return ErrorResourceNotFound
|
||||
case http.StatusMethodNotAllowed:
|
||||
return ErrorMethodNotAllowed
|
||||
case http.StatusNotAcceptable:
|
||||
return ErrorNotAcceptable
|
||||
default:
|
||||
return ErrorUnspecified
|
||||
}
|
||||
}
|
||||
|
||||
// Convert an echo error into an AOC compliant one so we send a correct json error response
//
// Installed as echo's central error handler: every error returned by a
// handler or by echo's router flows through here and is rendered as the
// openapi Error shape.
func (s *Server) HTTPErrorHandler(echoError error, c echo.Context) {
	// doResponse renders the given service error, unless a response has
	// already been (partially) written, in which case it only logs.
	doResponse := func(code ServiceErrorCode, c echo.Context) {
		if !c.Response().Committed {
			var err error
			sec := find(code)
			apiErr := APIError(code, sec, c)

			// 500-class service errors indicate bugs; log them with the
			// operation id so they can be correlated with client reports.
			if sec.httpStatus == http.StatusInternalServerError {
				c.Logger().Errorf("Internal server error. Code: %s, OperationId: %s", apiErr.Code, apiErr.OperationId)
			}

			// HEAD responses must not carry a body.
			if c.Request().Method == http.MethodHead {
				err = c.NoContent(sec.httpStatus)
			} else {
				err = c.JSON(sec.httpStatus, apiErr)
			}
			if err != nil {
				c.Logger().Errorf("Failed to return error response: %v", err)
			}
		} else {
			c.Logger().Infof("Failed to return error response, response already committed: %d", code)
		}
	}

	he, ok := echoError.(*echo.HTTPError)
	if !ok {
		doResponse(ErrorNotHTTPError, c)
		return
	}

	sec, ok := he.Message.(ServiceErrorCode)
	if !ok {
		// No service code was set, so Echo threw this error
		doResponse(apiErrorFromEchoError(he), c)
		return
	}
	doResponse(sec, c)
}
|
||||
95
internal/cloudapi/v2/errors_test.go
Normal file
95
internal/cloudapi/v2/errors_test.go
Normal file
|
|
@ -0,0 +1,95 @@
|
|||
package v2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/labstack/echo/v4"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestHTTPErrorReturnsEchoHTTPError checks that HTTPError wraps every
// predefined service error into an *echo.HTTPError carrying the right
// HTTP status and the ServiceErrorCode in its Message field.
func TestHTTPErrorReturnsEchoHTTPError(t *testing.T) {
	for _, se := range getServiceErrors() {
		err := HTTPError(se.code)
		echoError, ok := err.(*echo.HTTPError)
		require.True(t, ok)
		require.Equal(t, se.httpStatus, echoError.Code)
		serviceErrorCode, ok := echoError.Message.(ServiceErrorCode)
		require.True(t, ok)
		require.Equal(t, se.code, serviceErrorCode)
	}
}
|
||||
|
||||
// TestAPIError checks that every predefined service error is converted
// into an openapi Error with consistent href, id, kind, code, operation
// id, and reason fields.
func TestAPIError(t *testing.T) {
	e := echo.New()
	for _, se := range getServiceErrors() {
		ctx := e.NewContext(nil, nil)
		ctx.Set("operationID", "test-operation-id")
		apiError := APIError(se.code, nil, ctx)
		require.Equal(t, fmt.Sprintf("/api/composer/v2/errors/%d", se.code), apiError.Href)
		require.Equal(t, fmt.Sprintf("%d", se.code), apiError.Id)
		require.Equal(t, "Error", apiError.Kind)
		require.Equal(t, fmt.Sprintf("COMPOSER-%d", se.code), apiError.Code)
		require.Equal(t, "test-operation-id", apiError.OperationId)
		require.Equal(t, se.reason, apiError.Reason)
	}
}
|
||||
|
||||
// TestAPIErrorOperationID checks how APIError reacts to the "operationID"
// value stored on the echo context: a missing or non-string operation id
// makes APIError report error code COMPOSER-10003 instead of the requested
// one (presumably an internal "bad operation id" error — confirm against
// the service-error table in errors.go), while a valid string operation id
// yields the requested COMPOSER-401 (ErrorUnauthenticated).
func TestAPIErrorOperationID(t *testing.T) {
	ctx := echo.New().NewContext(nil, nil)

	// No operationID set on the context at all.
	apiError := APIError(ErrorUnauthenticated, nil, ctx)
	require.Equal(t, "COMPOSER-10003", apiError.Code)

	// operationID present but of the wrong type (int, not string).
	ctx.Set("operationID", 5)
	apiError = APIError(ErrorUnauthenticated, nil, ctx)
	require.Equal(t, "COMPOSER-10003", apiError.Code)

	// A valid string operationID: the requested error code is used.
	ctx.Set("operationID", "test-operation-id")
	apiError = APIError(ErrorUnauthenticated, nil, ctx)
	require.Equal(t, "COMPOSER-401", apiError.Code)
}
|
||||
|
||||
// TestAPIErrorList exercises the paging behaviour of APIErrorList:
// invalid (negative) page/size arguments yield an empty list, and valid
// arguments slice the full service-error table while always reporting the
// total number of known errors and echoing back the requested page.
// The COMPOSER-401 / COMPOSER-11 expectations pin the ordering of the
// error table (first entry overall, and first entry of the second
// 10-item page respectively).
func TestAPIErrorList(t *testing.T) {
	ctx := echo.New().NewContext(nil, nil)
	ctx.Set("operationID", "test-operation-id")

	// negative values return empty list
	errs := APIErrorList(-10, -30, ctx)
	require.Equal(t, 0, errs.Size)
	require.Equal(t, 0, len(errs.Items))
	errs = APIErrorList(0, -30, ctx)
	require.Equal(t, 0, errs.Size)
	require.Equal(t, 0, len(errs.Items))
	errs = APIErrorList(-10, 0, ctx)
	require.Equal(t, 0, errs.Size)
	require.Equal(t, 0, len(errs.Items))

	// all of them: a page size larger than the table returns every entry
	errs = APIErrorList(0, 1000, ctx)
	require.Equal(t, len(getServiceErrors()), errs.Size)

	// some of them: two consecutive 10-item pages
	errs = APIErrorList(0, 10, ctx)
	require.Equal(t, 10, errs.Size)
	require.Equal(t, len(getServiceErrors()), errs.Total)
	require.Equal(t, 0, errs.Page)
	require.Equal(t, "COMPOSER-401", errs.Items[0].Code)
	errs = APIErrorList(1, 10, ctx)
	require.Equal(t, 10, errs.Size)
	require.Equal(t, len(getServiceErrors()), errs.Total)
	require.Equal(t, 1, errs.Page)
	require.Equal(t, "COMPOSER-11", errs.Items[0].Code)

	// high page: past the end of the table — empty page, total unchanged
	errs = APIErrorList(1000, 1, ctx)
	require.Equal(t, 0, errs.Size)
	require.Equal(t, len(getServiceErrors()), errs.Total)
	require.Equal(t, 1000, errs.Page)

	// zero pagesize: no items, but total and page are still reported
	errs = APIErrorList(1000, 0, ctx)
	require.Equal(t, 0, errs.Size)
	require.Equal(t, len(getServiceErrors()), errs.Total)
	require.Equal(t, 1000, errs.Page)
}
|
||||
573
internal/cloudapi/v2/openapi.v2.gen.go
Normal file
573
internal/cloudapi/v2/openapi.v2.gen.go
Normal file
|
|
@ -0,0 +1,573 @@
|
|||
// Package v2 provides primitives to interact the openapi HTTP API.
|
||||
//
|
||||
// Code generated by github.com/deepmap/oapi-codegen DO NOT EDIT.
|
||||
package v2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"github.com/deepmap/oapi-codegen/pkg/runtime"
|
||||
"github.com/getkin/kin-openapi/openapi3"
|
||||
"github.com/labstack/echo/v4"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// NOTE(review): everything below is generated by oapi-codegen from
// openapi.v2.yml ("DO NOT EDIT") — change the spec and regenerate rather
// than patching these types by hand. Pointer fields with ",omitempty"
// correspond to optional properties in the OpenAPI schema.

// AWSS3UploadRequestOptions defines model for AWSS3UploadRequestOptions.
type AWSS3UploadRequestOptions struct {
	Region string                    `json:"region"`
	S3     AWSUploadRequestOptionsS3 `json:"s3"`
}

// AWSS3UploadStatus defines model for AWSS3UploadStatus.
type AWSS3UploadStatus struct {
	Url string `json:"url"`
}

// AWSUploadRequestOptions defines model for AWSUploadRequestOptions.
type AWSUploadRequestOptions struct {
	Ec2    AWSUploadRequestOptionsEc2 `json:"ec2"`
	Region string                     `json:"region"`
	S3     AWSUploadRequestOptionsS3  `json:"s3"`
}

// AWSUploadRequestOptionsEc2 defines model for AWSUploadRequestOptionsEc2.
type AWSUploadRequestOptionsEc2 struct {
	AccessKeyId       string    `json:"access_key_id"`
	SecretAccessKey   string    `json:"secret_access_key"`
	ShareWithAccounts *[]string `json:"share_with_accounts,omitempty"`
	SnapshotName      *string   `json:"snapshot_name,omitempty"`
}

// AWSUploadRequestOptionsS3 defines model for AWSUploadRequestOptionsS3.
type AWSUploadRequestOptionsS3 struct {
	AccessKeyId     string `json:"access_key_id"`
	Bucket          string `json:"bucket"`
	SecretAccessKey string `json:"secret_access_key"`
}

// AWSUploadStatus defines model for AWSUploadStatus.
type AWSUploadStatus struct {
	Ami    string `json:"ami"`
	Region string `json:"region"`
}

// AzureUploadRequestOptions defines model for AzureUploadRequestOptions.
type AzureUploadRequestOptions struct {

	// Name of the uploaded image. It must be unique in the given resource group.
	// If name is omitted from the request, a random one based on a UUID is
	// generated.
	ImageName *string `json:"image_name,omitempty"`

	// Location where the image should be uploaded and registered.
	// How to list all locations:
	// https://docs.microsoft.com/en-us/cli/azure/account?view=azure-cli-latest#az_account_list_locations'
	Location string `json:"location"`

	// Name of the resource group where the image should be uploaded.
	ResourceGroup string `json:"resource_group"`

	// ID of subscription where the image should be uploaded.
	SubscriptionId string `json:"subscription_id"`

	// ID of the tenant where the image should be uploaded.
	// How to find it in the Azure Portal:
	// https://docs.microsoft.com/en-us/azure/active-directory/fundamentals/active-directory-how-to-find-tenant
	TenantId string `json:"tenant_id"`
}

// AzureUploadStatus defines model for AzureUploadStatus.
type AzureUploadStatus struct {
	ImageName string `json:"image_name"`
}

// ComposeId defines model for ComposeId.
type ComposeId struct {
	// Embedded struct due to allOf(#/components/schemas/ObjectReference)
	ObjectReference
	// Embedded fields due to inline allOf schema
	Id string `json:"id"`
}

// ComposeMetadata defines model for ComposeMetadata.
type ComposeMetadata struct {
	// Embedded struct due to allOf(#/components/schemas/ObjectReference)
	ObjectReference
	// Embedded fields due to inline allOf schema

	// ID (hash) of the built commit
	OstreeCommit *string `json:"ostree_commit,omitempty"`

	// Package list including NEVRA
	Packages *[]PackageMetadata `json:"packages,omitempty"`
}

// ComposeRequest defines model for ComposeRequest.
type ComposeRequest struct {
	// Embedded struct due to allOf(#/components/schemas/ObjectReference)
	ObjectReference
	// Embedded fields due to inline allOf schema
	Customizations *Customizations `json:"customizations,omitempty"`
	Distribution   string          `json:"distribution"`
	ImageRequests  []ImageRequest  `json:"image_requests"`
}

// ComposeStatus defines model for ComposeStatus.
type ComposeStatus struct {
	// Embedded struct due to allOf(#/components/schemas/ObjectReference)
	ObjectReference
	// Embedded fields due to inline allOf schema
	ImageStatus ImageStatus `json:"image_status"`
}

// Customizations defines model for Customizations.
type Customizations struct {
	Packages     *[]string     `json:"packages,omitempty"`
	Subscription *Subscription `json:"subscription,omitempty"`
	Users        *[]User       `json:"users,omitempty"`
}

// Error defines model for Error.
type Error struct {
	// Embedded struct due to allOf(#/components/schemas/ObjectReference)
	ObjectReference
	// Embedded fields due to inline allOf schema
	Code        string `json:"code"`
	OperationId string `json:"operation_id"`
	Reason      string `json:"reason"`
}

// ErrorList defines model for ErrorList.
type ErrorList struct {
	// Embedded struct due to allOf(#/components/schemas/List)
	List
	// Embedded fields due to inline allOf schema
	Items []Error `json:"items"`
}

// GCPUploadRequestOptions defines model for GCPUploadRequestOptions.
type GCPUploadRequestOptions struct {

	// Name of an existing STANDARD Storage class Bucket.
	Bucket string `json:"bucket"`

	// The name to use for the imported and shared Compute Engine image.
	// The image name must be unique within the GCP project, which is used
	// for the OS image upload and import. If not specified a random
	// 'composer-api-<uuid>' string is used as the image name.
	ImageName *string `json:"image_name,omitempty"`

	// The GCP region where the OS image will be imported to and shared from.
	// The value must be a valid GCP location. See https://cloud.google.com/storage/docs/locations.
	// If not specified, the multi-region location closest to the source
	// (source Storage Bucket location) is chosen automatically.
	Region *string `json:"region,omitempty"`

	// List of valid Google accounts to share the imported Compute Engine image with.
	// Each string must contain a specifier of the account type. Valid formats are:
	//   - 'user:{emailid}': An email address that represents a specific
	//     Google account. For example, 'alice@example.com'.
	//   - 'serviceAccount:{emailid}': An email address that represents a
	//     service account. For example, 'my-other-app@appspot.gserviceaccount.com'.
	//   - 'group:{emailid}': An email address that represents a Google group.
	//     For example, 'admins@example.com'.
	//   - 'domain:{domain}': The G Suite domain (primary) that represents all
	//     the users of that domain. For example, 'google.com' or 'example.com'.
	// If not specified, the imported Compute Engine image is not shared with any
	// account.
	ShareWithAccounts *[]string `json:"share_with_accounts,omitempty"`
}

// GCPUploadStatus defines model for GCPUploadStatus.
type GCPUploadStatus struct {
	ImageName string `json:"image_name"`
	ProjectId string `json:"project_id"`
}

// ImageRequest defines model for ImageRequest.
type ImageRequest struct {
	Architecture  string        `json:"architecture"`
	ImageType     string        `json:"image_type"`
	Ostree        *OSTree       `json:"ostree,omitempty"`
	Repositories  []Repository  `json:"repositories"`
	UploadRequest UploadRequest `json:"upload_request"`
}

// ImageStatus defines model for ImageStatus.
type ImageStatus struct {
	Status       ImageStatusValue `json:"status"`
	UploadStatus *UploadStatus    `json:"upload_status,omitempty"`
}

// ImageStatusValue defines model for ImageStatusValue.
type ImageStatusValue string

// List of ImageStatusValue
const (
	ImageStatusValue_building    ImageStatusValue = "building"
	ImageStatusValue_failure     ImageStatusValue = "failure"
	ImageStatusValue_pending     ImageStatusValue = "pending"
	ImageStatusValue_registering ImageStatusValue = "registering"
	ImageStatusValue_success     ImageStatusValue = "success"
	ImageStatusValue_uploading   ImageStatusValue = "uploading"
)

// List defines model for List.
type List struct {
	Kind  string `json:"kind"`
	Page  int    `json:"page"`
	Size  int    `json:"size"`
	Total int    `json:"total"`
}

// OSTree defines model for OSTree.
type OSTree struct {
	Ref *string `json:"ref,omitempty"`
	Url *string `json:"url,omitempty"`
}

// ObjectReference defines model for ObjectReference.
type ObjectReference struct {
	Href string `json:"href"`
	Id   string `json:"id"`
	Kind string `json:"kind"`
}

// PackageMetadata defines model for PackageMetadata.
type PackageMetadata struct {
	Arch      string  `json:"arch"`
	Epoch     *string `json:"epoch,omitempty"`
	Name      string  `json:"name"`
	Release   string  `json:"release"`
	Sigmd5    string  `json:"sigmd5"`
	Signature *string `json:"signature,omitempty"`
	Type      string  `json:"type"`
	Version   string  `json:"version"`
}

// Repository defines model for Repository.
type Repository struct {
	Baseurl    *string `json:"baseurl,omitempty"`
	Metalink   *string `json:"metalink,omitempty"`
	Mirrorlist *string `json:"mirrorlist,omitempty"`
	Rhsm       bool    `json:"rhsm"`
}

// Subscription defines model for Subscription.
type Subscription struct {
	ActivationKey string `json:"activation_key"`
	BaseUrl       string `json:"base_url"`
	Insights      bool   `json:"insights"`
	Organization  string `json:"organization"`
	ServerUrl     string `json:"server_url"`
}

// UploadRequest defines model for UploadRequest.
// Options is a free-form value whose concrete shape depends on Type
// (one of the *UploadRequestOptions structs above).
type UploadRequest struct {
	Options interface{} `json:"options"`
	Type    UploadTypes `json:"type"`
}

// UploadStatus defines model for UploadStatus.
// Options is a free-form value whose concrete shape depends on Type
// (one of the *UploadStatus structs above).
type UploadStatus struct {
	Options interface{} `json:"options"`
	Status  string      `json:"status"`
	Type    UploadTypes `json:"type"`
}

// UploadTypes defines model for UploadTypes.
type UploadTypes string

// List of UploadTypes
const (
	UploadTypes_aws    UploadTypes = "aws"
	UploadTypes_aws_s3 UploadTypes = "aws.s3"
	UploadTypes_azure  UploadTypes = "azure"
	UploadTypes_gcp    UploadTypes = "gcp"
)

// User defines model for User.
type User struct {
	// Embedded struct due to allOf(#/components/schemas/ObjectReference)
	ObjectReference
	// Embedded fields due to inline allOf schema
	Groups *[]string `json:"groups,omitempty"`
	Key    *string   `json:"key,omitempty"`
	Name   string    `json:"name"`
}

// Page defines model for page.
type Page string

// Size defines model for size.
type Size string

// PostComposeJSONBody defines parameters for PostCompose.
type PostComposeJSONBody ComposeRequest

// GetErrorListParams defines parameters for GetErrorList.
type GetErrorListParams struct {

	// Page index
	Page *Page `json:"page,omitempty"`

	// Number of items in each page
	Size *Size `json:"size,omitempty"`
}

// PostComposeRequestBody defines body for PostCompose for application/json ContentType.
type PostComposeJSONRequestBody PostComposeJSONBody
|
||||
|
||||
// ServerInterface represents all server handlers.
// The v2 API server implements this interface; RegisterHandlers wires each
// method to its route via ServerInterfaceWrapper.
type ServerInterface interface {
	// Create compose
	// (POST /compose)
	PostCompose(ctx echo.Context) error
	// The status of a compose
	// (GET /compose/{id})
	GetComposeStatus(ctx echo.Context, id string) error
	// Get the metadata for a compose.
	// (GET /compose/{id}/metadata)
	GetComposeMetadata(ctx echo.Context, id string) error
	// Get a list of all possible errors
	// (GET /errors)
	GetErrorList(ctx echo.Context, params GetErrorListParams) error
	// Get error description
	// (GET /errors/{id})
	GetError(ctx echo.Context, id string) error
	// Get the openapi spec in json format
	// (GET /openapi)
	GetOpenapi(ctx echo.Context) error
}
|
||||
|
||||
// ServerInterfaceWrapper converts echo contexts to parameters.
// Each wrapper method binds path/query parameters from the request and then
// delegates to the corresponding ServerInterface handler. The
// "Bearer.Scopes" context entry records the auth scopes required by the
// route (presumably consumed by an auth middleware — confirm with the
// middleware setup in the v2 server).
type ServerInterfaceWrapper struct {
	Handler ServerInterface
}

// PostCompose converts echo context to params.
func (w *ServerInterfaceWrapper) PostCompose(ctx echo.Context) error {
	var err error

	ctx.Set("Bearer.Scopes", []string{""})

	// Invoke the callback with all the unmarshalled arguments
	err = w.Handler.PostCompose(ctx)
	return err
}

// GetComposeStatus converts echo context to params.
func (w *ServerInterfaceWrapper) GetComposeStatus(ctx echo.Context) error {
	var err error
	// ------------- Path parameter "id" -------------
	var id string

	err = runtime.BindStyledParameter("simple", false, "id", ctx.Param("id"), &id)
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter id: %s", err))
	}

	ctx.Set("Bearer.Scopes", []string{""})

	// Invoke the callback with all the unmarshalled arguments
	err = w.Handler.GetComposeStatus(ctx, id)
	return err
}

// GetComposeMetadata converts echo context to params.
func (w *ServerInterfaceWrapper) GetComposeMetadata(ctx echo.Context) error {
	var err error
	// ------------- Path parameter "id" -------------
	var id string

	err = runtime.BindStyledParameter("simple", false, "id", ctx.Param("id"), &id)
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter id: %s", err))
	}

	ctx.Set("Bearer.Scopes", []string{""})

	// Invoke the callback with all the unmarshalled arguments
	err = w.Handler.GetComposeMetadata(ctx, id)
	return err
}

// GetErrorList converts echo context to params.
func (w *ServerInterfaceWrapper) GetErrorList(ctx echo.Context) error {
	var err error

	ctx.Set("Bearer.Scopes", []string{""})

	// Parameter object where we will unmarshal all parameters from the context
	var params GetErrorListParams
	// ------------- Optional query parameter "page" -------------

	err = runtime.BindQueryParameter("form", true, false, "page", ctx.QueryParams(), &params.Page)
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter page: %s", err))
	}

	// ------------- Optional query parameter "size" -------------

	err = runtime.BindQueryParameter("form", true, false, "size", ctx.QueryParams(), &params.Size)
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter size: %s", err))
	}

	// Invoke the callback with all the unmarshalled arguments
	err = w.Handler.GetErrorList(ctx, params)
	return err
}

// GetError converts echo context to params.
func (w *ServerInterfaceWrapper) GetError(ctx echo.Context) error {
	var err error
	// ------------- Path parameter "id" -------------
	var id string

	err = runtime.BindStyledParameter("simple", false, "id", ctx.Param("id"), &id)
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter id: %s", err))
	}

	ctx.Set("Bearer.Scopes", []string{""})

	// Invoke the callback with all the unmarshalled arguments
	err = w.Handler.GetError(ctx, id)
	return err
}

// GetOpenapi converts echo context to params.
func (w *ServerInterfaceWrapper) GetOpenapi(ctx echo.Context) error {
	var err error

	ctx.Set("Bearer.Scopes", []string{""})

	// Invoke the callback with all the unmarshalled arguments
	err = w.Handler.GetOpenapi(ctx)
	return err
}
|
||||
|
||||
// This is a simple interface which specifies echo.Route addition functions which
// are present on both echo.Echo and echo.Group, since we want to allow using
// either of them for path registration
type EchoRouter interface {
	CONNECT(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
	DELETE(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
	GET(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
	HEAD(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
	OPTIONS(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
	PATCH(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
	POST(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
	PUT(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
	TRACE(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
}
|
||||
|
||||
// RegisterHandlers adds each server route to the EchoRouter.
// Routes use echo-style ":id" path parameters, matching the {id}
// placeholders in the OpenAPI spec.
func RegisterHandlers(router EchoRouter, si ServerInterface) {

	wrapper := ServerInterfaceWrapper{
		Handler: si,
	}

	router.POST("/compose", wrapper.PostCompose)
	router.GET("/compose/:id", wrapper.GetComposeStatus)
	router.GET("/compose/:id/metadata", wrapper.GetComposeMetadata)
	router.GET("/errors", wrapper.GetErrorList)
	router.GET("/errors/:id", wrapper.GetError)
	router.GET("/openapi", wrapper.GetOpenapi)

}
|
||||
|
||||
// Base64 encoded, gzipped, json marshaled Swagger object
|
||||
var swaggerSpec = []string{
|
||||
|
||||
"H4sIAAAAAAAC/+xbe28bt7L/KsSeC7jF3dVbtiOg6FFsN0dtYgeW057e2DCo3ZGWJ7vkhuRaVgJ99ws+",
|
||||
"drUvPdy67SngfxJJJOfxm+FwZkh/dXwWJ4wClcIZfXUSzHEMErj9tgD1fwDC5ySRhFFn5LzHC0CEBvDo",
|
||||
"uA484jiJoDT9AUcpOCOn66zXrkPUms8p8JXjOhTHakTPdB3hhxBjtUSuEvW7kJzQhV4myJcG3pdpPAOO",
|
||||
"2BwRCbFAhCLAfogswaI0GYFcmk5nqzx67i551tmgJj3+ZTrtf0gihoNr+JyCkFdaQIMCZwlwSYwUHBZa",
|
||||
"8q+ZbM7IgdRbgpBe13GrjFxH9NXk/+Ewd0bOP9ob87StAO3xL9Mm3tO+Vo/D55RwCJzRx4y5JnqX82Kz",
|
||||
"/4AvFa+CHlOJZdogf8qjZvsU+ahJW+gfhhL4vd+o9YXfc7Q0/yUwu1qXJ4BxYVQv44F9H4S4/wSrexKU",
|
||||
"tRr/NBlPrqY/XJ1fXp5c/Hv87v3bi0YFwecg7zeUymSWP+KI//uDpD9cvJu0fzp5d35x+aY9e/94PSdn",
|
||||
"v1q6P1386rjOnPEYS71rhVgyHjSyCzGH+yWRoWLJUhtPcoYfnW6vPxgen5y+6nQ1QHoDN/hWThxzjlea",
|
||||
"NsWJCJm8N7u1qEa88rLRulQVM5VBbULoCWab9v8Qq81S/xPImo7257/azE8GNFdoJ7LbYg+OSVkbHBOv",
|
||||
"45/2Oyev+icnw+GrYTCYNaHyxHBQ1SsmTk6jUfIvKYfDIhuJ8QJyx60cZjgGdZTJEFCqqUGA9IIWmkgU",
|
||||
"p0KiGaCUks+pOnH1xAV5AIo4CJZyH9CCszRp3dLJHCkmiAjEYiIlBGjOWayXcCOjizDimAYsRowCmmEB",
|
||||
"AWIUYfThw+QcEXFLF0CBYwlB65ZujlPjg1qwJrAj5mNp4S4r+NaOoGUIHLQsmgoSIUujQCuX6Y1pgBTk",
|
||||
"QgLX/P/FlkgyFBEhEY4ilLERo1saSpmIUbsdMF+0YuJzJthctnwWt4F6qWj7EWljZaW2jUbfPxBYfqd/",
|
||||
"8vyIeBGWIOQ/8JcsXN0rRvc5k6MKAMp1IFWmbfY3Y457bY7dli6b7gBoqra4YamP6bUl80ZzbIoM6SwX",
|
||||
"wcajslCTcyVScdpvEGYAw+B01vM9POsNvMGg2/dedfyhd9zt9TvHcNp5Bb0m6SRQTOUOuZQQZtJhUll3",
|
||||
"mRMaICKz3aJ3KnrPuMTRIX6T+YwkD+AFhIMvGV+15ykNcAxU4kjURr2QLT3JPMXaMyJXQBr6JzAfzo69",
|
||||
"rt+fe4MAdzx83Ot5nVnnuNPrvwpOgpO9UWmDWN22NQ8s7Mo9AWxb8C0HrkMiQUXeAoEmEc5UxiVgoh0A",
|
||||
"R9HV3Bl93J2RXenF1zAHDtQHZ+3WhK4cu91eH1TS4cHpq5nX7QV9Dw+Gx96gd3w8HA4GnU6nUzz60pTs",
|
||||
"P/ZI0KDQ3UaldyBxgCV+TsWYkBzg3mdxTGTjlvkmxCL8Nts5s5REEtnpDdsvwf4nvDC0q9WdHjFxl1A/",
|
||||
"SgNCF+jy4ufrsVPI2nbpY2nkQNRyuvUu/OyR+pzw+amQLCZfcH5K76J3Vp69dp2AKOhmqaxlFTyEyDtt",
|
||||
"gtj4vz17jXcegt1ELcsgaAKu6IoluWosd3rpZt8/2+bT3EVOd6+WVoTmwGHpbNGhZs+yKEX3LpQgCRNy",
|
||||
"wUE8sfwoBNt9ek2Lc9eukwrbSjnI9h8E8EM2i+tccM74s+4QFkAjGmoSLuQQDbkPFgaY3WFTc8inVwg3",
|
||||
"m1lr+ZY8JRbo2Q2+mcF/kB0Muvs2nyHVLPmbs/eH1Qebgq85Y8QUwSMRUsXg6c348nx8fY6mknEVo/0I",
|
||||
"C4FeaxKtar5uv+yoHXfVJjchmIJCMpQKQHPGbQaWMC5tvq6L/gCpiJJKQBd0QahN0lq39CZP2DShSjmz",
|
||||
"JDK0Sdqbs/co4Uyh56JlSPxQlTGpgOCWZnyvppaWSfk0eyNLC6nah0kkEvDJnCjZbJ1zS498E+24hxPi",
|
||||
"3aadTt9Xh7z+BEfIgJGxQ1gU0kwl9VPqoE3RWYdSqWjGC9lsrtOSRJGCJgdXsiK+qpCzeOpmZg4lVt9J",
|
||||
"oKln+V4LTQFQluj6EUuD1oKxRQQ6zRXGdXQG3M6rHVtAFkF0tYhxGkniWcmz6ciPmAAhlZhqksk8b+k3",
|
||||
"trDJ3NM4Zr7sWwWzHzIBFOFUshhL4uMoWlVBhvQJHaZKxamyFjbPcNF6o2y6kldTKXtyk/tq92zd0gvs",
|
||||
"h5mTaNR9RiUmqmjOkOJZzmXZICV5C/2sJTCZpUCYw+iWIuShI3UkjL5CjElEgvXRCI0p0t8QDgIOQrkg",
|
||||
"lohDwkGosLTh5SsSqKJWC/3AOLLouegIR8SHf9rvyuZHLctZAH8gPozNuifKYFhbEtt4xyuPyVDvtuSf",
|
||||
"OElEwmRrYRdla4oi6WrlqWhY/bPWh5KrAkEQEyoaMQhYjAkdfTX/K4Z6e6JpSiQg8yv6JuEkxnz1bZ15",
|
||||
"FBmGumejDndjfSzt2ioim613hBhHRxWZmnfdbtckwqwxwUE5KsJ0dUszfMu76aPOQUY1r9D9upI/HGo8",
|
||||
"x3WM2eowO65jAS7++IRsq3LE7uge5ifs8xWwrmNPoVr7FgsfaICp9GYck8Drd/rDbn9vpVgg5+6rh0t5",
|
||||
"f70Vyv2QSPBlyivqPJ4e3x8Pth/v5udKF7Vpuikx96VGV9MbNUsrmjBBJOPVFGvX8uts0aop0zZne1bA",
|
||||
"7M2WiwlWvYlbRKwERkX0Gtu7zBrbPOvJJc7P+gpyo+BhBEruXVUvK4/KshpGyto0jfW0VDfjHdeZYxIZ",
|
||||
"KBKgqqTXzXkS2Y9GMvM5a8Oqb3cNnpJl5GVUPhHaXCBk18J2gFAJC1PnZFe09RHJJI6ahiowaKZufp9s",
|
||||
"rnHNYndrgu461osbbmvn9fK+fdo2m6wNQXPY2HpJWmdcKcRqEoRWhPpubgZ3C+r1lpWbYaU5NIFS7do0",
|
||||
"BqFGISBhW0ay8NuQLEeARfOYIIs4GG4bojgLglvOkoaBB+CCHFKk2gChxd4s24jrGhByGdX+K8S0enmH",
|
||||
"BVjv2DhVnpwHtMUhCLHpQKvUEqhsB0TItnK8043nKTpMtJlol9qVPGpyxxgkjgj91Mw1Jqq6Fa05BIxj",
|
||||
"e0S1GF+0s3Xfq/j4nRn3+j1VK/WOld7f5YfNXhE0k8gGirIQuQxquOUDlUxo/t9blL879dRZhOMCZ6z+",
|
||||
"PR6YX7R8r7GAq+kBsvBQxAXLzxiLANP6Lb6a1rQvppX+T/XSV5IH08eo3b7GK8/ciXrmMvSgm3Rl6vtG",
|
||||
"n6m7zAHaEyrIIqzcxkueglsDxHUYX2Bq22pl/r3OoNPvDZqvoPkD8LrIxb5ZS6FbkHxv6lSSxK2iXGJa",
|
||||
"gKygbpMlyxlDzZRs06RhFA5oNm1737J2967b8n5o38ptfaW9HLfeWOt2VRY496cjN6sExLawmQG4Hftt",
|
||||
"KdVvhz7Ljw6H/MAV1QLjCRBnKxS0m1zvsJyMp5RuS7x+r5msLG7NXrl9zLqCsHip5uOlaOnHTQs/UV+V",
|
||||
"qo0S6g72M7aldaFZLi82AUYPNr7uqhYWtcgsROhB0BsOu6/QeDwen/Uvv+CzbvR/55Pu5c3FUP02ueRv",
|
||||
"frrg734l//vu3Ydl+i98Pf4xvn7LJl+u573P573gfPil8/rmsX382CREvfRUlfj+5ydbSsS7tXnuk3Ii",
|
||||
"V1OFoIHoNWBuQJ/pTz9kB8KPv9xkbxp1mDfzcrrqRDEvGwmds3orbWpbPZLp20TbcqVC4igynQjRclwn",
|
||||
"Ij5Qk8jZx5TjBPshoF6r49jUOM89lstlC+thfeDbtaL9dnJ2cTm98HqtTiuUcaRtSKQG7Wr6WrO391Yc",
|
||||
"6Z4mwgkpZGgjp2cvK6gaGDn9VqfV1ZWBDDVMbdsJ1rGHiYaW+xkHLAFhRGGJ7GwXJUwlZQRH0Qr5jArb",
|
||||
"i2dzJOABOM6w0PDY5rR+kmqao4SjANQS22gtXnxMAmfkvGdCWtUc4wcg5GsWrMytjE4J9Y5KkoiYRmr7",
|
||||
"P/bCZfNeded1ZvladV32N5UKmBcsCVO2UNR6ne5zc58EhnEFcjOIQiyQkJhLCJQZB53Os/G3dzl13hNq",
|
||||
"msTW0tkrKcO/+8fzH6dSOcknoIgIRIw0hnv/j+f+geJUhoyTL+a6IQGuMkmUO6eRZPBnSPKJsiXN7WBA",
|
||||
"GP4ZLvCBwmMCvoQAgZqDmO+nXG2LYqzVx1gWZT/ere9cR6RxjFW9lwWNLLiodVmkaX8lwVofYk0XfG9A",
|
||||
"mssTfSbrqz5kz37EuCYYgZLMUtMXQNpR/CgNQKBlCDIEriZTZmhlEOoMAwII6uHmDcjyAwC39Ob/Y/Nj",
|
||||
"rJywEVYytNBXivotvQqxm6f09jVSMbwUH9Y/+9ucu1rs6jx37MpbcDUHKuPyl4WuLG68RK2XqHVQ1Lqp",
|
||||
"BJ6t4Uv3hLJu4M44lk00BOeEEhFWohcgeMS+RCrfVJuaMIo4yJRTCFAAqgYSiNHim+XsQbS5VN0RzfKu",
|
||||
"5Us82xvPNu/y6s51UzRl9vjCvDnPTPkS5l7C3N8jzNVik3JoXHBkFe40cVGIb7UQs3mGVgsuTZptprT1",
|
||||
"vdS2xlFhnr64+kO3/kaHJm83r33ZHFkwXrbZX7PNjKP//TYZzh0IRxFKmBBkFkHuTZtttr8mwtQ0maif",
|
||||
"/8WMkWzzvG+2QvrobN6oh2UAOd3fe+r3/+QzPDflyx592aNP2aNmbZG03pd5y3T7+XdlpzR7dVlYS07v",
|
||||
"VkQoUhjYV5B/x8xhpzrr/PLTxJlyrxsnpKWWi5DYPzHDCcmKK95+6DlVud/Zt4csSH3zYNZQ1xlEnbiQ",
|
||||
"eAFPZDGVeEHook5470qNIM2eOTrru/X/BwAA///lfr/IPEIAAA==",
|
||||
}
|
||||
|
||||
// GetSwagger returns the Swagger specification corresponding to the generated code
// in this file.
// The spec is embedded above as base64-encoded, gzip-compressed JSON
// (swaggerSpec); this decodes, decompresses, and parses it with kin-openapi.
// NOTE(review): the gzip reader is never Close()d — harmless for an
// in-memory source since ReadFrom drains it to EOF, but worth fixing at the
// generator level.
func GetSwagger() (*openapi3.Swagger, error) {
	// Decode the base64 payload into the raw gzip stream.
	zipped, err := base64.StdEncoding.DecodeString(strings.Join(swaggerSpec, ""))
	if err != nil {
		return nil, fmt.Errorf("error base64 decoding spec: %s", err)
	}
	zr, err := gzip.NewReader(bytes.NewReader(zipped))
	if err != nil {
		return nil, fmt.Errorf("error decompressing spec: %s", err)
	}
	// Decompress the whole spec into memory.
	var buf bytes.Buffer
	_, err = buf.ReadFrom(zr)
	if err != nil {
		return nil, fmt.Errorf("error decompressing spec: %s", err)
	}

	// Parse the JSON spec into an openapi3 document.
	swagger, err := openapi3.NewSwaggerLoader().LoadSwaggerFromData(buf.Bytes())
	if err != nil {
		return nil, fmt.Errorf("error loading Swagger: %s", err)
	}
	return swagger, nil
}
|
||||
798
internal/cloudapi/v2/openapi.v2.yml
Normal file
798
internal/cloudapi/v2/openapi.v2.yml
Normal file
|
|
@ -0,0 +1,798 @@
|
|||
---
|
||||
openapi: 3.0.1
|
||||
info:
|
||||
version: '2'
|
||||
title: OSBuild Composer cloud api
|
||||
description: Service to build and install images.
|
||||
license:
|
||||
name: Apache 2.0
|
||||
url: https://www.apache.org/licenses/LICENSE-2.0.html
|
||||
|
||||
servers:
|
||||
- url: https://api.openshift.com/api/composer/v2
|
||||
description: Main (production) server
|
||||
- url: https://api.stage.openshift.com/api/composer/v2
|
||||
description: Staging server
|
||||
- url: /api/composer/v2
|
||||
description: current domain
|
||||
|
||||
paths:
|
||||
/openapi:
|
||||
get:
|
||||
operationId: getOpenapi
|
||||
summary: Get the openapi spec in json format
|
||||
security:
|
||||
- Bearer: []
|
||||
responses:
|
||||
'200':
|
||||
description: openapi spec in json format
|
||||
'500':
|
||||
description: Unexpected error occurred
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
|
||||
/compose/{id}:
|
||||
get:
|
||||
operationId: getComposeStatus
|
||||
summary: The status of a compose
|
||||
security:
|
||||
- Bearer: []
|
||||
parameters:
|
||||
- in: path
|
||||
name: id
|
||||
schema:
|
||||
type: string
|
||||
format: uuid
|
||||
example: '123e4567-e89b-12d3-a456-426655440000'
|
||||
required: true
|
||||
description: ID of compose status to get
|
||||
description: |-
|
||||
Get the status of a running or completed compose.
|
||||
This includes whether or not the compose succeeded.
|
||||
responses:
|
||||
'200':
|
||||
description: compose status
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ComposeStatus'
|
||||
'400':
|
||||
description: Invalid compose id
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
'401':
|
||||
description: Auth token is invalid
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
'403':
|
||||
description: Unauthorized to perform operation
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
'404':
|
||||
description: Unknown compose id
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
'500':
|
||||
description: Unexpected error occurred
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
|
||||
/compose/{id}/metadata:
|
||||
get:
|
||||
operationId: getComposeMetadata
|
||||
summary: Get the metadata for a compose.
|
||||
security:
|
||||
- Bearer: []
|
||||
parameters:
|
||||
- in: path
|
||||
name: id
|
||||
schema:
|
||||
type: string
|
||||
format: uuid
|
||||
example: 123e4567-e89b-12d3-a456-426655440000
|
||||
required: true
|
||||
description: ID of compose status to get
|
||||
description: |-
|
||||
Get the metadata of a finished compose.
|
||||
The exact information returned depends on the requested image type.
|
||||
responses:
|
||||
'200':
|
||||
description: The metadata for the given compose.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ComposeMetadata'
|
||||
'400':
|
||||
description: Invalid compose id
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
'401':
|
||||
description: Auth token is invalid
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
'403':
|
||||
description: Unauthorized to perform operation
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
'404':
|
||||
description: Unknown compose id
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
'500':
|
||||
description: Unexpected error occurred
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
|
||||
/compose:
|
||||
post:
|
||||
operationId: postCompose
|
||||
summary: Create compose
|
||||
description: Create a new compose, potentially consisting of several images and upload each to their destinations.
|
||||
security:
|
||||
- Bearer: []
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ComposeRequest'
|
||||
responses:
|
||||
'201':
|
||||
description: Compose has started
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ComposeId'
|
||||
'400':
|
||||
description: Invalid compose request
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
'401':
|
||||
description: Auth token is invalid
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
'403':
|
||||
description: Unauthorized to perform operation
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
'404':
|
||||
description: Unknown compose id
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
'500':
|
||||
description: Unexpected error occurred
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
|
||||
/errors/{id}:
|
||||
get:
|
||||
operationId: getError
|
||||
summary: Get error description
|
||||
description: Get an instance of the error specified by id
|
||||
security:
|
||||
- Bearer: []
|
||||
parameters:
|
||||
- in: path
|
||||
name: id
|
||||
schema:
|
||||
type: string
|
||||
example: '13'
|
||||
required: true
|
||||
description: ID of the error
|
||||
responses:
|
||||
'200':
  description: An instance of the requested error
  content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
'401':
|
||||
description: Auth token is invalid
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
'403':
|
||||
description: Unauthorized to perform operation
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
'404':
|
||||
description: Unknown error id
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
'500':
|
||||
description: Unexpected error occurred
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
|
||||
/errors:
|
||||
get:
|
||||
operationId: getErrorList
|
||||
summary: Get a list of all possible errors
|
||||
security:
|
||||
- Bearer: []
|
||||
parameters:
|
||||
- $ref: '#/components/parameters/page'
|
||||
- $ref: '#/components/parameters/size'
|
||||
responses:
|
||||
'200':
|
||||
description: A list of errors
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ErrorList'
|
||||
'401':
|
||||
description: Auth token is invalid
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
'403':
|
||||
description: Unauthorized to perform operation
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
'404':
|
||||
description: Unknown error id
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
'500':
|
||||
description: Unexpected error occurred
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
|
||||
components:
|
||||
schemas:
|
||||
ObjectReference:
|
||||
type: object
|
||||
required:
|
||||
- id
|
||||
- kind
|
||||
- href
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
href:
|
||||
type: string
|
||||
|
||||
List:
|
||||
type: object
|
||||
properties:
|
||||
kind:
|
||||
type: string
|
||||
page:
|
||||
type: integer
|
||||
size:
|
||||
type: integer
|
||||
total:
|
||||
type: integer
|
||||
required:
|
||||
- kind
|
||||
- page
|
||||
- size
|
||||
- total
|
||||
- items
|
||||
|
||||
Error:
|
||||
allOf:
|
||||
- $ref: '#/components/schemas/ObjectReference'
|
||||
- type: object
|
||||
required:
|
||||
- code
|
||||
- reason
|
||||
- operation_id
|
||||
properties:
|
||||
code:
|
||||
type: string
|
||||
reason:
|
||||
type: string
|
||||
operation_id:
|
||||
type: string
|
||||
|
||||
ErrorList:
|
||||
allOf:
|
||||
- $ref: '#/components/schemas/List'
|
||||
- type: object
|
||||
required:
|
||||
- items
|
||||
properties:
|
||||
items:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/Error'
|
||||
|
||||
ComposeStatus:
|
||||
allOf:
|
||||
- $ref: '#/components/schemas/ObjectReference'
|
||||
- type: object
|
||||
required:
|
||||
- image_status
|
||||
properties:
|
||||
image_status:
|
||||
$ref: '#/components/schemas/ImageStatus'
|
||||
ImageStatus:
|
||||
required:
|
||||
- status
|
||||
properties:
|
||||
status:
|
||||
$ref: '#/components/schemas/ImageStatusValue'
|
||||
upload_status:
|
||||
$ref: '#/components/schemas/UploadStatus'
|
||||
ImageStatusValue:
|
||||
type: string
|
||||
enum: ['success', 'failure', 'pending', 'building', 'uploading', 'registering']
|
||||
UploadStatus:
|
||||
required:
|
||||
- status
|
||||
- type
|
||||
- options
|
||||
properties:
|
||||
status:
|
||||
type: string
|
||||
enum: ['success', 'failure', 'pending', 'running']
|
||||
type:
|
||||
$ref: '#/components/schemas/UploadTypes'
|
||||
options:
|
||||
oneOf:
|
||||
- $ref: '#/components/schemas/AWSUploadStatus'
|
||||
- $ref: '#/components/schemas/AWSS3UploadStatus'
|
||||
- $ref: '#/components/schemas/GCPUploadStatus'
|
||||
- $ref: '#/components/schemas/AzureUploadStatus'
|
||||
AWSUploadStatus:
|
||||
type: object
|
||||
required:
|
||||
- ami
|
||||
- region
|
||||
properties:
|
||||
ami:
|
||||
type: string
|
||||
example: 'ami-0c830793775595d4b'
|
||||
region:
|
||||
type: string
|
||||
example: 'eu-west-1'
|
||||
AWSS3UploadStatus:
|
||||
type: object
|
||||
required:
|
||||
- url
|
||||
properties:
|
||||
url:
|
||||
type: string
|
||||
GCPUploadStatus:
|
||||
type: object
|
||||
required:
|
||||
- project_id
|
||||
- image_name
|
||||
properties:
|
||||
project_id:
|
||||
type: string
|
||||
example: 'ascendant-braid-303513'
|
||||
image_name:
|
||||
type: string
|
||||
example: 'my-image'
|
||||
AzureUploadStatus:
|
||||
type: object
|
||||
required:
|
||||
- image_name
|
||||
properties:
|
||||
image_name:
|
||||
type: string
|
||||
example: 'my-image'
|
||||
|
||||
ComposeMetadata:
|
||||
allOf:
|
||||
- $ref: '#/components/schemas/ObjectReference'
|
||||
- type: object
|
||||
properties:
|
||||
packages:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/PackageMetadata'
|
||||
description: 'Package list including NEVRA'
|
||||
ostree_commit:
|
||||
type: string
|
||||
description: 'ID (hash) of the built commit'
|
||||
PackageMetadata:
|
||||
required:
|
||||
- type
|
||||
- name
|
||||
- version
|
||||
- release
|
||||
- arch
|
||||
- sigmd5
|
||||
properties:
|
||||
type:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
version:
|
||||
type: string
|
||||
release:
|
||||
type: string
|
||||
epoch:
|
||||
type: string
|
||||
arch:
|
||||
type: string
|
||||
sigmd5:
|
||||
type: string
|
||||
signature:
|
||||
type: string
|
||||
|
||||
ComposeRequest:
|
||||
allOf:
|
||||
- $ref: '#/components/schemas/ObjectReference'
|
||||
- type: object
|
||||
required:
|
||||
- distribution
|
||||
- image_requests
|
||||
properties:
|
||||
distribution:
|
||||
type: string
|
||||
example: 'rhel-8'
|
||||
image_requests:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/ImageRequest'
|
||||
customizations:
|
||||
$ref: '#/components/schemas/Customizations'
|
||||
ImageRequest:
|
||||
required:
|
||||
- architecture
|
||||
- image_type
|
||||
- repositories
|
||||
- upload_request
|
||||
properties:
|
||||
architecture:
|
||||
type: string
|
||||
example: 'x86_64'
|
||||
image_type:
|
||||
type: string
|
||||
example: 'ami'
|
||||
repositories:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/Repository'
|
||||
ostree:
|
||||
$ref: '#/components/schemas/OSTree'
|
||||
upload_request:
|
||||
$ref: '#/components/schemas/UploadRequest'
|
||||
Repository:
|
||||
type: object
|
||||
required:
|
||||
- rhsm
|
||||
properties:
|
||||
rhsm:
|
||||
type: boolean
|
||||
baseurl:
|
||||
type: string
|
||||
format: url
|
||||
example: 'https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/'
|
||||
mirrorlist:
|
||||
type: string
|
||||
format: url
|
||||
example: 'http://mirrorlist.centos.org/?release=8-stream&arch=aarch64&repo=BaseOS'
|
||||
metalink:
|
||||
type: string
|
||||
format: url
|
||||
example: 'https://mirrors.fedoraproject.org/metalink?repo=fedora-32&arch=x86_64'
|
||||
UploadRequest:
|
||||
type: object
|
||||
required:
|
||||
- type
|
||||
- options
|
||||
properties:
|
||||
type:
|
||||
$ref: '#/components/schemas/UploadTypes'
|
||||
options:
|
||||
oneOf:
|
||||
- $ref: '#/components/schemas/AWSUploadRequestOptions'
|
||||
- $ref: '#/components/schemas/AWSS3UploadRequestOptions'
|
||||
- $ref: '#/components/schemas/GCPUploadRequestOptions'
|
||||
- $ref: '#/components/schemas/AzureUploadRequestOptions'
|
||||
UploadTypes:
|
||||
type: string
|
||||
enum: ['aws', 'aws.s3', 'gcp', 'azure']
|
||||
AWSUploadRequestOptions:
|
||||
type: object
|
||||
required:
|
||||
- region
|
||||
- s3
|
||||
- ec2
|
||||
properties:
|
||||
region:
|
||||
type: string
|
||||
example: 'eu-west-1'
|
||||
s3:
|
||||
$ref: '#/components/schemas/AWSUploadRequestOptionsS3'
|
||||
ec2:
|
||||
$ref: '#/components/schemas/AWSUploadRequestOptionsEc2'
|
||||
AWSS3UploadRequestOptions:
|
||||
type: object
|
||||
required:
|
||||
- region
|
||||
- s3
|
||||
properties:
|
||||
region:
|
||||
type: string
|
||||
example: 'eu-west-1'
|
||||
s3:
|
||||
$ref: '#/components/schemas/AWSUploadRequestOptionsS3'
|
||||
AWSUploadRequestOptionsS3:
|
||||
type: object
|
||||
required:
|
||||
- access_key_id
|
||||
- secret_access_key
|
||||
- bucket
|
||||
properties:
|
||||
access_key_id:
|
||||
type: string
|
||||
example: 'AKIAIOSFODNN7EXAMPLE'
|
||||
secret_access_key:
|
||||
type: string
|
||||
format: password
|
||||
example: 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY'
|
||||
bucket:
|
||||
type: string
|
||||
example: 'my-bucket'
|
||||
AWSUploadRequestOptionsEc2:
|
||||
type: object
|
||||
required:
|
||||
- access_key_id
|
||||
- secret_access_key
|
||||
properties:
|
||||
access_key_id:
|
||||
type: string
|
||||
example: 'AKIAIOSFODNN7EXAMPLE'
|
||||
secret_access_key:
|
||||
type: string
|
||||
format: password
|
||||
example: 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY'
|
||||
snapshot_name:
|
||||
type: string
|
||||
example: 'my-snapshot'
|
||||
share_with_accounts:
|
||||
type: array
|
||||
example: ['123456789012']
|
||||
items:
|
||||
type: string
|
||||
GCPUploadRequestOptions:
|
||||
type: object
|
||||
required:
|
||||
- bucket
|
||||
properties:
|
||||
region:
|
||||
type: string
|
||||
example: 'eu'
|
||||
description: |
|
||||
The GCP region where the OS image will be imported to and shared from.
|
||||
The value must be a valid GCP location. See https://cloud.google.com/storage/docs/locations.
|
||||
If not specified, the multi-region location closest to the source
|
||||
(source Storage Bucket location) is chosen automatically.
|
||||
bucket:
|
||||
type: string
|
||||
example: 'my-example-bucket'
|
||||
description: 'Name of an existing STANDARD Storage class Bucket.'
|
||||
# don't expose the os type for now
|
||||
# os:
|
||||
# type: string
|
||||
# example: 'rhel-8-byol'
|
||||
# description: 'OS of the disk image being imported needed for installation of GCP guest tools.'
|
||||
image_name:
|
||||
type: string
|
||||
example: 'my-image'
|
||||
description: |
|
||||
The name to use for the imported and shared Compute Engine image.
|
||||
The image name must be unique within the GCP project, which is used
|
||||
for the OS image upload and import. If not specified a random
|
||||
'composer-api-<uuid>' string is used as the image name.
|
||||
share_with_accounts:
|
||||
type: array
|
||||
example: [
|
||||
'user:alice@example.com',
|
||||
'serviceAccount:my-other-app@appspot.gserviceaccount.com',
|
||||
'group:admins@example.com',
|
||||
'domain:example.com'
|
||||
]
|
||||
description: |
|
||||
List of valid Google accounts to share the imported Compute Engine image with.
|
||||
Each string must contain a specifier of the account type. Valid formats are:
|
||||
- 'user:{emailid}': An email address that represents a specific
|
||||
Google account. For example, 'alice@example.com'.
|
||||
- 'serviceAccount:{emailid}': An email address that represents a
|
||||
service account. For example, 'my-other-app@appspot.gserviceaccount.com'.
|
||||
- 'group:{emailid}': An email address that represents a Google group.
|
||||
For example, 'admins@example.com'.
|
||||
- 'domain:{domain}': The G Suite domain (primary) that represents all
|
||||
the users of that domain. For example, 'google.com' or 'example.com'.
|
||||
If not specified, the imported Compute Engine image is not shared with any
|
||||
account.
|
||||
items:
|
||||
type: string
|
||||
AzureUploadRequestOptions:
|
||||
type: object
|
||||
required:
|
||||
- tenant_id
|
||||
- subscription_id
|
||||
- resource_group
|
||||
- location
|
||||
properties:
|
||||
tenant_id:
|
||||
type: string
|
||||
example: '5c7ef5b6-1c3f-4da0-a622-0b060239d7d7'
|
||||
description: |
|
||||
ID of the tenant where the image should be uploaded.
|
||||
How to find it in the Azure Portal:
|
||||
https://docs.microsoft.com/en-us/azure/active-directory/fundamentals/active-directory-how-to-find-tenant
|
||||
subscription_id:
|
||||
type: string
|
||||
example: '4e5d8b2c-ab24-4413-90c5-612306e809e2'
|
||||
description: |
|
||||
ID of subscription where the image should be uploaded.
|
||||
resource_group:
|
||||
type: string
|
||||
example: 'ToucanResourceGroup'
|
||||
description: |
|
||||
Name of the resource group where the image should be uploaded.
|
||||
location:
|
||||
type: string
|
||||
example: 'westeurope'
|
||||
description: |
|
||||
Location where the image should be uploaded and registered.
|
||||
How to list all locations:
|
||||
https://docs.microsoft.com/en-us/cli/azure/account?view=azure-cli-latest#az_account_list_locations
|
||||
image_name:
|
||||
type: string
|
||||
example: 'my-image'
|
||||
description: |
|
||||
Name of the uploaded image. It must be unique in the given resource group.
|
||||
If name is omitted from the request, a random one based on a UUID is
|
||||
generated.
|
||||
Customizations:
|
||||
type: object
|
||||
properties:
|
||||
subscription:
|
||||
$ref: '#/components/schemas/Subscription'
|
||||
packages:
|
||||
type: array
|
||||
example: ['postgres']
|
||||
items:
|
||||
type: string
|
||||
users:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/User'
|
||||
OSTree:
|
||||
type: object
|
||||
properties:
|
||||
url:
|
||||
type: string
|
||||
ref:
|
||||
type: string
|
||||
example: 'rhel/8/x86_64/edge'
|
||||
Subscription:
|
||||
type: object
|
||||
required:
|
||||
- organization
|
||||
- activation_key
|
||||
- server_url
|
||||
- base_url
|
||||
- insights
|
||||
properties:
|
||||
organization:
|
||||
type: string
|
||||
example: '2040324'
|
||||
activation_key:
|
||||
type: string
|
||||
format: password
|
||||
example: 'my-secret-key'
|
||||
server_url:
|
||||
type: string
|
||||
example: 'subscription.rhsm.redhat.com'
|
||||
base_url:
|
||||
type: string
|
||||
format: url
|
||||
example: 'http://cdn.redhat.com/'
|
||||
insights:
|
||||
type: boolean
|
||||
example: true
|
||||
User:
|
||||
allOf:
|
||||
- $ref: '#/components/schemas/ObjectReference'
|
||||
- type: object
|
||||
required:
|
||||
- name
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
example: "user1"
|
||||
groups:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
example: "group1"
|
||||
key:
|
||||
type: string
|
||||
example: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINrGKErMYi+MMUwuHaRAJmRLoIzRf2qD2dD5z0BTx/6x"
|
||||
|
||||
ComposeId:
|
||||
allOf:
|
||||
- $ref: '#/components/schemas/ObjectReference'
|
||||
- type: object
|
||||
required:
|
||||
- id
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
format: uuid
|
||||
example: '123e4567-e89b-12d3-a456-426655440000'
|
||||
|
||||
parameters:
|
||||
page:
|
||||
name: page
|
||||
in: query
|
||||
description: Page index
|
||||
required: false
|
||||
schema:
|
||||
type: string
|
||||
examples:
|
||||
page:
|
||||
value: "1"
|
||||
size:
|
||||
name: size
|
||||
in: query
|
||||
description: Number of items in each page
|
||||
required: false
|
||||
schema:
|
||||
type: string
|
||||
examples:
|
||||
size:
|
||||
value: "100"
|
||||
|
||||
securitySchemes:
|
||||
Bearer:
|
||||
scheme: bearer
|
||||
bearerFormat: JWT
|
||||
type: http
|
||||
639
internal/cloudapi/v2/v2.go
Normal file
639
internal/cloudapi/v2/v2.go
Normal file
|
|
@ -0,0 +1,639 @@
|
|||
//go:generate go run github.com/deepmap/oapi-codegen/cmd/oapi-codegen --package=v2 --generate types,spec,server -o openapi.v2.gen.go openapi.v2.yml
|
||||
package v2
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"math"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/labstack/echo/v4"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/blueprint"
|
||||
"github.com/osbuild/osbuild-composer/internal/common"
|
||||
"github.com/osbuild/osbuild-composer/internal/distro"
|
||||
"github.com/osbuild/osbuild-composer/internal/distroregistry"
|
||||
"github.com/osbuild/osbuild-composer/internal/osbuild1"
|
||||
"github.com/osbuild/osbuild-composer/internal/ostree"
|
||||
"github.com/osbuild/osbuild-composer/internal/prometheus"
|
||||
"github.com/osbuild/osbuild-composer/internal/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/target"
|
||||
"github.com/osbuild/osbuild-composer/internal/worker"
|
||||
)
|
||||
|
||||
// Server represents the state of the cloud Server
|
||||
type Server struct {
|
||||
logger *log.Logger
|
||||
workers *worker.Server
|
||||
rpmMetadata rpmmd.RPMMD
|
||||
distros *distroregistry.Registry
|
||||
}
|
||||
|
||||
type apiHandlers struct {
|
||||
server *Server
|
||||
}
|
||||
|
||||
type binder struct{}
|
||||
|
||||
func NewServer(logger *log.Logger, workers *worker.Server, rpmMetadata rpmmd.RPMMD, distros *distroregistry.Registry) *Server {
|
||||
server := &Server{
|
||||
workers: workers,
|
||||
rpmMetadata: rpmMetadata,
|
||||
distros: distros,
|
||||
}
|
||||
return server
|
||||
}
|
||||
|
||||
func (server *Server) Handler(path string) http.Handler {
|
||||
e := echo.New()
|
||||
e.Binder = binder{}
|
||||
e.HTTPErrorHandler = server.HTTPErrorHandler
|
||||
e.StdLogger = server.logger
|
||||
e.Pre(common.OperationIDMiddleware)
|
||||
|
||||
handler := apiHandlers{
|
||||
server: server,
|
||||
}
|
||||
RegisterHandlers(e.Group(path, server.IncRequests), &handler)
|
||||
|
||||
return e
|
||||
}
|
||||
|
||||
func (b binder) Bind(i interface{}, ctx echo.Context) error {
|
||||
contentType := ctx.Request().Header["Content-Type"]
|
||||
if len(contentType) != 1 || contentType[0] != "application/json" {
|
||||
return HTTPError(ErrorUnsupportedMediaType)
|
||||
}
|
||||
|
||||
err := json.NewDecoder(ctx.Request().Body).Decode(i)
|
||||
if err != nil {
|
||||
return HTTPErrorWithInternal(ErrorBodyDecodingError, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) IncRequests(next echo.HandlerFunc) echo.HandlerFunc {
|
||||
return func(c echo.Context) error {
|
||||
prometheus.TotalRequests.Inc()
|
||||
if strings.HasSuffix(c.Path(), "/compose") {
|
||||
prometheus.ComposeRequests.Inc()
|
||||
}
|
||||
return next(c)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *apiHandlers) GetOpenapi(ctx echo.Context) error {
|
||||
spec, err := GetSwagger()
|
||||
if err != nil {
|
||||
return HTTPError(ErrorFailedToLoadOpenAPISpec)
|
||||
}
|
||||
return ctx.JSON(http.StatusOK, spec)
|
||||
}
|
||||
|
||||
func (h *apiHandlers) GetErrorList(ctx echo.Context, params GetErrorListParams) error {
|
||||
page := 0
|
||||
var err error
|
||||
if params.Page != nil {
|
||||
page, err = strconv.Atoi(string(*params.Page))
|
||||
if err != nil {
|
||||
return HTTPError(ErrorInvalidPageParam)
|
||||
}
|
||||
}
|
||||
|
||||
size := 100
|
||||
if params.Size != nil {
|
||||
size, err = strconv.Atoi(string(*params.Size))
|
||||
if err != nil {
|
||||
return HTTPError(ErrorInvalidSizeParam)
|
||||
}
|
||||
}
|
||||
|
||||
return ctx.JSON(http.StatusOK, APIErrorList(page, size, ctx))
|
||||
}
|
||||
|
||||
func (h *apiHandlers) GetError(ctx echo.Context, id string) error {
|
||||
errorId, err := strconv.Atoi(id)
|
||||
if err != nil {
|
||||
return HTTPError(ErrorInvalidErrorId)
|
||||
}
|
||||
|
||||
apiError := APIError(ServiceErrorCode(errorId), nil, ctx)
|
||||
// If the service error wasn't found, it's a 404 in this instance
|
||||
if apiError.Id == fmt.Sprintf("%d", ErrorServiceErrorNotFound) {
|
||||
return HTTPError(ErrorErrorNotFound)
|
||||
}
|
||||
return ctx.JSON(http.StatusOK, apiError)
|
||||
}
|
||||
|
||||
func (h *apiHandlers) PostCompose(ctx echo.Context) error {
|
||||
var request ComposeRequest
|
||||
err := ctx.Bind(&request)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
distribution := h.server.distros.GetDistro(request.Distribution)
|
||||
if distribution == nil {
|
||||
return HTTPError(ErrorUnsupportedDistribution)
|
||||
}
|
||||
|
||||
var bp = blueprint.Blueprint{}
|
||||
err = bp.Initialize()
|
||||
if err != nil {
|
||||
return HTTPError(ErrorFailedToInitializeBlueprint)
|
||||
}
|
||||
if request.Customizations != nil && request.Customizations.Packages != nil {
|
||||
for _, p := range *request.Customizations.Packages {
|
||||
bp.Packages = append(bp.Packages, blueprint.Package{
|
||||
Name: p,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type imageRequest struct {
|
||||
manifest distro.Manifest
|
||||
arch string
|
||||
exports []string
|
||||
}
|
||||
imageRequests := make([]imageRequest, len(request.ImageRequests))
|
||||
var targets []*target.Target
|
||||
|
||||
// use the same seed for all images so we get the same IDs
|
||||
bigSeed, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
|
||||
if err != nil {
|
||||
return HTTPError(ErrorFailedToGenerateManifestSeed)
|
||||
}
|
||||
manifestSeed := bigSeed.Int64()
|
||||
|
||||
for i, ir := range request.ImageRequests {
|
||||
arch, err := distribution.GetArch(ir.Architecture)
|
||||
if err != nil {
|
||||
return HTTPError(ErrorUnsupportedArchitecture)
|
||||
}
|
||||
imageType, err := arch.GetImageType(ir.ImageType)
|
||||
if err != nil {
|
||||
return HTTPError(ErrorUnsupportedImageType)
|
||||
}
|
||||
repositories := make([]rpmmd.RepoConfig, len(ir.Repositories))
|
||||
for j, repo := range ir.Repositories {
|
||||
repositories[j].RHSM = repo.Rhsm
|
||||
|
||||
if repo.Baseurl != nil {
|
||||
repositories[j].BaseURL = *repo.Baseurl
|
||||
} else if repo.Mirrorlist != nil {
|
||||
repositories[j].MirrorList = *repo.Mirrorlist
|
||||
} else if repo.Metalink != nil {
|
||||
repositories[j].Metalink = *repo.Metalink
|
||||
} else {
|
||||
return HTTPError(ErrorInvalidRepository)
|
||||
}
|
||||
}
|
||||
|
||||
packageSets := imageType.PackageSets(bp)
|
||||
pkgSpecSets := make(map[string][]rpmmd.PackageSpec)
|
||||
for name, packages := range packageSets {
|
||||
pkgs, _, err := h.server.rpmMetadata.Depsolve(packages, repositories, distribution.ModulePlatformID(), arch.Name(), distribution.Releasever())
|
||||
var dnfError *rpmmd.DNFError
|
||||
if err != nil && errors.As(err, &dnfError) {
|
||||
return HTTPError(ErrorDNFError)
|
||||
} else if err != nil {
|
||||
return HTTPError(ErrorFailedToDepsolve)
|
||||
}
|
||||
pkgSpecSets[name] = pkgs
|
||||
}
|
||||
|
||||
imageOptions := distro.ImageOptions{Size: imageType.Size(0)}
|
||||
if request.Customizations != nil && request.Customizations.Subscription != nil {
|
||||
imageOptions.Subscription = &distro.SubscriptionImageOptions{
|
||||
Organization: request.Customizations.Subscription.Organization,
|
||||
ActivationKey: request.Customizations.Subscription.ActivationKey,
|
||||
ServerUrl: request.Customizations.Subscription.ServerUrl,
|
||||
BaseUrl: request.Customizations.Subscription.BaseUrl,
|
||||
Insights: request.Customizations.Subscription.Insights,
|
||||
}
|
||||
}
|
||||
|
||||
// set default ostree ref, if one not provided
|
||||
ostreeOptions := ir.Ostree
|
||||
if ostreeOptions == nil || ostreeOptions.Ref == nil {
|
||||
imageOptions.OSTree = distro.OSTreeImageOptions{Ref: imageType.OSTreeRef()}
|
||||
} else if !ostree.VerifyRef(*ostreeOptions.Ref) {
|
||||
return HTTPError(ErrorInvalidOSTreeRef)
|
||||
} else {
|
||||
imageOptions.OSTree = distro.OSTreeImageOptions{Ref: *ostreeOptions.Ref}
|
||||
}
|
||||
|
||||
var parent string
|
||||
if ostreeOptions != nil && ostreeOptions.Url != nil {
|
||||
imageOptions.OSTree.URL = *ostreeOptions.Url
|
||||
parent, err = ostree.ResolveRef(imageOptions.OSTree.URL, imageOptions.OSTree.Ref)
|
||||
if err != nil {
|
||||
return HTTPError(ErrorInvalidOSTreeRepo)
|
||||
}
|
||||
imageOptions.OSTree.Parent = parent
|
||||
}
|
||||
|
||||
// Set the blueprint customisation to take care of the user
|
||||
var blueprintCustoms *blueprint.Customizations
|
||||
if request.Customizations != nil && request.Customizations.Users != nil {
|
||||
var userCustomizations []blueprint.UserCustomization
|
||||
for _, user := range *request.Customizations.Users {
|
||||
var groups []string
|
||||
if user.Groups != nil {
|
||||
groups = *user.Groups
|
||||
} else {
|
||||
groups = nil
|
||||
}
|
||||
userCustomizations = append(userCustomizations,
|
||||
blueprint.UserCustomization{
|
||||
Name: user.Name,
|
||||
Key: user.Key,
|
||||
Groups: groups,
|
||||
},
|
||||
)
|
||||
}
|
||||
blueprintCustoms = &blueprint.Customizations{
|
||||
User: userCustomizations,
|
||||
}
|
||||
}
|
||||
|
||||
manifest, err := imageType.Manifest(blueprintCustoms, imageOptions, repositories, pkgSpecSets, manifestSeed)
|
||||
if err != nil {
|
||||
return HTTPError(ErrorFailedToMakeManifest)
|
||||
}
|
||||
|
||||
imageRequests[i].manifest = manifest
|
||||
imageRequests[i].arch = arch.Name()
|
||||
imageRequests[i].exports = imageType.Exports()
|
||||
|
||||
uploadRequest := ir.UploadRequest
|
||||
/* oneOf is not supported by the openapi generator so marshal and unmarshal the uploadrequest based on the type */
|
||||
if uploadRequest.Type == UploadTypes_aws {
|
||||
var awsUploadOptions AWSUploadRequestOptions
|
||||
jsonUploadOptions, err := json.Marshal(uploadRequest.Options)
|
||||
if err != nil {
|
||||
return HTTPError(ErrorJSONMarshallingError)
|
||||
}
|
||||
err = json.Unmarshal(jsonUploadOptions, &awsUploadOptions)
|
||||
if err != nil {
|
||||
return HTTPError(ErrorJSONUnMarshallingError)
|
||||
}
|
||||
|
||||
var share []string
|
||||
if awsUploadOptions.Ec2.ShareWithAccounts != nil {
|
||||
share = *awsUploadOptions.Ec2.ShareWithAccounts
|
||||
}
|
||||
key := fmt.Sprintf("composer-api-%s", uuid.New().String())
|
||||
t := target.NewAWSTarget(&target.AWSTargetOptions{
|
||||
Filename: imageType.Filename(),
|
||||
Region: awsUploadOptions.Region,
|
||||
AccessKeyID: awsUploadOptions.S3.AccessKeyId,
|
||||
SecretAccessKey: awsUploadOptions.S3.SecretAccessKey,
|
||||
Bucket: awsUploadOptions.S3.Bucket,
|
||||
Key: key,
|
||||
ShareWithAccounts: share,
|
||||
})
|
||||
if awsUploadOptions.Ec2.SnapshotName != nil {
|
||||
t.ImageName = *awsUploadOptions.Ec2.SnapshotName
|
||||
} else {
|
||||
t.ImageName = key
|
||||
}
|
||||
|
||||
targets = append(targets, t)
|
||||
} else if uploadRequest.Type == UploadTypes_aws_s3 {
|
||||
var awsS3UploadOptions AWSS3UploadRequestOptions
|
||||
jsonUploadOptions, err := json.Marshal(uploadRequest.Options)
|
||||
if err != nil {
|
||||
return HTTPError(ErrorJSONMarshallingError)
|
||||
}
|
||||
err = json.Unmarshal(jsonUploadOptions, &awsS3UploadOptions)
|
||||
if err != nil {
|
||||
return HTTPError(ErrorJSONUnMarshallingError)
|
||||
}
|
||||
|
||||
key := fmt.Sprintf("composer-api-%s", uuid.New().String())
|
||||
t := target.NewAWSS3Target(&target.AWSS3TargetOptions{
|
||||
Filename: imageType.Filename(),
|
||||
Region: awsS3UploadOptions.Region,
|
||||
AccessKeyID: awsS3UploadOptions.S3.AccessKeyId,
|
||||
SecretAccessKey: awsS3UploadOptions.S3.SecretAccessKey,
|
||||
Bucket: awsS3UploadOptions.S3.Bucket,
|
||||
Key: key,
|
||||
})
|
||||
t.ImageName = key
|
||||
|
||||
targets = append(targets, t)
|
||||
} else if uploadRequest.Type == UploadTypes_gcp {
|
||||
var gcpUploadOptions GCPUploadRequestOptions
|
||||
jsonUploadOptions, err := json.Marshal(uploadRequest.Options)
|
||||
if err != nil {
|
||||
return HTTPError(ErrorJSONMarshallingError)
|
||||
}
|
||||
err = json.Unmarshal(jsonUploadOptions, &gcpUploadOptions)
|
||||
if err != nil {
|
||||
return HTTPError(ErrorJSONUnMarshallingError)
|
||||
}
|
||||
|
||||
var share []string
|
||||
if gcpUploadOptions.ShareWithAccounts != nil {
|
||||
share = *gcpUploadOptions.ShareWithAccounts
|
||||
}
|
||||
var region string
|
||||
if gcpUploadOptions.Region != nil {
|
||||
region = *gcpUploadOptions.Region
|
||||
}
|
||||
object := fmt.Sprintf("composer-api-%s", uuid.New().String())
|
||||
t := target.NewGCPTarget(&target.GCPTargetOptions{
|
||||
Filename: imageType.Filename(),
|
||||
Region: region,
|
||||
Os: "", // not exposed in cloudapi for now
|
||||
Bucket: gcpUploadOptions.Bucket,
|
||||
Object: object,
|
||||
ShareWithAccounts: share,
|
||||
})
|
||||
// Import will fail if an image with this name already exists
|
||||
if gcpUploadOptions.ImageName != nil {
|
||||
t.ImageName = *gcpUploadOptions.ImageName
|
||||
} else {
|
||||
t.ImageName = object
|
||||
}
|
||||
|
||||
targets = append(targets, t)
|
||||
} else if uploadRequest.Type == UploadTypes_azure {
|
||||
var azureUploadOptions AzureUploadRequestOptions
|
||||
jsonUploadOptions, err := json.Marshal(uploadRequest.Options)
|
||||
if err != nil {
|
||||
return HTTPError(ErrorJSONMarshallingError)
|
||||
}
|
||||
err = json.Unmarshal(jsonUploadOptions, &azureUploadOptions)
|
||||
if err != nil {
|
||||
return HTTPError(ErrorJSONUnMarshallingError)
|
||||
}
|
||||
t := target.NewAzureImageTarget(&target.AzureImageTargetOptions{
|
||||
Filename: imageType.Filename(),
|
||||
TenantID: azureUploadOptions.TenantId,
|
||||
Location: azureUploadOptions.Location,
|
||||
SubscriptionID: azureUploadOptions.SubscriptionId,
|
||||
ResourceGroup: azureUploadOptions.ResourceGroup,
|
||||
})
|
||||
|
||||
if azureUploadOptions.ImageName != nil {
|
||||
t.ImageName = *azureUploadOptions.ImageName
|
||||
} else {
|
||||
// if ImageName wasn't given, generate a random one
|
||||
t.ImageName = fmt.Sprintf("composer-api-%s", uuid.New().String())
|
||||
}
|
||||
|
||||
targets = append(targets, t)
|
||||
} else {
|
||||
return HTTPError(ErrorInvalidUploadType)
|
||||
}
|
||||
}
|
||||
|
||||
var ir imageRequest
|
||||
if len(imageRequests) == 1 {
|
||||
// NOTE: the store currently does not support multi-image composes
|
||||
ir = imageRequests[0]
|
||||
} else {
|
||||
return HTTPError(ErrorMultiImageCompose)
|
||||
}
|
||||
|
||||
id, err := h.server.workers.EnqueueOSBuild(ir.arch, &worker.OSBuildJob{
|
||||
Manifest: ir.manifest,
|
||||
Targets: targets,
|
||||
Exports: ir.exports,
|
||||
})
|
||||
if err != nil {
|
||||
return HTTPError(ErrorEnqueueingJob)
|
||||
}
|
||||
|
||||
return ctx.JSON(http.StatusCreated, &ComposeId{
|
||||
ObjectReference: ObjectReference{
|
||||
Href: "/api/composer/v2/compose",
|
||||
Id: id.String(),
|
||||
Kind: "ComposeId",
|
||||
},
|
||||
Id: id.String(),
|
||||
})
|
||||
}
|
||||
|
||||
func (h *apiHandlers) GetComposeStatus(ctx echo.Context, id string) error {
|
||||
jobId, err := uuid.Parse(id)
|
||||
if err != nil {
|
||||
return HTTPError(ErrorInvalidComposeId)
|
||||
}
|
||||
|
||||
var result worker.OSBuildJobResult
|
||||
status, _, err := h.server.workers.JobStatus(jobId, &result)
|
||||
if err != nil {
|
||||
return HTTPError(ErrorComposeNotFound)
|
||||
}
|
||||
|
||||
var us *UploadStatus
|
||||
if result.TargetResults != nil {
|
||||
// Only single upload target is allowed, therefore only a single upload target result is allowed as well
|
||||
if len(result.TargetResults) != 1 {
|
||||
return HTTPError(ErrorSeveralUploadTargets)
|
||||
}
|
||||
tr := *result.TargetResults[0]
|
||||
|
||||
var uploadType UploadTypes
|
||||
var uploadOptions interface{}
|
||||
|
||||
switch tr.Name {
|
||||
case "org.osbuild.aws":
|
||||
uploadType = UploadTypes_aws
|
||||
awsOptions := tr.Options.(*target.AWSTargetResultOptions)
|
||||
uploadOptions = AWSUploadStatus{
|
||||
Ami: awsOptions.Ami,
|
||||
Region: awsOptions.Region,
|
||||
}
|
||||
case "org.osbuild.aws.s3":
|
||||
uploadType = UploadTypes_aws_s3
|
||||
awsOptions := tr.Options.(*target.AWSS3TargetResultOptions)
|
||||
uploadOptions = AWSS3UploadStatus{
|
||||
Url: awsOptions.URL,
|
||||
}
|
||||
case "org.osbuild.gcp":
|
||||
uploadType = UploadTypes_gcp
|
||||
gcpOptions := tr.Options.(*target.GCPTargetResultOptions)
|
||||
uploadOptions = GCPUploadStatus{
|
||||
ImageName: gcpOptions.ImageName,
|
||||
ProjectId: gcpOptions.ProjectID,
|
||||
}
|
||||
case "org.osbuild.azure.image":
|
||||
uploadType = UploadTypes_azure
|
||||
gcpOptions := tr.Options.(*target.AzureImageTargetResultOptions)
|
||||
uploadOptions = AzureUploadStatus{
|
||||
ImageName: gcpOptions.ImageName,
|
||||
}
|
||||
default:
|
||||
return HTTPError(ErrorUnknownUploadTarget)
|
||||
}
|
||||
|
||||
us = &UploadStatus{
|
||||
Status: result.UploadStatus,
|
||||
Type: uploadType,
|
||||
Options: uploadOptions,
|
||||
}
|
||||
}
|
||||
|
||||
return ctx.JSON(http.StatusOK, ComposeStatus{
|
||||
ObjectReference: ObjectReference{
|
||||
Href: fmt.Sprintf("/api/composer/v2/compose/%v", jobId),
|
||||
Id: jobId.String(),
|
||||
Kind: "ComposeStatus",
|
||||
},
|
||||
ImageStatus: ImageStatus{
|
||||
Status: composeStatusFromJobStatus(status, &result),
|
||||
UploadStatus: us,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func composeStatusFromJobStatus(js *worker.JobStatus, result *worker.OSBuildJobResult) ImageStatusValue {
|
||||
if js.Canceled {
|
||||
return ImageStatusValue_failure
|
||||
}
|
||||
|
||||
if js.Started.IsZero() {
|
||||
return ImageStatusValue_pending
|
||||
}
|
||||
|
||||
if js.Finished.IsZero() {
|
||||
// TODO: handle also ImageStatusValue_uploading
|
||||
// TODO: handle also ImageStatusValue_registering
|
||||
return ImageStatusValue_building
|
||||
}
|
||||
|
||||
if result.Success {
|
||||
return ImageStatusValue_success
|
||||
}
|
||||
|
||||
return ImageStatusValue_failure
|
||||
}
|
||||
|
||||
// ComposeMetadata handles a /compose/{id}/metadata GET request
|
||||
func (h *apiHandlers) GetComposeMetadata(ctx echo.Context, id string) error {
|
||||
jobId, err := uuid.Parse(id)
|
||||
if err != nil {
|
||||
return HTTPError(ErrorInvalidComposeId)
|
||||
}
|
||||
|
||||
var result worker.OSBuildJobResult
|
||||
status, _, err := h.server.workers.JobStatus(jobId, &result)
|
||||
if err != nil {
|
||||
return HTTPError(ErrorComposeNotFound)
|
||||
}
|
||||
|
||||
var job worker.OSBuildJob
|
||||
if _, _, _, err = h.server.workers.Job(jobId, &job); err != nil {
|
||||
return HTTPError(ErrorComposeNotFound)
|
||||
}
|
||||
|
||||
if status.Finished.IsZero() {
|
||||
// job still running: empty response
|
||||
return ctx.JSON(200, ComposeMetadata{
|
||||
ObjectReference: ObjectReference{
|
||||
Href: fmt.Sprintf("/api/composer/v2/%v/metadata", jobId),
|
||||
Id: jobId.String(),
|
||||
Kind: "ComposeMetadata",
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
if status.Canceled || !result.Success {
|
||||
// job canceled or failed, empty response
|
||||
return ctx.JSON(200, ComposeMetadata{
|
||||
ObjectReference: ObjectReference{
|
||||
Href: fmt.Sprintf("/api/composer/v2/%v/metadata", jobId),
|
||||
Id: jobId.String(),
|
||||
Kind: "ComposeMetadata",
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
manifestVer, err := job.Manifest.Version()
|
||||
if err != nil {
|
||||
return HTTPError(ErrorFailedToParseManifestVersion)
|
||||
}
|
||||
|
||||
if result.OSBuildOutput == nil || result.OSBuildOutput.Assembler == nil {
|
||||
return HTTPError(ErrorMalformedOSBuildJobResult)
|
||||
}
|
||||
|
||||
var rpms []rpmmd.RPM
|
||||
var ostreeCommitResult *osbuild1.StageResult
|
||||
var coreStages []osbuild1.StageResult
|
||||
switch manifestVer {
|
||||
case "1":
|
||||
coreStages = result.OSBuildOutput.Stages
|
||||
if assemblerResult := result.OSBuildOutput.Assembler; assemblerResult.Name == "org.osbuild.ostree.commit" {
|
||||
ostreeCommitResult = result.OSBuildOutput.Assembler
|
||||
}
|
||||
case "2":
|
||||
// v2 manifest results store all stage output in the main stages
|
||||
// here we filter out the build stages to collect only the RPMs for the
|
||||
// core stages
|
||||
// the filtering relies on two assumptions:
|
||||
// 1. the build pipeline is named "build"
|
||||
// 2. the stage results from v2 manifests when converted to v1 are
|
||||
// named by prefixing the pipeline name
|
||||
for _, stage := range result.OSBuildOutput.Stages {
|
||||
if !strings.HasPrefix(stage.Name, "build") {
|
||||
coreStages = append(coreStages, stage)
|
||||
}
|
||||
}
|
||||
// find the ostree.commit stage
|
||||
for idx, stage := range result.OSBuildOutput.Stages {
|
||||
if strings.HasSuffix(stage.Name, "org.osbuild.ostree.commit") {
|
||||
ostreeCommitResult = &result.OSBuildOutput.Stages[idx]
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
return HTTPError(ErrorUnknownManifestVersion)
|
||||
}
|
||||
|
||||
rpms = rpmmd.OSBuildStagesToRPMs(coreStages)
|
||||
|
||||
packages := make([]PackageMetadata, len(rpms))
|
||||
for idx, rpm := range rpms {
|
||||
packages[idx] = PackageMetadata{
|
||||
Type: rpm.Type,
|
||||
Name: rpm.Name,
|
||||
Version: rpm.Version,
|
||||
Release: rpm.Release,
|
||||
Epoch: rpm.Epoch,
|
||||
Arch: rpm.Arch,
|
||||
Sigmd5: rpm.Sigmd5,
|
||||
Signature: rpm.Signature,
|
||||
}
|
||||
}
|
||||
|
||||
resp := &ComposeMetadata{
|
||||
ObjectReference: ObjectReference{
|
||||
Href: fmt.Sprintf("/api/composer/v2/compose/%v/metadata", jobId),
|
||||
Id: jobId.String(),
|
||||
Kind: "ComposeMetadata",
|
||||
},
|
||||
Packages: &packages,
|
||||
}
|
||||
|
||||
if ostreeCommitResult != nil && ostreeCommitResult.Metadata != nil {
|
||||
commitMetadata, ok := ostreeCommitResult.Metadata.(*osbuild1.OSTreeCommitStageMetadata)
|
||||
if !ok {
|
||||
return HTTPError(ErrorUnableToConvertOSTreeCommitStageMetadata)
|
||||
}
|
||||
resp.OstreeCommit = &commitMetadata.Compose.OSTreeCommit
|
||||
}
|
||||
|
||||
return ctx.JSON(200, resp)
|
||||
}
|
||||
417
internal/cloudapi/v2/v2_test.go
Normal file
417
internal/cloudapi/v2/v2_test.go
Normal file
|
|
@ -0,0 +1,417 @@
|
|||
package v2_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/cloudapi/v2"
|
||||
"github.com/osbuild/osbuild-composer/internal/distro/test_distro"
|
||||
distro_mock "github.com/osbuild/osbuild-composer/internal/mocks/distro"
|
||||
rpmmd_mock "github.com/osbuild/osbuild-composer/internal/mocks/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/test"
|
||||
"github.com/osbuild/osbuild-composer/internal/worker"
|
||||
)
|
||||
|
||||
func newV2Server(t *testing.T, dir string) (*v2.Server, *worker.Server) {
|
||||
rpmFixture := rpmmd_mock.BaseFixture(dir)
|
||||
rpm := rpmmd_mock.NewRPMMDMock(rpmFixture)
|
||||
require.NotNil(t, rpm)
|
||||
|
||||
distros, err := distro_mock.NewDefaultRegistry()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, distros)
|
||||
|
||||
v2Server := v2.NewServer(log.New(os.Stdout, "", 0), rpmFixture.Workers, rpm, distros)
|
||||
require.NotNil(t, v2Server)
|
||||
|
||||
return v2Server, rpmFixture.Workers
|
||||
}
|
||||
|
||||
func TestUnknownRoute(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("", "osbuild-composer-test-api-v2-")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
srv, _ := newV2Server(t, dir)
|
||||
|
||||
test.TestRoute(t, srv.Handler("/api/composer/v2"), false, "GET", "/api/composer/v2/badroute", ``, http.StatusNotFound, `
|
||||
{
|
||||
"href": "/api/composer/v2/errors/21",
|
||||
"id": "21",
|
||||
"kind": "Error",
|
||||
"code": "COMPOSER-21",
|
||||
"reason": "Requested resource doesn't exist"
|
||||
}`, "operation_id")
|
||||
}
|
||||
|
||||
func TestGetError(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("", "osbuild-composer-test-api-v2-")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
srv, _ := newV2Server(t, dir)
|
||||
|
||||
test.TestRoute(t, srv.Handler("/api/composer/v2"), false, "GET", "/api/composer/v2/errors/4", ``, http.StatusOK, `
|
||||
{
|
||||
"href": "/api/composer/v2/errors/4",
|
||||
"id": "4",
|
||||
"kind": "Error",
|
||||
"code": "COMPOSER-4",
|
||||
"reason": "Unsupported distribution"
|
||||
}`, "operation_id")
|
||||
|
||||
test.TestRoute(t, srv.Handler("/api/composer/v2"), false, "GET", "/api/composer/v2/errors/3000", ``, http.StatusNotFound, `
|
||||
{
|
||||
"href": "/api/composer/v2/errors/17",
|
||||
"id": "17",
|
||||
"kind": "Error",
|
||||
"code": "COMPOSER-17",
|
||||
"reason": "Error with given id not found"
|
||||
}`, "operation_id")
|
||||
}
|
||||
|
||||
func TestGetErrorList(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("", "osbuild-composer-test-api-v2-")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
srv, _ := newV2Server(t, dir)
|
||||
|
||||
test.TestRoute(t, srv.Handler("/api/composer/v2"), false, "GET", "/api/composer/v2/errors?page=3&size=1", ``, http.StatusOK, `
|
||||
{
|
||||
"kind": "ErrorList",
|
||||
"page": 3,
|
||||
"size": 1,
|
||||
"items": [{
|
||||
"href": "/api/composer/v2/errors/4",
|
||||
"id": "4",
|
||||
"kind": "Error",
|
||||
"code": "COMPOSER-4",
|
||||
"reason": "Unsupported distribution"
|
||||
}]
|
||||
}`, "operation_id", "total")
|
||||
}
|
||||
|
||||
// TestCompose exercises POST /compose: a request naming an unknown
// distribution, architecture, or image type must be rejected with the
// matching predefined error (COMPOSER-4/5/6), while a fully valid request
// is accepted with a 201 ComposeId response.
func TestCompose(t *testing.T) {
	dir, err := ioutil.TempDir("", "osbuild-composer-test-api-v2-")
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	srv, _ := newV2Server(t, dir)

	// unsupported distribution
	test.TestRoute(t, srv.Handler("/api/composer/v2"), false, "POST", "/api/composer/v2/compose", fmt.Sprintf(`
	{
		"distribution": "unsupported_distro",
		"image_requests":[{
			"architecture": "%s",
			"image_type": "%s",
			"repositories": [{
				"baseurl": "somerepo.org",
				"rhsm": false
			}],
			"upload_request": {
				"type": "aws.s3",
				"options": {
					"access_key_id": "somekey",
					"secret_access_key": "somesecretkey",
					"bucket": "somebucket"
				}
			}
		}]
	}`, test_distro.TestArchName, test_distro.TestImageTypeName), http.StatusBadRequest, `
	{
		"href": "/api/composer/v2/errors/4",
		"id": "4",
		"kind": "Error",
		"code": "COMPOSER-4",
		"reason": "Unsupported distribution"
	}`, "operation_id")

	// unsupported architecture
	test.TestRoute(t, srv.Handler("/api/composer/v2"), false, "POST", "/api/composer/v2/compose", fmt.Sprintf(`
	{
		"distribution": "%s",
		"image_requests":[{
			"architecture": "unsupported_arch",
			"image_type": "%s",
			"repositories": [{
				"baseurl": "somerepo.org",
				"rhsm": false
			}],
			"upload_request": {
				"type": "aws.s3",
				"options": {
					"access_key_id": "somekey",
					"secret_access_key": "somesecretkey",
					"bucket": "somebucket"
				}
			}
		}]
	}`, test_distro.TestDistroName, test_distro.TestImageTypeName), http.StatusBadRequest, `
	{
		"href": "/api/composer/v2/errors/5",
		"id": "5",
		"kind": "Error",
		"code": "COMPOSER-5",
		"reason": "Unsupported architecture"
	}`, "operation_id")

	// unsupported imagetype
	test.TestRoute(t, srv.Handler("/api/composer/v2"), false, "POST", "/api/composer/v2/compose", fmt.Sprintf(`
	{
		"distribution": "%s",
		"image_requests":[{
			"architecture": "%s",
			"image_type": "unsupported_image_type",
			"repositories": [{
				"baseurl": "somerepo.org",
				"rhsm": false
			}],
			"upload_request": {
				"type": "aws.s3",
				"options": {
					"access_key_id": "somekey",
					"secret_access_key": "somesecretkey",
					"bucket": "somebucket"
				}
			}
		}]
	}`, test_distro.TestDistroName, test_distro.TestArchName), http.StatusBadRequest, `
	{
		"href": "/api/composer/v2/errors/6",
		"id": "6",
		"kind": "Error",
		"code": "COMPOSER-6",
		"reason": "Unsupported image type"
	}`, "operation_id")

	// Returns 404, but should be 405; see https://github.com/labstack/echo/issues/1981
	// test.TestRoute(t, srv.Handler("/api/composer/v2"), false, "GET", "/api/composer/v2/compose", fmt.Sprintf(`
	// {
	//	"distribution": "%s",
	//	"image_requests":[{
	//		"architecture": "%s",
	//		"image_type": "%s",
	//		"repositories": [{
	//			"baseurl": "somerepo.org",
	//			"rhsm": false
	//		}],
	//		"upload_request": {
	//			"type": "aws.s3",
	//			"options": {
	//				"access_key_id": "somekey",
	//				"secret_access_key": "somesecretkey",
	//				"bucket": "somebucket"
	//			}
	//		}
	//	}]
	// }`, test_distro.TestDistroName, test_distro.TestArchName, test_distro.TestImageTypeName), http.StatusMethodNotAllowed, `
	// {
	//	"href": "/api/composer/v2/errors/22",
	//	"id": "22",
	//	"kind": "Error",
	//	"code": "COMPOSER-22",
	//	"reason": "Requested method isn't supported for resource"
	// }`, "operation_id")

	// A valid request is accepted; the "id" field is random, so it is
	// ignored in the comparison.
	test.TestRoute(t, srv.Handler("/api/composer/v2"), false, "POST", "/api/composer/v2/compose", fmt.Sprintf(`
	{
		"distribution": "%s",
		"image_requests":[{
			"architecture": "%s",
			"image_type": "%s",
			"repositories": [{
				"baseurl": "somerepo.org",
				"rhsm": false
			}],
			"upload_request": {
				"type": "aws.s3",
				"options": {
					"access_key_id": "somekey",
					"secret_access_key": "somesecretkey",
					"bucket": "somebucket"
				}
			}
		}]
	}`, test_distro.TestDistroName, test_distro.TestArchName, test_distro.TestImageTypeName), http.StatusCreated, `
	{
		"href": "/api/composer/v2/compose",
		"kind": "ComposeId"
	}`, "id")
}
|
||||
|
||||
// TestComposeStatusSuccess walks a compose through the happy path:
// enqueue via POST /compose, dequeue the job as a worker, observe
// "building", finish the job successfully, and observe "success". The
// final metadata request returns COMPOSER-1012 because the mocked result
// lacks the OSBuild output fields the metadata endpoint requires.
func TestComposeStatusSuccess(t *testing.T) {
	dir, err := ioutil.TempDir("", "osbuild-composer-test-api-v2-")
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	srv, wrksrv := newV2Server(t, dir)

	test.TestRoute(t, srv.Handler("/api/composer/v2"), false, "POST", "/api/composer/v2/compose", fmt.Sprintf(`
	{
		"distribution": "%s",
		"image_requests":[{
			"architecture": "%s",
			"image_type": "%s",
			"repositories": [{
				"baseurl": "somerepo.org",
				"rhsm": false
			}],
			"upload_request": {
				"type": "aws.s3",
				"options": {
					"access_key_id": "somekey",
					"secret_access_key": "somesecretkey",
					"bucket": "somebucket"
				}
			}
		}]
	}`, test_distro.TestDistroName, test_distro.TestArchName, test_distro.TestImageTypeName), http.StatusCreated, `
	{
		"href": "/api/composer/v2/compose",
		"kind": "ComposeId"
	}`, "id")

	// Act as the worker: dequeue the job just enqueued by the compose.
	jobId, token, jobType, _, _, err := wrksrv.RequestJob(context.Background(), test_distro.TestArchName, []string{"osbuild"})
	require.NoError(t, err)
	require.Equal(t, "osbuild", jobType)

	// Running but unfinished job reports "building".
	test.TestRoute(t, srv.Handler("/api/composer/v2"), false, "GET", fmt.Sprintf("/api/composer/v2/compose/%v", jobId), ``, http.StatusOK, fmt.Sprintf(`
	{
		"href": "/api/composer/v2/compose/%v",
		"kind": "ComposeStatus",
		"id": "%v",
		"image_status": {"status": "building"}
	}`, jobId, jobId))

	// todo make it an osbuildjobresult
	res, err := json.Marshal(&worker.OSBuildJobResult{
		Success: true,
	})
	require.NoError(t, err)

	// Finishing with a successful result flips the status to "success".
	err = wrksrv.FinishJob(token, res)
	require.NoError(t, err)
	test.TestRoute(t, srv.Handler("/api/composer/v2"), false, "GET", fmt.Sprintf("/api/composer/v2/compose/%v", jobId), ``, http.StatusOK, fmt.Sprintf(`
	{
		"href": "/api/composer/v2/compose/%v",
		"kind": "ComposeStatus",
		"id": "%v",
		"image_status": {"status": "success"}
	}`, jobId, jobId))

	// The fabricated result has no OSBuild output, so metadata extraction
	// reports a malformed job result (COMPOSER-1012).
	test.TestRoute(t, srv.Handler("/api/composer/v2"), false, "GET", fmt.Sprintf("/api/composer/v2/compose/%v/metadata", jobId), ``, http.StatusInternalServerError, `
	{
		"href": "/api/composer/v2/errors/1012",
		"id": "1012",
		"kind": "Error",
		"code": "COMPOSER-1012",
		"reason": "OSBuildJobResult does not have expected fields set"
	}`, "operation_id")

}
|
||||
|
||||
// TestComposeStatusFailure verifies that a compose whose worker job
// finishes without a result payload (FinishJob with nil) is reported as
// "failure" by the status endpoint.
func TestComposeStatusFailure(t *testing.T) {
	dir, err := ioutil.TempDir("", "osbuild-composer-test-api-v2-")
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	srv, wrksrv := newV2Server(t, dir)

	test.TestRoute(t, srv.Handler("/api/composer/v2"), false, "POST", "/api/composer/v2/compose", fmt.Sprintf(`
	{
		"distribution": "%s",
		"image_requests":[{
			"architecture": "%s",
			"image_type": "%s",
			"repositories": [{
				"baseurl": "somerepo.org",
				"rhsm": false
			}],
			"upload_request": {
				"type": "aws.s3",
				"options": {
					"access_key_id": "somekey",
					"secret_access_key": "somesecretkey",
					"bucket": "somebucket"
				}
			}
		}]
	}`, test_distro.TestDistroName, test_distro.TestArchName, test_distro.TestImageTypeName), http.StatusCreated, `
	{
		"href": "/api/composer/v2/compose",
		"kind": "ComposeId"
	}`, "id")

	// Act as the worker: dequeue the job just enqueued by the compose.
	jobId, token, jobType, _, _, err := wrksrv.RequestJob(context.Background(), test_distro.TestArchName, []string{"osbuild"})
	require.NoError(t, err)
	require.Equal(t, "osbuild", jobType)

	// Running but unfinished job reports "building".
	test.TestRoute(t, srv.Handler("/api/composer/v2"), false, "GET", fmt.Sprintf("/api/composer/v2/compose/%v", jobId), ``, http.StatusOK, fmt.Sprintf(`
	{
		"href": "/api/composer/v2/compose/%v",
		"kind": "ComposeStatus",
		"id": "%v",
		"image_status": {"status": "building"}
	}`, jobId, jobId))

	// Finishing without a result marks the compose as failed.
	err = wrksrv.FinishJob(token, nil)
	require.NoError(t, err)
	test.TestRoute(t, srv.Handler("/api/composer/v2"), false, "GET", fmt.Sprintf("/api/composer/v2/compose/%v", jobId), ``, http.StatusOK, fmt.Sprintf(`
	{
		"href": "/api/composer/v2/compose/%v",
		"kind": "ComposeStatus",
		"id": "%v",
		"image_status": {"status": "failure"}
	}`, jobId, jobId))
}
|
||||
|
||||
// TestComposeCustomizations verifies that a compose request carrying the
// full customizations block (subscription, packages, users) is accepted
// with a 201 ComposeId response.
func TestComposeCustomizations(t *testing.T) {
	dir, err := ioutil.TempDir("", "osbuild-composer-test-api-v2-")
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	srv, _ := newV2Server(t, dir)

	test.TestRoute(t, srv.Handler("/api/composer/v2"), false, "POST", "/api/composer/v2/compose", fmt.Sprintf(`
	{
		"distribution": "%s",
		"customizations": {
			"subscription": {
				"organization": "2040324",
				"activation_key": "my-secret-key",
				"server_url": "subscription.rhsm.redhat.com",
				"base_url": "http://cdn.redhat.com/",
				"insights": true
			},
			"packages": [ "pkg1", "pkg2" ],
			"users": [{
				"name": "user1",
				"groups": [ "wheel" ],
				"key": "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINrGKErMYi+MMUwuHaRAJmRLoIzRf2qD2dD5z0BTx/6x"
			}]
		},
		"image_requests":[{
			"architecture": "%s",
			"image_type": "%s",
			"repositories": [{
				"baseurl": "somerepo.org",
				"rhsm": false
			}],
			"upload_request": {
				"type": "aws.s3",
				"options": {
					"access_key_id": "somekey",
					"secret_access_key": "somesecretkey",
					"bucket": "somebucket"
				}
			}
		}]
	}`, test_distro.TestDistroName, test_distro.TestArchName, test_distro.TestImageTypeName), http.StatusCreated, `
	{
		"href": "/api/composer/v2/compose",
		"kind": "ComposeId"
	}`, "id")
}
|
||||
22
internal/common/operation_id.go
Normal file
22
internal/common/operation_id.go
Normal file
|
|
@ -0,0 +1,22 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"github.com/labstack/echo/v4"
|
||||
"github.com/segmentio/ksuid"
|
||||
)
|
||||
|
||||
const OperationIDKey string = "operationID"
|
||||
|
||||
// Adds a time-sortable globally unique identifier to an echo.Context if not already set
|
||||
func OperationIDMiddleware(next echo.HandlerFunc) echo.HandlerFunc {
|
||||
return func(c echo.Context) error {
|
||||
if c.Get(OperationIDKey) == nil {
|
||||
c.Set(OperationIDKey, GenerateOperationID())
|
||||
}
|
||||
return next(c)
|
||||
}
|
||||
}
|
||||
|
||||
func GenerateOperationID() string {
|
||||
return ksuid.New().String()
|
||||
}
|
||||
|
|
@ -58,6 +58,7 @@ BuildRequires: golang(github.com/google/go-cmp/cmp)
|
|||
BuildRequires: golang(github.com/gophercloud/gophercloud)
|
||||
BuildRequires: golang(github.com/prometheus/client_golang/prometheus/promhttp)
|
||||
BuildRequires: golang(github.com/openshift-online/ocm-sdk-go)
|
||||
BuildRequires: golang(github.com/segmentio/ksuid)
|
||||
BuildRequires: golang(github.com/stretchr/testify/assert)
|
||||
BuildRequires: golang(github.com/ubccr/kerby)
|
||||
BuildRequires: golang(github.com/vmware/govmomi)
|
||||
|
|
@ -107,7 +108,8 @@ Obsoletes: osbuild-composer-koji <= 23
|
|||
# generated code compatible by applying some sed magic.
|
||||
#
|
||||
# Remove when F33 is EOL
|
||||
sed -i "s/openapi3.Swagger/openapi3.T/;s/openapi3.NewSwaggerLoader().LoadSwaggerFromData/openapi3.NewLoader().LoadFromData/" internal/cloudapi/openapi.gen.go
|
||||
sed -i "s/openapi3.Swagger/openapi3.T/;s/openapi3.NewSwaggerLoader().LoadSwaggerFromData/openapi3.NewLoader().LoadFromData/" internal/cloudapi/v1/openapi.v1.gen.go
|
||||
sed -i "s/openapi3.Swagger/openapi3.T/;s/openapi3.NewSwaggerLoader().LoadSwaggerFromData/openapi3.NewLoader().LoadFromData/" internal/cloudapi/v2/openapi.v2.gen.go
|
||||
%endif
|
||||
|
||||
%build
|
||||
|
|
|
|||
|
|
@ -656,6 +656,7 @@ function sendCompose() {
|
|||
|
||||
function waitForState() {
|
||||
local DESIRED_STATE="${1:-success}"
|
||||
local VERSION="${2:-v1}"
|
||||
while true
|
||||
do
|
||||
OUTPUT=$(curl \
|
||||
|
|
@ -664,7 +665,7 @@ function waitForState() {
|
|||
--cacert /etc/osbuild-composer/ca-crt.pem \
|
||||
--key /etc/osbuild-composer/client-key.pem \
|
||||
--cert /etc/osbuild-composer/client-crt.pem \
|
||||
https://localhost/api/composer/v1/compose/"$COMPOSE_ID")
|
||||
https://localhost/api/composer/"$VERSION"/compose/"$COMPOSE_ID")
|
||||
|
||||
COMPOSE_STATUS=$(echo "$OUTPUT" | jq -r '.image_status.status')
|
||||
UPLOAD_STATUS=$(echo "$OUTPUT" | jq -r '.image_status.upload_status.status')
|
||||
|
|
@ -713,6 +714,8 @@ sudo systemctl start "osbuild-worker@1"
|
|||
INIT_COMPOSES="$(collectMetrics)"
|
||||
sendCompose
|
||||
waitForState
|
||||
# Same state with v2
|
||||
waitForState "success" "v2"
|
||||
SUBS_COMPOSES="$(collectMetrics)"
|
||||
|
||||
test "$UPLOAD_STATUS" = "success"
|
||||
|
|
@ -947,6 +950,20 @@ function verifyInAWSS3() {
|
|||
echo "Commit ID returned from API does not match Commit ID in archive 😠"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# v2 has the same result
|
||||
API_COMMIT_ID_V2=$(curl \
|
||||
--silent \
|
||||
--show-error \
|
||||
--cacert /etc/osbuild-composer/ca-crt.pem \
|
||||
--key /etc/osbuild-composer/client-key.pem \
|
||||
--cert /etc/osbuild-composer/client-crt.pem \
|
||||
https://localhost/api/composer/v2/compose/"$COMPOSE_ID"/metadata | jq -r '.ostree_commit')
|
||||
|
||||
if [[ "${API_COMMIT_ID_V2}" != "${TAR_COMMIT_ID}" ]]; then
|
||||
echo "Commit ID returned from API v2 does not match Commit ID in archive 😠"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Verify image in Compute Engine on GCP
|
||||
|
|
|
|||
31
vendor/github.com/segmentio/ksuid/.gitignore
generated
vendored
Normal file
31
vendor/github.com/segmentio/ksuid/.gitignore
generated
vendored
Normal file
|
|
@ -0,0 +1,31 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
||||
/ksuid
|
||||
|
||||
# Emacs
|
||||
*~
|
||||
|
||||
# govendor
|
||||
/vendor/*/
|
||||
21
vendor/github.com/segmentio/ksuid/LICENSE.md
generated
vendored
Normal file
21
vendor/github.com/segmentio/ksuid/LICENSE.md
generated
vendored
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2017 Segment.io
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
234
vendor/github.com/segmentio/ksuid/README.md
generated
vendored
Normal file
234
vendor/github.com/segmentio/ksuid/README.md
generated
vendored
Normal file
|
|
@ -0,0 +1,234 @@
|
|||
# ksuid [](https://goreportcard.com/report/github.com/segmentio/ksuid) [](https://godoc.org/github.com/segmentio/ksuid) [](https://circleci.com/gh/segmentio/ksuid.svg?style=shield)
|
||||
|
||||
ksuid is an efficient, comprehensive, battle-tested Go library for
|
||||
generating and parsing a specific kind of globally unique identifier
|
||||
called a *KSUID*. This library serves as its reference implementation.
|
||||
|
||||
## Install
|
||||
```sh
|
||||
go get -u github.com/segmentio/ksuid
|
||||
```
|
||||
|
||||
## What is a KSUID?
|
||||
|
||||
KSUID is for K-Sortable Unique IDentifier. It is a kind of globally
|
||||
unique identifier similar to a [RFC 4122 UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier), built from the ground-up to be "naturally"
|
||||
sorted by generation timestamp without any special type-aware logic.
|
||||
|
||||
In short, running a set of KSUIDs through the UNIX `sort` command will result
|
||||
in a list ordered by generation time.
|
||||
|
||||
## Why use KSUIDs?
|
||||
|
||||
There are numerous methods for generating unique identifiers, so why KSUID?
|
||||
|
||||
1. Naturally ordered by generation time
|
||||
2. Collision-free, coordination-free, dependency-free
|
||||
3. Highly portable representations
|
||||
|
||||
Even if only one of these properties are important to you, KSUID is a great
|
||||
choice! :) Many projects chose to use KSUIDs *just* because the text
|
||||
representation is copy-and-paste friendly.
|
||||
|
||||
### 1. Naturally Ordered By Generation Time
|
||||
|
||||
Unlike the more ubiquitous UUIDv4, a KSUID contains a timestamp component
|
||||
that allows them to be loosely sorted by generation time. This is not a strong
|
||||
guarantee (an invariant) as it depends on wall clocks, but is still incredibly
|
||||
useful in practice. Both the binary and text representations will sort by
|
||||
creation time without any special sorting logic.
|
||||
|
||||
### 2. Collision-free, Coordination-free, Dependency-free
|
||||
|
||||
While RFC 4122 UUIDv1s *do* include a time component, there aren't enough
|
||||
bytes of randomness to provide strong protection against collisions
|
||||
(duplicates). With such a low amount of entropy, it is feasible for a
|
||||
malicious party to guess generated IDs, creating a problem for systems whose
|
||||
security is, implicitly or explicitly, sensitive to an adversary guessing
|
||||
identifiers.
|
||||
|
||||
To fit into a 64-bit number space, [Snowflake IDs](https://blog.twitter.com/2010/announcing-snowflake)
|
||||
and its derivatives require coordination to avoid collisions, which
|
||||
significantly increases the deployment complexity and operational burden.
|
||||
|
||||
A KSUID includes 128 bits of pseudorandom data ("entropy"). This number space
|
||||
is 64 times larger than the 122 bits used by the well-accepted RFC 4122 UUIDv4
|
||||
standard. The additional timestamp component can be considered "bonus entropy"
|
||||
which further decreases the probability of collisions, to the point of physical
|
||||
infeasibility in any practical implementation.
|
||||
|
||||
### Highly Portable Representations
|
||||
|
||||
The text *and* binary representations are lexicographically sortable, which
|
||||
allows them to be dropped into systems which do not natively support KSUIDs
|
||||
and retain their time-ordered property.
|
||||
|
||||
The text representation is an alphanumeric base62 encoding, so it "fits"
|
||||
anywhere alphanumeric strings are accepted. No delimiters are used, so
|
||||
stringified KSUIDs won't be inadvertently truncated or tokenized when
|
||||
interpreted by software that is designed for human-readable text, a common
|
||||
problem for the text representation of RFC 4122 UUIDs.
|
||||
|
||||
## How do KSUIDs work?
|
||||
|
||||
Binary KSUIDs are 20-bytes: a 32-bit unsigned integer UTC timestamp and
|
||||
a 128-bit randomly generated payload. The timestamp uses big-endian
|
||||
encoding, to support lexicographic sorting. The timestamp epoch is adjusted
|
||||
to March 5th, 2014, providing over 100 years of life. The payload is
|
||||
generated by a cryptographically-strong pseudorandom number generator.
|
||||
|
||||
The text representation is always 27 characters, encoded in alphanumeric
|
||||
base62 that will lexicographically sort by timestamp.
|
||||
|
||||
## High Performance
|
||||
|
||||
This library is designed to be used in code paths that are performance
|
||||
critical. Its code has been tuned to eliminate all non-essential
|
||||
overhead. The `KSUID` type is derived from a fixed-size array, which
|
||||
eliminates the additional reference chasing and allocation involved in
|
||||
a variable-width type.
|
||||
|
||||
The API provides an interface for use in code paths which are sensitive
|
||||
to allocation. For example, the `Append` method can be used to parse the
|
||||
text representation and replace the contents of a `KSUID` value
|
||||
without additional heap allocation.
|
||||
|
||||
All public package level "pure" functions are concurrency-safe, protected
|
||||
by a global mutex. For hot loops that generate a large amount of KSUIDs
|
||||
from a single Goroutine, the `Sequence` type is provided to elide the
|
||||
potential contention.
|
||||
|
||||
By default, out of an abundance of caution, the cryptographically-secure
|
||||
PRNG is used to generate the random bits of a KSUID. This can be relaxed
|
||||
in extremely performance-critical code using the included `FastRander`
|
||||
type. `FastRander` uses the standard PRNG with a seed generated by the
|
||||
cryptographically-secure PRNG.
|
||||
|
||||
*_NOTE:_ While there is no evidence that `FastRander` will increase the
|
||||
probability of a collision, it shouldn't be used in scenarios where
|
||||
uniqueness is important to security, as there is an increased chance
|
||||
the generated IDs can be predicted by an adversary.*
|
||||
|
||||
## Battle Tested
|
||||
|
||||
This code has been used in production at Segment for several years,
|
||||
across a diverse array of projects. Trillions upon trillions of
|
||||
KSUIDs have been generated in some of Segment's most
|
||||
performance-critical, large-scale distributed systems.
|
||||
|
||||
## Plays Well With Others
|
||||
|
||||
Designed to be integrated with other libraries, the `KSUID` type
|
||||
implements many standard library interfaces, including:
|
||||
|
||||
* `Stringer`
|
||||
* `database/sql.Scanner` and `database/sql/driver.Valuer`
|
||||
* `encoding.BinaryMarshal` and `encoding.BinaryUnmarshal`
|
||||
* `encoding.TextMarshal` and `encoding.TextUnmarshal`
|
||||
(`encoding/json` friendly!)
|
||||
|
||||
## Command Line Tool
|
||||
|
||||
This package comes with a command-line tool `ksuid`, useful for
|
||||
generating KSUIDs as well as inspecting the internal components of
|
||||
existing KSUIDs. Machine-friendly output is provided for scripting
|
||||
use cases.
|
||||
|
||||
Given a Go build environment, it can be installed with the command:
|
||||
|
||||
```sh
|
||||
$ go install github.com/segmentio/ksuid/cmd/ksuid
|
||||
```
|
||||
|
||||
## CLI Usage Examples
|
||||
|
||||
### Generate a KSUID
|
||||
|
||||
```sh
|
||||
$ ksuid
|
||||
0ujsswThIGTUYm2K8FjOOfXtY1K
|
||||
```
|
||||
|
||||
### Generate 4 KSUIDs
|
||||
|
||||
```sh
|
||||
$ ksuid -n 4
|
||||
0ujsszwN8NRY24YaXiTIE2VWDTS
|
||||
0ujsswThIGTUYm2K8FjOOfXtY1K
|
||||
0ujssxh0cECutqzMgbtXSGnjorm
|
||||
0ujsszgFvbiEr7CDgE3z8MAUPFt
|
||||
```
|
||||
|
||||
### Inspect the components of a KSUID
|
||||
|
||||
```sh
|
||||
$ ksuid -f inspect 0ujtsYcgvSTl8PAuAdqWYSMnLOv
|
||||
|
||||
REPRESENTATION:
|
||||
|
||||
String: 0ujtsYcgvSTl8PAuAdqWYSMnLOv
|
||||
Raw: 0669F7EFB5A1CD34B5F99D1154FB6853345C9735
|
||||
|
||||
COMPONENTS:
|
||||
|
||||
Time: 2017-10-09 21:00:47 -0700 PDT
|
||||
Timestamp: 107608047
|
||||
Payload: B5A1CD34B5F99D1154FB6853345C9735
|
||||
```
|
||||
|
||||
### Generate a KSUID and inspect its components
|
||||
|
||||
```sh
|
||||
$ ksuid -f inspect
|
||||
|
||||
REPRESENTATION:
|
||||
|
||||
String: 0ujzPyRiIAffKhBux4PvQdDqMHY
|
||||
Raw: 066A029C73FC1AA3B2446246D6E89FCD909E8FE8
|
||||
|
||||
COMPONENTS:
|
||||
|
||||
Time: 2017-10-09 21:46:20 -0700 PDT
|
||||
Timestamp: 107610780
|
||||
Payload: 73FC1AA3B2446246D6E89FCD909E8FE8
|
||||
|
||||
```
|
||||
|
||||
### Inspect a KSUID with template formatted inspection output
|
||||
|
||||
```sh
|
||||
$ ksuid -f template -t '{{ .Time }}: {{ .Payload }}' 0ujtsYcgvSTl8PAuAdqWYSMnLOv
|
||||
2017-10-09 21:00:47 -0700 PDT: B5A1CD34B5F99D1154FB6853345C9735
|
||||
```
|
||||
|
||||
### Inspect multiple KSUIDs with template formatted output
|
||||
|
||||
```sh
|
||||
$ ksuid -f template -t '{{ .Time }}: {{ .Payload }}' $(ksuid -n 4)
|
||||
2017-10-09 21:05:37 -0700 PDT: 304102BC687E087CC3A811F21D113CCF
|
||||
2017-10-09 21:05:37 -0700 PDT: EAF0B240A9BFA55E079D887120D962F0
|
||||
2017-10-09 21:05:37 -0700 PDT: DF0761769909ABB0C7BB9D66F79FC041
|
||||
2017-10-09 21:05:37 -0700 PDT: 1A8F0E3D0BDEB84A5FAD702876F46543
|
||||
```
|
||||
|
||||
### Generate KSUIDs and output JSON using template formatting
|
||||
|
||||
```sh
|
||||
$ ksuid -f template -t '{ "timestamp": "{{ .Timestamp }}", "payload": "{{ .Payload }}", "ksuid": "{{.String}}"}' -n 4
|
||||
{ "timestamp": "107611700", "payload": "9850EEEC191BF4FF26F99315CE43B0C8", "ksuid": "0uk1Hbc9dQ9pxyTqJ93IUrfhdGq"}
|
||||
{ "timestamp": "107611700", "payload": "CC55072555316F45B8CA2D2979D3ED0A", "ksuid": "0uk1HdCJ6hUZKDgcxhpJwUl5ZEI"}
|
||||
{ "timestamp": "107611700", "payload": "BA1C205D6177F0992D15EE606AE32238", "ksuid": "0uk1HcdvF0p8C20KtTfdRSB9XIm"}
|
||||
{ "timestamp": "107611700", "payload": "67517BA309EA62AE7991B27BB6F2FCAC", "ksuid": "0uk1Ha7hGJ1Q9Xbnkt0yZgNwg3g"}
|
||||
```
|
||||
|
||||
## Implementations for other languages
|
||||
|
||||
- Python: [svix-ksuid](https://github.com/svixhq/python-ksuid/)
|
||||
- Ruby: [ksuid-ruby](https://github.com/michaelherold/ksuid-ruby)
|
||||
- Java: [ksuid](https://github.com/ksuid/ksuid)
|
||||
- Rust: [rksuid](https://github.com/nharring/rksuid)
|
||||
- dotNet: [Ksuid.Net](https://github.com/JoyMoe/Ksuid.Net)
|
||||
|
||||
## License
|
||||
|
||||
ksuid source code is available under an MIT [License](/LICENSE.md).
|
||||
202
vendor/github.com/segmentio/ksuid/base62.go
generated
vendored
Normal file
202
vendor/github.com/segmentio/ksuid/base62.go
generated
vendored
Normal file
|
|
@ -0,0 +1,202 @@
|
|||
package ksuid
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
)
|
||||
|
||||
const (
|
||||
// lexographic ordering (based on Unicode table) is 0-9A-Za-z
|
||||
base62Characters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
|
||||
zeroString = "000000000000000000000000000"
|
||||
offsetUppercase = 10
|
||||
offsetLowercase = 36
|
||||
)
|
||||
|
||||
var (
|
||||
errShortBuffer = errors.New("the output buffer is too small to hold to decoded value")
|
||||
)
|
||||
|
||||
// Converts a base 62 byte into the number value that it represents.
|
||||
func base62Value(digit byte) byte {
|
||||
switch {
|
||||
case digit >= '0' && digit <= '9':
|
||||
return digit - '0'
|
||||
case digit >= 'A' && digit <= 'Z':
|
||||
return offsetUppercase + (digit - 'A')
|
||||
default:
|
||||
return offsetLowercase + (digit - 'a')
|
||||
}
|
||||
}
|
||||
|
||||
// This function encodes the base 62 representation of the src KSUID in binary
|
||||
// form into dst.
|
||||
//
|
||||
// In order to support a couple of optimizations the function assumes that src
|
||||
// is 20 bytes long and dst is 27 bytes long.
|
||||
//
|
||||
// Any unused bytes in dst will be set to the padding '0' byte.
|
||||
func fastEncodeBase62(dst []byte, src []byte) {
|
||||
const srcBase = 4294967296
|
||||
const dstBase = 62
|
||||
|
||||
// Split src into 5 4-byte words, this is where most of the efficiency comes
|
||||
// from because this is a O(N^2) algorithm, and we make N = N / 4 by working
|
||||
// on 32 bits at a time.
|
||||
parts := [5]uint32{
|
||||
binary.BigEndian.Uint32(src[0:4]),
|
||||
binary.BigEndian.Uint32(src[4:8]),
|
||||
binary.BigEndian.Uint32(src[8:12]),
|
||||
binary.BigEndian.Uint32(src[12:16]),
|
||||
binary.BigEndian.Uint32(src[16:20]),
|
||||
}
|
||||
|
||||
n := len(dst)
|
||||
bp := parts[:]
|
||||
bq := [5]uint32{}
|
||||
|
||||
for len(bp) != 0 {
|
||||
quotient := bq[:0]
|
||||
remainder := uint64(0)
|
||||
|
||||
for _, c := range bp {
|
||||
value := uint64(c) + uint64(remainder)*srcBase
|
||||
digit := value / dstBase
|
||||
remainder = value % dstBase
|
||||
|
||||
if len(quotient) != 0 || digit != 0 {
|
||||
quotient = append(quotient, uint32(digit))
|
||||
}
|
||||
}
|
||||
|
||||
// Writes at the end of the destination buffer because we computed the
|
||||
// lowest bits first.
|
||||
n--
|
||||
dst[n] = base62Characters[remainder]
|
||||
bp = quotient
|
||||
}
|
||||
|
||||
// Add padding at the head of the destination buffer for all bytes that were
|
||||
// not set.
|
||||
copy(dst[:n], zeroString)
|
||||
}
|
||||
|
||||
// This function appends the base 62 representation of the KSUID in src to dst,
|
||||
// and returns the extended byte slice.
|
||||
// The result is left-padded with '0' bytes to always append 27 bytes to the
|
||||
// destination buffer.
|
||||
func fastAppendEncodeBase62(dst []byte, src []byte) []byte {
|
||||
dst = reserve(dst, stringEncodedLength)
|
||||
n := len(dst)
|
||||
fastEncodeBase62(dst[n:n+stringEncodedLength], src)
|
||||
return dst[:n+stringEncodedLength]
|
||||
}
|
||||
|
||||
// This function decodes the base 62 representation of the src KSUID to the
|
||||
// binary form into dst.
|
||||
//
|
||||
// In order to support a couple of optimizations the function assumes that src
|
||||
// is 27 bytes long and dst is 20 bytes long.
|
||||
//
|
||||
// Any unused bytes in dst will be set to zero.
|
||||
func fastDecodeBase62(dst []byte, src []byte) error {
|
||||
const srcBase = 62
|
||||
const dstBase = 4294967296
|
||||
|
||||
// This line helps BCE (Bounds Check Elimination).
|
||||
// It may be safely removed.
|
||||
_ = src[26]
|
||||
|
||||
parts := [27]byte{
|
||||
base62Value(src[0]),
|
||||
base62Value(src[1]),
|
||||
base62Value(src[2]),
|
||||
base62Value(src[3]),
|
||||
base62Value(src[4]),
|
||||
base62Value(src[5]),
|
||||
base62Value(src[6]),
|
||||
base62Value(src[7]),
|
||||
base62Value(src[8]),
|
||||
base62Value(src[9]),
|
||||
|
||||
base62Value(src[10]),
|
||||
base62Value(src[11]),
|
||||
base62Value(src[12]),
|
||||
base62Value(src[13]),
|
||||
base62Value(src[14]),
|
||||
base62Value(src[15]),
|
||||
base62Value(src[16]),
|
||||
base62Value(src[17]),
|
||||
base62Value(src[18]),
|
||||
base62Value(src[19]),
|
||||
|
||||
base62Value(src[20]),
|
||||
base62Value(src[21]),
|
||||
base62Value(src[22]),
|
||||
base62Value(src[23]),
|
||||
base62Value(src[24]),
|
||||
base62Value(src[25]),
|
||||
base62Value(src[26]),
|
||||
}
|
||||
|
||||
n := len(dst)
|
||||
bp := parts[:]
|
||||
bq := [stringEncodedLength]byte{}
|
||||
|
||||
for len(bp) > 0 {
|
||||
quotient := bq[:0]
|
||||
remainder := uint64(0)
|
||||
|
||||
for _, c := range bp {
|
||||
value := uint64(c) + uint64(remainder)*srcBase
|
||||
digit := value / dstBase
|
||||
remainder = value % dstBase
|
||||
|
||||
if len(quotient) != 0 || digit != 0 {
|
||||
quotient = append(quotient, byte(digit))
|
||||
}
|
||||
}
|
||||
|
||||
if n < 4 {
|
||||
return errShortBuffer
|
||||
}
|
||||
|
||||
dst[n-4] = byte(remainder >> 24)
|
||||
dst[n-3] = byte(remainder >> 16)
|
||||
dst[n-2] = byte(remainder >> 8)
|
||||
dst[n-1] = byte(remainder)
|
||||
n -= 4
|
||||
bp = quotient
|
||||
}
|
||||
|
||||
var zero [20]byte
|
||||
copy(dst[:n], zero[:])
|
||||
return nil
|
||||
}
|
||||
|
||||
// This function appends the base 62 decoded version of src into dst.
|
||||
func fastAppendDecodeBase62(dst []byte, src []byte) []byte {
|
||||
dst = reserve(dst, byteLength)
|
||||
n := len(dst)
|
||||
fastDecodeBase62(dst[n:n+byteLength], src)
|
||||
return dst[:n+byteLength]
|
||||
}
|
||||
|
||||
// Ensures that at least nbytes are available in the remaining capacity of the
|
||||
// destination slice, if not, a new copy is made and returned by the function.
|
||||
func reserve(dst []byte, nbytes int) []byte {
|
||||
c := cap(dst)
|
||||
n := len(dst)
|
||||
|
||||
if avail := c - n; avail < nbytes {
|
||||
c *= 2
|
||||
if (c - n) < nbytes {
|
||||
c = n + nbytes
|
||||
}
|
||||
b := make([]byte, n, c)
|
||||
copy(b, dst)
|
||||
dst = b
|
||||
}
|
||||
|
||||
return dst
|
||||
}
|
||||
3
vendor/github.com/segmentio/ksuid/go.mod
generated
vendored
Normal file
3
vendor/github.com/segmentio/ksuid/go.mod
generated
vendored
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
module github.com/segmentio/ksuid
|
||||
|
||||
go 1.12
|
||||
352
vendor/github.com/segmentio/ksuid/ksuid.go
generated
vendored
Normal file
352
vendor/github.com/segmentio/ksuid/ksuid.go
generated
vendored
Normal file
|
|
@ -0,0 +1,352 @@
|
|||
package ksuid
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"database/sql/driver"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// KSUID's epoch starts more recently so that the 32-bit number space gives a
|
||||
// significantly higher useful lifetime of around 136 years from March 2017.
|
||||
// This number (14e8) was picked to be easy to remember.
|
||||
epochStamp int64 = 1400000000
|
||||
|
||||
// Timestamp is a uint32
|
||||
timestampLengthInBytes = 4
|
||||
|
||||
// Payload is 16-bytes
|
||||
payloadLengthInBytes = 16
|
||||
|
||||
// KSUIDs are 20 bytes when binary encoded
|
||||
byteLength = timestampLengthInBytes + payloadLengthInBytes
|
||||
|
||||
// The length of a KSUID when string (base62) encoded
|
||||
stringEncodedLength = 27
|
||||
|
||||
// A string-encoded minimum value for a KSUID
|
||||
minStringEncoded = "000000000000000000000000000"
|
||||
|
||||
// A string-encoded maximum value for a KSUID
|
||||
maxStringEncoded = "aWgEPTl1tmebfsQzFP4bxwgy80V"
|
||||
)
|
||||
|
||||
// KSUIDs are 20 bytes:
|
||||
// 00-03 byte: uint32 BE UTC timestamp with custom epoch
|
||||
// 04-19 byte: random "payload"
|
||||
type KSUID [byteLength]byte
|
||||
|
||||
var (
|
||||
rander = rand.Reader
|
||||
randMutex = sync.Mutex{}
|
||||
randBuffer = [payloadLengthInBytes]byte{}
|
||||
|
||||
errSize = fmt.Errorf("Valid KSUIDs are %v bytes", byteLength)
|
||||
errStrSize = fmt.Errorf("Valid encoded KSUIDs are %v characters", stringEncodedLength)
|
||||
errStrValue = fmt.Errorf("Valid encoded KSUIDs are bounded by %s and %s", minStringEncoded, maxStringEncoded)
|
||||
errPayloadSize = fmt.Errorf("Valid KSUID payloads are %v bytes", payloadLengthInBytes)
|
||||
|
||||
// Represents a completely empty (invalid) KSUID
|
||||
Nil KSUID
|
||||
// Represents the highest value a KSUID can have
|
||||
Max = KSUID{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}
|
||||
)
|
||||
|
||||
// Append appends the string representation of i to b, returning a slice to a
|
||||
// potentially larger memory area.
|
||||
func (i KSUID) Append(b []byte) []byte {
|
||||
return fastAppendEncodeBase62(b, i[:])
|
||||
}
|
||||
|
||||
// The timestamp portion of the ID as a Time object
|
||||
func (i KSUID) Time() time.Time {
|
||||
return correctedUTCTimestampToTime(i.Timestamp())
|
||||
}
|
||||
|
||||
// The timestamp portion of the ID as a bare integer which is uncorrected
|
||||
// for KSUID's special epoch.
|
||||
func (i KSUID) Timestamp() uint32 {
|
||||
return binary.BigEndian.Uint32(i[:timestampLengthInBytes])
|
||||
}
|
||||
|
||||
// The 16-byte random payload without the timestamp
|
||||
func (i KSUID) Payload() []byte {
|
||||
return i[timestampLengthInBytes:]
|
||||
}
|
||||
|
||||
// String-encoded representation that can be passed through Parse()
|
||||
func (i KSUID) String() string {
|
||||
return string(i.Append(make([]byte, 0, stringEncodedLength)))
|
||||
}
|
||||
|
||||
// Raw byte representation of KSUID
|
||||
func (i KSUID) Bytes() []byte {
|
||||
// Safe because this is by-value
|
||||
return i[:]
|
||||
}
|
||||
|
||||
// IsNil returns true if this is a "nil" KSUID
|
||||
func (i KSUID) IsNil() bool {
|
||||
return i == Nil
|
||||
}
|
||||
|
||||
// Get satisfies the flag.Getter interface, making it possible to use KSUIDs as
|
||||
// part of of the command line options of a program.
|
||||
func (i KSUID) Get() interface{} {
|
||||
return i
|
||||
}
|
||||
|
||||
// Set satisfies the flag.Value interface, making it possible to use KSUIDs as
|
||||
// part of of the command line options of a program.
|
||||
func (i *KSUID) Set(s string) error {
|
||||
return i.UnmarshalText([]byte(s))
|
||||
}
|
||||
|
||||
func (i KSUID) MarshalText() ([]byte, error) {
|
||||
return []byte(i.String()), nil
|
||||
}
|
||||
|
||||
func (i KSUID) MarshalBinary() ([]byte, error) {
|
||||
return i.Bytes(), nil
|
||||
}
|
||||
|
||||
func (i *KSUID) UnmarshalText(b []byte) error {
|
||||
id, err := Parse(string(b))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*i = id
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *KSUID) UnmarshalBinary(b []byte) error {
|
||||
id, err := FromBytes(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*i = id
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value converts the KSUID into a SQL driver value which can be used to
|
||||
// directly use the KSUID as parameter to a SQL query.
|
||||
func (i KSUID) Value() (driver.Value, error) {
|
||||
if i.IsNil() {
|
||||
return nil, nil
|
||||
}
|
||||
return i.String(), nil
|
||||
}
|
||||
|
||||
// Scan implements the sql.Scanner interface. It supports converting from
|
||||
// string, []byte, or nil into a KSUID value. Attempting to convert from
|
||||
// another type will return an error.
|
||||
func (i *KSUID) Scan(src interface{}) error {
|
||||
switch v := src.(type) {
|
||||
case nil:
|
||||
return i.scan(nil)
|
||||
case []byte:
|
||||
return i.scan(v)
|
||||
case string:
|
||||
return i.scan([]byte(v))
|
||||
default:
|
||||
return fmt.Errorf("Scan: unable to scan type %T into KSUID", v)
|
||||
}
|
||||
}
|
||||
|
||||
func (i *KSUID) scan(b []byte) error {
|
||||
switch len(b) {
|
||||
case 0:
|
||||
*i = Nil
|
||||
return nil
|
||||
case byteLength:
|
||||
return i.UnmarshalBinary(b)
|
||||
case stringEncodedLength:
|
||||
return i.UnmarshalText(b)
|
||||
default:
|
||||
return errSize
|
||||
}
|
||||
}
|
||||
|
||||
// Parse decodes a string-encoded representation of a KSUID object
|
||||
func Parse(s string) (KSUID, error) {
|
||||
if len(s) != stringEncodedLength {
|
||||
return Nil, errStrSize
|
||||
}
|
||||
|
||||
src := [stringEncodedLength]byte{}
|
||||
dst := [byteLength]byte{}
|
||||
|
||||
copy(src[:], s[:])
|
||||
|
||||
if err := fastDecodeBase62(dst[:], src[:]); err != nil {
|
||||
return Nil, errStrValue
|
||||
}
|
||||
|
||||
return FromBytes(dst[:])
|
||||
}
|
||||
|
||||
func timeToCorrectedUTCTimestamp(t time.Time) uint32 {
|
||||
return uint32(t.Unix() - epochStamp)
|
||||
}
|
||||
|
||||
func correctedUTCTimestampToTime(ts uint32) time.Time {
|
||||
return time.Unix(int64(ts)+epochStamp, 0)
|
||||
}
|
||||
|
||||
// Generates a new KSUID. In the strange case that random bytes
|
||||
// can't be read, it will panic.
|
||||
func New() KSUID {
|
||||
ksuid, err := NewRandom()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Couldn't generate KSUID, inconceivable! error: %v", err))
|
||||
}
|
||||
return ksuid
|
||||
}
|
||||
|
||||
// Generates a new KSUID
|
||||
func NewRandom() (ksuid KSUID, err error) {
|
||||
return NewRandomWithTime(time.Now())
|
||||
}
|
||||
|
||||
func NewRandomWithTime(t time.Time) (ksuid KSUID, err error) {
|
||||
// Go's default random number generators are not safe for concurrent use by
|
||||
// multiple goroutines, the use of the rander and randBuffer are explicitly
|
||||
// synchronized here.
|
||||
randMutex.Lock()
|
||||
|
||||
_, err = io.ReadAtLeast(rander, randBuffer[:], len(randBuffer))
|
||||
copy(ksuid[timestampLengthInBytes:], randBuffer[:])
|
||||
|
||||
randMutex.Unlock()
|
||||
|
||||
if err != nil {
|
||||
ksuid = Nil // don't leak random bytes on error
|
||||
return
|
||||
}
|
||||
|
||||
ts := timeToCorrectedUTCTimestamp(t)
|
||||
binary.BigEndian.PutUint32(ksuid[:timestampLengthInBytes], ts)
|
||||
return
|
||||
}
|
||||
|
||||
// Constructs a KSUID from constituent parts
|
||||
func FromParts(t time.Time, payload []byte) (KSUID, error) {
|
||||
if len(payload) != payloadLengthInBytes {
|
||||
return Nil, errPayloadSize
|
||||
}
|
||||
|
||||
var ksuid KSUID
|
||||
|
||||
ts := timeToCorrectedUTCTimestamp(t)
|
||||
binary.BigEndian.PutUint32(ksuid[:timestampLengthInBytes], ts)
|
||||
|
||||
copy(ksuid[timestampLengthInBytes:], payload)
|
||||
|
||||
return ksuid, nil
|
||||
}
|
||||
|
||||
// Constructs a KSUID from a 20-byte binary representation
|
||||
func FromBytes(b []byte) (KSUID, error) {
|
||||
var ksuid KSUID
|
||||
|
||||
if len(b) != byteLength {
|
||||
return Nil, errSize
|
||||
}
|
||||
|
||||
copy(ksuid[:], b)
|
||||
return ksuid, nil
|
||||
}
|
||||
|
||||
// Sets the global source of random bytes for KSUID generation. This
|
||||
// should probably only be set once globally. While this is technically
|
||||
// thread-safe as in it won't cause corruption, there's no guarantee
|
||||
// on ordering.
|
||||
func SetRand(r io.Reader) {
|
||||
if r == nil {
|
||||
rander = rand.Reader
|
||||
return
|
||||
}
|
||||
rander = r
|
||||
}
|
||||
|
||||
// Implements comparison for KSUID type
|
||||
func Compare(a, b KSUID) int {
|
||||
return bytes.Compare(a[:], b[:])
|
||||
}
|
||||
|
||||
// Sorts the given slice of KSUIDs
|
||||
func Sort(ids []KSUID) {
|
||||
quickSort(ids, 0, len(ids)-1)
|
||||
}
|
||||
|
||||
// IsSorted checks whether a slice of KSUIDs is sorted
|
||||
func IsSorted(ids []KSUID) bool {
|
||||
if len(ids) != 0 {
|
||||
min := ids[0]
|
||||
for _, id := range ids[1:] {
|
||||
if bytes.Compare(min[:], id[:]) > 0 {
|
||||
return false
|
||||
}
|
||||
min = id
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func quickSort(a []KSUID, lo int, hi int) {
|
||||
if lo < hi {
|
||||
pivot := a[hi]
|
||||
i := lo - 1
|
||||
|
||||
for j, n := lo, hi; j != n; j++ {
|
||||
if bytes.Compare(a[j][:], pivot[:]) < 0 {
|
||||
i++
|
||||
a[i], a[j] = a[j], a[i]
|
||||
}
|
||||
}
|
||||
|
||||
i++
|
||||
if bytes.Compare(a[hi][:], a[i][:]) < 0 {
|
||||
a[i], a[hi] = a[hi], a[i]
|
||||
}
|
||||
|
||||
quickSort(a, lo, i-1)
|
||||
quickSort(a, i+1, hi)
|
||||
}
|
||||
}
|
||||
|
||||
// Next returns the next KSUID after id.
|
||||
func (id KSUID) Next() KSUID {
|
||||
zero := makeUint128(0, 0)
|
||||
|
||||
t := id.Timestamp()
|
||||
u := uint128Payload(id)
|
||||
v := add128(u, makeUint128(0, 1))
|
||||
|
||||
if v == zero { // overflow
|
||||
t++
|
||||
}
|
||||
|
||||
return v.ksuid(t)
|
||||
}
|
||||
|
||||
// Prev returns the previoud KSUID before id.
|
||||
func (id KSUID) Prev() KSUID {
|
||||
max := makeUint128(math.MaxUint64, math.MaxUint64)
|
||||
|
||||
t := id.Timestamp()
|
||||
u := uint128Payload(id)
|
||||
v := sub128(u, makeUint128(0, 1))
|
||||
|
||||
if v == max { // overflow
|
||||
t--
|
||||
}
|
||||
|
||||
return v.ksuid(t)
|
||||
}
|
||||
55
vendor/github.com/segmentio/ksuid/rand.go
generated
vendored
Normal file
55
vendor/github.com/segmentio/ksuid/rand.go
generated
vendored
Normal file
|
|
@ -0,0 +1,55 @@
|
|||
package ksuid
|
||||
|
||||
import (
|
||||
cryptoRand "crypto/rand"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"math/rand"
|
||||
)
|
||||
|
||||
// FastRander is an io.Reader that uses math/rand and is optimized for
|
||||
// generating 16 bytes KSUID payloads. It is intended to be used as a
|
||||
// performance improvements for programs that have no need for
|
||||
// cryptographically secure KSUIDs and are generating a lot of them.
|
||||
var FastRander = newRBG()
|
||||
|
||||
func newRBG() io.Reader {
|
||||
r, err := newRandomBitsGenerator()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func newRandomBitsGenerator() (r io.Reader, err error) {
|
||||
var seed int64
|
||||
|
||||
if seed, err = readCryptoRandomSeed(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
r = &randSourceReader{source: rand.NewSource(seed).(rand.Source64)}
|
||||
return
|
||||
}
|
||||
|
||||
func readCryptoRandomSeed() (seed int64, err error) {
|
||||
var b [8]byte
|
||||
|
||||
if _, err = io.ReadFull(cryptoRand.Reader, b[:]); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
seed = int64(binary.LittleEndian.Uint64(b[:]))
|
||||
return
|
||||
}
|
||||
|
||||
type randSourceReader struct {
|
||||
source rand.Source64
|
||||
}
|
||||
|
||||
func (r *randSourceReader) Read(b []byte) (int, error) {
|
||||
// optimized for generating 16 bytes payloads
|
||||
binary.LittleEndian.PutUint64(b[:8], r.source.Uint64())
|
||||
binary.LittleEndian.PutUint64(b[8:], r.source.Uint64())
|
||||
return 16, nil
|
||||
}
|
||||
55
vendor/github.com/segmentio/ksuid/sequence.go
generated
vendored
Normal file
55
vendor/github.com/segmentio/ksuid/sequence.go
generated
vendored
Normal file
|
|
@ -0,0 +1,55 @@
|
|||
package ksuid
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"math"
|
||||
)
|
||||
|
||||
// Sequence is a KSUID generator which produces a sequence of ordered KSUIDs
|
||||
// from a seed.
|
||||
//
|
||||
// Up to 65536 KSUIDs can be generated by for a single seed.
|
||||
//
|
||||
// A typical usage of a Sequence looks like this:
|
||||
//
|
||||
// seq := ksuid.Sequence{
|
||||
// Seed: ksuid.New(),
|
||||
// }
|
||||
// id, err := seq.Next()
|
||||
//
|
||||
// Sequence values are not safe to use concurrently from multiple goroutines.
|
||||
type Sequence struct {
|
||||
// The seed is used as base for the KSUID generator, all generated KSUIDs
|
||||
// share the same leading 18 bytes of the seed.
|
||||
Seed KSUID
|
||||
count uint32 // uint32 for overflow, only 2 bytes are used
|
||||
}
|
||||
|
||||
// Next produces the next KSUID in the sequence, or returns an error if the
|
||||
// sequence has been exhausted.
|
||||
func (seq *Sequence) Next() (KSUID, error) {
|
||||
id := seq.Seed // copy
|
||||
count := seq.count
|
||||
if count > math.MaxUint16 {
|
||||
return Nil, errors.New("too many IDs were generated")
|
||||
}
|
||||
seq.count++
|
||||
return withSequenceNumber(id, uint16(count)), nil
|
||||
}
|
||||
|
||||
// Bounds returns the inclusive min and max bounds of the KSUIDs that may be
|
||||
// generated by the sequence. If all ids have been generated already then the
|
||||
// returned min value is equal to the max.
|
||||
func (seq *Sequence) Bounds() (min KSUID, max KSUID) {
|
||||
count := seq.count
|
||||
if count > math.MaxUint16 {
|
||||
count = math.MaxUint16
|
||||
}
|
||||
return withSequenceNumber(seq.Seed, uint16(count)), withSequenceNumber(seq.Seed, math.MaxUint16)
|
||||
}
|
||||
|
||||
func withSequenceNumber(id KSUID, n uint16) KSUID {
|
||||
binary.BigEndian.PutUint16(id[len(id)-2:], n)
|
||||
return id
|
||||
}
|
||||
343
vendor/github.com/segmentio/ksuid/set.go
generated
vendored
Normal file
343
vendor/github.com/segmentio/ksuid/set.go
generated
vendored
Normal file
|
|
@ -0,0 +1,343 @@
|
|||
package ksuid
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// CompressedSet is an immutable data type which stores a set of KSUIDs.
|
||||
type CompressedSet []byte
|
||||
|
||||
// Iter returns an iterator that produces all KSUIDs in the set.
|
||||
func (set CompressedSet) Iter() CompressedSetIter {
|
||||
return CompressedSetIter{
|
||||
content: []byte(set),
|
||||
}
|
||||
}
|
||||
|
||||
// String satisfies the fmt.Stringer interface, returns a human-readable string
|
||||
// representation of the set.
|
||||
func (set CompressedSet) String() string {
|
||||
b := bytes.Buffer{}
|
||||
b.WriteByte('[')
|
||||
set.writeTo(&b)
|
||||
b.WriteByte(']')
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// String satisfies the fmt.GoStringer interface, returns a Go representation of
|
||||
// the set.
|
||||
func (set CompressedSet) GoString() string {
|
||||
b := bytes.Buffer{}
|
||||
b.WriteString("ksuid.CompressedSet{")
|
||||
set.writeTo(&b)
|
||||
b.WriteByte('}')
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// writeTo writes a comma-separated list of double-quoted, string-encoded
// KSUIDs from the set into b.
func (set CompressedSet) writeTo(b *bytes.Buffer) {
	// Scratch buffer for the text encoding of one KSUID. Append fills it in
	// place: a[:0] shares a's backing array, so the bytes land in a.
	// NOTE(review): assumes KSUID.Append writes exactly 27 bytes (the fixed
	// string-encoded length) — confirm against ksuid.go.
	a := [27]byte{}

	for i, it := 0, set.Iter(); it.Next(); i++ {
		if i != 0 {
			b.WriteString(", ")
		}
		b.WriteByte('"')
		it.KSUID.Append(a[:0])
		b.Write(a[:])
		b.WriteByte('"')
	}
}
|
||||
|
||||
// Compress creates and returns a compressed set of KSUIDs from the list given
|
||||
// as arguments.
|
||||
func Compress(ids ...KSUID) CompressedSet {
|
||||
c := 1 + byteLength + (len(ids) / 5)
|
||||
b := make([]byte, 0, c)
|
||||
return AppendCompressed(b, ids...)
|
||||
}
|
||||
|
||||
// AppendCompressed uses the given byte slice as pre-allocated storage space to
// build a KSUID set.
//
// Note that the set uses a compression technique to store the KSUIDs, so the
// resulting length is not 20 x len(ids). The rule of thumb here is for the
// given byte slice to reserve the amount of memory that the application would
// be OK to waste.
func AppendCompressed(set []byte, ids ...KSUID) CompressedSet {
	if len(ids) != 0 {
		// The encoding requires ascending order so every entry can be a
		// delta from its predecessor.
		if !IsSorted(ids) {
			Sort(ids)
		}
		one := makeUint128(0, 1)

		// The first KSUID is always written to the set, this is the starting
		// point for all deltas.
		set = append(set, byte(rawKSUID))
		set = append(set, ids[0][:]...)

		timestamp := ids[0].Timestamp()
		lastKSUID := ids[0]
		lastValue := uint128Payload(ids[0])

		for i := 1; i != len(ids); i++ {
			id := ids[i]

			// Exact duplicates contribute nothing to a set; skip them.
			if id == lastKSUID {
				continue
			}

			t := id.Timestamp()
			v := uint128Payload(id)

			if t != timestamp {
				// Timestamp changed: store a varint timestamp delta followed
				// by the raw payload bytes.
				d := t - timestamp
				n := varintLength32(d)

				set = append(set, timeDelta|byte(n))
				set = appendVarint32(set, d, n)
				set = append(set, id[timestampLengthInBytes:]...)

				timestamp = t
			} else {
				d := sub128(v, lastValue)

				if d != one {
					// Same timestamp, non-consecutive payload: store only the
					// varint payload delta.
					n := varintLength128(d)

					set = append(set, payloadDelta|byte(n))
					set = appendVarint128(set, d, n)
				} else {
					// Consecutive payloads: collapse the whole run into one
					// range entry holding its length; skip the consumed ids.
					l, c := rangeLength(ids[i+1:], t, id, v)
					m := uint64(l + 1)
					n := varintLength64(m)

					set = append(set, payloadRange|byte(n))
					set = appendVarint64(set, m, n)

					i += c
					id = ids[i]
					v = uint128Payload(id)
				}
			}

			lastKSUID = id
			lastValue = v
		}
	}
	return CompressedSet(set)
}
|
||||
|
||||
// rangeLength scans ids — which follow an id whose timestamp is timestamp and
// whose payload is lastValue — and reports how far the run of consecutive
// payloads extends. length counts ids that continue the run; count is the
// number of input elements consumed (duplicates included).
func rangeLength(ids []KSUID, timestamp uint32, lastKSUID KSUID, lastValue uint128) (length int, count int) {
	one := makeUint128(0, 1)

	for i := range ids {
		id := ids[i]

		// Duplicates of the previous id are consumed but don't extend the run.
		if id == lastKSUID {
			continue
		}

		// A different timestamp ends the run.
		if id.Timestamp() != timestamp {
			count = i
			return
		}

		v := uint128Payload(id)

		// A payload gap (difference other than exactly one) ends the run.
		if sub128(v, lastValue) != one {
			count = i
			return
		}

		lastKSUID = id
		lastValue = v
		length++
	}

	// The entire slice was consumed by the run.
	count = len(ids)
	return
}
|
||||
|
||||
func appendVarint128(b []byte, v uint128, n int) []byte {
|
||||
c := v.bytes()
|
||||
return append(b, c[len(c)-n:]...)
|
||||
}
|
||||
|
||||
// appendVarint64 appends the n low-order bytes of v's big-endian encoding
// to b.
func appendVarint64(b []byte, v uint64, n int) []byte {
	var tmp [8]byte
	binary.BigEndian.PutUint64(tmp[:], v)
	return append(b, tmp[8-n:]...)
}
|
||||
|
||||
// appendVarint32 appends the n low-order bytes of v's big-endian encoding
// to b.
func appendVarint32(b []byte, v uint32, n int) []byte {
	var tmp [4]byte
	binary.BigEndian.PutUint32(tmp[:], v)
	return append(b, tmp[4-n:]...)
}
|
||||
|
||||
// varint128 decodes a big-endian integer of up to 16 bytes into a uint128,
// left-padding with zero bytes when b is shorter than 16.
func varint128(b []byte) uint128 {
	a := [16]byte{}
	copy(a[16-len(b):], b)
	return makeUint128FromPayload(a[:])
}
|
||||
|
||||
// varint64 decodes a big-endian integer of up to 8 bytes, left-padding with
// zero bytes when b is shorter than 8.
func varint64(b []byte) uint64 {
	var buf [8]byte
	copy(buf[8-len(b):], b)
	return binary.BigEndian.Uint64(buf[:])
}
|
||||
|
||||
// varint32 decodes a big-endian integer of up to 4 bytes, left-padding with
// zero bytes when b is shorter than 4.
func varint32(b []byte) uint32 {
	var buf [4]byte
	copy(buf[4-len(b):], b)
	return binary.BigEndian.Uint32(buf[:])
}
|
||||
|
||||
func varintLength128(v uint128) int {
|
||||
if v[1] != 0 {
|
||||
return 8 + varintLength64(v[1])
|
||||
}
|
||||
return varintLength64(v[0])
|
||||
}
|
||||
|
||||
// varintLength64 returns the number of bytes (1..8) needed to represent v in
// big-endian form without leading zero bytes. Zero still occupies one byte.
func varintLength64(v uint64) int {
	if v == 0 {
		return 1
	}
	// bits.Len64 is the index of the highest set bit; round up to whole
	// bytes. Replaces the original hand-rolled 8-way mask switch.
	return (bits.Len64(v) + 7) / 8
}
|
||||
|
||||
// varintLength32 returns the number of bytes (1..4) needed to represent v in
// big-endian form without leading zero bytes. Zero still occupies one byte.
func varintLength32(v uint32) int {
	if v == 0 {
		return 1
	}
	// Same byte-count calculation as varintLength64, via math/bits instead of
	// the original mask switch.
	return (bits.Len32(v) + 7) / 8
}
|
||||
|
||||
// Entry tags stored in the top two bits of the first byte of each entry in a
// compressed set; the low six bits of that byte carry the entry's varint
// byte count.
const (
	rawKSUID     = 0                   // full raw KSUID bytes follow
	timeDelta    = (1 << 6)            // varint timestamp delta + raw payload follow
	payloadDelta = (1 << 7)            // varint payload delta follows
	payloadRange = (1 << 6) | (1 << 7) // varint run length of consecutive ids follows
)
|
||||
|
||||
// CompressedSetIter is an iterator type returned by Set.Iter to produce the
// list of KSUIDs stored in a set.
//
// Here's is how the iterator type is commonly used:
//
//	for it := set.Iter(); it.Next(); {
//		id := it.KSUID
//		// ...
//	}
//
// CompressedSetIter values are not safe to use concurrently from multiple
// goroutines.
type CompressedSetIter struct {
	// KSUID is modified by calls to the Next method to hold the KSUID loaded
	// by the iterator.
	KSUID KSUID

	content []byte // compressed set being decoded
	offset  int    // read position within content

	seqlength uint64  // remaining ids in the current payloadRange run
	timestamp uint32  // timestamp of the most recently decoded KSUID
	lastValue uint128 // payload of the most recently decoded KSUID
}
|
||||
|
||||
// Next moves the iterator forward, returning true if there a KSUID was found,
// or false if the iterator as reached the end of the set it was created from.
func (it *CompressedSetIter) Next() bool {
	// Fast path: inside a payloadRange run the next id is simply the
	// previous payload incremented by one.
	if it.seqlength != 0 {
		value := incr128(it.lastValue)
		it.KSUID = value.ksuid(it.timestamp)
		it.seqlength--
		it.lastValue = value
		return true
	}

	if it.offset == len(it.content) {
		return false
	}

	// Each entry begins with one byte: the top two bits are the tag, the low
	// six bits the varint byte count for that entry.
	b := it.content[it.offset]
	it.offset++

	const mask = rawKSUID | timeDelta | payloadDelta | payloadRange
	tag := int(b) & mask
	cnt := int(b) & ^mask

	switch tag {
	case rawKSUID:
		// A full KSUID stored verbatim.
		off0 := it.offset
		off1 := off0 + byteLength

		copy(it.KSUID[:], it.content[off0:off1])

		it.offset = off1
		it.timestamp = it.KSUID.Timestamp()
		it.lastValue = uint128Payload(it.KSUID)

	case timeDelta:
		// A varint timestamp delta followed by the raw payload bytes.
		off0 := it.offset
		off1 := off0 + cnt
		off2 := off1 + payloadLengthInBytes

		it.timestamp += varint32(it.content[off0:off1])

		binary.BigEndian.PutUint32(it.KSUID[:timestampLengthInBytes], it.timestamp)
		copy(it.KSUID[timestampLengthInBytes:], it.content[off1:off2])

		it.offset = off2
		it.lastValue = uint128Payload(it.KSUID)

	case payloadDelta:
		// A varint delta added to the previous payload; same timestamp.
		off0 := it.offset
		off1 := off0 + cnt

		delta := varint128(it.content[off0:off1])
		value := add128(it.lastValue, delta)

		it.KSUID = value.ksuid(it.timestamp)
		it.offset = off1
		it.lastValue = value

	case payloadRange:
		// A varint run length: emit the first id of the run here, leave the
		// remainder to the fast path at the top of this method.
		off0 := it.offset
		off1 := off0 + cnt

		value := incr128(it.lastValue)
		it.KSUID = value.ksuid(it.timestamp)
		it.seqlength = varint64(it.content[off0:off1])
		it.offset = off1
		it.seqlength--
		it.lastValue = value

	default:
		panic("KSUID set iterator is reading malformed data")
	}

	return true
}
|
||||
141
vendor/github.com/segmentio/ksuid/uint128.go
generated
vendored
Normal file
141
vendor/github.com/segmentio/ksuid/uint128.go
generated
vendored
Normal file
|
|
@ -0,0 +1,141 @@
|
|||
package ksuid
|
||||
|
||||
import "fmt"
|
||||
|
||||
// uint128 represents an unsigned 128 bits little endian integer.
// Index 0 holds the low 64-bit word, index 1 the high word.
type uint128 [2]uint64

// uint128Payload extracts the payload portion of ksuid (the bytes after the
// timestamp prefix) as a uint128.
func uint128Payload(ksuid KSUID) uint128 {
	return makeUint128FromPayload(ksuid[timestampLengthInBytes:])
}
|
||||
|
||||
// makeUint128 builds a uint128 from its two 64-bit halves. Note the word
// order in the literal: index 0 is the low word, index 1 the high word.
func makeUint128(high uint64, low uint64) uint128 {
	return uint128{low, high}
}
|
||||
|
||||
// makeUint128FromPayload decodes 16 big-endian bytes into a uint128: bytes
// 0..7 become the high word, bytes 8..15 the low word. payload must be at
// least 16 bytes long; only the first 16 are read.
func makeUint128FromPayload(payload []byte) uint128 {
	return uint128{
		// low
		uint64(payload[8])<<56 |
			uint64(payload[9])<<48 |
			uint64(payload[10])<<40 |
			uint64(payload[11])<<32 |
			uint64(payload[12])<<24 |
			uint64(payload[13])<<16 |
			uint64(payload[14])<<8 |
			uint64(payload[15]),
		// high
		uint64(payload[0])<<56 |
			uint64(payload[1])<<48 |
			uint64(payload[2])<<40 |
			uint64(payload[3])<<32 |
			uint64(payload[4])<<24 |
			uint64(payload[5])<<16 |
			uint64(payload[6])<<8 |
			uint64(payload[7]),
	}
}
|
||||
|
||||
// ksuid reassembles a full KSUID from the given timestamp and the payload
// held in v, writing both in big-endian byte order.
func (v uint128) ksuid(timestamp uint32) KSUID {
	return KSUID{
		// time
		byte(timestamp >> 24),
		byte(timestamp >> 16),
		byte(timestamp >> 8),
		byte(timestamp),

		// high
		byte(v[1] >> 56),
		byte(v[1] >> 48),
		byte(v[1] >> 40),
		byte(v[1] >> 32),
		byte(v[1] >> 24),
		byte(v[1] >> 16),
		byte(v[1] >> 8),
		byte(v[1]),

		// low
		byte(v[0] >> 56),
		byte(v[0] >> 48),
		byte(v[0] >> 40),
		byte(v[0] >> 32),
		byte(v[0] >> 24),
		byte(v[0] >> 16),
		byte(v[0] >> 8),
		byte(v[0]),
	}
}
|
||||
|
||||
// bytes returns the 16-byte big-endian encoding of v, high word first.
func (v uint128) bytes() [16]byte {
	return [16]byte{
		// high
		byte(v[1] >> 56),
		byte(v[1] >> 48),
		byte(v[1] >> 40),
		byte(v[1] >> 32),
		byte(v[1] >> 24),
		byte(v[1] >> 16),
		byte(v[1] >> 8),
		byte(v[1]),

		// low
		byte(v[0] >> 56),
		byte(v[0] >> 48),
		byte(v[0] >> 40),
		byte(v[0] >> 32),
		byte(v[0] >> 24),
		byte(v[0] >> 16),
		byte(v[0] >> 8),
		byte(v[0]),
	}
}
|
||||
|
||||
// String formats v as a 0x-prefixed, 32-digit hexadecimal string.
//
// NOTE(review): this prints v[0] (the low word) before v[1] (the high word),
// which renders the halves in low-high order — confirm this ordering is
// intended before relying on the output.
func (v uint128) String() string {
	return fmt.Sprintf("0x%016X%016X", v[0], v[1])
}
|
||||
|
||||
// wordBitSize is the bit width of one uint128 word; used to extract the
// carry/borrow bit in add128 and sub128.
const wordBitSize = 64

// cmp128 compares x and y, returning -1, 0, or +1 when x is less than, equal
// to, or greater than y. High words are compared first.
func cmp128(x, y uint128) int {
	if x[1] < y[1] {
		return -1
	}
	if x[1] > y[1] {
		return 1
	}
	if x[0] < y[0] {
		return -1
	}
	if x[0] > y[0] {
		return 1
	}
	return 0
}
|
||||
|
||||
// add128 returns x + y, wrapping around on overflow of the 128-bit range.
func add128(x, y uint128) (z uint128) {
	x0 := x[0]
	y0 := y[0]
	z0 := x0 + y0
	z[0] = z0

	// Carry out of the low word: the top bit of (x0&y0 | (x0|y0)&^z0) is set
	// exactly when the 64-bit addition of the low words overflowed.
	c := (x0&y0 | (x0|y0)&^z0) >> (wordBitSize - 1)

	z[1] = x[1] + y[1] + c
	return
}
|
||||
|
||||
// sub128 returns x - y, wrapping around when y is greater than x.
func sub128(x, y uint128) (z uint128) {
	x0 := x[0]
	y0 := y[0]
	z0 := x0 - y0
	z[0] = z0

	// Borrow out of the low word: the top bit of (y0&^x0 | (y0|^x0)&z0) is
	// set exactly when the low-word subtraction underflowed.
	c := (y0&^x0 | (y0|^x0)&z0) >> (wordBitSize - 1)

	z[1] = x[1] - y[1] - c
	return
}
|
||||
|
||||
// incr128 returns x + 1. uint128{1, 0} is the value one (low word first).
func incr128(x uint128) uint128 {
	return add128(x, uint128{1, 0})
}
|
||||
3
vendor/modules.txt
vendored
3
vendor/modules.txt
vendored
|
|
@ -302,6 +302,9 @@ github.com/prometheus/common/model
|
|||
github.com/prometheus/procfs
|
||||
github.com/prometheus/procfs/internal/fs
|
||||
github.com/prometheus/procfs/internal/util
|
||||
# github.com/segmentio/ksuid v1.0.4
|
||||
## explicit
|
||||
github.com/segmentio/ksuid
|
||||
# github.com/stretchr/testify v1.7.0
|
||||
## explicit
|
||||
github.com/stretchr/testify/assert
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue