Revert "Revert "cloudapi/v1: Move depsolving to workers""
Workers now depsolve in parallel to image builds, so we can
again move depsolving to the workers. This will help us deal
with increases in traffic, as composer currently has only one
depsolve handler per pod. It also avoids any issues with
composer running out of disk space due to dnf metadata caches.
This reverts commit c65b1e9b26.
parent cfe9f7a87f
commit f44acd0974
2 changed files with 35 additions and 27 deletions
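For orientation before the diff: the updated Compose handler no longer depsolves inline. It enqueues a depsolve job on the worker queue and then polls the job status until the job finishes, is canceled, or reports an error. Below is a minimal, self-contained sketch of that enqueue-then-poll pattern only; the jobQueue interface and the jobStatus/depsolveResult types are simplified stand-ins for illustration, not the real worker package API.

package sketch

import (
	"errors"
	"fmt"
	"time"
)

// jobStatus and depsolveResult are simplified stand-ins for the worker
// package's job status and DepsolveJobResult types.
type jobStatus struct {
	Canceled bool
	Finished time.Time // zero value means "not finished yet"
}

type depsolveResult struct {
	Error        string
	PackageSpecs map[string][]string
}

// jobQueue is a hypothetical queue interface mirroring the
// EnqueueDepsolve/JobStatus calls used in the handler diff below.
type jobQueue interface {
	EnqueueDepsolve(packageSets map[string][]string) (uint64, error)
	JobStatus(id uint64, result *depsolveResult) (jobStatus, error)
}

// waitForDepsolve enqueues a depsolve job and polls until it completes,
// mirroring the loop added to the Compose handler.
func waitForDepsolve(q jobQueue, packageSets map[string][]string) (map[string][]string, error) {
	id, err := q.EnqueueDepsolve(packageSets)
	if err != nil {
		return nil, fmt.Errorf("unable to enqueue depsolve job: %w", err)
	}

	var result depsolveResult
	for {
		status, err := q.JobStatus(id, &result)
		if err != nil {
			return nil, fmt.Errorf("unable to get depsolve results: %w", err)
		}
		if status.Canceled {
			return nil, errors.New("depsolve job canceled unexpectedly")
		}
		if !status.Finished.IsZero() {
			break
		}
		time.Sleep(50 * time.Millisecond) // same polling interval as the handler
	}

	if result.Error != "" {
		return nil, fmt.Errorf("depsolve failed: %s", result.Error)
	}
	return result.PackageSpecs, nil
}

Note that the Compose request still waits for depsolving to finish; the change only moves the depsolve work itself off the composer process and onto whichever worker picks up the job.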
@@ -10,6 +10,7 @@ import (
 	"math/big"
 	"net/http"
 	"strings"
+	"time"
 
 	"github.com/google/uuid"
 	"github.com/labstack/echo/v4"
@@ -157,25 +158,40 @@ func (h *apiHandlers) Compose(ctx echo.Context) error {
 	}
 
 	packageSets := imageType.PackageSets(bp)
-	pkgSpecSets := make(map[string][]rpmmd.PackageSpec)
-	for name, packages := range packageSets {
-		pkgs, _, err := h.server.rpmMetadata.Depsolve(packages, repositories, distribution.ModulePlatformID(), arch.Name(), distribution.Releasever())
-		if err != nil {
-			var error_type int
-			switch err.(type) {
-			// Known DNF errors falls under BadRequest
-			case *rpmmd.DNFError:
-				error_type = http.StatusBadRequest
-			// All other kind of errors are internal server Errors.
-			// (json marshalling issues for instance)
-			case error:
-				error_type = http.StatusInternalServerError
-			}
-			return echo.NewHTTPError(error_type, "Failed to depsolve base packages for %s/%s/%s: %s", ir.ImageType, ir.Architecture, request.Distribution, err)
-		}
-		pkgSpecSets[name] = pkgs
+	depsolveJobID, err := h.server.workers.EnqueueDepsolve(&worker.DepsolveJob{
+		PackageSets:      packageSets,
+		Repos:            repositories,
+		ModulePlatformID: distribution.ModulePlatformID(),
+		Arch:             arch.Name(),
+		Releasever:       distribution.Releasever(),
+	})
+	if err != nil {
+		return echo.NewHTTPError(http.StatusInternalServerError, "Unable to enqueue depsolve job")
 	}
+
+	var depsolveResults worker.DepsolveJobResult
+	for {
+		status, _, err := h.server.workers.JobStatus(depsolveJobID, &depsolveResults)
+		if err != nil {
+			return echo.NewHTTPError(http.StatusInternalServerError, "Unable to get depsolve results")
+		}
+		if status.Canceled {
+			return echo.NewHTTPError(http.StatusInternalServerError, "Depsolving job canceled unexpectedly")
+		}
+		if !status.Finished.IsZero() {
+			break
+		}
+		time.Sleep(50 * time.Millisecond)
+	}
+
+	if depsolveResults.Error != "" {
+		if depsolveResults.ErrorType == worker.DepsolveErrorType {
+			return echo.NewHTTPError(http.StatusBadRequest, "Failed to depsolve requested package set: %s", depsolveResults.Error)
+		}
+		return echo.NewHTTPError(http.StatusInternalServerError, "Error while depsolving: %s", depsolveResults.Error)
+	}
+	pkgSpecSets := depsolveResults.PackageSpecs
 
 	imageOptions := distro.ImageOptions{Size: imageType.Size(0)}
 	if request.Customizations != nil && request.Customizations.Subscription != nil {
 		imageOptions.Subscription = &distro.SubscriptionImageOptions{
@@ -706,17 +706,9 @@ function waitForState() {
   done
 }
 
-# a pending shouldn't state shouldn't trip up the heartbeats
-sudo systemctl stop "osbuild-worker@*"
-sendCompose
-waitForState "pending"
-# jobs time out after 2 minutes, so 180 seconds gives ample time to make sure it
-# doesn't time out for pending jobs
-sleep 180
-waitForState "pending"
 
 # crashed/stopped/killed worker should result in a failed state
 sudo systemctl start "osbuild-worker@1"
 waitForState "building"
 sudo systemctl stop "osbuild-worker@*"
 waitForState "failure"
@@ -734,8 +726,8 @@ test "$UPLOAD_STATUS" = "success"
 test "$UPLOAD_TYPE" = "$CLOUD_PROVIDER"
 test $((INIT_COMPOSES+1)) = "$SUBS_COMPOSES"
 
-# Make sure we get 1 job entry in the db per compose
-sudo podman exec osbuild-composer-db psql -U postgres -d osbuildcomposer -c "SELECT * FROM jobs;" | grep "2 rows"
+# Make sure we get 2 job entries in the db per compose (depsolve + build)
+sudo podman exec osbuild-composer-db psql -U postgres -d osbuildcomposer -c "SELECT * FROM jobs;" | grep "4 rows"
 
 #
 # Save the Manifest from the osbuild-composer store
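The worker-side handler that performs the actual depsolve is not part of this diff. As a rough, hypothetical sketch of that side, assuming it receives the fields carried by worker.DepsolveJob and runs the same per-package-set depsolve loop that was removed from the handler above; the depsolver interface and runDepsolveJob function below are illustrative names only, not the real worker implementation.

package sketch

import "fmt"

// depsolveJob mirrors the fields of worker.DepsolveJob shown in the diff above.
type depsolveJob struct {
	PackageSets      map[string][]string
	Repos            []string
	ModulePlatformID string
	Arch             string
	Releasever       string
}

// depsolveJobResult mirrors worker.DepsolveJobResult: package specs on
// success, an error string otherwise.
type depsolveJobResult struct {
	PackageSpecs map[string][]string
	Error        string
}

// depsolver is a stand-in for the rpm-md depsolver the handler previously
// called directly (h.server.rpmMetadata.Depsolve).
type depsolver interface {
	Depsolve(packages, repos []string, modulePlatformID, arch, releasever string) ([]string, error)
}

// runDepsolveJob depsolves every package set in the job and collects the
// results, reproducing the loop that used to live in the Compose handler.
func runDepsolveJob(d depsolver, job depsolveJob) depsolveJobResult {
	specs := make(map[string][]string, len(job.PackageSets))
	for name, packages := range job.PackageSets {
		pkgs, err := d.Depsolve(packages, job.Repos, job.ModulePlatformID, job.Arch, job.Releasever)
		if err != nil {
			return depsolveJobResult{Error: fmt.Sprintf("failed to depsolve package set %q: %v", name, err)}
		}
		specs[name] = pkgs
	}
	return depsolveJobResult{PackageSpecs: specs}
}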