dnfjson: cache cleanup

Added CleanCache() method to the solver that deletes all the caches if
the total size grows above a certain (configurable) limit
(default: 500 MiB).

The function is not called automatically by the solver; callers invoke it
explicitly, so they can decide how to handle errors (usually log or ignore
them) and avoid running the cleanup repeatedly across the multiple
depsolves of a single request.

The cleanup is extremely simple and is meant as a placeholder for more
sophisticated cache management.  The goal is to simply avoid ballooning
cache sizes that might cause issues for users or our own services.
This commit is contained in:
Achilleas Koutsou 2022-05-30 18:32:44 +02:00 committed by Tom Gundersen
parent 8b4607c94f
commit 9fda1ff55f
6 changed files with 77 additions and 0 deletions

View file

@ -143,6 +143,11 @@ func main() {
solver := dnfjson.NewSolver(d.ModulePlatformID(), d.Releasever(), arch.Name(), path.Join(home, ".cache/osbuild-composer/rpmmd"))
solver.SetDNFJSONPath(findDnfJsonBin())
// Set cache size to 3 GiB
// osbuild-pipeline is often used to generate a lot of manifests in a row
// let the cache grow to fit much more repository metadata than we usually allow
solver.SetMaxCacheSize(3 * 1024 * 1024 * 1024)
packageSets := imageType.PackageSets(composeRequest.Blueprint, repos)
depsolvedSets := make(map[string][]rpmmd.PackageSpec)
@ -189,4 +194,8 @@ func main() {
}
}
os.Stdout.Write(bytes)
if err := solver.CleanCache(); err != nil {
// print to stderr but don't exit with error
fmt.Fprintf(os.Stderr, "Error during rpm repo cache cleanup: %s", err.Error())
}
}

View file

@ -30,6 +30,7 @@ func (impl *DepsolveJobImpl) depsolve(packageSets map[string][]rpmmd.PackageSet,
}
depsolvedSets[name] = res
}
return depsolvedSets, nil
}
@ -66,6 +67,10 @@ func (impl *DepsolveJobImpl) Run(job worker.Job) error {
logWithId.Errorf("rpmmd error in depsolve job: %v", err)
}
}
if err := impl.Solver.CleanCache(); err != nil {
// log and ignore
logWithId.Errorf("Error during rpm repo cache cleanup: %s", err.Error())
}
err = job.Update(&result)
if err != nil {

31
internal/dnfjson/cache.go Normal file
View file

@ -0,0 +1,31 @@
package dnfjson
import (
"io/fs"
"os"
"path/filepath"
)
func dirSize(path string) (uint64, error) {
var size uint64
sizer := func(path string, info fs.FileInfo, err error) error {
if err != nil {
return err
}
size += uint64(info.Size())
return nil
}
err := filepath.Walk(path, sizer)
return size, err
}
// CleanCache deletes the whole cache directory if its total size exceeds
// the configured maximum (see SetMaxCacheSize). It is a deliberately
// simple placeholder for more sophisticated cache management: the goal is
// only to keep the cache from growing without bound.
func (bs *BaseSolver) CleanCache() error {
	curSize, err := dirSize(bs.cacheDir)
	if err != nil {
		// A cache directory that does not exist yet (e.g. before the
		// first depsolve) means there is nothing to clean up.
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}
	if curSize > bs.maxCacheSize {
		return os.RemoveAll(bs.cacheDir)
	}
	return nil
}

View file

@ -34,6 +34,8 @@ type BaseSolver struct {
// Path to the dnf-json binary and optional args (default: "/usr/libexec/osbuild-composer/dnf-json")
dnfJsonCmd []string
maxCacheSize uint64
}
// Create a new unconfigured BaseSolver (without platform information). It can
@ -45,9 +47,14 @@ func NewBaseSolver(cacheDir string) *BaseSolver {
cacheDir: cacheDir,
subscriptions: subscriptions,
dnfJsonCmd: []string{"/usr/libexec/osbuild-composer/dnf-json"},
maxCacheSize: 524288000, // 500 MiB
}
}
// SetMaxCacheSize sets the maximum size (in bytes) that the repository
// metadata cache is allowed to reach before CleanCache removes it.
func (s *BaseSolver) SetMaxCacheSize(size uint64) {
	s.maxCacheSize = size
}
// SetDNFJSONPath sets the path to the dnf-json binary and optionally any command line arguments.
func (s *BaseSolver) SetDNFJSONPath(cmd string, args ...string) {
s.dnfJsonCmd = append([]string{cmd}, args...)

View file

@ -224,6 +224,11 @@ func (h *apiHandlers) PostCompose(ctx echo.Context) error {
return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Could not initialize build with koji: %v", initResult.JobError.Reason))
}
if err := h.server.solver.CleanCache(); err != nil {
// log and ignore
log.Printf("Error during rpm repo cache cleanup: %s", err.Error())
}
return ctx.JSON(http.StatusCreated, &api.ComposeResponse{
Id: id.String(),
KojiBuildId: int(initResult.BuildID),

View file

@ -1272,6 +1272,10 @@ func (api *API) modulesInfoHandler(writer http.ResponseWriter, request *http.Req
}
packageInfos[i].Dependencies = solved
}
if err := solver.CleanCache(); err != nil {
// log and ignore
log.Printf("Error during rpm repo cache cleanup: %s", err.Error())
}
}
if modulesRequested {
@ -1348,6 +1352,10 @@ func (api *API) projectsDepsolveHandler(writer http.ResponseWriter, request *htt
statusResponseError(writer, http.StatusBadRequest, errors)
return
}
if err := solver.CleanCache(); err != nil {
// log and ignore
log.Printf("Error during rpm repo cache cleanup: %s", err.Error())
}
err = json.NewEncoder(writer).Encode(reply{Projects: deps})
common.PanicOnError(err)
}
@ -2148,6 +2156,10 @@ func (api *API) depsolveBlueprintForImageType(bp blueprint.Blueprint, imageType
}
depsolvedSets[name] = res
}
if err := solver.CleanCache(); err != nil {
// log and ignore
log.Printf("Error during rpm repo cache cleanup: %s", err.Error())
}
return depsolvedSets, nil
}
@ -3120,6 +3132,10 @@ func (api *API) fetchPackageList(distroName string) (rpmmd.PackageList, error) {
if err != nil {
return nil, err
}
if err := solver.CleanCache(); err != nil {
// log and ignore
log.Printf("Error during rpm repo cache cleanup: %s", err.Error())
}
return packages, nil
}
@ -3190,6 +3206,10 @@ func (api *API) depsolveBlueprint(bp blueprint.Blueprint) ([]rpmmd.PackageSpec,
return nil, err
}
if err := solver.CleanCache(); err != nil {
// log and ignore
log.Printf("Error during rpm repo cache cleanup: %s", err.Error())
}
return solved, nil
}