dnfjson: lock cache directory when cleaning

Apply an RWMutex lock to the cache directory.
A global map of cache locks is maintained, keyed by the absolute path to
the cache directory, so multiple cache instances can coexist and share
locks if they use the same cache root.

Currently, the lock only prevents concurrent `shrink()` operations when
multiple cache instances share the same root.
Achilleas Koutsou 2022-06-04 16:48:42 +02:00 committed by Tom Gundersen
parent 31f7040e05
commit fb34c69e91

@@ -6,11 +6,15 @@ import (
	"os"
	"path/filepath"
	"sort"
	"sync"
	"time"
	"github.com/gobwas/glob"
)
// global cache locker
var cacheLocks sync.Map
// A collection of directory paths, their total size, and their most recent
// modification time.
type pathInfo struct {
@@ -34,14 +38,28 @@ type rpmCache struct {
	// max cache size
	maxSize uint64
	// locker for this cache directory
	locker *sync.RWMutex
}

func newRPMCache(path string, maxSize uint64) *rpmCache {
	absPath, err := filepath.Abs(path) // convert to abs if it's not already
	if err != nil {
		panic(err) // can only happen if the CWD does not exist and the path isn't already absolute
	}
	path = absPath
	locker := new(sync.RWMutex)
	if l, loaded := cacheLocks.LoadOrStore(path, locker); loaded {
		// value existed and was loaded
		locker = l.(*sync.RWMutex)
	}
	r := &rpmCache{
		root:         path,
		repoElements: make(map[string]pathInfo),
		size:         0,
		maxSize:      maxSize,
		locker:       locker,
	}
	// collect existing cache paths and timestamps
	r.updateInfo()
@@ -121,6 +139,9 @@ func (r *rpmCache) updateInfo() {
}

func (r *rpmCache) shrink() error {
	r.locker.Lock()
	defer r.locker.Unlock()
	// start deleting until we drop below r.maxSize
	nDeleted := 0
	for idx := 0; idx < len(r.repoRecency) && r.size >= r.maxSize; idx++ {
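
To make the locking behaviour concrete, here is a rough sketch (not part of the commit): it assumes package-internal access to `newRPMCache`, `rpmCache`, and `shrink()`, and the cache root path and the `exampleSharedShrink` helper are made up for illustration. Two instances built from the same root receive the same `*sync.RWMutex` from `cacheLocks`, so their `shrink()` calls cannot run at the same time:

```go
// Hypothetical illustration (not from the commit): two caches sharing a root.
package dnfjson

import "sync"

func exampleSharedShrink() error {
	root := "/var/cache/example-rpm-cache" // hypothetical cache root
	a := newRPMCache(root, 1<<30)          // 1 GiB limit
	b := newRPMCache(root, 1<<30)          // same root, so both get the same locker

	var wg sync.WaitGroup
	errs := make(chan error, 2)
	for _, c := range []*rpmCache{a, b} {
		wg.Add(1)
		go func(c *rpmCache) {
			defer wg.Done()
			// shrink() takes the shared write lock, so only one of the two
			// goroutines cleans the directory at any given moment.
			errs <- c.shrink()
		}(c)
	}
	wg.Wait()
	close(errs)

	for err := range errs {
		if err != nil {
			return err
		}
	}
	return nil
}
```

Because the lock map is keyed by the absolute path, the same lock would also be shared if one of the instances were constructed from a relative spelling of the same directory.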