deps: update images to v0.18.0

Update the images dependency to v0.18.0.
This includes a change in the Fedora IoT remote configuration which is
now installed through an RPM instead of being hard-coded in the image
definitions.
Achilleas Koutsou 2023-11-20 14:39:27 +01:00 committed by Simon de Vlieger
parent bb76ddb2b1
commit 5b19bd6809
161 changed files with 17972 additions and 6525 deletions

View file

@@ -10,15 +10,20 @@ import (
"github.com/opencontainers/go-digest"
)
// replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates,
// replacementAttempts is the number of blob replacement candidates with known location returned by destructivelyPrioritizeReplacementCandidates,
// and therefore ultimately by types.BlobInfoCache.CandidateLocations.
// This is a heuristic/guess, and could well use a different value.
const replacementAttempts = 5
// replacementUnknownLocationAttempts is the number of blob replacement candidates with unknown Location returned by destructivelyPrioritizeReplacementCandidates,
// and therefore ultimately by blobinfocache.BlobInfoCache2.CandidateLocations2.
// This is a heuristic/guess, and could well use a different value.
const replacementUnknownLocationAttempts = 2
// CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
type CandidateWithTime struct {
Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate
LastSeen time.Time // Time the candidate was last known to exist (either read or written)
LastSeen time.Time // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation)
}
// candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
@@ -77,9 +82,22 @@ func (css *candidateSortState) Swap(i, j int) {
css.cs[i], css.cs[j] = css.cs[j], css.cs[i]
}
// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the
// number of entries to limit, only to make testing simpler.
func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []blobinfocache.BICReplacementCandidate2 {
func min(a, b int) int {
if a < b {
return a
}
return b
}
// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the
// number of entries to limit for known and unknown locations separately, only to make testing simpler.
// TODO: this function is no longer destructive in nature; the prioritized result is actually a copy of the original
// candidate set, so in the future we may want to rename this public API and remove the "destructive" prefix.
func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, totalLimit int, noLocationLimit int) []blobinfocache.BICReplacementCandidate2 {
// split unknown candidates and known candidates
// and limit them separately.
var knownLocationCandidates []CandidateWithTime
var unknownLocationCandidates []CandidateWithTime
// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
// compare equal.
// FIXME: Use slices.SortFunc after we update to Go 1.20 (Go 1.21?) and Time.Compare and cmp.Compare are available.
@@ -88,24 +106,34 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime,
primaryDigest: primaryDigest,
uncompressedDigest: uncompressedDigest,
})
resLength := len(cs)
if resLength > maxCandidates {
resLength = maxCandidates
for _, candidate := range cs {
if candidate.Candidate.UnknownLocation {
unknownLocationCandidates = append(unknownLocationCandidates, candidate)
} else {
knownLocationCandidates = append(knownLocationCandidates, candidate)
}
}
res := make([]blobinfocache.BICReplacementCandidate2, resLength)
for i := range res {
res[i] = cs[i].Candidate
knownLocationCandidatesUsed := min(len(knownLocationCandidates), totalLimit)
remainingCapacity := totalLimit - knownLocationCandidatesUsed
unknownLocationCandidatesUsed := min(noLocationLimit, min(remainingCapacity, len(unknownLocationCandidates)))
res := make([]blobinfocache.BICReplacementCandidate2, knownLocationCandidatesUsed)
for i := 0; i < knownLocationCandidatesUsed; i++ {
res[i] = knownLocationCandidates[i].Candidate
}
// If candidates with unknown location are found, let's add them to the final list
for i := 0; i < unknownLocationCandidatesUsed; i++ {
res = append(res, unknownLocationCandidates[i].Candidate)
}
return res
}
// DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest),
// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
// the primary digest the user actually asked for, the corresponding uncompressed digest (if known, possibly equal to the primary digest), and returns an
// appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
//
// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []blobinfocache.BICReplacementCandidate2 {
return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts, replacementUnknownLocationAttempts)
}
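The interaction of the two limits may be easier to see outside the diff. Below is a minimal, self-contained sketch of the same selection arithmetic, using simplified stand-in types rather than the real blobinfocache API: known-location candidates fill up to totalLimit slots first, and unknown-location candidates may only use leftover capacity, further capped at noLocationLimit.

package main

import "fmt"

// candidate is a simplified stand-in for blobinfocache.BICReplacementCandidate2;
// only the field that drives the limiting logic is kept.
type candidate struct {
	digest          string
	unknownLocation bool
}

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// limitCandidates mirrors destructivelyPrioritizeReplacementCandidatesWithMax's
// arithmetic on an already-sorted slice.
func limitCandidates(sorted []candidate, totalLimit, noLocationLimit int) []candidate {
	var known, unknown []candidate
	for _, c := range sorted {
		if c.unknownLocation {
			unknown = append(unknown, c)
		} else {
			known = append(known, c)
		}
	}
	knownUsed := min(len(known), totalLimit)
	unknownUsed := min(noLocationLimit, min(totalLimit-knownUsed, len(unknown)))
	res := append([]candidate{}, known[:knownUsed]...)
	return append(res, unknown[:unknownUsed]...)
}

func main() {
	mk := func(n int, unknown bool) []candidate {
		cs := []candidate{}
		for i := 0; i < n; i++ {
			cs = append(cs, candidate{digest: fmt.Sprintf("sha256:%d", i), unknownLocation: unknown})
		}
		return cs
	}
	// Defaults (5, 2): six known candidates alone exhaust totalLimit, so no
	// unknown-location candidate makes it into the result.
	fmt.Println(len(limitCandidates(append(mk(6, false), mk(3, true)...), 5, 2))) // 5
	// Three known candidates leave capacity, but unknown-location candidates
	// are still capped at noLocationLimit: 3 known + 2 unknown.
	fmt.Println(len(limitCandidates(append(mk(3, false), mk(3, true)...), 5, 2))) // 5
}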

View file

@@ -133,24 +133,39 @@ func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compresso
mem.compressors[blobDigest] = compressorName
}
// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime {
// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory
// with corresponding compression info from mem.compressors, and returns the result of appending
// them to candidates. v2Output allows including candidates with unknown location, and filters out
// candidates with unknown compression.
func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) []prioritize.CandidateWithTime {
compressorName := blobinfocache.UnknownCompression
if v, ok := mem.compressors[digest]; ok {
compressorName = v
}
if compressorName == blobinfocache.UnknownCompression && v2Output {
return candidates
}
locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
for l, t := range locations {
compressorName, compressorKnown := mem.compressors[digest]
if !compressorKnown {
if requireCompressionInfo {
continue
}
compressorName = blobinfocache.UnknownCompression
if len(locations) > 0 {
for l, t := range locations {
candidates = append(candidates, prioritize.CandidateWithTime{
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressorName: compressorName,
Location: l,
},
LastSeen: t,
})
}
} else if v2Output {
candidates = append(candidates, prioritize.CandidateWithTime{
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressorName: compressorName,
Location: l,
Digest: digest,
CompressorName: compressorName,
UnknownLocation: true,
Location: types.BICLocationReference{Opaque: ""},
},
LastSeen: t,
LastSeen: time.Time{},
})
}
return candidates
@@ -166,7 +181,7 @@ func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types
return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, false))
}
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) that could possibly be reused
// within the specified (transport scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
@@ -176,23 +191,24 @@ func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope type
return mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, true)
}
func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 {
func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 {
mem.mutex.Lock()
defer mem.mutex.Unlock()
res := []prioritize.CandidateWithTime{}
res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, requireCompressionInfo)
res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, v2Output)
var uncompressedDigest digest.Digest // = ""
if canSubstitute {
if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
if otherDigests, ok := mem.digestsByUncompressed[uncompressedDigest]; ok {
otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
if otherDigests != nil {
for _, d := range otherDigests.Values() {
if d != primaryDigest && d != uncompressedDigest {
res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo)
res = mem.appendReplacementCandidates(res, transport, scope, d, v2Output)
}
}
}
if uncompressedDigest != primaryDigest {
res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, requireCompressionInfo)
res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, v2Output)
}
}
}
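To summarize the memory-cache change: with v2Output set, a digest whose compressor is unknown contributes no candidates at all; a digest with recorded locations contributes one candidate per location with its LastSeen time; and a digest with a known compressor but no recorded locations contributes a single UnknownLocation candidate with a zero LastSeen. A minimal consumer-side sketch of what that means for callers, using hypothetical stand-in types and values rather than the containers/image API:

package main

import "fmt"

// replacementCandidate is a simplified stand-in for
// blobinfocache.BICReplacementCandidate2 as it appears in the diff above.
type replacementCandidate struct {
	digest          string
	compressorName  string
	location        string // empty when unknownLocation is true
	unknownLocation bool
}

func main() {
	// Hypothetical CandidateLocations2 output: located candidates first,
	// unknown-location candidates (empty location, zero LastSeen) appended last.
	candidates := []replacementCandidate{
		{digest: "sha256:aaa", compressorName: "gzip", location: "pool/layers/aaa"},
		{digest: "sha256:bbb", compressorName: "zstd", unknownLocation: true},
	}
	for _, c := range candidates {
		if c.unknownLocation {
			// The cache knows the blob and its compressor but not where a copy
			// lives; a caller could still probe for the digest directly.
			fmt.Printf("probe for %s (compressed with %s)\n", c.digest, c.compressorName)
			continue
		}
		fmt.Printf("try reuse of %s at %s\n", c.digest, c.location)
	}
}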

View file

@@ -57,7 +57,7 @@ type cache struct {
// The database/sql package says “It is rarely necessary to close a DB.”, and steers towards a long-term *sql.DB connection pool.
// That's probably very applicable for database-backed services, where the database is the primary data store. That's not necessarily
// the case for callers of c/image, where image operations might be a small proportion of hte total runtime, and the cache is fairly
// the case for callers of c/image, where image operations might be a small proportion of the total runtime, and the cache is fairly
// incidental even to the image operations. It's also hard for us to use that model, because the public BlobInfoCache object doesn't have
// a Close method, so creating a lot of single-use caches could leak data.
//
@@ -117,7 +117,7 @@ func (sqc *cache) Open() {
if sqc.refCount == 0 {
db, err := rawOpen(sqc.path)
if err != nil {
logrus.Warnf("Error opening (previously-succesfully-opened) blob info cache at %q: %v", sqc.path, err)
logrus.Warnf("Error opening (previously-successfully-opened) blob info cache at %q: %v", sqc.path, err)
db = nil // But still increase sqc.refCount, because a .Close() will happen
}
sqc.db = db
@@ -171,7 +171,7 @@ func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (T, error) {
// dbTransaction calls fn within a read-write transaction in db.
func dbTransaction[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (T, error) {
// Ideally we should be able to distinguish between read-only and read-write transctions, see the _txlock=exclusive dicussion.
// Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive discussion.
var zeroRes T // A zero value of T
@@ -249,7 +249,7 @@ func ensureDBHasCurrentSchema(db *sql.DB) error {
// * Joins (the two that exist in appendReplacementCandidates) are based on the text representation of digests.
//
// Using integer primary keys might make the joins themselves a bit more efficient, but then we would need to involve an extra
// join to translate from/to the user-provided digests anyway. If anything, that extra join (potentialy more btree lookups)
// join to translate from/to the user-provided digests anyway. If anything, that extra join (potentially more btree lookups)
// is probably costlier than comparing a few more bytes of data.
//
// Perhaps more importantly, storing digest texts directly makes the database dumps much easier to read for humans without
@@ -427,11 +427,13 @@ func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressor
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}
// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) ([]prioritize.CandidateWithTime, error) {
// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest),
// and returns the result of appending them to candidates. v2Output allows including candidates with unknown
// location, and filters out candidates with unknown compression.
func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) ([]prioritize.CandidateWithTime, error) {
var rows *sql.Rows
var err error
if requireCompressionInfo {
if v2Output {
rows, err = tx.Query("SELECT location, time, compressor FROM KnownLocations JOIN DigestCompressors "+
"ON KnownLocations.digest = DigestCompressors.digest "+
"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
@@ -448,6 +450,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
}
defer rows.Close()
res := []prioritize.CandidateWithTime{}
for rows.Next() {
var location string
var time time.Time
@@ -455,7 +458,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
if err := rows.Scan(&location, &time, &compressorName); err != nil {
return nil, fmt.Errorf("scanning candidate: %w", err)
}
candidates = append(candidates, prioritize.CandidateWithTime{
res = append(res, prioritize.CandidateWithTime{
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressorName: compressorName,
@@ -467,10 +470,29 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
if err := rows.Err(); err != nil {
return nil, fmt.Errorf("iterating through locations: %w", err)
}
if len(res) == 0 && v2Output {
compressor, found, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", digest.String())
if err != nil {
return nil, fmt.Errorf("scanning compressorName: %w", err)
}
if found {
res = append(res, prioritize.CandidateWithTime{
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressorName: compressor,
UnknownLocation: true,
Location: types.BICLocationReference{Opaque: ""},
},
LastSeen: time.Time{},
})
}
}
candidates = append(candidates, res...)
return candidates, nil
}
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known)
// that could possibly be reused within the specified (transport scope) (if they still
// exist, which is not guaranteed).
//
@@ -483,11 +505,11 @@ func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope type
return sqc.candidateLocations(transport, scope, digest, canSubstitute, true)
}
func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 {
func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 {
var uncompressedDigest digest.Digest // = ""
res, err := transaction(sqc, func(tx *sql.Tx) ([]prioritize.CandidateWithTime, error) {
res := []prioritize.CandidateWithTime{}
res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, requireCompressionInfo)
res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, v2Output)
if err != nil {
return nil, err
}
@@ -516,7 +538,7 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
return nil, err
}
if otherDigest != primaryDigest && otherDigest != uncompressedDigest {
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, requireCompressionInfo)
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Output)
if err != nil {
return nil, err
}
@@ -527,7 +549,7 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
}
if uncompressedDigest != primaryDigest {
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, requireCompressionInfo)
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Output)
if err != nil {
return nil, err
}
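The SQLite backend implements the same fallback in two steps: the main query joins KnownLocations with DigestCompressors, and only when it returns no rows (and v2 output was requested) is DigestCompressors consulted on its own to emit a single location-less candidate. A rough, self-contained sketch of that lookup, assuming the table layout from the diff, a database/sql driver that can scan the stored timestamps into time.Time, and illustrative (not real-API) names:

package blobcachesketch

import (
	"database/sql"
	"time"
)

type candidate struct {
	Location        string
	LastSeen        time.Time
	Compressor      string
	UnknownLocation bool
}

// lookupCandidates first asks for located blobs; only if none exist does it
// fall back to the compressor table for an unknown-location candidate.
func lookupCandidates(tx *sql.Tx, transport, scope, digest string) ([]candidate, error) {
	rows, err := tx.Query("SELECT location, time, compressor FROM KnownLocations "+
		"JOIN DigestCompressors ON KnownLocations.digest = DigestCompressors.digest "+
		"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
		transport, scope, digest)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var res []candidate
	for rows.Next() {
		var c candidate
		if err := rows.Scan(&c.Location, &c.LastSeen, &c.Compressor); err != nil {
			return nil, err
		}
		res = append(res, c)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	if len(res) == 0 {
		// Fallback: no recorded location, but a known compressor still makes
		// the digest a usable (unknown-location) candidate.
		var compressor string
		switch err := tx.QueryRow("SELECT compressor FROM DigestCompressors WHERE digest = ?",
			digest).Scan(&compressor); err {
		case nil:
			res = append(res, candidate{Compressor: compressor, UnknownLocation: true})
		case sql.ErrNoRows:
			// Nothing known about this digest at all; return no candidates.
		default:
			return nil, err
		}
	}
	return res, nil
}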