Update osbuild/images to v0.79.0
Signed-off-by: Tomáš Hozza <thozza@redhat.com>
This commit is contained in:
parent 9fcbcdb5dc
commit 62d8ad4efe
340 changed files with 15526 additions and 2999 deletions

164 vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go generated vendored
@@ -25,57 +25,133 @@ const replacementAttempts = 5
 // This is a heuristic/guess, and could well use a different value.
 const replacementUnknownLocationAttempts = 2
 
-// CandidateCompression returns (true, compressionOp, compressionAlgo) if a blob
-// with compressorName (which can be Uncompressed or UnknownCompression) is acceptable for a CandidateLocations* call with v2Options.
+// CandidateTemplate is a subset of BICReplacementCandidate2 with data related to a specific digest,
+// which can be later combined with information about a location.
+type CandidateTemplate struct {
+	digest digest.Digest
+
+	compressionOperation   types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed
+	compressionAlgorithm   *compression.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
+	compressionAnnotations map[string]string      // If necessary, annotations necessary to use compressionAlgorithm
+}
+
+// CandidateTemplateWithCompression returns a CandidateTemplate if a blob with data is acceptable
+// for a CandidateLocations* call with v2Options.
 //
 // v2Options can be set to nil if the call is CandidateLocations (i.e. compression is not required to be known);
 // if not nil, the call is assumed to be CandidateLocations2.
-//
-// The (compressionOp, compressionAlgo) values are suitable for BICReplacementCandidate2
-func CandidateCompression(v2Options *blobinfocache.CandidateLocations2Options, digest digest.Digest, compressorName string) (bool, types.LayerCompression, *compression.Algorithm) {
+func CandidateTemplateWithCompression(v2Options *blobinfocache.CandidateLocations2Options, digest digest.Digest, data blobinfocache.DigestCompressorData) *CandidateTemplate {
 	if v2Options == nil {
-		return true, types.PreserveOriginal, nil // Anything goes. The (compressionOp, compressionAlgo) values are not used.
+		return &CandidateTemplate{ // Anything goes. The compressionOperation, compressionAlgorithm and compressionAnnotations values are not used.
+			digest: digest,
+		}
 	}
 
-	var op types.LayerCompression
-	var algo *compression.Algorithm
-	switch compressorName {
+	requiredCompression := "nil"
+	if v2Options.RequiredCompression != nil {
+		requiredCompression = v2Options.RequiredCompression.Name()
+	}
+	switch data.BaseVariantCompressor {
 	case blobinfocache.Uncompressed:
-		op = types.Decompress
-		algo = nil
+		if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
+			PossibleManifestFormats: v2Options.PossibleManifestFormats,
+			RequiredCompression:     v2Options.RequiredCompression,
+		}, nil) {
+			logrus.Debugf("Ignoring BlobInfoCache record of digest %q, uncompressed format does not match required %s or MIME types %#v",
+				digest.String(), requiredCompression, v2Options.PossibleManifestFormats)
+			return nil
+		}
+		return &CandidateTemplate{
+			digest:                 digest,
+			compressionOperation:   types.Decompress,
+			compressionAlgorithm:   nil,
+			compressionAnnotations: nil,
+		}
 	case blobinfocache.UnknownCompression:
 		logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unknown compression", digest.String())
-		return false, types.PreserveOriginal, nil // Not allowed with CandidateLocations2
+		return nil // Not allowed with CandidateLocations2
 	default:
-		op = types.Compress
-		algo_, err := compression.AlgorithmByName(compressorName)
+		// See if we can use the specific variant, first.
+		if data.SpecificVariantCompressor != blobinfocache.UnknownCompression {
+			algo, err := compression.AlgorithmByName(data.SpecificVariantCompressor)
+			if err != nil {
+				logrus.Debugf("Not considering unrecognized specific compression variant %q for BlobInfoCache record of digest %q: %v",
+					data.SpecificVariantCompressor, digest.String(), err)
+			} else {
+				if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
+					PossibleManifestFormats: v2Options.PossibleManifestFormats,
+					RequiredCompression:     v2Options.RequiredCompression,
+				}, &algo) {
+					logrus.Debugf("Ignoring specific compression variant %q for BlobInfoCache record of digest %q, it does not match required %s or MIME types %#v",
+						data.SpecificVariantCompressor, digest.String(), requiredCompression, v2Options.PossibleManifestFormats)
+				} else {
+					return &CandidateTemplate{
+						digest:                 digest,
+						compressionOperation:   types.Compress,
+						compressionAlgorithm:   &algo,
+						compressionAnnotations: data.SpecificVariantAnnotations,
+					}
+				}
+			}
+		}
+
+		// Try the base variant.
+		algo, err := compression.AlgorithmByName(data.BaseVariantCompressor)
 		if err != nil {
 			logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unrecognized compression %q: %v",
-				digest.String(), compressorName, err)
-			return false, types.PreserveOriginal, nil // The BICReplacementCandidate2.CompressionAlgorithm field is required
+				digest.String(), data.BaseVariantCompressor, err)
+			return nil // The BICReplacementCandidate2.CompressionAlgorithm field is required
 		}
-		algo = &algo_
-	}
-	if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
-		PossibleManifestFormats: v2Options.PossibleManifestFormats,
-		RequiredCompression:     v2Options.RequiredCompression,
-	}, algo) {
-		requiredCompresssion := "nil"
-		if v2Options.RequiredCompression != nil {
-			requiredCompresssion = v2Options.RequiredCompression.Name()
-		}
-		logrus.Debugf("Ignoring BlobInfoCache record of digest %q, compression %q does not match required %s or MIME types %#v",
-			digest.String(), compressorName, requiredCompresssion, v2Options.PossibleManifestFormats)
-		return false, types.PreserveOriginal, nil
+		if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
+			PossibleManifestFormats: v2Options.PossibleManifestFormats,
+			RequiredCompression:     v2Options.RequiredCompression,
+		}, &algo) {
+			logrus.Debugf("Ignoring BlobInfoCache record of digest %q, compression %q does not match required %s or MIME types %#v",
+				digest.String(), data.BaseVariantCompressor, requiredCompression, v2Options.PossibleManifestFormats)
+			return nil
+		}
+		return &CandidateTemplate{
+			digest:                 digest,
+			compressionOperation:   types.Compress,
+			compressionAlgorithm:   &algo,
+			compressionAnnotations: nil,
+		}
 	}
-
-	return true, op, algo
 }
 
 // CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
 type CandidateWithTime struct {
-	Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate
-	LastSeen  time.Time                              // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation)
+	candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate
+	lastSeen  time.Time                              // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation)
 }
 
+// CandidateWithLocation returns a complete CandidateWithTime combining (template from CandidateTemplateWithCompression, location, lastSeen)
+func (template CandidateTemplate) CandidateWithLocation(location types.BICLocationReference, lastSeen time.Time) CandidateWithTime {
+	return CandidateWithTime{
+		candidate: blobinfocache.BICReplacementCandidate2{
+			Digest:                 template.digest,
+			CompressionOperation:   template.compressionOperation,
+			CompressionAlgorithm:   template.compressionAlgorithm,
+			CompressionAnnotations: template.compressionAnnotations,
+			UnknownLocation:        false,
+			Location:               location,
+		},
+		lastSeen: lastSeen,
+	}
+}
+
+// CandidateWithUnknownLocation returns a complete CandidateWithTime for a template from CandidateTemplateWithCompression and an unknown location.
+func (template CandidateTemplate) CandidateWithUnknownLocation() CandidateWithTime {
+	return CandidateWithTime{
+		candidate: blobinfocache.BICReplacementCandidate2{
+			Digest:                 template.digest,
+			CompressionOperation:   template.compressionOperation,
+			CompressionAlgorithm:   template.compressionAlgorithm,
+			CompressionAnnotations: template.compressionAnnotations,
+			UnknownLocation:        true,
+			Location:               types.BICLocationReference{Opaque: ""},
+		},
+		lastSeen: time.Time{},
+	}
+}
+
 // candidateSortState is a closure for a comparison used by slices.SortFunc on candidates to prioritize,
@@ -91,35 +167,35 @@ func (css *candidateSortState) compare(xi, xj CandidateWithTime) int {
 	// Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order)
 
 	// First, deal with the primaryDigest/uncompressedDigest cases:
-	if xi.Candidate.Digest != xj.Candidate.Digest {
+	if xi.candidate.Digest != xj.candidate.Digest {
 		// - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter
-		if xi.Candidate.Digest == css.primaryDigest {
+		if xi.candidate.Digest == css.primaryDigest {
 			return -1
 		}
-		if xj.Candidate.Digest == css.primaryDigest {
+		if xj.candidate.Digest == css.primaryDigest {
 			return 1
 		}
 		if css.uncompressedDigest != "" {
-			if xi.Candidate.Digest == css.uncompressedDigest {
+			if xi.candidate.Digest == css.uncompressedDigest {
 				return 1
 			}
-			if xj.Candidate.Digest == css.uncompressedDigest {
+			if xj.candidate.Digest == css.uncompressedDigest {
 				return -1
 			}
 		}
 	} else { // xi.Candidate.Digest == xj.Candidate.Digest
 		// The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time
-		if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) {
-			return -xi.LastSeen.Compare(xj.LastSeen)
+		if xi.candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.candidate.Digest == css.uncompressedDigest) {
+			return -xi.lastSeen.Compare(xj.lastSeen)
 		}
 	}
 
 	// Neither of the digests are primaryDigest/uncompressedDigest:
-	if cmp := xi.LastSeen.Compare(xj.LastSeen); cmp != 0 { // Order primarily by time
+	if cmp := xi.lastSeen.Compare(xj.lastSeen); cmp != 0 { // Order primarily by time
 		return -cmp
 	}
 	// Fall back to digest, if timestamps end up _exactly_ the same (how?!)
-	return cmp.Compare(xi.Candidate.Digest, xj.Candidate.Digest)
+	return cmp.Compare(xi.candidate.Digest, xj.candidate.Digest)
 }
 
 // destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the
@@ -138,7 +214,7 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime,
 		uncompressedDigest: uncompressedDigest,
 	}).compare)
 	for _, candidate := range cs {
-		if candidate.Candidate.UnknownLocation {
+		if candidate.candidate.UnknownLocation {
 			unknownLocationCandidates = append(unknownLocationCandidates, candidate)
 		} else {
 			knownLocationCandidates = append(knownLocationCandidates, candidate)
@@ -150,11 +226,11 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime,
 	unknownLocationCandidatesUsed := min(noLocationLimit, remainingCapacity, len(unknownLocationCandidates))
 	res := make([]blobinfocache.BICReplacementCandidate2, knownLocationCandidatesUsed)
 	for i := 0; i < knownLocationCandidatesUsed; i++ {
-		res[i] = knownLocationCandidates[i].Candidate
+		res[i] = knownLocationCandidates[i].candidate
 	}
 	// If candidates with unknown location are found, lets add them to final list
 	for i := 0; i < unknownLocationCandidatesUsed; i++ {
-		res = append(res, unknownLocationCandidates[i].Candidate)
+		res = append(res, unknownLocationCandidates[i].candidate)
 	}
 	return res
 }
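
Note: the template split above means compression and manifest-format filtering runs once per digest, and per-location candidates become cheap to build. A minimal sketch of the intended call pattern, mirroring how memory.go and sqlite.go below use it; `knownLocations` here is a hypothetical stand-in for a backend's location store, not an identifier from the diff:

	// Sketch only: assumes the types used in this file and a hypothetical
	// knownLocations map[types.BICLocationReference]time.Time for one digest.
	template := prioritize.CandidateTemplateWithCompression(v2Options, digest, data)
	if template == nil {
		return candidates // digest unusable under v2Options; skip all its locations
	}
	for loc, lastSeen := range knownLocations {
		candidates = append(candidates, template.CandidateWithLocation(loc, lastSeen))
	}
	if len(knownLocations) == 0 && v2Options != nil {
		candidates = append(candidates, template.CandidateWithUnknownLocation())
	}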

114 vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go generated vendored
@@ -24,10 +24,11 @@ type locationKey struct {
 type cache struct {
 	mutex sync.Mutex
 	// The following fields can only be accessed with mutex held.
-	uncompressedDigests   map[digest.Digest]digest.Digest
-	digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest]                // stores a set of digests for each uncompressed digest
-	knownLocations        map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
-	compressors           map[digest.Digest]string                                 // stores a compressor name, or blobinfocache.Uncompressed (not blobinfocache.UnknownCompression), for each digest
+	uncompressedDigests      map[digest.Digest]digest.Digest
+	uncompressedDigestsByTOC map[digest.Digest]digest.Digest
+	digestsByUncompressed    map[digest.Digest]*set.Set[digest.Digest]                // stores a set of digests for each uncompressed digest
+	knownLocations           map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
+	compressors              map[digest.Digest]blobinfocache.DigestCompressorData     // stores compression data for each digest; BaseVariantCompressor != UnknownCompression
 }
 
 // New returns a BlobInfoCache implementation which is in-memory only.
@@ -44,10 +45,11 @@ func New() types.BlobInfoCache {
 
 func new2() *cache {
 	return &cache{
-		uncompressedDigests:   map[digest.Digest]digest.Digest{},
-		digestsByUncompressed: map[digest.Digest]*set.Set[digest.Digest]{},
-		knownLocations:        map[locationKey]map[types.BICLocationReference]time.Time{},
-		compressors:           map[digest.Digest]string{},
+		uncompressedDigests:      map[digest.Digest]digest.Digest{},
+		uncompressedDigestsByTOC: map[digest.Digest]digest.Digest{},
+		digestsByUncompressed:    map[digest.Digest]*set.Set[digest.Digest]{},
+		knownLocations:           map[locationKey]map[types.BICLocationReference]time.Time{},
+		compressors:              map[digest.Digest]blobinfocache.DigestCompressorData{},
 	}
 }
 
@@ -104,6 +106,30 @@ func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre
 	anyDigestSet.Add(anyDigest)
 }
 
+// UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest.
+// Returns "" if the uncompressed digest is unknown.
+func (mem *cache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	if d, ok := mem.uncompressedDigestsByTOC[tocDigest]; ok {
+		return d
+	}
+	return ""
+}
+
+// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (mem *cache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	if previous, ok := mem.uncompressedDigestsByTOC[tocDigest]; ok && previous != uncompressed {
+		logrus.Warnf("Uncompressed digest for blob with TOC %q previously recorded as %q, now %q", tocDigest, previous, uncompressed)
+	}
+	mem.uncompressedDigestsByTOC[tocDigest] = uncompressed
+}
+
 // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
 // and can be reused given the opaque location data.
 func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
@@ -118,19 +144,40 @@ func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope type
 	locationScope[location] = time.Now() // Possibly overwriting an older entry.
 }
 
-// RecordDigestCompressorName records that the blob with the specified digest is either compressed with the specified
-// algorithm, or uncompressed, or that we no longer know.
-func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compressorName string) {
+// RecordDigestCompressorData records data for the blob with the specified digest.
+// WARNING: Only call this with LOCALLY VERIFIED data:
+//   - don’t record a compressor for a digest just because some remote author claims so
+//     (e.g. because a manifest says so);
+//   - don’t record the non-base variant or annotations if we are not _sure_ that the base variant
+//     and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them
+//     in a manifest)
+//
+// otherwise the cache could be poisoned and cause us to make incorrect edits to type
+// information in a manifest.
+func (mem *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobinfocache.DigestCompressorData) {
 	mem.mutex.Lock()
 	defer mem.mutex.Unlock()
-	if previous, ok := mem.compressors[blobDigest]; ok && previous != compressorName {
-		logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", blobDigest, previous, compressorName)
+	if previous, ok := mem.compressors[anyDigest]; ok {
+		if previous.BaseVariantCompressor != data.BaseVariantCompressor {
+			logrus.Warnf("Base compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.BaseVariantCompressor, data.BaseVariantCompressor)
+		} else if previous.SpecificVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor != blobinfocache.UnknownCompression &&
+			previous.SpecificVariantCompressor != data.SpecificVariantCompressor {
+			logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.SpecificVariantCompressor, data.SpecificVariantCompressor)
+		}
+		// We don’t check SpecificVariantAnnotations for equality, it’s possible that their generation is not deterministic.
+
+		// Preserve specific variant information if the incoming data does not have it.
+		if data.BaseVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor == blobinfocache.UnknownCompression &&
+			previous.SpecificVariantCompressor != blobinfocache.UnknownCompression {
+			data.SpecificVariantCompressor = previous.SpecificVariantCompressor
+			data.SpecificVariantAnnotations = previous.SpecificVariantAnnotations
+		}
 	}
-	if compressorName == blobinfocache.UnknownCompression {
-		delete(mem.compressors, blobDigest)
+	if data.BaseVariantCompressor == blobinfocache.UnknownCompression {
+		delete(mem.compressors, anyDigest)
 		return
 	}
-	mem.compressors[blobDigest] = compressorName
+	mem.compressors[anyDigest] = data
 }
 
 // appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory
@@ -140,38 +187,25 @@ func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compresso
 // with unknown compression.
 func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest,
 	v2Options *blobinfocache.CandidateLocations2Options) []prioritize.CandidateWithTime {
-	compressorName := blobinfocache.UnknownCompression
-	if v, ok := mem.compressors[digest]; ok {
-		compressorName = v
+	compressionData := blobinfocache.DigestCompressorData{
+		BaseVariantCompressor:      blobinfocache.UnknownCompression,
+		SpecificVariantCompressor:  blobinfocache.UnknownCompression,
+		SpecificVariantAnnotations: nil,
 	}
-	ok, compressionOp, compressionAlgo := prioritize.CandidateCompression(v2Options, digest, compressorName)
-	if !ok {
+	if v, ok := mem.compressors[digest]; ok {
+		compressionData = v
+	}
+	template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData)
+	if template == nil {
 		return candidates
 	}
 	locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
 	if len(locations) > 0 {
 		for l, t := range locations {
-			candidates = append(candidates, prioritize.CandidateWithTime{
-				Candidate: blobinfocache.BICReplacementCandidate2{
-					Digest:               digest,
-					CompressionOperation: compressionOp,
-					CompressionAlgorithm: compressionAlgo,
-					Location:             l,
-				},
-				LastSeen: t,
-			})
+			candidates = append(candidates, template.CandidateWithLocation(l, t))
 		}
 	} else if v2Options != nil {
-		candidates = append(candidates, prioritize.CandidateWithTime{
-			Candidate: blobinfocache.BICReplacementCandidate2{
-				Digest:               digest,
-				CompressionOperation: compressionOp,
-				CompressionAlgorithm: compressionAlgo,
-				UnknownLocation:      true,
-				Location:             types.BICLocationReference{Opaque: ""},
-			},
-			LastSeen: time.Time{},
-		})
+		candidates = append(candidates, template.CandidateWithUnknownLocation())
 	}
 	return candidates
 }
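
Note: RecordDigestCompressorData now merges rather than overwrites. A hedged illustration of the semantics in the hunk above; `bic` is assumed to be any implementation of the internal BlobInfoCache2 interface, and `d` and `tocAnnotations` are placeholders:

	// First record knows the specific zstd:chunked variant.
	bic.RecordDigestCompressorData(d, blobinfocache.DigestCompressorData{
		BaseVariantCompressor:      "zstd",
		SpecificVariantCompressor:  "zstd:chunked",
		SpecificVariantAnnotations: tocAnnotations,
	})
	// A later record that only knows the base variant...
	bic.RecordDigestCompressorData(d, blobinfocache.DigestCompressorData{
		BaseVariantCompressor:      "zstd",
		SpecificVariantCompressor:  blobinfocache.UnknownCompression,
		SpecificVariantAnnotations: nil,
	})
	// ...does not clobber it: the "Preserve specific variant information" branch
	// keeps zstd:chunked and its annotations. Recording UnknownCompression as the
	// base variant still deletes the entry outright.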

13 vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go generated vendored
@@ -34,6 +34,19 @@ func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
 func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
 }
 
+// UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest.
+// Returns "" if the uncompressed digest is unknown.
+func (noCache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
+	return ""
+}
+
+// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (noCache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
+}
+
 // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
 // and can be reused given the opaque location data.
 func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
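
Note: the no-op stubs keep the interface total, so callers never need nil checks before the new TOC methods. A hedged sketch of the consumer-side pattern; `cache` is whatever BlobInfoCache2 the caller was handed, possibly this no-op one, and the digest names are illustrative:

	// With the none implementation, the record is silently discarded and the
	// lookup always returns "".
	cache.RecordTOCUncompressedPair(tocDigest, locallyVerifiedUncompressed)
	if uncompressed := cache.UncompressedDigestForTOC(tocDigest); uncompressed != "" {
		// Safe to treat uncompressed as the DiffID for this TOC.
	}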

230 vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go generated vendored
@@ -3,6 +3,7 @@ package sqlite
 
 import (
 	"database/sql"
+	"encoding/json"
 	"errors"
 	"fmt"
 	"sync"
@@ -295,6 +296,24 @@ func ensureDBHasCurrentSchema(db *sql.DB) error {
 			`PRIMARY KEY (transport, scope, digest, location)
 		)`,
 		},
+		{
+			"DigestTOCUncompressedPairs",
+			`CREATE TABLE IF NOT EXISTS DigestTOCUncompressedPairs(` +
+				// index implied by PRIMARY KEY
+				`tocDigest	TEXT PRIMARY KEY NOT NULL,` +
+				`uncompressedDigest	TEXT NOT NULL
+		)`,
+		},
+		{
+			"DigestSpecificVariantCompressors", // If changing the schema incompatibly, merge this with DigestCompressors.
+			`CREATE TABLE IF NOT EXISTS DigestSpecificVariantCompressors(` +
+				// index implied by PRIMARY KEY
+				`digest	TEXT PRIMARY KEY NOT NULL,` +
+				// The compressor is not `UnknownCompression`.
+				`specificVariantCompressor	TEXT NOT NULL,
+		specificVariantAnnotations	BLOB NOT NULL
+		)`,
+		},
 	}
 
 	_, err := dbTransaction(db, func(tx *sql.Tx) (void, error) {
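
Note: both new tables ride the existing create-only migration list, so an older cache file gains them on first open without touching DigestCompressors rows; the comment on DigestSpecificVariantCompressors records that the split exists only for schema compatibility. A minimal sketch of how a tool might confirm the upgrade, assuming `db` is a *sql.DB opened on an existing cache file (sqlite_master is standard SQLite metadata, not part of this diff):

	// Scans 2 once the new tables exist; the CREATE TABLE IF NOT EXISTS
	// statements above are no-ops on every subsequent open.
	var n int
	err := db.QueryRow(`SELECT COUNT(*) FROM sqlite_master WHERE type = 'table'
		AND name IN ('DigestTOCUncompressedPairs', 'DigestSpecificVariantCompressors')`).Scan(&n)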
@@ -385,6 +404,57 @@ func (sqc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre
 	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
 }
 
+// UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest.
+// Returns "" if the uncompressed digest is unknown.
+func (sqc *cache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
+	res, err := transaction(sqc, func(tx *sql.Tx) (digest.Digest, error) {
+		uncompressedString, found, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestTOCUncompressedPairs WHERE tocDigest = ?", tocDigest.String())
+		if err != nil {
+			return "", err
+		}
+		if found {
+			d, err := digest.Parse(uncompressedString)
+			if err != nil {
+				return "", err
+			}
+			return d, nil
+
+		}
+		return "", nil
+	})
+	if err != nil {
+		return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
+	}
+	return res
+}
+
+// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (sqc *cache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
+	_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
+		previousString, gotPrevious, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestTOCUncompressedPairs WHERE tocDigest = ?", tocDigest.String())
+		if err != nil {
+			return void{}, fmt.Errorf("looking for uncompressed digest for blob with TOC %q", tocDigest)
+		}
+		if gotPrevious {
+			previous, err := digest.Parse(previousString)
+			if err != nil {
+				return void{}, err
+			}
+			if previous != uncompressed {
+				logrus.Warnf("Uncompressed digest for blob with TOC %q previously recorded as %q, now %q", tocDigest, previous, uncompressed)
+			}
+		}
+		if _, err := tx.Exec("INSERT OR REPLACE INTO DigestTOCUncompressedPairs(tocDigest, uncompressedDigest) VALUES (?, ?)",
+			tocDigest.String(), uncompressed.String()); err != nil {
+			return void{}, fmt.Errorf("recording uncompressed digest %q for blob with TOC %q: %w", uncompressed, tocDigest, err)
+		}
+		return void{}, nil
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
 // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
 // and can be reused given the opaque location data.
 func (sqc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, location types.BICLocationReference) {
@@ -398,29 +468,58 @@ func (sqc *cache) RecordKnownLocation(transport types.ImageTransport, scope type
 	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
 }
 
-// RecordDigestCompressorName records a compressor for the blob with the specified digest,
-// or Uncompressed or UnknownCompression.
-// WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a
-// digest just because some remote author claims so (e.g. because a manifest says so);
+// RecordDigestCompressorData records data for the blob with the specified digest.
+// WARNING: Only call this with LOCALLY VERIFIED data:
+//   - don’t record a compressor for a digest just because some remote author claims so
+//     (e.g. because a manifest says so);
+//   - don’t record the non-base variant or annotations if we are not _sure_ that the base variant
+//     and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them
+//     in a manifest)
+//
 // otherwise the cache could be poisoned and cause us to make incorrect edits to type
 // information in a manifest.
-func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
+func (sqc *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobinfocache.DigestCompressorData) {
 	_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
 		previous, gotPrevious, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", anyDigest.String())
 		if err != nil {
-			return void{}, fmt.Errorf("looking for compressor of for %q", anyDigest)
+			return void{}, fmt.Errorf("looking for compressor of %q", anyDigest)
 		}
-		if gotPrevious && previous != compressorName {
-			logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, compressorName)
+		warned := false
+		if gotPrevious && previous != data.BaseVariantCompressor {
+			logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, data.BaseVariantCompressor)
+			warned = true
 		}
-		if compressorName == blobinfocache.UnknownCompression {
+		if data.BaseVariantCompressor == blobinfocache.UnknownCompression {
 			if _, err := tx.Exec("DELETE FROM DigestCompressors WHERE digest = ?", anyDigest.String()); err != nil {
 				return void{}, fmt.Errorf("deleting compressor for digest %q: %w", anyDigest, err)
 			}
+			if _, err := tx.Exec("DELETE FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String()); err != nil {
+				return void{}, fmt.Errorf("deleting specific variant compressor for digest %q: %w", anyDigest, err)
+			}
 		} else {
 			if _, err := tx.Exec("INSERT OR REPLACE INTO DigestCompressors(digest, compressor) VALUES (?, ?)",
-				anyDigest.String(), compressorName); err != nil {
-				return void{}, fmt.Errorf("recording compressor %q for %q: %w", compressorName, anyDigest, err)
+				anyDigest.String(), data.BaseVariantCompressor); err != nil {
+				return void{}, fmt.Errorf("recording compressor %q for %q: %w", data.BaseVariantCompressor, anyDigest, err)
 			}
 		}
+
+		if data.SpecificVariantCompressor != blobinfocache.UnknownCompression {
+			if !warned { // Don’t warn twice about the same digest
+				prevSVC, found, err := querySingleValue[string](tx, "SELECT specificVariantCompressor FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String())
+				if err != nil {
+					return void{}, fmt.Errorf("looking for specific variant compressor of %q", anyDigest)
+				}
+				if found && data.SpecificVariantCompressor != prevSVC {
+					logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, prevSVC, data.SpecificVariantCompressor)
+				}
+			}
+			annotations, err := json.Marshal(data.SpecificVariantAnnotations)
+			if err != nil {
+				return void{}, err
+			}
+			if _, err := tx.Exec("INSERT OR REPLACE INTO DigestSpecificVariantCompressors(digest, specificVariantCompressor, specificVariantAnnotations) VALUES (?, ?, ?)",
+				anyDigest.String(), data.SpecificVariantCompressor, annotations); err != nil {
+				return void{}, fmt.Errorf("recording specific variant compressor %q/%q for %q: %w", data.SpecificVariantCompressor, annotations, anyDigest, err)
+			}
+		}
 		return void{}, nil
@@ -433,18 +532,33 @@ func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressor
 // with unknown compression.
 func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest,
 	v2Options *blobinfocache.CandidateLocations2Options) ([]prioritize.CandidateWithTime, error) {
-	compressorName := blobinfocache.UnknownCompression
+	compressionData := blobinfocache.DigestCompressorData{
+		BaseVariantCompressor:      blobinfocache.UnknownCompression,
+		SpecificVariantCompressor:  blobinfocache.UnknownCompression,
+		SpecificVariantAnnotations: nil,
+	}
 	if v2Options != nil {
-		compressor, found, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", digest.String())
-		if err != nil {
-			return nil, fmt.Errorf("scanning compressorName: %w", err)
-		}
-		if found {
-			compressorName = compressor
+		var baseVariantCompressor string
+		var specificVariantCompressor sql.NullString
+		var annotationBytes []byte
+		switch err := tx.QueryRow("SELECT compressor, specificVariantCompressor, specificVariantAnnotations "+
+			"FROM DigestCompressors LEFT JOIN DigestSpecificVariantCompressors USING (digest) WHERE digest = ?", digest.String()).
+			Scan(&baseVariantCompressor, &specificVariantCompressor, &annotationBytes); {
+		case errors.Is(err, sql.ErrNoRows): // Do nothing
+		case err != nil:
+			return nil, fmt.Errorf("scanning compressor data: %w", err)
+		default:
+			compressionData.BaseVariantCompressor = baseVariantCompressor
+			if specificVariantCompressor.Valid && annotationBytes != nil {
+				compressionData.SpecificVariantCompressor = specificVariantCompressor.String
+				if err := json.Unmarshal(annotationBytes, &compressionData.SpecificVariantAnnotations); err != nil {
+					return nil, err
+				}
+			}
 		}
 	}
-	ok, compressionOp, compressionAlgo := prioritize.CandidateCompression(v2Options, digest, compressorName)
-	if !ok {
+	template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData)
+	if template == nil {
 		return candidates, nil
 	}
 
@@ -463,15 +577,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
 		if err := rows.Scan(&location, &time); err != nil {
 			return nil, fmt.Errorf("scanning candidate: %w", err)
 		}
-		candidates = append(candidates, prioritize.CandidateWithTime{
-			Candidate: blobinfocache.BICReplacementCandidate2{
-				Digest:               digest,
-				CompressionOperation: compressionOp,
-				CompressionAlgorithm: compressionAlgo,
-				Location:             types.BICLocationReference{Opaque: location},
-			},
-			LastSeen: time,
-		})
+		candidates = append(candidates, template.CandidateWithLocation(types.BICLocationReference{Opaque: location}, time))
 		rowAdded = true
 	}
 	if err := rows.Err(); err != nil {
@@ -479,16 +585,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
 	}
 
 	if !rowAdded && v2Options != nil {
-		candidates = append(candidates, prioritize.CandidateWithTime{
-			Candidate: blobinfocache.BICReplacementCandidate2{
-				Digest:               digest,
-				CompressionOperation: compressionOp,
-				CompressionAlgorithm: compressionAlgo,
-				UnknownLocation:      true,
-				Location:             types.BICLocationReference{Opaque: ""},
-			},
-			LastSeen: time.Time{},
-		})
+		candidates = append(candidates, template.CandidateWithUnknownLocation())
 	}
 	return candidates, nil
 }
@@ -516,40 +613,41 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
 		if err != nil {
 			return nil, err
 		}
-
-		// FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries.
-		// (In the extreme, we could turn _everything_ this function does into a single query.
-		// And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.)
-		// For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations.
-		rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String())
-		if err != nil {
-			return nil, fmt.Errorf("querying for other digests: %w", err)
-		}
-		defer rows.Close()
-		for rows.Next() {
-			var otherDigestString string
-			if err := rows.Scan(&otherDigestString); err != nil {
-				return nil, fmt.Errorf("scanning other digest: %w", err)
-			}
-			otherDigest, err := digest.Parse(otherDigestString)
-			if err != nil {
-				return nil, err
-			}
-			if otherDigest != primaryDigest && otherDigest != uncompressedDigest {
-				res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options)
-				if err != nil {
-					return nil, err
-				}
-			}
-		}
-		if err := rows.Err(); err != nil {
-			return nil, fmt.Errorf("iterating through other digests: %w", err)
-		}
-
-		if uncompressedDigest != primaryDigest {
-			res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options)
-			if err != nil {
-				return nil, err
+		if uncompressedDigest != "" {
+			// FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries.
+			// (In the extreme, we could turn _everything_ this function does into a single query.
+			// And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.)
+			// For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations.
+			rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String())
+			if err != nil {
+				return nil, fmt.Errorf("querying for other digests: %w", err)
+			}
+			defer rows.Close()
+			for rows.Next() {
+				var otherDigestString string
+				if err := rows.Scan(&otherDigestString); err != nil {
+					return nil, fmt.Errorf("scanning other digest: %w", err)
+				}
+				otherDigest, err := digest.Parse(otherDigestString)
+				if err != nil {
+					return nil, err
+				}
+				if otherDigest != primaryDigest && otherDigest != uncompressedDigest {
+					res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options)
+					if err != nil {
+						return nil, err
+					}
+				}
+			}
+			if err := rows.Err(); err != nil {
+				return nil, fmt.Errorf("iterating through other digests: %w", err)
+			}
+
+			if uncompressedDigest != primaryDigest {
+				res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options)
+				if err != nil {
+					return nil, err
+				}
 			}
 		}
 	}
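
Note: the read path reassembles the two tables with a LEFT JOIN, so a digest with only a base variant still yields a row, with NULLs in the specific-variant columns; that is why the scan goes through sql.NullString and a nil-able []byte. A standalone sketch of the same read outside the cache type, with `db` and `digestStr` as placeholders:

	var base string
	var specific sql.NullString
	var annotationBytes []byte
	switch err := db.QueryRow(
		`SELECT compressor, specificVariantCompressor, specificVariantAnnotations
		 FROM DigestCompressors LEFT JOIN DigestSpecificVariantCompressors USING (digest)
		 WHERE digest = ?`, digestStr).Scan(&base, &specific, &annotationBytes); {
	case errors.Is(err, sql.ErrNoRows):
		// Nothing recorded for this digest at all.
	case err != nil:
		// Real failure; handle err.
	default:
		if specific.Valid { // A specific variant (e.g. zstd:chunked) was recorded too.
			var annotations map[string]string
			_ = json.Unmarshal(annotationBytes, &annotations)
		}
	}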