go.mod: update osbuild/images to v0.69.0

This commit is contained in:
Achilleas Koutsou 2024-07-02 14:42:15 +02:00
parent 1cc90c6a0b
commit 8ac80e8abc
611 changed files with 28281 additions and 32629 deletions

View file

@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"io"
"maps"
internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/manifest"
@ -12,7 +13,6 @@ import (
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
)
var (
@ -287,7 +287,7 @@ func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCom
maps.Copy(*annotations, d.uploadedAnnotations)
}
// recordValidatedBlobData updates b.blobInfoCache with data about the created uploadedInfo (as returned by PutBlob)
// recordValidatedDigestData updates b.blobInfoCache with data about the created uploadedInfo (as returned by PutBlob)
// and the original srcInfo (which the caller guarantees has been validated).
// This must ONLY be called if all data has been validated by OUR code, and is not coming from third parties.
func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInfo types.BlobInfo, srcInfo types.BlobInfo,

View file

@ -6,6 +6,7 @@ import (
"fmt"
"io"
"os"
"slices"
"time"
"github.com/containers/image/v5/docker/reference"
@ -25,7 +26,6 @@ import (
encconfig "github.com/containers/ocicrypt/config"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
"golang.org/x/sync/semaphore"
"golang.org/x/term"
)

View file

@ -2,13 +2,13 @@ package copy
import (
"fmt"
"maps"
"slices"
"strings"
"github.com/containers/image/v5/types"
"github.com/containers/ocicrypt"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
// isOciEncrypted returns a bool indicating if a mediatype is encrypted
@ -47,13 +47,17 @@ func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo
desc := imgspecv1.Descriptor{
Annotations: stream.info.Annotations,
}
reader, decryptedDigest, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false)
// DecryptLayer supposedly returns a digest of the decrypted stream.
// In pratice, that value is never set in the current implementation.
// And we shouldnt use it anyway, because it is not trusted: encryption can be made to a public key,
// i.e. it doesnt authenticate the origin of the metadata in any way.
reader, _, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false)
if err != nil {
return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err)
}
stream.reader = reader
stream.info.Digest = decryptedDigest
stream.info.Digest = ""
stream.info.Size = -1
maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool {
return strings.HasPrefix(k, "org.opencontainers.image.enc")

View file

@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"slices"
"strings"
internalManifest "github.com/containers/image/v5/internal/manifest"
@ -13,7 +14,6 @@ import (
"github.com/containers/image/v5/types"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert.
@ -74,7 +74,7 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
srcType := in.srcMIMEType
normalizedSrcType := manifest.NormalizedMIMEType(srcType)
if srcType != normalizedSrcType {
logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType)
logrus.Debugf("Source manifest MIME type %q, treating it as %q", srcType, normalizedSrcType)
srcType = normalizedSrcType
}
@ -237,7 +237,7 @@ func (c *copier) determineListConversion(currentListMIMEType string, destSupport
}
}
logrus.Debugf("Manifest list has MIME type %s, ordered candidate list [%s]", currentListMIMEType, strings.Join(destSupportedMIMETypes, ", "))
logrus.Debugf("Manifest list has MIME type %q, ordered candidate list [%s]", currentListMIMEType, strings.Join(destSupportedMIMETypes, ", "))
if len(prioritizedTypes.list) == 0 {
return "", nil, fmt.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes)
}

View file

@ -5,6 +5,8 @@ import (
"context"
"errors"
"fmt"
"maps"
"slices"
"sort"
"strings"
@ -17,8 +19,6 @@ import (
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
type instanceCopyKind int

View file

@ -4,6 +4,7 @@ import (
"context"
"fmt"
"io"
"math"
"time"
"github.com/containers/image/v5/internal/private"
@ -151,12 +152,18 @@ type blobChunkAccessorProxy struct {
// The specified chunks must be not overlapping and sorted by their offset.
// The readers must be fully consumed, in the order they are returned, before blocking
// to read the next chunk.
// If the Length for the last chunk is set to math.MaxUint64, then it
// fully fetches the remaining data from the offset to the end of the blob.
func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
start := time.Now()
rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks)
if err == nil {
total := int64(0)
for _, c := range chunks {
// do not update the progress bar if there is a chunk with unknown length.
if c.Length == math.MaxUint64 {
return rc, errs, err
}
total += int64(c.Length)
}
s.bar.EwmaIncrInt64(total, time.Since(start))

View file

@ -7,6 +7,7 @@ import (
"fmt"
"io"
"reflect"
"slices"
"strings"
"sync"
@ -25,7 +26,6 @@ import (
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"github.com/vbauerster/mpb/v8"
"golang.org/x/exp/slices"
)
// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
@ -707,7 +707,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
canChangeLayerCompression := ic.src.CanChangeLayerCompression(srcInfo.MediaType)
logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v",
srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression)
canSubstitute := ic.canSubstituteBlobs && ic.src.CanChangeLayerCompression(srcInfo.MediaType)
canSubstitute := ic.canSubstituteBlobs && canChangeLayerCompression
var requiredCompression *compressiontypes.Algorithm
if ic.requireCompressionFormatMatch {

View file

@ -15,6 +15,7 @@ import (
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/fileutils"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
@ -263,7 +264,7 @@ func (d *dirImageDestination) Commit(context.Context, types.UnparsedImage) error
// returns true if path exists
func pathExists(path string) (bool, error) {
_, err := os.Stat(path)
err := fileutils.Exists(path)
if err == nil {
return true, nil
}

View file

@ -4,6 +4,8 @@ import (
"fmt"
"os"
"path/filepath"
"github.com/containers/storage/pkg/fileutils"
)
// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path.
@ -11,7 +13,7 @@ import (
// a non-existent name (but not a symlink pointing to a non-existent name)
// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc.
func ResolvePathToFullyExplicit(path string) (string, error) {
switch _, err := os.Lstat(path); {
switch err := fileutils.Lexists(path); {
case err == nil:
return resolveExistingPathToFullyExplicit(path)
case os.IsNotExist(err):

View file

@ -78,7 +78,7 @@ func (r *Reader) List() ([][]types.ImageReference, error) {
}
nt, ok := parsedTag.(reference.NamedTagged)
if !ok {
return nil, fmt.Errorf("Invalid tag %s (%s): does not contain a tag", tag, parsedTag.String())
return nil, fmt.Errorf("Invalid tag %q (%s): does not contain a tag", tag, parsedTag.String())
}
ref, err := newReference(r.path, nt, -1, r.archive, nil)
if err != nil {

View file

@ -116,7 +116,7 @@ func imageLoad(ctx context.Context, c *client.Client, reader *io.PipeReader) err
return fmt.Errorf("parsing docker load progress: %w", err)
}
if msg.Error != nil {
return fmt.Errorf("docker engine reported: %s", msg.Error.Message)
return fmt.Errorf("docker engine reported: %q", msg.Error.Message)
}
}
return nil // No error reported = success

View file

@ -21,10 +21,10 @@ import (
"fmt"
"io"
"net/http"
"slices"
"github.com/docker/distribution/registry/api/errcode"
dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge"
"golang.org/x/exp/slices"
)
// errNoErrorsInBody is returned when an HTTP response body parses to an empty

View file

@ -18,6 +18,7 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/internal/multierr"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/internal/useragent"
"github.com/containers/image/v5/manifest"
@ -25,6 +26,7 @@ import (
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/pkg/tlsclientconfig"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
"github.com/docker/distribution/registry/api/errcode"
v2 "github.com/docker/distribution/registry/api/v2"
@ -186,7 +188,7 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
}
fullCertDirPath = filepath.Join(hostCertDir, hostPort)
_, err := os.Stat(fullCertDirPath)
err := fileutils.Exists(fullCertDirPath)
if err == nil {
break
}
@ -497,8 +499,8 @@ func (c *dockerClient) resolveRequestURL(path string) (*url.URL, error) {
// Checks if the auth headers in the response contain an indication of a failed
// authorizdation because of an "insufficient_scope" error. If that's the case,
// returns the required scope to be used for fetching a new token.
func needsRetryWithUpdatedScope(err error, res *http.Response) (bool, *authScope) {
if err == nil && res.StatusCode == http.StatusUnauthorized {
func needsRetryWithUpdatedScope(res *http.Response) (bool, *authScope) {
if res.StatusCode == http.StatusUnauthorized {
challenges := parseAuthHeader(res.Header)
for _, challenge := range challenges {
if challenge.Scheme == "bearer" {
@ -557,6 +559,9 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method stri
attempts := 0
for {
res, err := c.makeRequestToResolvedURLOnce(ctx, method, requestURL, headers, stream, streamLen, auth, extraScope)
if err != nil {
return nil, err
}
attempts++
// By default we use pre-defined scopes per operation. In
@ -572,27 +577,29 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method stri
// We also cannot retry with a body (stream != nil) as stream
// was already read
if attempts == 1 && stream == nil && auth != noAuth {
if retry, newScope := needsRetryWithUpdatedScope(err, res); retry {
if retry, newScope := needsRetryWithUpdatedScope(res); retry {
logrus.Debug("Detected insufficient_scope error, will retry request with updated scope")
res.Body.Close()
// Note: This retry ignores extraScope. Thats, strictly speaking, incorrect, but we dont currently
// expect the insufficient_scope errors to happen for those callers. If that changes, we can add support
// for more than one extra scope.
res, err = c.makeRequestToResolvedURLOnce(ctx, method, requestURL, headers, stream, streamLen, auth, newScope)
if err != nil {
return nil, err
}
extraScope = newScope
}
}
if res == nil || res.StatusCode != http.StatusTooManyRequests || // Only retry on StatusTooManyRequests, success or other failure is returned to caller immediately
if res.StatusCode != http.StatusTooManyRequests || // Only retry on StatusTooManyRequests, success or other failure is returned to caller immediately
stream != nil || // We can't retry with a body (which is not restartable in the general case)
attempts == backoffNumIterations {
return res, err
return res, nil
}
// close response body before retry or context done
res.Body.Close()
delay = parseRetryAfter(res, delay)
if delay > backoffMaxDelay {
delay = backoffMaxDelay
}
delay = min(parseRetryAfter(res, delay), backoffMaxDelay)
logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", requestURL.Redacted(), delay.Seconds())
select {
case <-ctx.Done():
@ -671,10 +678,14 @@ func parseRegistryWarningHeader(header string) string {
// warning-value = warn-code SP warn-agent SP warn-text [ SP warn-date ]
// distribution-spec requires warn-code=299, warn-agent="-", warn-date missing
if !strings.HasPrefix(header, expectedPrefix) || !strings.HasSuffix(header, expectedSuffix) {
header, ok := strings.CutPrefix(header, expectedPrefix)
if !ok {
return ""
}
header, ok = strings.CutSuffix(header, expectedSuffix)
if !ok {
return ""
}
header = header[len(expectedPrefix) : len(header)-len(expectedSuffix)]
// ”Recipients that process the value of a quoted-string MUST handle a quoted-pair
// as if it were replaced by the octet following the backslash.”, so lets do that…
@ -1009,11 +1020,7 @@ func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.R
if remoteErrors == nil {
return nil, 0, nil // fallback to non-external blob
}
err := fmt.Errorf("failed fetching external blob from all urls: %w", remoteErrors[0])
for _, e := range remoteErrors[1:] {
err = fmt.Errorf("%s, %w", err, e)
}
return nil, 0, err
return nil, 0, fmt.Errorf("failed fetching external blob from all urls: %w", multierr.Format("", ", ", "", remoteErrors))
}
func getBlobSize(resp *http.Response) int64 {
@ -1090,6 +1097,11 @@ func isManifestUnknownError(err error) bool {
if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && e.Message == "Not Found" {
return true
}
// Harbor v2.10.2
if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && strings.Contains(strings.ToLower(e.Message), "not found") {
return true
}
// opencontainers/distribution-spec does not require the errcode.Error payloads to be used,
// but specifies that the HTTP status must be 404.
var unexpected *unexpectedHTTPResponseError

View file

@ -14,6 +14,7 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
// Image is a Docker-specific implementation of types.ImageCloser with a few extra methods
@ -90,6 +91,14 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
}
for _, tag := range tagsHolder.Tags {
if _, err := reference.WithTag(dr.ref, tag); err != nil { // Ensure the tag does not contain unexpected values
// Per https://github.com/containers/skopeo/issues/2346 , unknown versions of JFrog Artifactory,
// contrary to the tag format specified in
// https://github.com/opencontainers/distribution-spec/blob/8a871c8234977df058f1a14e299fe0a673853da2/spec.md?plain=1#L160 ,
// include digests in the list.
if _, err := digest.Parse(tag); err == nil {
logrus.Debugf("Ignoring invalid tag %q matching a digest format", tag)
continue
}
return nil, fmt.Errorf("registry returned invalid tag %q: %w", tag, err)
}
tags = append(tags, tag)

View file

@ -8,10 +8,12 @@ import (
"errors"
"fmt"
"io"
"maps"
"net/http"
"net/url"
"os"
"path/filepath"
"slices"
"strings"
"github.com/containers/image/v5/docker/reference"
@ -34,8 +36,6 @@ import (
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
type dockerImageDestination struct {
@ -347,35 +347,24 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
}
// Then try reusing blobs from other locations.
candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, options.CanSubstitute)
candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, blobinfocache.CandidateLocations2Options{
CanSubstitute: options.CanSubstitute,
PossibleManifestFormats: options.PossibleManifestFormats,
RequiredCompression: options.RequiredCompression,
})
for _, candidate := range candidates {
var err error
compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName)
if err != nil {
logrus.Debugf("OperationAndAlgorithmForCompressor Failed: %v", err)
continue
}
var candidateRepo reference.Named
if !candidate.UnknownLocation {
var err error
candidateRepo, err = parseBICLocationReference(candidate.Location)
if err != nil {
logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
continue
}
}
if !impl.CandidateMatchesTryReusingBlobOptions(options, compressionAlgorithm) {
if !candidate.UnknownLocation {
logrus.Debugf("Ignoring candidate blob %s in %s, compression %s does not match required %s or MIME types %#v", candidate.Digest.String(), candidateRepo.Name(),
optionalCompressionName(compressionAlgorithm), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
} else {
logrus.Debugf("Ignoring candidate blob %s with no known location, compression %s does not match required %s or MIME types %#v", candidate.Digest.String(),
optionalCompressionName(compressionAlgorithm), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
}
continue
}
if !candidate.UnknownLocation {
if candidate.CompressorName != blobinfocache.Uncompressed {
logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name())
if candidate.CompressionAlgorithm != nil {
logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressionAlgorithm.Name(), candidateRepo.Name())
} else {
logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo %s", candidate.Digest.String(), candidateRepo.Name())
}
@ -390,8 +379,8 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
continue
}
} else {
if candidate.CompressorName != blobinfocache.Uncompressed {
logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s with no location match, checking current repo", candidate.Digest.String(), candidate.CompressorName)
if candidate.CompressionAlgorithm != nil {
logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s with no location match, checking current repo", candidate.Digest.String(), candidate.CompressionAlgorithm.Name())
} else {
logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo with no location match, checking current repo", candidate.Digest.String())
}
@ -442,8 +431,8 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
return true, private.ReusedBlob{
Digest: candidate.Digest,
Size: size,
CompressionOperation: compressionOperation,
CompressionAlgorithm: compressionAlgorithm}, nil
CompressionOperation: candidate.CompressionOperation,
CompressionAlgorithm: candidate.CompressionAlgorithm}, nil
}
return false, private.ReusedBlob{}, nil

View file

@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"io"
"math"
"mime"
"mime/multipart"
"net/http"
@ -260,9 +261,15 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error,
}
currentOffset += toSkip
}
var reader io.Reader
if c.Length == math.MaxUint64 {
reader = body
} else {
reader = io.LimitReader(body, int64(c.Length))
}
s := signalCloseReader{
closed: make(chan struct{}),
stream: io.NopCloser(io.LimitReader(body, int64(c.Length))),
stream: io.NopCloser(reader),
consumeStream: true,
}
streams <- s
@ -343,12 +350,24 @@ func parseMediaType(contentType string) (string, map[string]string, error) {
// The specified chunks must be not overlapping and sorted by their offset.
// The readers must be fully consumed, in the order they are returned, before blocking
// to read the next chunk.
// If the Length for the last chunk is set to math.MaxUint64, then it
// fully fetches the remaining data from the offset to the end of the blob.
func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
headers := make(map[string][]string)
rangeVals := make([]string, 0, len(chunks))
lastFound := false
for _, c := range chunks {
rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1))
if lastFound {
return nil, nil, fmt.Errorf("internal error: another chunk requested after an util-EOF chunk")
}
// If the Length is set to -1, then request anything after the specified offset.
if c.Length == math.MaxUint64 {
lastFound = true
rangeVals = append(rangeVals, fmt.Sprintf("%d-", c.Offset))
} else {
rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1))
}
}
headers["Range"] = []string{fmt.Sprintf("bytes=%s", strings.Join(rangeVals, ","))}

View file

@ -54,16 +54,12 @@ type dockerReference struct {
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference.
func ParseReference(refString string) (types.ImageReference, error) {
if !strings.HasPrefix(refString, "//") {
refString, ok := strings.CutPrefix(refString, "//")
if !ok {
return nil, fmt.Errorf("docker: image reference %s does not start with //", refString)
}
// Check if ref has UnknownDigestSuffix suffixed to it
unknownDigest := false
if strings.HasSuffix(refString, UnknownDigestSuffix) {
unknownDigest = true
refString = strings.TrimSuffix(refString, UnknownDigestSuffix)
}
ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//"))
refString, unknownDigest := strings.CutSuffix(refString, UnknownDigestSuffix)
ref, err := reference.ParseNormalizedNamed(refString)
if err != nil {
return nil, err
}

View file

@ -231,7 +231,7 @@ func (r *Reader) openTarComponent(componentPath string) (io.ReadCloser, error) {
}
if !header.FileInfo().Mode().IsRegular() {
return nil, fmt.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
return nil, fmt.Errorf("Error reading tar archive component %q: not a regular file", header.Name)
}
succeeded = true
return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
@ -262,7 +262,7 @@ func findTarComponent(inputFile io.Reader, componentPath string) (*tar.Reader, *
func (r *Reader) readTarComponent(path string, limit int) ([]byte, error) {
file, err := r.openTarComponent(path)
if err != nil {
return nil, fmt.Errorf("loading tar component %s: %w", path, err)
return nil, fmt.Errorf("loading tar component %q: %w", path, err)
}
defer file.Close()
bytes, err := iolimits.ReadAtMost(file, limit)

View file

@ -95,10 +95,10 @@ func (s *Source) ensureCachedDataIsPresentPrivate() error {
}
var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
return fmt.Errorf("decoding tar config %s: %w", tarManifest.Config, err)
return fmt.Errorf("decoding tar config %q: %w", tarManifest.Config, err)
}
if parsedConfig.RootFS == nil {
return fmt.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config)
return fmt.Errorf("Invalid image config (rootFS is not set): %q", tarManifest.Config)
}
knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
@ -144,7 +144,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
}
layerPath := path.Clean(tarManifest.Layers[i])
if _, ok := unknownLayerSizes[layerPath]; ok {
return nil, fmt.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
return nil, fmt.Errorf("Layer tarfile %q used for two different DiffID values", layerPath)
}
li := &layerInfo{ // A new element in each iteration
path: layerPath,
@ -179,7 +179,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
// the slower method of checking if it's compressed.
uncompressedStream, isCompressed, err := compression.AutoDecompress(t)
if err != nil {
return nil, fmt.Errorf("auto-decompressing %s to determine its size: %w", layerPath, err)
return nil, fmt.Errorf("auto-decompressing %q to determine its size: %w", layerPath, err)
}
defer uncompressedStream.Close()
@ -187,7 +187,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
if isCompressed {
uncompressedSize, err = io.Copy(io.Discard, uncompressedStream)
if err != nil {
return nil, fmt.Errorf("reading %s to find its size: %w", layerPath, err)
return nil, fmt.Errorf("reading %q to find its size: %w", layerPath, err)
}
}
li.size = uncompressedSize

View file

@ -9,6 +9,7 @@ import (
"io"
"os"
"path/filepath"
"slices"
"sync"
"time"
@ -19,7 +20,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
// Writer allows creating a (docker save)-formatted tar archive containing one or more images.
@ -164,7 +164,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
return fmt.Errorf("marshaling layer config: %w", err)
}
delete(layerConfig, "layer_id")
layerID := digest.Canonical.FromBytes(b).Hex()
layerID := digest.Canonical.FromBytes(b).Encoded()
layerConfig["id"] = layerID
configBytes, err := json.Marshal(layerConfig)
@ -309,10 +309,10 @@ func (w *Writer) Close() error {
// NOTE: This is an internal implementation detail, not a format property, and can change
// any time.
func (w *Writer) configPath(configDigest digest.Digest) (string, error) {
if err := configDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
if err := configDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
return "", err
}
return configDigest.Hex() + ".json", nil
return configDigest.Encoded() + ".json", nil
}
// physicalLayerPath returns a path we choose for storing a layer with the specified digest
@ -320,15 +320,15 @@ func (w *Writer) configPath(configDigest digest.Digest) (string, error) {
// NOTE: This is an internal implementation detail, not a format property, and can change
// any time.
func (w *Writer) physicalLayerPath(layerDigest digest.Digest) (string, error) {
if err := layerDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
if err := layerDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
return "", err
}
// Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
// Note that this can't be e.g. filepath.Join(l.Digest.Encoded(), legacyLayerFileName); due to the way
// writeLegacyMetadata constructs layer IDs differently from inputinfo.Digest values (as described
// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
// tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
// in the root of the tarball.
return layerDigest.Hex() + ".tar", nil
return layerDigest.Encoded() + ".tar", nil
}
type tarFI struct {

View file

@ -12,6 +12,7 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/rootless"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
@ -93,7 +94,7 @@ func registriesDirPathWithHomeDir(sys *types.SystemContext, homeDir string) stri
return sys.RegistriesDirPath
}
userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir)
if _, err := os.Stat(userRegistriesDirPath); err == nil {
if err := fileutils.Exists(userRegistriesDirPath); err == nil {
return userRegistriesDirPath
}
if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
@ -139,7 +140,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
if config.DefaultDocker != nil {
if mergedConfig.DefaultDocker != nil {
return nil, fmt.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`,
return nil, fmt.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in %q and %q`,
dockerDefaultMergedFrom, configPath)
}
mergedConfig.DefaultDocker = config.DefaultDocker
@ -148,7 +149,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
for nsName, nsConfig := range config.Docker { // includes config.Docker == nil
if _, ok := mergedConfig.Docker[nsName]; ok {
return nil, fmt.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`,
return nil, fmt.Errorf(`Error parsing signature storage configuration: "docker" namespace %q defined both in %q and %q`,
nsName, nsMergedFrom[nsName], configPath)
}
mergedConfig.Docker[nsName] = nsConfig
@ -287,10 +288,10 @@ func (ns registryNamespace) signatureTopLevel(write bool) string {
// base is not nil from the caller
// NOTE: Keep this in sync with docs/signature-protocols.md!
func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) (*url.URL, error) {
if err := manifestDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in a path with ../, so validate explicitly.
if err := manifestDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
return nil, err
}
sigURL := *base
sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Encoded(), index+1)
return &sigURL, nil
}

View file

@ -1,8 +1,6 @@
package blobinfocache
import (
"github.com/containers/image/v5/pkg/compression"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
)
@ -32,7 +30,7 @@ func (bic *v1OnlyBlobInfoCache) Close() {
func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
}
func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2 {
func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2 {
return nil
}
@ -48,23 +46,3 @@ func CandidateLocationsFromV2(v2candidates []BICReplacementCandidate2) []types.B
}
return candidates
}
// OperationAndAlgorithmForCompressor returns CompressionOperation and CompressionAlgorithm
// values suitable for inclusion in a types.BlobInfo structure, based on the name of the
// compression algorithm, or Uncompressed, or UnknownCompression. This is typically used by
// TryReusingBlob() implementations to set values in the BlobInfo structure that they return
// upon success.
func OperationAndAlgorithmForCompressor(compressorName string) (types.LayerCompression, *compressiontypes.Algorithm, error) {
switch compressorName {
case Uncompressed:
return types.Decompress, nil, nil
case UnknownCompression:
return types.PreserveOriginal, nil, nil
default:
algo, err := compression.AlgorithmByName(compressorName)
if err == nil {
return types.Compress, &algo, nil
}
return types.PreserveOriginal, nil, err
}
}

View file

@ -1,6 +1,7 @@
package blobinfocache
import (
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
)
@ -35,19 +36,24 @@ type BlobInfoCache2 interface {
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known)
// that could possibly be reused within the specified (transport scope) (if they still
// exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2
}
// CandidateLocations2Options are used in CandidateLocations2.
type CandidateLocations2Options struct {
// If !CanSubstitute, the returned candidates will match the submitted digest exactly; if
// CanSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
// up variants of the blob which have the same uncompressed digest.
//
// The CompressorName fields in returned data must never be UnknownCompression.
CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2
CanSubstitute bool
PossibleManifestFormats []string // If set, a set of possible manifest formats; at least one should support the reused layer
RequiredCompression *compressiontypes.Algorithm // If set, only reuse layers with a matching algorithm
}
// BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2.
type BICReplacementCandidate2 struct {
Digest digest.Digest
CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression
UnknownLocation bool // is true when `Location` for this blob is not set
Location types.BICLocationReference // not set if UnknownLocation is set to `true`
Digest digest.Digest
CompressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed
CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
UnknownLocation bool // is true when `Location` for this blob is not set
Location types.BICLocationReference // not set if UnknownLocation is set to `true`
}

View file

@ -366,7 +366,7 @@ func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string)
if err := blobDigest.Validate(); err != nil {
return "", err
}
parts := append([]string{blobDigest.Hex()}, others...)
parts := append([]string{blobDigest.Encoded()}, others...)
v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " ")))
return hex.EncodeToString(v1IDHash[:]), nil
}

View file

@ -76,7 +76,7 @@ func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src
case imgspecv1.MediaTypeImageIndex:
return manifestOCI1FromImageIndex(ctx, sys, src, manblob)
default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values.
return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
return nil, fmt.Errorf("Unimplemented manifest MIME type %q", mt)
}
}

View file

@ -5,6 +5,7 @@ import (
"encoding/json"
"errors"
"fmt"
"slices"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
@ -15,7 +16,6 @@ import (
ociencspec "github.com/containers/ocicrypt/spec"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/slices"
)
type manifestOCI1 struct {

View file

@ -3,40 +3,13 @@ package impl
import (
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/private"
compression "github.com/containers/image/v5/pkg/compression/types"
"golang.org/x/exp/slices"
)
// CandidateMatchesTryReusingBlobOptions validates if compression is required by the caller while selecting a blob, if it is required
// then function performs a match against the compression requested by the caller and compression of existing blob
// (which can be nil to represent uncompressed or unknown)
func CandidateMatchesTryReusingBlobOptions(options private.TryReusingBlobOptions, candidateCompression *compression.Algorithm) bool {
if options.RequiredCompression != nil {
if options.RequiredCompression.Name() == compression.ZstdChunkedAlgorithmName {
// HACK: Never match when the caller asks for zstd:chunked, because we dont record the annotations required to use the chunked blobs.
// The caller must re-compress to build those annotations.
return false
}
if candidateCompression == nil ||
(options.RequiredCompression.Name() != candidateCompression.Name() && options.RequiredCompression.Name() != candidateCompression.BaseVariantName()) {
return false
}
}
// For candidateCompression == nil, we cant tell the difference between “uncompressed” and “unknown”;
// and “uncompressed” is acceptable in all known formats (well, it seems to work in practice for schema1),
// so dont impose any restrictions if candidateCompression == nil
if options.PossibleManifestFormats != nil && candidateCompression != nil {
if !slices.ContainsFunc(options.PossibleManifestFormats, func(mt string) bool {
return manifest.MIMETypeSupportsCompressionAlgorithm(mt, *candidateCompression)
}) {
return false
}
}
return true
}
// OriginalCandidateMatchesTryReusingBlobOptions returns true if the original blob passed to TryReusingBlobWithOptions
// is acceptable based on opts.
func OriginalCandidateMatchesTryReusingBlobOptions(opts private.TryReusingBlobOptions) bool {
return CandidateMatchesTryReusingBlobOptions(opts, opts.OriginalCompression)
return manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
PossibleManifestFormats: opts.PossibleManifestFormats,
RequiredCompression: opts.RequiredCompression,
}, opts.OriginalCompression)
}

View file

@ -39,6 +39,8 @@ func (stub NoGetBlobAtInitialize) SupportsGetBlobAt() bool {
// The specified chunks must be not overlapping and sorted by their offset.
// The readers must be fully consumed, in the order they are returned, before blocking
// to read the next chunk.
// If the Length for the last chunk is set to math.MaxUint64, then it
// fully fetches the remaining data from the offset to the end of the blob.
func (stub NoGetBlobAtInitialize) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
return nil, nil, fmt.Errorf("internal error: GetBlobAt is not supported by the %q transport", stub.transportName)
}

View file

@ -3,13 +3,13 @@ package manifest
import (
"encoding/json"
"fmt"
"slices"
platform "github.com/containers/image/v5/internal/pkg/platform"
compression "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/slices"
)
// Schema2PlatformSpec describes the platform which a particular manifest is
@ -164,7 +164,7 @@ func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.
}
}
}
return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
return "", fmt.Errorf("no image found in manifest list for architecture %q, variant %q, OS %q", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
}
// Serialize returns the list in a blob format.

View file

@ -129,5 +129,5 @@ func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) {
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
return nil, fmt.Errorf("Treating single images as manifest lists is not implemented")
}
return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized)
return nil, fmt.Errorf("Unimplemented manifest list MIME type %q (normalized as %q)", manifestMIMEType, normalized)
}

View file

@ -2,6 +2,7 @@ package manifest
import (
"encoding/json"
"slices"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/libtrust"
@ -192,3 +193,39 @@ func MIMETypeSupportsCompressionAlgorithm(mimeType string, algo compressiontypes
return false
}
}
// ReuseConditions are an input to CandidateCompressionMatchesReuseConditions;
// it is a struct to allow longer and better-documented field names.
type ReuseConditions struct {
	// PossibleManifestFormats, if set, is a set of possible manifest formats;
	// at least one should support the reused layer.
	PossibleManifestFormats []string
	// RequiredCompression, if set, restricts reuse to layers with a matching compression algorithm.
	RequiredCompression *compressiontypes.Algorithm
}
// CandidateCompressionMatchesReuseConditions returns true if a layer with candidateCompression
// (which can be nil to represent uncompressed or unknown) matches reuseConditions.
func CandidateCompressionMatchesReuseConditions(c ReuseConditions, candidateCompression *compressiontypes.Algorithm) bool {
	if required := c.RequiredCompression; required != nil {
		// HACK: zstd:chunked reuse would need annotations we do not record, so the caller
		// must always re-compress to build them; never report a match for that algorithm.
		if required.Name() == compressiontypes.ZstdChunkedAlgorithmName {
			return false
		}
		if candidateCompression == nil {
			return false
		}
		if n := required.Name(); n != candidateCompression.Name() && n != candidateCompression.BaseVariantName() {
			return false
		}
	}
	// A nil candidateCompression may mean either "uncompressed" or "unknown"; uncompressed
	// layers are acceptable in every known format (it seems to work in practice even for
	// schema1), so no format restriction is imposed in that case.
	if candidateCompression != nil && c.PossibleManifestFormats != nil {
		supported := slices.ContainsFunc(c.PossibleManifestFormats, func(mt string) bool {
			return MIMETypeSupportsCompressionAlgorithm(mt, *candidateCompression)
		})
		if !supported {
			return false
		}
	}
	return true
}

View file

@ -3,8 +3,10 @@ package manifest
import (
"encoding/json"
"fmt"
"maps"
"math"
"runtime"
"slices"
platform "github.com/containers/image/v5/internal/pkg/platform"
compression "github.com/containers/image/v5/pkg/compression/types"
@ -12,8 +14,6 @@ import (
"github.com/opencontainers/go-digest"
imgspec "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
const (
@ -260,7 +260,7 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi
if bestMatch != nil {
return bestMatch.digest, nil
}
return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
return "", fmt.Errorf("no image found in image index for architecture %q, variant %q, OS %q", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
}
func (index *OCI1Index) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {

View file

@ -0,0 +1,34 @@
package multierr
import (
"fmt"
"strings"
)
// Format creates an error value from the input array (which should not be empty)
// If the input contains a single error value, it is returned as is.
// If there are multiple, they are formatted as a multi-error (with Unwrap() []error) with the provided initial, separator, and ending strings.
//
// Typical usage:
//
// var errs []error
// // …
// errs = append(errs, …)
// // …
// if errs != nil { return multierr.Format("Failures doing $FOO", "\n* ", "", errs)}
func Format(first, middle, last string, errs []error) error {
switch len(errs) {
case 0:
return fmt.Errorf("internal error: multierr.Format called with 0 errors")
case 1:
return errs[0]
default:
// We have to do this — and this function only really exists — because fmt.Errorf(format, errs...) is invalid:
// []error is not a valid parameter to a function expecting []any
anyErrs := make([]any, 0, len(errs))
for _, e := range errs {
anyErrs = append(anyErrs, e)
}
return fmt.Errorf(first+"%w"+strings.Repeat(middle+"%w", len(errs)-1)+last, anyErrs...)
}
}

View file

@ -21,12 +21,12 @@ import (
"fmt"
"os"
"runtime"
"slices"
"strings"
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
// For Linux, the kernel has already detected the ABI, ISA and Features.
@ -64,8 +64,8 @@ func getCPUInfo(pattern string) (info string, err error) {
return "", fmt.Errorf("getCPUInfo for pattern: %s not found", pattern)
}
func getCPUVariantWindows(arch string) string {
// Windows only supports v7 for ARM32 and v8 for ARM64 and so we can use
func getCPUVariantDarwinWindows(arch string) string {
// Darwin and Windows only support v7 for ARM32 and v8 for ARM64 and so we can use
// runtime.GOARCH to determine the variants
var variant string
switch arch {
@ -133,8 +133,8 @@ func getCPUVariantArm() string {
}
func getCPUVariant(os string, arch string) string {
if os == "windows" {
return getCPUVariantWindows(arch)
if os == "darwin" || os == "windows" {
return getCPUVariantDarwinWindows(arch)
}
if arch == "arm" || arch == "arm64" {
return getCPUVariantArm()

View file

@ -143,7 +143,11 @@ type ReusedBlob struct {
// ImageSourceChunk is a portion of a blob.
// This API is experimental and can be changed without bumping the major version number.
type ImageSourceChunk struct {
// Offset specifies the starting position of the chunk within the source blob.
Offset uint64
// Length specifies the size of the chunk. If it is set to math.MaxUint64,
// then it refers to all the data from Offset to the end of the blob.
Length uint64
}
@ -154,6 +158,8 @@ type BlobChunkAccessor interface {
// The specified chunks must be not overlapping and sorted by their offset.
// The readers must be fully consumed, in the order they are returned, before blocking
// to read the next chunk.
// If the Length for the last chunk is set to math.MaxUint64, then it
// fully fetches the remaining data from the offset to the end of the blob.
GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error)
}

View file

@ -1,10 +1,9 @@
package signature
import (
"bytes"
"encoding/json"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
"maps"
)
const (
@ -45,7 +44,7 @@ type sigstoreJSONRepresentation struct {
func SigstoreFromComponents(untrustedMimeType string, untrustedPayload []byte, untrustedAnnotations map[string]string) Sigstore {
return Sigstore{
untrustedMIMEType: untrustedMimeType,
untrustedPayload: slices.Clone(untrustedPayload),
untrustedPayload: bytes.Clone(untrustedPayload),
untrustedAnnotations: maps.Clone(untrustedAnnotations),
}
}
@ -79,7 +78,7 @@ func (s Sigstore) UntrustedMIMEType() string {
return s.untrustedMIMEType
}
func (s Sigstore) UntrustedPayload() []byte {
return slices.Clone(s.untrustedPayload)
return bytes.Clone(s.untrustedPayload)
}
func (s Sigstore) UntrustedAnnotations() map[string]string {

View file

@ -1,6 +1,6 @@
package signature
import "golang.org/x/exp/slices"
import "bytes"
// SimpleSigning is a “simple signing” signature.
type SimpleSigning struct {
@ -10,7 +10,7 @@ type SimpleSigning struct {
// SimpleSigningFromBlob converts a “simple signing” signature into a SimpleSigning object.
func SimpleSigningFromBlob(blobChunk []byte) SimpleSigning {
return SimpleSigning{
untrustedSignature: slices.Clone(blobChunk),
untrustedSignature: bytes.Clone(blobChunk),
}
}
@ -21,9 +21,9 @@ func (s SimpleSigning) FormatID() FormatID {
// blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
// Almost everyone should use signature.Blob() instead.
func (s SimpleSigning) blobChunk() ([]byte, error) {
return slices.Clone(s.untrustedSignature), nil
return bytes.Clone(s.untrustedSignature), nil
}
func (s SimpleSigning) UntrustedSignature() []byte {
return slices.Clone(s.untrustedSignature)
return bytes.Clone(s.untrustedSignature)
}

View file

@ -67,15 +67,15 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType
return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)}
}
if name != mtsUncompressed {
return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mimeType)}
return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %q", name, mimeType)}
}
// We can't very well say “the idea of no compression is unknown”
return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)}
}
if algorithm != nil {
return "", fmt.Errorf("unsupported MIME type for compression: %s", mimeType)
return "", fmt.Errorf("unsupported MIME type for compression: %q", mimeType)
}
return "", fmt.Errorf("unsupported MIME type for decompression: %s", mimeType)
return "", fmt.Errorf("unsupported MIME type for decompression: %q", mimeType)
}
// updatedMIMEType returns the result of applying edits in updated (MediaType, CompressionOperation) to

View file

@ -4,6 +4,7 @@ import (
"encoding/json"
"errors"
"fmt"
"slices"
"strings"
"time"
@ -15,7 +16,6 @@ import (
"github.com/containers/storage/pkg/regexp"
"github.com/docker/docker/api/types/versions"
"github.com/opencontainers/go-digest"
"golang.org/x/exp/slices"
)
// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1.
@ -221,7 +221,7 @@ func (m *Schema1) fixManifestLayers() error {
m.History = slices.Delete(m.History, i, i+1)
m.ExtractedV1Compatibility = slices.Delete(m.ExtractedV1Compatibility, i, i+1)
} else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID {
return fmt.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent)
return fmt.Errorf("Invalid parent ID. Expected %v, got %q", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent)
}
}
return nil
@ -342,5 +342,5 @@ func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) {
if err != nil {
return "", err
}
return digest.FromBytes(image).Hex(), nil
return digest.FromBytes(image).Encoded(), nil
}

View file

@ -54,9 +54,10 @@ type Schema2HealthConfig struct {
Test []string `json:",omitempty"`
// Zero means to inherit. Durations are expressed as integer nanoseconds.
StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check.
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check.
StartInterval time.Duration `json:",omitempty"` // StartInterval is the time to wait between checks during the start period.
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
// Retries is the number of consecutive failures needed to consider a container as unhealthy.
// Zero means inherit.
@ -294,7 +295,7 @@ func (m *Schema2) ImageID([]digest.Digest) (string, error) {
if err := m.ConfigDescriptor.Digest.Validate(); err != nil {
return "", err
}
return m.ConfigDescriptor.Digest.Hex(), nil
return m.ConfigDescriptor.Digest.Encoded(), nil
}
// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image

View file

@ -166,5 +166,5 @@ func FromBlob(manblob []byte, mt string) (Manifest, error) {
return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented")
}
// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
return nil, fmt.Errorf("Unimplemented manifest MIME type %s (normalized as %s)", mt, nmt)
return nil, fmt.Errorf("Unimplemented manifest MIME type %q (normalized as %q)", mt, nmt)
}

View file

@ -3,6 +3,7 @@ package manifest
import (
"encoding/json"
"fmt"
"slices"
"strings"
"github.com/containers/image/v5/internal/manifest"
@ -12,7 +13,6 @@ import (
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/slices"
)
// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor.
@ -167,7 +167,7 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
// an error if the mediatype does not support encryption
func getEncryptedMediaType(mediatype string) (string, error) {
if slices.Contains(strings.Split(mediatype, "+")[1:], "encrypted") {
return "", fmt.Errorf("unsupported mediaType: %v already encrypted", mediatype)
return "", fmt.Errorf("unsupported mediaType: %q already encrypted", mediatype)
}
unsuffixedMediatype := strings.Split(mediatype, "+")[0]
switch unsuffixedMediatype {
@ -176,17 +176,18 @@ func getEncryptedMediaType(mediatype string) (string, error) {
return mediatype + "+encrypted", nil
}
return "", fmt.Errorf("unsupported mediaType to encrypt: %v", mediatype)
return "", fmt.Errorf("unsupported mediaType to encrypt: %q", mediatype)
}
// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return
// getDecryptedMediaType will return the mediatype to its encrypted counterpart and return
// an error if the mediatype does not support decryption
func getDecryptedMediaType(mediatype string) (string, error) {
if !strings.HasSuffix(mediatype, "+encrypted") {
return "", fmt.Errorf("unsupported mediaType to decrypt: %v", mediatype)
res, ok := strings.CutSuffix(mediatype, "+encrypted")
if !ok {
return "", fmt.Errorf("unsupported mediaType to decrypt: %q", mediatype)
}
return strings.TrimSuffix(mediatype, "+encrypted"), nil
return res, nil
}
// Serialize returns the manifest in a blob format.
@ -259,7 +260,7 @@ func (m *OCI1) ImageID(diffIDs []digest.Digest) (string, error) {
if err := m.Config.Digest.Validate(); err != nil {
return "", err
}
return m.Config.Digest.Hex(), nil
return m.Config.Digest.Encoded(), nil
}
// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image

View file

@ -149,6 +149,8 @@ func (s *ociArchiveImageSource) SupportsGetBlobAt() bool {
// The specified chunks must be not overlapping and sorted by their offset.
// The readers must be fully consumed, in the order they are returned, before blocking
// to read the next chunk.
// If the Length for the last chunk is set to math.MaxUint64, then it
// fully fetches the remaining data from the offset to the end of the blob.
func (s *ociArchiveImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
return s.unpackedSrc.GetBlobAt(ctx, info, chunks)
}

View file

@ -6,13 +6,13 @@ import (
"fmt"
"io/fs"
"os"
"slices"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
// DeleteImage deletes the named image from the directory, if supported.

View file

@ -6,9 +6,11 @@ import (
"errors"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
"runtime"
"slices"
"github.com/containers/image/v5/internal/imagedestination/impl"
"github.com/containers/image/v5/internal/imagedestination/stubs"
@ -16,10 +18,10 @@ import (
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/fileutils"
digest "github.com/opencontainers/go-digest"
imgspec "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/slices"
)
type ociImageDestination struct {
@ -301,7 +303,7 @@ func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error
}
func ensureDirectoryExists(path string) error {
if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
if err := fileutils.Exists(path); err != nil && errors.Is(err, fs.ErrNotExist) {
if err := os.MkdirAll(path, 0755); err != nil {
return err
}
@ -317,7 +319,7 @@ func ensureParentDirectoryExists(path string) error {
// indexExists checks whether the index location specified in the OCI reference exists.
// The implementation is opinionated, since in case of unexpected errors false is returned
func indexExists(ref ociReference) bool {
_, err := os.Stat(ref.indexPath())
err := fileutils.Exists(ref.indexPath())
if err == nil {
return true
}

View file

@ -182,19 +182,19 @@ func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io
hasSupportedURL = true
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
if err != nil {
errWrap = fmt.Errorf("fetching %s failed %s: %w", u, err.Error(), errWrap)
errWrap = fmt.Errorf("fetching %q failed %s: %w", u, err.Error(), errWrap)
continue
}
resp, err := s.client.Do(req)
if err != nil {
errWrap = fmt.Errorf("fetching %s failed %s: %w", u, err.Error(), errWrap)
errWrap = fmt.Errorf("fetching %q failed %s: %w", u, err.Error(), errWrap)
continue
}
if resp.StatusCode != http.StatusOK {
resp.Body.Close()
errWrap = fmt.Errorf("fetching %s failed, response code not 200: %w", u, errWrap)
errWrap = fmt.Errorf("fetching %q failed, response code not 200: %w", u, errWrap)
continue
}

View file

@ -256,5 +256,5 @@ func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (st
} else {
blobDir = filepath.Join(ref.dir, imgspecv1.ImageBlobsDir)
}
return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil
return filepath.Join(blobDir, digest.Algorithm().String(), digest.Encoded()), nil
}

View file

@ -14,13 +14,14 @@ import (
"path"
"path/filepath"
"reflect"
"slices"
"strings"
"time"
"dario.cat/mergo"
"github.com/containers/image/v5/internal/multierr"
"github.com/containers/storage/pkg/homedir"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
"gopkg.in/yaml.v3"
)
@ -459,12 +460,6 @@ func (config *directClientConfig) getCluster() clientcmdCluster {
return mergedClusterInfo
}
// aggregateErr is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.
// This helper implements the error and Errors interfaces. Keeping it private
// prevents people from making an aggregate of 0 errors, which is not
// an error, but does satisfy the error interface.
type aggregateErr []error
// newAggregate is a modified copy of k8s.io/apimachinery/pkg/util/errors.NewAggregate.
// NewAggregate converts a slice of errors into an Aggregate interface, which
// is itself an implementation of the error interface. If the slice is empty,
@ -485,29 +480,9 @@ func newAggregate(errlist []error) error {
if len(errs) == 0 {
return nil
}
return aggregateErr(errs)
return multierr.Format("[", ", ", "]", errs)
}
// Error is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.Error.
// Error is part of the error interface.
func (agg aggregateErr) Error() string {
if len(agg) == 0 {
// This should never happen, really.
return ""
}
if len(agg) == 1 {
return agg[0].Error()
}
result := fmt.Sprintf("[%s", agg[0].Error())
for i := 1; i < len(agg); i++ {
result += fmt.Sprintf(", %s", agg[i].Error())
}
result += "]"
return result
}
// REMOVED: aggregateErr.Errors
// errConfigurationInvalid is a modified? copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.errConfigurationInvalid.
// errConfigurationInvalid is a set of errors indicating the configuration is invalid.
type errConfigurationInvalid []error
@ -578,7 +553,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
continue
}
if err != nil {
errlist = append(errlist, fmt.Errorf("loading config file \"%s\": %w", filename, err))
errlist = append(errlist, fmt.Errorf("loading config file %q: %w", filename, err))
continue
}

View file

@ -152,7 +152,7 @@ func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName str
func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) {
_, repo, gotRepo := strings.Cut(ref, "/")
if !gotRepo {
return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", ref)
return "", fmt.Errorf("Invalid format of docker reference %q: missing '/'", ref)
}
return reference.Domain(c.ref.dockerReference) + "/" + repo, nil
}

View file

@ -9,6 +9,7 @@ import (
"fmt"
"io"
"net/http"
"slices"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
@ -21,7 +22,6 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"golang.org/x/exp/slices"
)
type openshiftImageDestination struct {

View file

@ -11,6 +11,7 @@ import (
"errors"
"fmt"
"io"
"io/fs"
"os"
"os/exec"
"path/filepath"
@ -29,6 +30,7 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/fileutils"
"github.com/klauspost/pgzip"
"github.com/opencontainers/go-digest"
selinux "github.com/opencontainers/selinux/go-selinux"
@ -162,7 +164,7 @@ func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream
return private.UploadedBlob{}, err
}
hash := blobDigest.Hex()
hash := blobDigest.Encoded()
d.blobs[hash] = &blobToImport{Size: size, Digest: blobDigest, BlobPath: blobPath}
return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
}
@ -280,8 +282,8 @@ func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest,
func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, repo *otbuiltin.Repo, blob *blobToImport) error {
// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root")
ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Encoded())
destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Encoded(), "root")
if err := ensureDirectoryExists(destinationPath); err != nil {
return err
}
@ -321,7 +323,7 @@ func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle,
}
func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error {
ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Encoded())
destinationPath := filepath.Dir(blob.BlobPath)
return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
@ -346,10 +348,10 @@ func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context,
d.repo = repo
}
if err := info.Digest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, so validate explicitly.
if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
return false, private.ReusedBlob{}, err
}
branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
branch := fmt.Sprintf("ociimage/%s", info.Digest.Encoded())
found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
if err != nil || !found {
@ -477,7 +479,7 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
if err := layer.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
return err
}
hash := layer.Digest.Hex()
hash := layer.Digest.Encoded()
if err = checkLayer(hash); err != nil {
return err
}
@ -486,7 +488,7 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
if err := layer.BlobSum.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
return err
}
hash := layer.BlobSum.Hex()
hash := layer.BlobSum.Encoded()
if err = checkLayer(hash); err != nil {
return err
}
@ -514,7 +516,7 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
}
func ensureDirectoryExists(path string) error {
if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
if err := fileutils.Exists(path); err != nil && errors.Is(err, fs.ErrNotExist) {
if err := os.MkdirAll(path, 0755); err != nil {
return err
}

View file

@ -190,7 +190,7 @@ func (o ostreeReader) Read(p []byte) (int, error) {
if count == 0 {
return 0, io.EOF
}
data := (*[1 << 30]byte)(unsafe.Pointer(C.g_bytes_get_data(b, nil)))[:count:count]
data := unsafe.Slice((*byte)(C.g_bytes_get_data(b, nil)), count)
copy(p, data)
return count, nil
}
@ -289,7 +289,7 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
return nil, -1, err
}
blob := info.Digest.Hex()
blob := info.Digest.Encoded()
// Ensure s.compressed is initialized. It is build by LayerInfosForCopy.
if s.compressed == nil {
@ -301,7 +301,7 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
}
compressedBlob, isCompressed := s.compressed[info.Digest]
if isCompressed {
blob = compressedBlob.Hex()
blob = compressedBlob.Encoded()
}
branch := fmt.Sprintf("ociimage/%s", blob)
@ -424,7 +424,7 @@ func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context, instanceDiges
layerBlobs := man.LayerInfos()
for _, layerBlob := range layerBlobs {
branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Hex())
branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Encoded())
found, uncompressedDigestStr, err := readMetadata(s.repo, branch, "docker.uncompressed_digest")
if err != nil || !found {
return nil, err
@ -439,7 +439,10 @@ func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context, instanceDiges
if err != nil {
return nil, err
}
uncompressedDigest := digest.Digest(uncompressedDigestStr)
uncompressedDigest, err := digest.Parse(uncompressedDigestStr)
if err != nil {
return nil, err
}
blobInfo := types.BlobInfo{
Digest: uncompressedDigest,
Size: uncompressedSize,

View file

@ -1,13 +1,18 @@
// Package prioritize provides utilities for prioritizing locations in
// Package prioritize provides utilities for filtering and prioritizing locations in
// types.BlobInfoCache.CandidateLocations.
package prioritize
import (
"sort"
"cmp"
"slices"
"time"
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
// replacementAttempts is the number of blob replacement candidates with known location returned by destructivelyPrioritizeReplacementCandidates,
@ -20,28 +25,67 @@ const replacementAttempts = 5
// This is a heuristic/guess, and could well use a different value.
const replacementUnknownLocationAttempts = 2
// CandidateCompression returns (true, compressionOp, compressionAlgo) if a blob
// with compressionName (which can be Uncompressed or UnknownCompression) is acceptable for a CandidateLocations* call with v2Options.
//
// v2Options can be set to nil if the call is CandidateLocations (i.e. compression is not required to be known);
// if not nil, the call is assumed to be CandidateLocations2.
//
// The (compressionOp, compressionAlgo) values are suitable for BICReplacementCandidate2
func CandidateCompression(v2Options *blobinfocache.CandidateLocations2Options, digest digest.Digest, compressorName string) (bool, types.LayerCompression, *compression.Algorithm) {
if v2Options == nil {
return true, types.PreserveOriginal, nil // Anything goes. The (compressionOp, compressionAlgo) values are not used.
}
var op types.LayerCompression
var algo *compression.Algorithm
switch compressorName {
case blobinfocache.Uncompressed:
op = types.Decompress
algo = nil
case blobinfocache.UnknownCompression:
logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unknown compression", digest.String())
return false, types.PreserveOriginal, nil // Not allowed with CandidateLocations2
default:
op = types.Compress
algo_, err := compression.AlgorithmByName(compressorName)
if err != nil {
logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unrecognized compression %q: %v",
digest.String(), compressorName, err)
return false, types.PreserveOriginal, nil // The BICReplacementCandidate2.CompressionAlgorithm field is required
}
algo = &algo_
}
if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
PossibleManifestFormats: v2Options.PossibleManifestFormats,
RequiredCompression: v2Options.RequiredCompression,
}, algo) {
requiredCompresssion := "nil"
if v2Options.RequiredCompression != nil {
requiredCompresssion = v2Options.RequiredCompression.Name()
}
logrus.Debugf("Ignoring BlobInfoCache record of digest %q, compression %q does not match required %s or MIME types %#v",
digest.String(), compressorName, requiredCompresssion, v2Options.PossibleManifestFormats)
return false, types.PreserveOriginal, nil
}
return true, op, algo
}
// CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
type CandidateWithTime struct {
Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate
LastSeen time.Time // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation)
}
// candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
// along with the specially-treated digest values for the implementation of sort.Interface.Less
// candidateSortState is a closure for a comparison used by slices.SortFunc on candidates to prioritize,
// along with the specially-treated digest values relevant to the ordering.
type candidateSortState struct {
cs []CandidateWithTime // The entries to sort
primaryDigest digest.Digest // The digest the user actually asked for
uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest
primaryDigest digest.Digest // The digest the user actually asked for
uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest
}
func (css *candidateSortState) Len() int {
return len(css.cs)
}
func (css *candidateSortState) Less(i, j int) bool {
xi := css.cs[i]
xj := css.cs[j]
func (css *candidateSortState) compare(xi, xj CandidateWithTime) int {
// primaryDigest entries come first, more recent first.
// uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first.
// Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order)
@ -50,43 +94,32 @@ func (css *candidateSortState) Less(i, j int) bool {
if xi.Candidate.Digest != xj.Candidate.Digest {
// - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter
if xi.Candidate.Digest == css.primaryDigest {
return true
return -1
}
if xj.Candidate.Digest == css.primaryDigest {
return false
return 1
}
if css.uncompressedDigest != "" {
if xi.Candidate.Digest == css.uncompressedDigest {
return false
return 1
}
if xj.Candidate.Digest == css.uncompressedDigest {
return true
return -1
}
}
} else { // xi.Candidate.Digest == xj.Candidate.Digest
// The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time
if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) {
return xi.LastSeen.After(xj.LastSeen)
return -xi.LastSeen.Compare(xj.LastSeen)
}
}
// Neither of the digests are primaryDigest/uncompressedDigest:
if !xi.LastSeen.Equal(xj.LastSeen) { // Order primarily by time
return xi.LastSeen.After(xj.LastSeen)
if cmp := xi.LastSeen.Compare(xj.LastSeen); cmp != 0 { // Order primarily by time
return -cmp
}
// Fall back to digest, if timestamps end up _exactly_ the same (how?!)
return xi.Candidate.Digest < xj.Candidate.Digest
}
func (css *candidateSortState) Swap(i, j int) {
css.cs[i], css.cs[j] = css.cs[j], css.cs[i]
}
func min(a, b int) int {
if a < b {
return a
}
return b
return cmp.Compare(xi.Candidate.Digest, xj.Candidate.Digest)
}
// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the
@ -100,12 +133,10 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime,
var unknownLocationCandidates []CandidateWithTime
// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
// compare equal.
// FIXME: Use slices.SortFunc after we update to Go 1.20 (Go 1.21?) and Time.Compare and cmp.Compare are available.
sort.Sort(&candidateSortState{
cs: cs,
slices.SortFunc(cs, (&candidateSortState{
primaryDigest: primaryDigest,
uncompressedDigest: uncompressedDigest,
})
}).compare)
for _, candidate := range cs {
if candidate.Candidate.UnknownLocation {
unknownLocationCandidates = append(unknownLocationCandidates, candidate)
@ -116,7 +147,7 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime,
knownLocationCandidatesUsed := min(len(knownLocationCandidates), totalLimit)
remainingCapacity := totalLimit - knownLocationCandidatesUsed
unknownLocationCandidatesUsed := min(noLocationLimit, min(remainingCapacity, len(unknownLocationCandidates)))
unknownLocationCandidatesUsed := min(noLocationLimit, remainingCapacity, len(unknownLocationCandidates))
res := make([]blobinfocache.BICReplacementCandidate2, knownLocationCandidatesUsed)
for i := 0; i < knownLocationCandidatesUsed; i++ {
res[i] = knownLocationCandidates[i].Candidate

View file

@ -135,14 +135,17 @@ func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compresso
// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory
// with corresponding compression info from mem.compressors, and returns the result of appending
// them to candidates. v2Output allows including candidates with unknown location, and filters out
// candidates with unknown compression.
func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) []prioritize.CandidateWithTime {
// them to candidates.
// v2Options is not nil if the caller is CandidateLocations2: this allows including candidates with unknown location, and filters out candidates
// with unknown compression.
func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest,
v2Options *blobinfocache.CandidateLocations2Options) []prioritize.CandidateWithTime {
compressorName := blobinfocache.UnknownCompression
if v, ok := mem.compressors[digest]; ok {
compressorName = v
}
if compressorName == blobinfocache.UnknownCompression && v2Output {
ok, compressionOp, compressionAlgo := prioritize.CandidateCompression(v2Options, digest, compressorName)
if !ok {
return candidates
}
locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
@ -150,20 +153,22 @@ func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
for l, t := range locations {
candidates = append(candidates, prioritize.CandidateWithTime{
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressorName: compressorName,
Location: l,
Digest: digest,
CompressionOperation: compressionOp,
CompressionAlgorithm: compressionAlgo,
Location: l,
},
LastSeen: t,
})
}
} else if v2Output {
} else if v2Options != nil {
candidates = append(candidates, prioritize.CandidateWithTime{
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressorName: compressorName,
UnknownLocation: true,
Location: types.BICLocationReference{Opaque: ""},
Digest: digest,
CompressionOperation: compressionOp,
CompressionAlgorithm: compressionAlgo,
UnknownLocation: true,
Location: types.BICLocationReference{Opaque: ""},
},
LastSeen: time.Time{},
})
@ -178,24 +183,24 @@ func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, false))
return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, nil))
}
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) that could possibly be reused
// within the specified (transport scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
return mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, true)
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known)
// that could possibly be reused within the specified (transport scope) (if they still
// exist, which is not guaranteed).
func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, options blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
return mem.candidateLocations(transport, scope, primaryDigest, options.CanSubstitute, &options)
}
func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 {
// candidateLocations implements CandidateLocations / CandidateLocations2.
// v2Options is not nil if the caller is CandidateLocations2.
func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool,
v2Options *blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
mem.mutex.Lock()
defer mem.mutex.Unlock()
res := []prioritize.CandidateWithTime{}
res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, v2Output)
res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, v2Options)
var uncompressedDigest digest.Digest // = ""
if canSubstitute {
if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
@ -203,12 +208,12 @@ func (mem *cache) candidateLocations(transport types.ImageTransport, scope types
if otherDigests != nil {
for _, d := range otherDigests.Values() {
if d != primaryDigest && d != uncompressedDigest {
res = mem.appendReplacementCandidates(res, transport, scope, d, v2Output)
res = mem.appendReplacementCandidates(res, transport, scope, d, v2Options)
}
}
}
if uncompressedDigest != primaryDigest {
res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, v2Output)
res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, v2Options)
}
}
}

View file

@ -428,88 +428,86 @@ func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressor
}
// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest),
// and returns the result of appending them to candidates. v2Output allows including candidates with unknown
// location, and filters out candidates with unknown compression.
func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) ([]prioritize.CandidateWithTime, error) {
var rows *sql.Rows
var err error
if v2Output {
rows, err = tx.Query("SELECT location, time, compressor FROM KnownLocations JOIN DigestCompressors "+
"ON KnownLocations.digest = DigestCompressors.digest "+
"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
transport.Name(), scope.Opaque, digest.String())
} else {
rows, err = tx.Query("SELECT location, time, IFNULL(compressor, ?) FROM KnownLocations "+
"LEFT JOIN DigestCompressors ON KnownLocations.digest = DigestCompressors.digest "+
"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
blobinfocache.UnknownCompression,
transport.Name(), scope.Opaque, digest.String())
}
if err != nil {
return nil, fmt.Errorf("looking up candidate locations: %w", err)
}
defer rows.Close()
res := []prioritize.CandidateWithTime{}
for rows.Next() {
var location string
var time time.Time
var compressorName string
if err := rows.Scan(&location, &time, &compressorName); err != nil {
return nil, fmt.Errorf("scanning candidate: %w", err)
}
res = append(res, prioritize.CandidateWithTime{
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressorName: compressorName,
Location: types.BICLocationReference{Opaque: location},
},
LastSeen: time,
})
}
if err := rows.Err(); err != nil {
return nil, fmt.Errorf("iterating through locations: %w", err)
}
if len(res) == 0 && v2Output {
// and returns the result of appending them to candidates.
// v2Options is not nil if the caller is CandidateLocations2: this allows including candidates with unknown location, and filters out candidates
// with unknown compression.
func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest,
v2Options *blobinfocache.CandidateLocations2Options) ([]prioritize.CandidateWithTime, error) {
compressorName := blobinfocache.UnknownCompression
if v2Options != nil {
compressor, found, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", digest.String())
if err != nil {
return nil, fmt.Errorf("scanning compressorName: %w", err)
}
if found {
res = append(res, prioritize.CandidateWithTime{
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressorName: compressor,
UnknownLocation: true,
Location: types.BICLocationReference{Opaque: ""},
},
LastSeen: time.Time{},
})
compressorName = compressor
}
}
candidates = append(candidates, res...)
ok, compressionOp, compressionAlgo := prioritize.CandidateCompression(v2Options, digest, compressorName)
if !ok {
return candidates, nil
}
rows, err := tx.Query("SELECT location, time FROM KnownLocations "+
"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
transport.Name(), scope.Opaque, digest.String())
if err != nil {
return nil, fmt.Errorf("looking up candidate locations: %w", err)
}
defer rows.Close()
rowAdded := false
for rows.Next() {
var location string
var time time.Time
if err := rows.Scan(&location, &time); err != nil {
return nil, fmt.Errorf("scanning candidate: %w", err)
}
candidates = append(candidates, prioritize.CandidateWithTime{
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressionOperation: compressionOp,
CompressionAlgorithm: compressionAlgo,
Location: types.BICLocationReference{Opaque: location},
},
LastSeen: time,
})
rowAdded = true
}
if err := rows.Err(); err != nil {
return nil, fmt.Errorf("iterating through locations: %w", err)
}
if !rowAdded && v2Options != nil {
candidates = append(candidates, prioritize.CandidateWithTime{
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressionOperation: compressionOp,
CompressionAlgorithm: compressionAlgo,
UnknownLocation: true,
Location: types.BICLocationReference{Opaque: ""},
},
LastSeen: time.Time{},
})
}
return candidates, nil
}
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known)
// that could possibly be reused within the specified (transport scope) (if they still
// exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
// up variants of the blob which have the same uncompressed digest.
//
// The CompressorName fields in returned data must never be UnknownCompression.
func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
return sqc.candidateLocations(transport, scope, digest, canSubstitute, true)
func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
return sqc.candidateLocations(transport, scope, digest, options.CanSubstitute, &options)
}
func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 {
// candidateLocations implements CandidateLocations / CandidateLocations2.
// v2Options is not nil if the caller is CandidateLocations2.
func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool,
v2Options *blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
var uncompressedDigest digest.Digest // = ""
res, err := transaction(sqc, func(tx *sql.Tx) ([]prioritize.CandidateWithTime, error) {
res := []prioritize.CandidateWithTime{}
res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, v2Output)
res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, v2Options)
if err != nil {
return nil, err
}
@ -538,7 +536,7 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
return nil, err
}
if otherDigest != primaryDigest && otherDigest != uncompressedDigest {
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Output)
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options)
if err != nil {
return nil, err
}
@ -549,7 +547,7 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
}
if uncompressedDigest != primaryDigest {
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Output)
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options)
if err != nil {
return nil, err
}
@ -571,5 +569,5 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (sqc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
return blobinfocache.CandidateLocationsFromV2(sqc.candidateLocations(transport, scope, digest, canSubstitute, false))
return blobinfocache.CandidateLocationsFromV2(sqc.candidateLocations(transport, scope, digest, canSubstitute, nil))
}

View file

@ -13,14 +13,15 @@ import (
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/multierr"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/ioutils"
helperclient "github.com/docker/docker-credential-helpers/client"
"github.com/docker/docker-credential-helpers/credentials"
"github.com/hashicorp/go-multierror"
"github.com/sirupsen/logrus"
)
@ -231,7 +232,7 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (t
return types.DockerAuthConfig{}, err
}
var multiErr error
var multiErr []error
for _, helper := range helpers {
var (
creds types.DockerAuthConfig
@ -253,7 +254,7 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (t
}
if err != nil {
logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", helperKey, helper, err)
multiErr = multierror.Append(multiErr, err)
multiErr = append(multiErr, err)
continue
}
if creds != (types.DockerAuthConfig{}) {
@ -266,7 +267,7 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (t
}
}
if multiErr != nil {
return types.DockerAuthConfig{}, multiErr
return types.DockerAuthConfig{}, multierr.Format("errors looking up credentials:\n\t* ", "\nt* ", "\n", multiErr)
}
logrus.Debugf("No credentials for %s found", key)
@ -313,7 +314,7 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s
}
// Make sure to collect all errors.
var multiErr error
var multiErr []error
for _, helper := range helpers {
var desc string
var err error
@ -345,14 +346,14 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s
}
}
if err != nil {
multiErr = multierror.Append(multiErr, err)
multiErr = append(multiErr, err)
logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", key, helper, err)
continue
}
logrus.Debugf("Stored credentials for %s in credential helper %s", key, helper)
return desc, nil
}
return "", multiErr
return "", multierr.Format("Errors storing credentials\n\t* ", "\n\t* ", "\n", multiErr)
}
func unsupportedNamespaceErr(helper string) error {
@ -376,53 +377,56 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error {
return err
}
var multiErr error
isLoggedIn := false
removeFromCredHelper := func(helper string) {
removeFromCredHelper := func(helper string) error {
if isNamespaced {
logrus.Debugf("Not removing credentials because namespaced keys are not supported for the credential helper: %s", helper)
return
return nil
}
err := deleteCredsFromCredHelper(helper, key)
if err == nil {
logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper)
isLoggedIn = true
return
return nil
}
if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
logrus.Debugf("Not logged in to %s with credential helper %s", key, helper)
return
return nil
}
multiErr = multierror.Append(multiErr, fmt.Errorf("removing credentials for %s from credential helper %s: %w", key, helper, err))
return fmt.Errorf("removing credentials for %s from credential helper %s: %w", key, helper, err)
}
var multiErr []error
for _, helper := range helpers {
var err error
switch helper {
// Special-case the built-in helper for auth files.
case sysregistriesv2.AuthenticationFileHelper:
_, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
var helperErr error
if innerHelper, exists := fileContents.CredHelpers[key]; exists {
removeFromCredHelper(innerHelper)
helperErr = removeFromCredHelper(innerHelper)
}
if _, ok := fileContents.AuthConfigs[key]; ok {
isLoggedIn = true
delete(fileContents.AuthConfigs, key)
}
return true, "", multiErr
return true, "", helperErr
})
if err != nil {
multiErr = multierror.Append(multiErr, err)
multiErr = append(multiErr, err)
}
// External helpers.
default:
removeFromCredHelper(helper)
if err := removeFromCredHelper(helper); err != nil {
multiErr = append(multiErr, err)
}
}
}
if multiErr != nil {
return multiErr
return multierr.Format("errors removing credentials\n\t* ", "\n\t*", "\n", multiErr)
}
if !isLoggedIn {
return ErrNotLoggedIn
@ -439,7 +443,7 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
return err
}
var multiErr error
var multiErr []error
for _, helper := range helpers {
var err error
switch helper {
@ -479,13 +483,16 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
}
if err != nil {
logrus.Debugf("Error removing credentials from credential helper %s: %v", helper, err)
multiErr = multierror.Append(multiErr, err)
multiErr = append(multiErr, err)
continue
}
logrus.Debugf("All credentials removed from credential helper %s", helper)
}
return multiErr
if multiErr != nil {
return multierr.Format("errors removing all credentials:\n\t* ", "\n\t* ", "\n", multiErr)
}
return nil
}
// prepareForEdit processes sys and key (if keyRelevant) to return:
@ -570,9 +577,9 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (authPath, bool,
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
if runtimeDir != "" {
// This function does not in general need to separately check that the returned path exists; thats racy, and callers will fail accessing the file anyway.
// We are checking for os.IsNotExist here only to give the user better guidance what to do in this special case.
_, err := os.Stat(runtimeDir)
if os.IsNotExist(err) {
// We are checking for fs.ErrNotExist here only to give the user better guidance what to do in this special case.
err := fileutils.Exists(runtimeDir)
if errors.Is(err, fs.ErrNotExist) {
// This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory
// or made a typo while setting the environment variable,
// so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside.

View file

@ -2,6 +2,7 @@ package sysregistriesv2
import (
"fmt"
"maps"
"os"
"path/filepath"
"reflect"
@ -9,12 +10,12 @@ import (
"github.com/BurntSushi/toml"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/multierr"
"github.com/containers/image/v5/internal/rootless"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/lockfile"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
)
// defaultShortNameMode is the default mode of registries.conf files if the
@ -297,11 +298,7 @@ func newShortNameAliasCache(path string, conf *shortNameAliasConf) (*shortNameAl
}
}
if len(errs) > 0 {
err := errs[0]
for i := 1; i < len(errs); i++ {
err = fmt.Errorf("%v\n: %w", errs[i], err)
}
return nil, err
return nil, multierr.Format("", "\n", "", errs)
}
return &res, nil
}

View file

@ -13,6 +13,7 @@ import (
"github.com/BurntSushi/toml"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/regexp"
"github.com/sirupsen/logrus"
@ -564,7 +565,7 @@ func newConfigWrapperWithHomeDir(ctx *types.SystemContext, homeDir string) confi
// decide configPath using per-user path or system file
if ctx != nil && ctx.SystemRegistriesConfPath != "" {
wrapper.configPath = ctx.SystemRegistriesConfPath
} else if _, err := os.Stat(userRegistriesFilePath); err == nil {
} else if err := fileutils.Exists(userRegistriesFilePath); err == nil {
// per-user registries.conf exists, not reading system dir
// return config dirs from ctx or per-user one
wrapper.configPath = userRegistriesFilePath

View file

@ -8,11 +8,11 @@ import (
"net/http"
"os"
"path/filepath"
"slices"
"strings"
"time"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
// SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc
@ -55,9 +55,9 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
}
tlsc.RootCAs.AppendCertsFromPEM(data)
}
if strings.HasSuffix(f.Name(), ".cert") {
if base, ok := strings.CutSuffix(f.Name(), ".cert"); ok {
certName := f.Name()
keyName := certName[:len(certName)-5] + ".key"
keyName := base + ".key"
logrus.Debugf(" cert: %s", fullPath)
if !hasFile(fs, keyName) {
return fmt.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
@ -68,9 +68,9 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
}
tlsc.Certificates = append(slices.Clone(tlsc.Certificates), cert)
}
if strings.HasSuffix(f.Name(), ".key") {
if base, ok := strings.CutSuffix(f.Name(), ".key"); ok {
keyName := f.Name()
certName := keyName[:len(keyName)-4] + ".cert"
certName := base + ".cert"
logrus.Debugf(" key: %s", fullPath)
if !hasFile(fs, certName) {
return fmt.Errorf("missing client certificate %s for key %s", certName, keyName)

View file

@ -111,7 +111,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifRefere
History: []imgspecv1.History{
{
Created: &created,
CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Hex(), os.PathSeparator),
CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Encoded(), os.PathSeparator),
Comment: "imported from SIF, uuid: " + sifImg.ID(),
},
{

View file

@ -5,13 +5,13 @@ package signature
import (
"errors"
"fmt"
"slices"
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/signature/internal"
"github.com/opencontainers/go-digest"
"golang.org/x/exp/slices"
)
// SignOptions includes optional parameters for signing container images.
@ -76,10 +76,10 @@ func VerifyImageManifestSignatureUsingKeyIdentityList(unverifiedSignature, unver
validateSignedDockerReference: func(signedDockerReference string) error {
signedRef, err := reference.ParseNormalizedNamed(signedDockerReference)
if err != nil {
return internal.NewInvalidSignatureError(fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference))
return internal.NewInvalidSignatureError(fmt.Sprintf("Invalid docker reference %q in signature", signedDockerReference))
}
if signedRef.String() != expectedRef.String() {
return internal.NewInvalidSignatureError(fmt.Sprintf("Docker reference %s does not match %s",
return internal.NewInvalidSignatureError(fmt.Sprintf("Docker reference %q does not match %q",
signedDockerReference, expectedDockerReference))
}
return nil

View file

@ -10,12 +10,12 @@ import (
"encoding/asn1"
"errors"
"fmt"
"slices"
"time"
"github.com/containers/image/v5/signature/internal"
"github.com/sigstore/fulcio/pkg/certificate"
"github.com/sigstore/sigstore/pkg/cryptoutils"
"golang.org/x/exp/slices"
)
// fulcioTrustRoot contains policy allow validating Fulcio-issued certificates.
@ -178,7 +178,7 @@ func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time,
// == Validate the OIDC subject
if !slices.Contains(untrustedCertificate.EmailAddresses, f.subjectEmail) {
return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %s not found (got %#v)",
return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %q not found (got %q)",
f.subjectEmail,
untrustedCertificate.EmailAddresses))
}

View file

@ -31,7 +31,7 @@ func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) er
return JSONFormatError(err.Error())
}
if t != json.Delim('{') {
return JSONFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t))
return JSONFormatError(fmt.Sprintf("JSON object expected, got %#v", t))
}
for {
t, err := dec.Token()
@ -45,16 +45,16 @@ func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) er
key, ok := t.(string)
if !ok {
// Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
return JSONFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
return JSONFormatError(fmt.Sprintf("Key string literal expected, got %#v", t))
}
if seenKeys.Contains(key) {
return JSONFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
return JSONFormatError(fmt.Sprintf("Duplicate key %q", key))
}
seenKeys.Add(key)
valuePtr := fieldResolver(key)
if valuePtr == nil {
return JSONFormatError(fmt.Sprintf("Unknown key \"%s\"", key))
return JSONFormatError(fmt.Sprintf("Unknown key %q", key))
}
// This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value.
if err := dec.Decode(valuePtr); err != nil {
@ -83,7 +83,7 @@ func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]
}
for key := range exactFields {
if !seenKeys.Contains(key) {
return JSONFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key))
return JSONFormatError(fmt.Sprintf(`Key %q missing in a JSON object`, key))
}
}
return nil

View file

@ -220,7 +220,7 @@ func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unver
return time.Time{}, NewInvalidSignatureError(`Missing "data.hash.algorithm" field in hashedrekord`)
}
// FIXME: Rekor 1.3.5 has added SHA-386 and SHA-512 as recognized values.
// Eventually we should support them as well; doing that cleanly would require updqating to Rekor 1.3.5, which requires Go 1.21.
// Eventually we should support them as well.
// Short-term, Cosign (as of 2024-02 and Cosign 2.2.3) only produces and accepts SHA-256, so right now that’s not a compatibility
// issue.
if *hashedRekordV001.Data.Hash.Algorithm != models.HashedrekordV001SchemaDataHashAlgorithmSha256 {

View file

@ -150,7 +150,11 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
}); err != nil {
return err
}
s.untrustedDockerManifestDigest = digest.Digest(digestString)
digestValue, err := digest.Parse(digestString)
if err != nil {
return NewInvalidSignatureError(fmt.Sprintf(`invalid docker-manifest-digest value %q: %v`, digestString, err))
}
s.untrustedDockerManifestDigest = digestValue
return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
"docker-reference": &s.untrustedDockerReference,

View file

@ -24,6 +24,7 @@ import (
"github.com/containers/image/v5/signature/internal"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/regexp"
)
@ -65,7 +66,7 @@ func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string) stri
return sys.SignaturePolicyPath
}
userPolicyFilePath := filepath.Join(homeDir, userPolicyFile)
if _, err := os.Stat(userPolicyFilePath); err == nil {
if err := fileutils.Exists(userPolicyFilePath); err == nil {
return userPolicyFilePath
}
if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
@ -246,7 +247,7 @@ func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) {
case prTypeSigstoreSigned:
res = &prSigstoreSigned{}
default:
return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type))
return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type %q", typeField.Type))
}
if err := json.Unmarshal(data, &res); err != nil {
return nil, err
@ -278,7 +279,7 @@ func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error {
}
if tmp.Type != prTypeInsecureAcceptAnything {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
*pr = *newPRInsecureAcceptAnything()
return nil
@ -308,7 +309,7 @@ func (pr *prReject) UnmarshalJSON(data []byte) error {
}
if tmp.Type != prTypeReject {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
*pr = *newPRReject()
return nil
@ -317,7 +318,7 @@ func (pr *prReject) UnmarshalJSON(data []byte) error {
// newPRSignedBy returns a new prSignedBy if parameters are valid.
func newPRSignedBy(keyType sbKeyType, keyPath string, keyPaths []string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
if !keyType.IsValid() {
return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType))
return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType %q", keyType))
}
keySources := 0
if keyPath != "" {
@ -409,7 +410,7 @@ func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
}
if tmp.Type != prTypeSignedBy {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
if signedIdentity == nil {
tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()
@ -465,7 +466,7 @@ func (kt *sbKeyType) UnmarshalJSON(data []byte) error {
return err
}
if !sbKeyType(s).IsValid() {
return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value \"%s\"", s))
return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value %q", s))
}
*kt = sbKeyType(s)
return nil
@ -503,7 +504,7 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
}
if tmp.Type != prTypeSignedBaseLayer {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity)
if err != nil {
@ -539,7 +540,7 @@ func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error)
case prmTypeRemapIdentity:
res = &prmRemapIdentity{}
default:
return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type \"%s\"", typeField.Type))
return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type %q", typeField.Type))
}
if err := json.Unmarshal(data, &res); err != nil {
return nil, err
@ -571,7 +572,7 @@ func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
}
if tmp.Type != prmTypeMatchExact {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
*prm = *newPRMMatchExact()
return nil
@ -601,7 +602,7 @@ func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
}
if tmp.Type != prmTypeMatchRepoDigestOrExact {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
*prm = *newPRMMatchRepoDigestOrExact()
return nil
@ -631,7 +632,7 @@ func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
}
if tmp.Type != prmTypeMatchRepository {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
*prm = *newPRMMatchRepository()
return nil
@ -641,10 +642,10 @@ func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
func newPRMExactReference(dockerReference string) (*prmExactReference, error) {
ref, err := reference.ParseNormalizedNamed(dockerReference)
if err != nil {
return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %s: %s", dockerReference, err.Error()))
return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %q: %s", dockerReference, err.Error()))
}
if reference.IsNameOnly(ref) {
return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %s contains neither a tag nor digest", dockerReference))
return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %q contains neither a tag nor digest", dockerReference))
}
return &prmExactReference{
prmCommon: prmCommon{Type: prmTypeExactReference},
@ -672,7 +673,7 @@ func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
}
if tmp.Type != prmTypeExactReference {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
res, err := newPRMExactReference(tmp.DockerReference)
@ -686,7 +687,7 @@ func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
// newPRMExactRepository is NewPRMExactRepository, except it returns the private type.
func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil {
return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error()))
return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %q: %s", dockerRepository, err.Error()))
}
return &prmExactRepository{
prmCommon: prmCommon{Type: prmTypeExactRepository},
@ -714,7 +715,7 @@ func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
}
if tmp.Type != prmTypeExactRepository {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
res, err := newPRMExactRepository(tmp.DockerRepository)
@ -787,7 +788,7 @@ func (prm *prmRemapIdentity) UnmarshalJSON(data []byte) error {
}
if tmp.Type != prmTypeRemapIdentity {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
res, err := newPRMRemapIdentity(tmp.Prefix, tmp.SignedPrefix)

View file

@ -176,7 +176,7 @@ func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
}
if tmp.Type != prTypeSigstoreSigned {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
if signedIdentity == nil {
tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()

View file

@ -94,10 +94,10 @@ const (
pcDestroyed policyContextState = "Destroyed"
)
// changeContextState changes pc.state, or fails if the state is unexpected
// changeState changes pc.state, or fails if the state is unexpected
func (pc *PolicyContext) changeState(expected, new policyContextState) error {
if pc.state != expected {
return fmt.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
return fmt.Errorf(`Invalid PolicyContext state, expected %q, found %q`, expected, pc.state)
}
pc.state = new
return nil
@ -140,21 +140,21 @@ func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) Polic
// Look for a full match.
identity := ref.PolicyConfigurationIdentity()
if req, ok := transportScopes[identity]; ok {
logrus.Debugf(` Using transport "%s" policy section %s`, transportName, identity)
logrus.Debugf(` Using transport %q policy section %q`, transportName, identity)
return req
}
// Look for a match of the possible parent namespaces.
for _, name := range ref.PolicyConfigurationNamespaces() {
if req, ok := transportScopes[name]; ok {
logrus.Debugf(` Using transport "%s" specific policy section %s`, transportName, name)
logrus.Debugf(` Using transport %q specific policy section %q`, transportName, name)
return req
}
}
// Look for a default match for the transport.
if req, ok := transportScopes[""]; ok {
logrus.Debugf(` Using transport "%s" policy section ""`, transportName)
logrus.Debugf(` Using transport %q policy section ""`, transportName)
return req
}
}

View file

@ -7,12 +7,12 @@ import (
"errors"
"fmt"
"os"
"strings"
"slices"
"github.com/containers/image/v5/internal/multierr"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/manifest"
digest "github.com/opencontainers/go-digest"
"golang.org/x/exp/slices"
)
func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
@ -20,10 +20,10 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image priva
case SBKeyTypeGPGKeys:
case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
// FIXME? Reject this at policy parsing time already?
return sarRejected, nil, fmt.Errorf(`Unimplemented "keyType" value "%s"`, string(pr.KeyType))
return sarRejected, nil, fmt.Errorf(`Unimplemented "keyType" value %q`, string(pr.KeyType))
default:
// This should never happen, newPRSignedBy ensures KeyType.IsValid()
return sarRejected, nil, fmt.Errorf(`Unknown "keyType" value "%s"`, string(pr.KeyType))
return sarRejected, nil, fmt.Errorf(`Unknown "keyType" value %q`, string(pr.KeyType))
}
// FIXME: move this to per-context initialization
@ -77,7 +77,7 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image priva
},
validateSignedDockerReference: func(ref string) error {
if !pr.SignedIdentity.matchesDockerReference(image, ref) {
return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
return PolicyRequirementError(fmt.Sprintf("Signature for identity %q is not accepted", ref))
}
return nil
},
@ -123,7 +123,7 @@ func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image private.U
// Huh?! This should not happen at all; treat it as any other invalid value.
fallthrough
default:
reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
reason = fmt.Errorf(`Internal error: Unexpected signature verification result %q`, string(res))
}
rejections = append(rejections, reason)
}
@ -134,12 +134,7 @@ func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image private.U
case 1:
summary = rejections[0]
default:
var msgs []string
for _, e := range rejections {
msgs = append(msgs, e.Error())
}
summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s",
strings.Join(msgs, "; ")))
summary = PolicyRequirementError(multierr.Format("None of the signatures were accepted, reasons: ", "; ", "", rejections).Error())
}
return false, summary
}

View file

@ -10,8 +10,8 @@ import (
"errors"
"fmt"
"os"
"strings"
"github.com/containers/image/v5/internal/multierr"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
@ -194,7 +194,7 @@ func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image priva
signature, err := internal.VerifySigstorePayload(publicKey, untrustedPayload, untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{
ValidateSignedDockerReference: func(ref string) error {
if !pr.SignedIdentity.matchesDockerReference(image, ref) {
return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
return PolicyRequirementError(fmt.Sprintf("Signature for identity %q is not accepted", ref))
}
return nil
},
@ -253,7 +253,7 @@ func (pr *prSigstoreSigned) isRunningImageAllowed(ctx context.Context, image pri
// Huh?! This should not happen at all; treat it as any other invalid value.
fallthrough
default:
reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
reason = fmt.Errorf(`Internal error: Unexpected signature verification result %q`, string(res))
}
rejections = append(rejections, reason)
}
@ -270,12 +270,7 @@ func (pr *prSigstoreSigned) isRunningImageAllowed(ctx context.Context, image pri
case 1:
summary = rejections[0]
default:
var msgs []string
for _, e := range rejections {
msgs = append(msgs, e.Error())
}
summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s",
strings.Join(msgs, "; ")))
summary = PolicyRequirementError(multierr.Format("None of the signatures were accepted, reasons: ", "; ", "", rejections).Error())
}
return false, summary
}

View file

@ -136,7 +136,7 @@ func (prm *prmRemapIdentity) remapReferencePrefix(ref reference.Named) (referenc
newNamedRef := strings.Replace(refString, prm.Prefix, prm.SignedPrefix, 1)
newParsedRef, err := reference.ParseNamed(newNamedRef)
if err != nil {
return nil, fmt.Errorf(`error rewriting reference from "%s" to "%s": %v`, refString, newNamedRef, err)
return nil, fmt.Errorf(`error rewriting reference from %q to %q: %v`, refString, newNamedRef, err)
}
return newParsedRef, nil
}

View file

@ -173,7 +173,11 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
}); err != nil {
return err
}
s.untrustedDockerManifestDigest = digest.Digest(digestString)
digestValue, err := digest.Parse(digestString)
if err != nil {
return internal.NewInvalidSignatureError(fmt.Sprintf(`invalid docker-manifest-digest value %q: %v`, digestString, err))
}
s.untrustedDockerManifestDigest = digestValue
return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
"docker-reference": &s.untrustedDockerReference,

View file

@ -12,6 +12,7 @@ import (
"io"
"os"
"path/filepath"
"slices"
"sync"
"sync/atomic"
@ -34,7 +35,6 @@ import (
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
var (
@ -59,7 +59,7 @@ type storageImageDestination struct {
nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs
manifest []byte // Manifest contents, temporary
manifestDigest digest.Digest // Valid if len(manifest) != 0
untrustedDiffIDValues []digest.Digest // From config’s RootFS.DiffIDs, valid if not nil
untrustedDiffIDValues []digest.Digest // From config’s RootFS.DiffIDs (not even validated to be valid digest.Digest!); or nil if not read yet
signatures []byte // Signature contents, temporary
signatureses map[digest.Digest][]byte // Instance signature contents, temporary
metadata storageImageMetadata // Metadata contents being built
@ -94,11 +94,11 @@ type storageImageDestinationLockProtected struct {
blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
indexToTOCDigest map[int]digest.Digest // Mapping from layer index to a TOC Digest, IFF the layer was created/found/reused by TOC digest
// Layer data: Before commitLayer is called, either at least one of (diffOutputs, blobAdditionalLayer, filenames)
// Layer data: Before commitLayer is called, either at least one of (diffOutputs, indexToAdditionalLayer, filenames)
// should be available; or indexToTOCDigest/blobDiffIDs should be enough to locate an existing c/storage layer.
// They are looked up in the order they are mentioned above.
diffOutputs map[int]*graphdriver.DriverWithDifferOutput // Mapping from layer index to a partially-pulled layer intermediate data
blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
diffOutputs map[int]*graphdriver.DriverWithDifferOutput // Mapping from layer index to a partially-pulled layer intermediate data
indexToAdditionalLayer map[int]storage.AdditionalLayer // Mapping from layer index to their corresponding additional layer
// Mapping from layer blobsums to names of files we used to hold them. If set, fileSizes and blobDiffIDs must also be set.
filenames map[digest.Digest]string
// Mapping from layer blobsums to their sizes. If set, filenames and blobDiffIDs must also be set.
@ -145,13 +145,13 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*
},
indexToStorageID: make(map[int]string),
lockProtected: storageImageDestinationLockProtected{
indexToAddedLayerInfo: make(map[int]addedLayerInfo),
blobDiffIDs: make(map[digest.Digest]digest.Digest),
indexToTOCDigest: make(map[int]digest.Digest),
diffOutputs: make(map[int]*graphdriver.DriverWithDifferOutput),
blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
filenames: make(map[digest.Digest]string),
fileSizes: make(map[digest.Digest]int64),
indexToAddedLayerInfo: make(map[int]addedLayerInfo),
blobDiffIDs: make(map[digest.Digest]digest.Digest),
indexToTOCDigest: make(map[int]digest.Digest),
diffOutputs: make(map[int]*graphdriver.DriverWithDifferOutput),
indexToAdditionalLayer: make(map[int]storage.AdditionalLayer),
filenames: make(map[digest.Digest]string),
fileSizes: make(map[digest.Digest]int64),
},
}
dest.Compat = impl.AddCompat(dest)
@ -167,13 +167,11 @@ func (s *storageImageDestination) Reference() types.ImageReference {
// Close cleans up the temporary directory and additional layer store handlers.
func (s *storageImageDestination) Close() error {
// This is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock.
for _, al := range s.lockProtected.blobAdditionalLayer {
for _, al := range s.lockProtected.indexToAdditionalLayer {
al.Release()
}
for _, v := range s.lockProtected.diffOutputs {
if v.Target != "" {
_ = s.imageRef.transport.store.CleanupStagedLayer(v)
}
_ = s.imageRef.transport.store.CleanupStagedLayer(v)
}
return os.RemoveAll(s.directory)
}
@ -310,6 +308,12 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
if err != nil {
return private.UploadedBlob{}, err
}
succeeded := false
defer func() {
if !succeeded {
_ = s.imageRef.transport.store.CleanupStagedLayer(out)
}
}()
if out.TOCDigest == "" && out.UncompressedDigest == "" {
return private.UploadedBlob{}, errors.New("internal error: ApplyDiffWithDiffer succeeded with neither TOCDigest nor UncompressedDigest set")
@ -332,6 +336,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
s.lockProtected.diffOutputs[options.LayerIndex] = out
s.lock.Unlock()
succeeded = true
return private.UploadedBlob{
Digest: blobDigest,
Size: srcInfo.Size,
@ -377,14 +382,24 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
s.lock.Lock()
defer s.lock.Unlock()
if options.SrcRef != nil {
if options.SrcRef != nil && options.TOCDigest != "" && options.LayerIndex != nil {
// Check if we have the layer in the underlying additional layer store.
aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobDigest, options.SrcRef.String())
aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(options.TOCDigest, options.SrcRef.String())
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobDigest, err)
} else if err == nil {
s.lockProtected.blobDiffIDs[blobDigest] = aLayer.UncompressedDigest()
s.lockProtected.blobAdditionalLayer[blobDigest] = aLayer
alsTOCDigest := aLayer.TOCDigest()
if alsTOCDigest != options.TOCDigest {
// FIXME: If alsTOCDigest is "", the Additional Layer Store FUSE server is probably just too old, and we could
// probably go on reading the layer from other sources.
//
// Currently it should not be possible for alsTOCDigest to be set and not the expected value, but there’s
// not that much benefit to checking for equality — we trust the FUSE server to validate the digest either way.
return false, private.ReusedBlob{}, fmt.Errorf("additional layer for TOCDigest %q reports unexpected TOCDigest %q",
options.TOCDigest, alsTOCDigest)
}
s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest
s.lockProtected.indexToAdditionalLayer[*options.LayerIndex] = aLayer
return true, private.ReusedBlob{
Digest: blobDigest,
Size: aLayer.CompressedSize(),
@ -564,7 +579,7 @@ func (s *storageImageDestination) computeID(m manifest.Manifest) string {
}
// ordinaryImageID is a digest of a config, which is a JSON value.
// To avoid the risk of collisions, start the input with @ so that the input is not a valid JSON.
tocImageID := digest.FromString("@With TOC:" + tocIDInput).Hex()
tocImageID := digest.FromString("@With TOC:" + tocIDInput).Encoded()
logrus.Debugf("Ordinary storage image ID %s; a layer was looked up by TOC, so using image ID %s", ordinaryImageID, tocImageID)
return tocImageID
}
@ -651,11 +666,11 @@ func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDig
defer s.lock.Unlock()
if d, found := s.lockProtected.indexToTOCDigest[layerIndex]; found {
return "@TOC=" + d.Hex(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
return "@TOC=" + d.Encoded(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
}
if d, found := s.lockProtected.blobDiffIDs[blobDigest]; found {
return d.Hex(), true // This looks like chain IDs, and it uses the traditional value.
return d.Encoded(), true // This looks like chain IDs, and it uses the traditional value.
}
return "", false
}
@ -731,7 +746,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
id := layerIDComponent
if !layerIDComponentStandalone || parentLayer != "" {
id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Hex()
id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Encoded()
}
if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
// There's already a layer that should have the right contents, just reuse it.
@ -767,7 +782,13 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
logrus.Debugf("Skipping commit for layer %q, manifest not yet available", newLayerID)
return nil, nil
}
untrustedUncompressedDigest = d
// While the contents of the digest are untrusted, make sure at least the _format_ is valid,
// because we are going to write it to durable storage in expectedLayerDiffIDFlag .
if err := untrustedUncompressedDigest.Validate(); err != nil {
return nil, err
}
}
flags := make(map[string]interface{})
@ -793,7 +814,7 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
}
s.lock.Lock()
al, ok := s.lockProtected.blobAdditionalLayer[layerDigest]
al, ok := s.lockProtected.indexToAdditionalLayer[index]
s.lock.Unlock()
if ok {
layer, err := al.PutAs(newLayerID, parentLayer, nil)
@ -930,7 +951,6 @@ func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.D
// nothing is writing to s.manifest yet, or PutManifest has been called and s.manifest != nil.
// Either way this function does not need the protection of s.lock.
if s.manifest == nil {
logrus.Debugf("Skipping commit for layer %d, manifest not yet available", layerIndex)
return "", nil
}
@ -1201,7 +1221,7 @@ func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob
if err != nil {
return err
}
s.manifest = slices.Clone(manifestBlob)
s.manifest = bytes.Clone(manifestBlob)
s.manifestDigest = digest
return nil
}

View file

@ -6,6 +6,7 @@ package storage
import (
"context"
"fmt"
"slices"
"strings"
"github.com/containers/image/v5/docker/reference"
@ -15,7 +16,6 @@ import (
"github.com/containers/storage"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte

View file

@ -107,12 +107,11 @@ func (s *storageImageSource) Close() error {
// GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
// We need a valid digest value.
digest := info.Digest
err = digest.Validate()
if err != nil {
if err := digest.Validate(); err != nil {
return nil, 0, err
}
@ -154,7 +153,7 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c
// NOTE: the blob is first written to a temporary file and subsequently
// closed. The intention is to keep the time we own the storage lock
// as short as possible to allow other processes to access the storage.
rc, n, _, err = s.getBlobAndLayerID(digest, layers)
rc, n, _, err := s.getBlobAndLayerID(digest, layers)
if err != nil {
return nil, 0, err
}
@ -177,7 +176,7 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c
// On Unix and modern Windows (2022 at least) we can eagerly unlink the file to ensure it's automatically
// cleaned up on process termination (or if the caller forgets to invoke Close())
// On older versions of Windows we will have to fallback to relying on the caller to invoke Close()
if err := os.Remove(tmpFile.Name()); err != nil {
if err := os.Remove(tmpFile.Name()); err == nil {
tmpFileRemovePending = false
}
@ -308,9 +307,6 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige
if err != nil {
return nil, fmt.Errorf("reading layer %q in image %q: %w", layerID, s.image.ID, err)
}
if layer.UncompressedSize < 0 {
return nil, fmt.Errorf("uncompressed size for layer %q is unknown", layerID)
}
blobDigest := layer.UncompressedDigest
if blobDigest == "" {
@ -332,12 +328,16 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige
return nil, fmt.Errorf("parsing expected diffID %q for layer %q: %w", expectedDigest, layerID, err)
}
}
size := layer.UncompressedSize
if size < 0 {
size = -1
}
s.getBlobMutex.Lock()
s.getBlobMutexProtected.digestToLayerID[blobDigest] = layer.ID
s.getBlobMutex.Unlock()
blobInfo := types.BlobInfo{
Digest: blobDigest,
Size: layer.UncompressedSize,
Size: size,
MediaType: uncompressedLayerType,
}
physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...)
@ -453,10 +453,16 @@ func (s *storageImageSource) getSize() (int64, error) {
if err != nil {
return -1, err
}
if (layer.TOCDigest == "" && layer.UncompressedDigest == "") || layer.UncompressedSize < 0 {
if (layer.TOCDigest == "" && layer.UncompressedDigest == "") || (layer.TOCDigest == "" && layer.UncompressedSize < 0) {
return -1, fmt.Errorf("size for layer %q is unknown, failing getSize()", layerID)
}
sum += layer.UncompressedSize
	// FIXME: We allow layer.UncompressedSize < 0 above, because currently images in an Additional Layer Store don't provide that value.
	// Right now, various callers in Podman (and, also, newImage in this package) don't expect the size computation to fail.
// Should we update the callers, or do we need to continue returning inaccurate information here? Or should we pay the cost
// to compute the size from the diff?
if layer.UncompressedSize >= 0 {
sum += layer.UncompressedSize
}
if layer.Parent == "" {
break
}

View file

@ -3,6 +3,7 @@ package tarball
import (
"context"
"fmt"
"maps"
"os"
"strings"
@ -10,7 +11,6 @@ import (
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/maps"
)
// ConfigUpdater is an interface that ImageReferences for "tarball" images also

View file

@ -6,6 +6,7 @@ import (
"encoding/json"
"fmt"
"io"
"maps"
"os"
"runtime"
"strings"
@ -18,7 +19,6 @@ import (
digest "github.com/opencontainers/go-digest"
imgspecs "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/maps"
)
type tarballImageSource struct {
@ -117,7 +117,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
history = append(history, imgspecv1.History{
Created: &blobTime,
CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffID.Hex(), os.PathSeparator),
CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffID.Encoded(), os.PathSeparator),
Comment: comment,
})
// Use the mtime of the most recently modified file as the image's creation time.

View file

@ -28,11 +28,11 @@ func ParseImageName(imgName string) (types.ImageReference, error) {
// Keep this in sync with TransportFromImageName!
transportName, withinTransport, valid := strings.Cut(imgName, ":")
if !valid {
return nil, fmt.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
return nil, fmt.Errorf(`Invalid image name %q, expected colon-separated transport:reference`, imgName)
}
transport := transports.Get(transportName)
if transport == nil {
return nil, fmt.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, transportName)
return nil, fmt.Errorf(`Invalid image name %q, unknown transport %q`, imgName, transportName)
}
return transport.ParseReference(withinTransport)
}

View file

@ -6,7 +6,7 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 30
VersionMinor = 31
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 1