parent
326f0cfa2f
commit
5c292c61c6
1437 changed files with 208886 additions and 87131 deletions
29
vendor/github.com/containers/common/pkg/retry/retry.go
generated
vendored
29
vendor/github.com/containers/common/pkg/retry/retry.go
generated
vendored
|
|
@ -17,8 +17,9 @@ import (
|
|||
|
||||
// Options defines the option to retry.
|
||||
type Options struct {
|
||||
MaxRetry int // The number of times to possibly retry.
|
||||
Delay time.Duration // The delay to use between retries, if set.
|
||||
MaxRetry int // The number of times to possibly retry.
|
||||
Delay time.Duration // The delay to use between retries, if set.
|
||||
IsErrorRetryable func(error) bool
|
||||
}
|
||||
|
||||
// RetryOptions is deprecated, use Options.
|
||||
|
|
@ -31,6 +32,12 @@ func RetryIfNecessary(ctx context.Context, operation func() error, options *Opti
|
|||
|
||||
// IfNecessary retries the operation in exponential backoff with the retry Options.
|
||||
func IfNecessary(ctx context.Context, operation func() error, options *Options) error {
|
||||
var isRetryable func(error) bool
|
||||
if options.IsErrorRetryable != nil {
|
||||
isRetryable = options.IsErrorRetryable
|
||||
} else {
|
||||
isRetryable = IsErrorRetryable
|
||||
}
|
||||
err := operation()
|
||||
for attempt := 0; err != nil && isRetryable(err) && attempt < options.MaxRetry; attempt++ {
|
||||
delay := time.Duration(int(math.Pow(2, float64(attempt)))) * time.Second
|
||||
|
|
@ -49,7 +56,11 @@ func IfNecessary(ctx context.Context, operation func() error, options *Options)
|
|||
return err
|
||||
}
|
||||
|
||||
func isRetryable(err error) bool {
|
||||
// IsErrorRetryable makes a HEURISTIC determination whether it is worth retrying upon encountering an error.
|
||||
// That heuristic is NOT STABLE and it CAN CHANGE AT ANY TIME.
|
||||
// Callers that have a hard requirement for specific treatment of a class of errors should make their own check
|
||||
// instead of relying on this function maintaining its past behavior.
|
||||
func IsErrorRetryable(err error) bool {
|
||||
switch err {
|
||||
case nil:
|
||||
return false
|
||||
|
|
@ -72,18 +83,18 @@ func isRetryable(err error) bool {
|
|||
}
|
||||
return true
|
||||
case *net.OpError:
|
||||
return isRetryable(e.Err)
|
||||
return IsErrorRetryable(e.Err)
|
||||
case *url.Error: // This includes errors returned by the net/http client.
|
||||
if e.Err == io.EOF { // Happens when a server accepts a HTTP connection and sends EOF
|
||||
return true
|
||||
}
|
||||
return isRetryable(e.Err)
|
||||
return IsErrorRetryable(e.Err)
|
||||
case syscall.Errno:
|
||||
return isErrnoRetryable(e)
|
||||
case errcode.Errors:
|
||||
// if this error is a group of errors, process them all in turn
|
||||
for i := range e {
|
||||
if !isRetryable(e[i]) {
|
||||
if !IsErrorRetryable(e[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
|
@ -91,7 +102,7 @@ func isRetryable(err error) bool {
|
|||
case *multierror.Error:
|
||||
// if this error is a group of errors, process them all in turn
|
||||
for i := range e.Errors {
|
||||
if !isRetryable(e.Errors[i]) {
|
||||
if !IsErrorRetryable(e.Errors[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
|
@ -102,11 +113,11 @@ func isRetryable(err error) bool {
|
|||
}
|
||||
if unwrappable, ok := e.(unwrapper); ok {
|
||||
err = unwrappable.Unwrap()
|
||||
return isRetryable(err)
|
||||
return IsErrorRetryable(err)
|
||||
}
|
||||
case unwrapper: // Test this last, because various error types might implement .Unwrap()
|
||||
err = e.Unwrap()
|
||||
return isRetryable(err)
|
||||
return IsErrorRetryable(err)
|
||||
}
|
||||
|
||||
return false
|
||||
|
|
|
|||
26
vendor/github.com/containers/image/v5/copy/blob.go
generated
vendored
26
vendor/github.com/containers/image/v5/copy/blob.go
generated
vendored
|
|
@ -43,7 +43,7 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read
|
|||
stream.reader = bar.ProxyReader(stream.reader)
|
||||
|
||||
// === Decrypt the stream, if required.
|
||||
decryptionStep, err := ic.c.blobPipelineDecryptionStep(&stream, srcInfo)
|
||||
decryptionStep, err := ic.blobPipelineDecryptionStep(&stream, srcInfo)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
}
|
||||
|
|
@ -78,7 +78,7 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read
|
|||
// Before relaxing this, see the original pull request’s review if there are other reasons to reject this.
|
||||
return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy")
|
||||
}
|
||||
encryptionStep, err := ic.c.blobPipelineEncryptionStep(&stream, toEncrypt, srcInfo, decryptionStep)
|
||||
encryptionStep, err := ic.blobPipelineEncryptionStep(&stream, toEncrypt, srcInfo, decryptionStep)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
}
|
||||
|
|
@ -104,12 +104,11 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read
|
|||
if !isConfig {
|
||||
options.LayerIndex = &layerIndex
|
||||
}
|
||||
uploadedInfo, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options)
|
||||
destBlob, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, fmt.Errorf("writing blob: %w", err)
|
||||
}
|
||||
|
||||
uploadedInfo.Annotations = stream.info.Annotations
|
||||
uploadedInfo := updatedBlobInfoFromUpload(stream.info, destBlob)
|
||||
|
||||
compressionStep.updateCompressionEdits(&uploadedInfo.CompressionOperation, &uploadedInfo.CompressionAlgorithm, &uploadedInfo.Annotations)
|
||||
decryptionStep.updateCryptoOperation(&uploadedInfo.CryptoOperation)
|
||||
|
|
@ -169,3 +168,20 @@ func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
|
|||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// updatedBlobInfoFromUpload returns inputInfo updated with uploadedBlob which was created based on inputInfo.
|
||||
func updatedBlobInfoFromUpload(inputInfo types.BlobInfo, uploadedBlob private.UploadedBlob) types.BlobInfo {
|
||||
// The transport is only tasked with dealing with the raw blob, and possibly computing Digest/Size.
|
||||
// Handling of compression, encryption, and the related MIME types and the like are all the responsibility
|
||||
// of the generic code in this package.
|
||||
return types.BlobInfo{
|
||||
Digest: uploadedBlob.Digest,
|
||||
Size: uploadedBlob.Size,
|
||||
URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior.
|
||||
Annotations: inputInfo.Annotations,
|
||||
MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression/Crypto.
|
||||
CompressionOperation: inputInfo.CompressionOperation, // Expected to be unset, and only updated by copyBlobFromStream.
|
||||
CompressionAlgorithm: inputInfo.CompressionAlgorithm, // Expected to be unset, and only updated by copyBlobFromStream.
|
||||
CryptoOperation: inputInfo.CryptoOperation, // Expected to be unset, and only updated by copyBlobFromStream.
|
||||
}
|
||||
}
|
||||
|
|
|
|||
56
vendor/github.com/containers/image/v5/copy/compression.go
generated
vendored
56
vendor/github.com/containers/image/v5/copy/compression.go
generated
vendored
|
|
@ -6,10 +6,30 @@ import (
|
|||
"io"
|
||||
|
||||
internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/pkg/compression"
|
||||
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
|
||||
"github.com/containers/image/v5/types"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
var (
|
||||
// defaultCompressionFormat is used if the destination transport requests
|
||||
// compression, and the user does not explicitly instruct us to use an algorithm.
|
||||
defaultCompressionFormat = &compression.Gzip
|
||||
|
||||
// compressionBufferSize is the buffer size used to compress a blob
|
||||
compressionBufferSize = 1048576
|
||||
|
||||
// expectedCompressionFormats is used to check if a blob with a specified media type is compressed
|
||||
// using the algorithm that the media type says it should be compressed with
|
||||
expectedCompressionFormats = map[string]*compressiontypes.Algorithm{
|
||||
imgspecv1.MediaTypeImageLayerGzip: &compression.Gzip,
|
||||
imgspecv1.MediaTypeImageLayerZstd: &compression.Zstd,
|
||||
manifest.DockerV2Schema2LayerMediaType: &compression.Gzip,
|
||||
}
|
||||
)
|
||||
|
||||
// bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step.
|
||||
|
|
@ -109,13 +129,13 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp
|
|||
if ic.c.dest.DesiredLayerCompression() == types.Compress && !detected.isCompressed {
|
||||
logrus.Debugf("Compressing blob on the fly")
|
||||
var uploadedAlgorithm *compressiontypes.Algorithm
|
||||
if ic.c.compressionFormat != nil {
|
||||
uploadedAlgorithm = ic.c.compressionFormat
|
||||
if ic.compressionFormat != nil {
|
||||
uploadedAlgorithm = ic.compressionFormat
|
||||
} else {
|
||||
uploadedAlgorithm = defaultCompressionFormat
|
||||
}
|
||||
|
||||
reader, annotations := ic.c.compressedStream(stream.reader, *uploadedAlgorithm)
|
||||
reader, annotations := ic.compressedStream(stream.reader, *uploadedAlgorithm)
|
||||
// Note: reader must be closed on all return paths.
|
||||
stream.reader = reader
|
||||
stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info?
|
||||
|
|
@ -137,7 +157,7 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp
|
|||
// bpcRecompressCompressed checks if we should be recompressing a compressed input to another format, and returns a *bpCompressionStepData if so.
|
||||
func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
|
||||
if ic.c.dest.DesiredLayerCompression() == types.Compress && detected.isCompressed &&
|
||||
ic.c.compressionFormat != nil && ic.c.compressionFormat.Name() != detected.format.Name() {
|
||||
ic.compressionFormat != nil && ic.compressionFormat.Name() != detected.format.Name() {
|
||||
// When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally
|
||||
// re-compressed using the desired format.
|
||||
logrus.Debugf("Blob will be converted")
|
||||
|
|
@ -153,20 +173,20 @@ func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bp
|
|||
}
|
||||
}()
|
||||
|
||||
recompressed, annotations := ic.c.compressedStream(decompressed, *ic.c.compressionFormat)
|
||||
recompressed, annotations := ic.compressedStream(decompressed, *ic.compressionFormat)
|
||||
// Note: recompressed must be closed on all return paths.
|
||||
stream.reader = recompressed
|
||||
stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info?
|
||||
stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? Notably the current approach correctly removes zstd:chunked metadata annotations.
|
||||
Digest: "",
|
||||
Size: -1,
|
||||
}
|
||||
succeeded = true
|
||||
return &bpCompressionStepData{
|
||||
operation: types.PreserveOriginal,
|
||||
uploadedAlgorithm: ic.c.compressionFormat,
|
||||
uploadedAlgorithm: ic.compressionFormat,
|
||||
uploadedAnnotations: annotations,
|
||||
srcCompressorName: detected.srcCompressorName,
|
||||
uploadedCompressorName: ic.c.compressionFormat.Name(),
|
||||
uploadedCompressorName: ic.compressionFormat.Name(),
|
||||
closers: []io.Closer{decompressed, recompressed},
|
||||
}, nil
|
||||
}
|
||||
|
|
@ -183,7 +203,7 @@ func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bp
|
|||
}
|
||||
// Note: s must be closed on all return paths.
|
||||
stream.reader = s
|
||||
stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info?
|
||||
stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? Notably the current approach correctly removes zstd:chunked metadata annotations.
|
||||
Digest: "",
|
||||
Size: -1,
|
||||
}
|
||||
|
|
@ -199,7 +219,9 @@ func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bp
|
|||
}
|
||||
|
||||
// bpcPreserveOriginal returns a *bpCompressionStepData for not changing the original blob.
|
||||
func (ic *imageCopier) bpcPreserveOriginal(stream *sourceStream, detected bpDetectCompressionStepData,
|
||||
// This does not change the sourceStream parameter; we include it for symmetry with other
|
||||
// pipeline steps.
|
||||
func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCompressionStepData,
|
||||
layerCompressionChangeSupported bool) *bpCompressionStepData {
|
||||
logrus.Debugf("Using original blob without modification")
|
||||
// Remember if the original blob was compressed, and if so how, so that if
|
||||
|
|
@ -232,13 +254,11 @@ func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCom
|
|||
if *annotations == nil {
|
||||
*annotations = map[string]string{}
|
||||
}
|
||||
for k, v := range d.uploadedAnnotations {
|
||||
(*annotations)[k] = v
|
||||
}
|
||||
maps.Copy(*annotations, d.uploadedAnnotations)
|
||||
}
|
||||
|
||||
// recordValidatedBlobData updates b.blobInfoCache with data about the created uploadedInfo adnd the original srcInfo.
|
||||
// This must ONLY be called if all data has been validated by OUR code, and is not comming from third parties.
|
||||
// This must ONLY be called if all data has been validated by OUR code, and is not coming from third parties.
|
||||
func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInfo types.BlobInfo, srcInfo types.BlobInfo,
|
||||
encryptionStep *bpEncryptionStepData, decryptionStep *bpDecryptionStepData) error {
|
||||
// Don’t record any associations that involve encrypted data. This is a bit crude,
|
||||
|
|
@ -298,24 +318,24 @@ func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, co
|
|||
}
|
||||
|
||||
// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
|
||||
func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) {
|
||||
func (ic *imageCopier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) {
|
||||
err := errors.New("Internal error: unexpected panic in compressGoroutine")
|
||||
defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
|
||||
_ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
|
||||
}()
|
||||
|
||||
err = doCompression(dest, src, metadata, compressionFormat, c.compressionLevel)
|
||||
err = doCompression(dest, src, metadata, compressionFormat, ic.compressionLevel)
|
||||
}
|
||||
|
||||
// compressedStream returns a stream the input reader compressed using format, and a metadata map.
|
||||
// The caller must close the returned reader.
|
||||
// AFTER the stream is consumed, metadata will be updated with annotations to use on the data.
|
||||
func (c *copier) compressedStream(reader io.Reader, algorithm compressiontypes.Algorithm) (io.ReadCloser, map[string]string) {
|
||||
func (ic *imageCopier) compressedStream(reader io.Reader, algorithm compressiontypes.Algorithm) (io.ReadCloser, map[string]string) {
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
annotations := map[string]string{}
|
||||
// If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
|
||||
// e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
|
||||
// we don’t care.
|
||||
go c.compressGoroutine(pipeWriter, reader, annotations, algorithm) // Closes pipeWriter
|
||||
go ic.compressGoroutine(pipeWriter, reader, annotations, algorithm) // Closes pipeWriter
|
||||
return pipeReader, annotations
|
||||
}
|
||||
|
|
|
|||
1139
vendor/github.com/containers/image/v5/copy/copy.go
generated
vendored
1139
vendor/github.com/containers/image/v5/copy/copy.go
generated
vendored
File diff suppressed because it is too large
Load diff
109
vendor/github.com/containers/image/v5/copy/encryption.go
generated
vendored
109
vendor/github.com/containers/image/v5/copy/encryption.go
generated
vendored
|
|
@ -7,6 +7,8 @@ import (
|
|||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/ocicrypt"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// isOciEncrypted returns a bool indicating if a mediatype is encrypted
|
||||
|
|
@ -18,12 +20,9 @@ func isOciEncrypted(mediatype string) bool {
|
|||
// isEncrypted checks if an image is encrypted
|
||||
func isEncrypted(i types.Image) bool {
|
||||
layers := i.LayerInfos()
|
||||
for _, l := range layers {
|
||||
if isOciEncrypted(l.MediaType) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
return slices.ContainsFunc(layers, func(l types.BlobInfo) bool {
|
||||
return isOciEncrypted(l.MediaType)
|
||||
})
|
||||
}
|
||||
|
||||
// bpDecryptionStepData contains data that the copy pipeline needs about the decryption step.
|
||||
|
|
@ -34,30 +33,33 @@ type bpDecryptionStepData struct {
|
|||
// blobPipelineDecryptionStep updates *stream to decrypt if, it necessary.
|
||||
// srcInfo is only used for error messages.
|
||||
// Returns data for other steps; the caller should eventually use updateCryptoOperation.
|
||||
func (c *copier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) {
|
||||
if isOciEncrypted(stream.info.MediaType) && c.ociDecryptConfig != nil {
|
||||
desc := imgspecv1.Descriptor{
|
||||
Annotations: stream.info.Annotations,
|
||||
}
|
||||
reader, decryptedDigest, err := ocicrypt.DecryptLayer(c.ociDecryptConfig, stream.reader, desc, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err)
|
||||
}
|
||||
|
||||
stream.reader = reader
|
||||
stream.info.Digest = decryptedDigest
|
||||
stream.info.Size = -1
|
||||
for k := range stream.info.Annotations {
|
||||
if strings.HasPrefix(k, "org.opencontainers.image.enc") {
|
||||
delete(stream.info.Annotations, k)
|
||||
}
|
||||
}
|
||||
func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) {
|
||||
if !isOciEncrypted(stream.info.MediaType) || ic.c.ociDecryptConfig == nil {
|
||||
return &bpDecryptionStepData{
|
||||
decrypting: true,
|
||||
decrypting: false,
|
||||
}, nil
|
||||
}
|
||||
|
||||
if ic.cannotModifyManifestReason != "" {
|
||||
return nil, fmt.Errorf("layer %s should be decrypted, but we can’t modify the manifest: %s", srcInfo.Digest, ic.cannotModifyManifestReason)
|
||||
}
|
||||
|
||||
desc := imgspecv1.Descriptor{
|
||||
Annotations: stream.info.Annotations,
|
||||
}
|
||||
reader, decryptedDigest, err := ocicrypt.DecryptLayer(ic.c.ociDecryptConfig, stream.reader, desc, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err)
|
||||
}
|
||||
|
||||
stream.reader = reader
|
||||
stream.info.Digest = decryptedDigest
|
||||
stream.info.Size = -1
|
||||
maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool {
|
||||
return strings.HasPrefix(k, "org.opencontainers.image.enc")
|
||||
})
|
||||
return &bpDecryptionStepData{
|
||||
decrypting: false,
|
||||
decrypting: true,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
@ -77,34 +79,39 @@ type bpEncryptionStepData struct {
|
|||
// blobPipelineEncryptionStep updates *stream to encrypt if, it required by toEncrypt.
|
||||
// srcInfo is primarily used for error messages.
|
||||
// Returns data for other steps; the caller should eventually call updateCryptoOperationAndAnnotations.
|
||||
func (c *copier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo,
|
||||
func (ic *imageCopier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo,
|
||||
decryptionStep *bpDecryptionStepData) (*bpEncryptionStepData, error) {
|
||||
if toEncrypt && !isOciEncrypted(srcInfo.MediaType) && c.ociEncryptConfig != nil {
|
||||
var annotations map[string]string
|
||||
if !decryptionStep.decrypting {
|
||||
annotations = srcInfo.Annotations
|
||||
}
|
||||
desc := imgspecv1.Descriptor{
|
||||
MediaType: srcInfo.MediaType,
|
||||
Digest: srcInfo.Digest,
|
||||
Size: srcInfo.Size,
|
||||
Annotations: annotations,
|
||||
}
|
||||
reader, finalizer, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, stream.reader, desc)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("encrypting blob %s: %w", srcInfo.Digest, err)
|
||||
}
|
||||
|
||||
stream.reader = reader
|
||||
stream.info.Digest = ""
|
||||
stream.info.Size = -1
|
||||
if !toEncrypt || isOciEncrypted(srcInfo.MediaType) || ic.c.ociEncryptConfig == nil {
|
||||
return &bpEncryptionStepData{
|
||||
encrypting: true,
|
||||
finalizer: finalizer,
|
||||
encrypting: false,
|
||||
}, nil
|
||||
}
|
||||
|
||||
if ic.cannotModifyManifestReason != "" {
|
||||
return nil, fmt.Errorf("layer %s should be encrypted, but we can’t modify the manifest: %s", srcInfo.Digest, ic.cannotModifyManifestReason)
|
||||
}
|
||||
|
||||
var annotations map[string]string
|
||||
if !decryptionStep.decrypting {
|
||||
annotations = srcInfo.Annotations
|
||||
}
|
||||
desc := imgspecv1.Descriptor{
|
||||
MediaType: srcInfo.MediaType,
|
||||
Digest: srcInfo.Digest,
|
||||
Size: srcInfo.Size,
|
||||
Annotations: annotations,
|
||||
}
|
||||
reader, finalizer, err := ocicrypt.EncryptLayer(ic.c.ociEncryptConfig, stream.reader, desc)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("encrypting blob %s: %w", srcInfo.Digest, err)
|
||||
}
|
||||
|
||||
stream.reader = reader
|
||||
stream.info.Digest = ""
|
||||
stream.info.Size = -1
|
||||
return &bpEncryptionStepData{
|
||||
encrypting: false,
|
||||
encrypting: true,
|
||||
finalizer: finalizer,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
@ -122,8 +129,6 @@ func (d *bpEncryptionStepData) updateCryptoOperationAndAnnotations(operation *ty
|
|||
if *annotations == nil {
|
||||
*annotations = map[string]string{}
|
||||
}
|
||||
for k, v := range encryptAnnotations {
|
||||
(*annotations)[k] = v
|
||||
}
|
||||
maps.Copy(*annotations, encryptAnnotations)
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
69
vendor/github.com/containers/image/v5/copy/manifest.go
generated
vendored
69
vendor/github.com/containers/image/v5/copy/manifest.go
generated
vendored
|
|
@ -6,9 +6,12 @@ import (
|
|||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/image/v5/internal/set"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/types"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert.
|
||||
|
|
@ -16,10 +19,13 @@ import (
|
|||
// Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used.
|
||||
var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType}
|
||||
|
||||
// ociEncryptionMIMETypes lists manifest MIME types that are known to support OCI encryption.
|
||||
var ociEncryptionMIMETypes = []string{v1.MediaTypeImageManifest}
|
||||
|
||||
// orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once.
|
||||
type orderedSet struct {
|
||||
list []string
|
||||
included map[string]struct{}
|
||||
included *set.Set[string]
|
||||
}
|
||||
|
||||
// newOrderedSet creates a correctly initialized orderedSet.
|
||||
|
|
@ -27,15 +33,15 @@ type orderedSet struct {
|
|||
func newOrderedSet() *orderedSet {
|
||||
return &orderedSet{
|
||||
list: []string{},
|
||||
included: map[string]struct{}{},
|
||||
included: set.New[string](),
|
||||
}
|
||||
}
|
||||
|
||||
// append adds s to the end of os, only if it is not included already.
|
||||
func (os *orderedSet) append(s string) {
|
||||
if _, ok := os.included[s]; !ok {
|
||||
if !os.included.Contains(s) {
|
||||
os.list = append(os.list, s)
|
||||
os.included[s] = struct{}{}
|
||||
os.included.Add(s)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -74,18 +80,42 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
|
|||
destSupportedManifestMIMETypes = []string{in.forceManifestMIMEType}
|
||||
}
|
||||
|
||||
if len(destSupportedManifestMIMETypes) == 0 && (!in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType)) {
|
||||
return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions.
|
||||
preferredMIMEType: srcType,
|
||||
otherMIMETypeCandidates: []string{},
|
||||
}, nil
|
||||
if len(destSupportedManifestMIMETypes) == 0 {
|
||||
if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType) {
|
||||
return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions.
|
||||
preferredMIMEType: srcType,
|
||||
otherMIMETypeCandidates: []string{},
|
||||
}, nil
|
||||
}
|
||||
destSupportedManifestMIMETypes = ociEncryptionMIMETypes
|
||||
}
|
||||
supportedByDest := map[string]struct{}{}
|
||||
supportedByDest := set.New[string]()
|
||||
for _, t := range destSupportedManifestMIMETypes {
|
||||
if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(t) {
|
||||
supportedByDest[t] = struct{}{}
|
||||
supportedByDest.Add(t)
|
||||
}
|
||||
}
|
||||
if supportedByDest.Empty() {
|
||||
if len(destSupportedManifestMIMETypes) == 0 { // Coverage: This should never happen, empty values were replaced by ociEncryptionMIMETypes
|
||||
return manifestConversionPlan{}, errors.New("internal error: destSupportedManifestMIMETypes is empty")
|
||||
}
|
||||
// We know, and have verified, that destSupportedManifestMIMETypes is not empty, so encryption must have been involved.
|
||||
if !in.requiresOCIEncryption { // Coverage: This should never happen, destSupportedManifestMIMETypes was not empty, so we should have filtered for encryption.
|
||||
return manifestConversionPlan{}, errors.New("internal error: supportedByDest is empty but destSupportedManifestMIMETypes is not, and not encrypting")
|
||||
}
|
||||
// destSupportedManifestMIMETypes has three possible origins:
|
||||
if in.forceManifestMIMEType != "" { // 1. forceManifestType specified
|
||||
return manifestConversionPlan{}, fmt.Errorf("encryption required together with format %s, which does not support encryption",
|
||||
in.forceManifestMIMEType)
|
||||
}
|
||||
if len(in.destSupportedManifestMIMETypes) == 0 { // 2. destination accepts anything and we have chosen ociEncryptionMIMETypes
|
||||
// Coverage: This should never happen, ociEncryptionMIMETypes all support encryption
|
||||
return manifestConversionPlan{}, errors.New("internal error: in.destSupportedManifestMIMETypes is empty but supportedByDest is empty as well")
|
||||
}
|
||||
// 3. destination does not support encryption.
|
||||
return manifestConversionPlan{}, fmt.Errorf("encryption required but the destination only supports MIME types [%s], none of which support encryption",
|
||||
strings.Join(destSupportedManifestMIMETypes, ", "))
|
||||
}
|
||||
|
||||
// destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types.
|
||||
// So, build a list of types to try in order of decreasing preference.
|
||||
|
|
@ -96,7 +126,7 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
|
|||
prioritizedTypes := newOrderedSet()
|
||||
|
||||
// First of all, prefer to keep the original manifest unmodified.
|
||||
if _, ok := supportedByDest[srcType]; ok {
|
||||
if supportedByDest.Contains(srcType) {
|
||||
prioritizedTypes.append(srcType)
|
||||
}
|
||||
if in.cannotModifyManifestReason != "" {
|
||||
|
|
@ -113,18 +143,20 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
|
|||
|
||||
// Then use our list of preferred types.
|
||||
for _, t := range preferredManifestMIMETypes {
|
||||
if _, ok := supportedByDest[t]; ok {
|
||||
if supportedByDest.Contains(t) {
|
||||
prioritizedTypes.append(t)
|
||||
}
|
||||
}
|
||||
|
||||
// Finally, try anything else the destination supports.
|
||||
for _, t := range destSupportedManifestMIMETypes {
|
||||
prioritizedTypes.append(t)
|
||||
if supportedByDest.Contains(t) {
|
||||
prioritizedTypes.append(t)
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", "))
|
||||
if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen.
|
||||
if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes and supportedByDest, which is a subset, is not empty (or we would have exited above), so this should never happen.
|
||||
return manifestConversionPlan{}, errors.New("Internal error: no candidate MIME types")
|
||||
}
|
||||
res := manifestConversionPlan{
|
||||
|
|
@ -166,11 +198,8 @@ func (c *copier) determineListConversion(currentListMIMEType string, destSupport
|
|||
prioritizedTypes := newOrderedSet()
|
||||
// The first priority is the current type, if it's in the list, since that lets us avoid a
|
||||
// conversion that isn't strictly necessary.
|
||||
for _, t := range destSupportedMIMETypes {
|
||||
if t == currentListMIMEType {
|
||||
prioritizedTypes.append(currentListMIMEType)
|
||||
break
|
||||
}
|
||||
if slices.Contains(destSupportedMIMETypes, currentListMIMEType) {
|
||||
prioritizedTypes.append(currentListMIMEType)
|
||||
}
|
||||
// Pick out the other list types that we support.
|
||||
for _, t := range destSupportedMIMETypes {
|
||||
|
|
|
|||
219
vendor/github.com/containers/image/v5/copy/multiple.go
generated
vendored
Normal file
219
vendor/github.com/containers/image/v5/copy/multiple.go
generated
vendored
Normal file
|
|
@ -0,0 +1,219 @@
|
|||
package copy
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/internal/image"
|
||||
internalManifest "github.com/containers/image/v5/internal/manifest"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/signature"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
type instanceCopyKind int
|
||||
|
||||
const (
|
||||
instanceCopyCopy instanceCopyKind = iota
|
||||
instanceCopyClone
|
||||
)
|
||||
|
||||
type instanceCopy struct {
|
||||
op instanceCopyKind
|
||||
sourceDigest digest.Digest
|
||||
}
|
||||
|
||||
// prepareInstanceCopies prepares a list of instances which needs to copied to the manifest list.
|
||||
func prepareInstanceCopies(instanceDigests []digest.Digest, options *Options) []instanceCopy {
|
||||
res := []instanceCopy{}
|
||||
for i, instanceDigest := range instanceDigests {
|
||||
if options.ImageListSelection == CopySpecificImages &&
|
||||
!slices.Contains(options.Instances, instanceDigest) {
|
||||
logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
|
||||
continue
|
||||
}
|
||||
res = append(res, instanceCopy{
|
||||
op: instanceCopyCopy,
|
||||
sourceDigest: instanceDigest,
|
||||
})
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// copyMultipleImages copies some or all of an image list's instances, using
|
||||
// policyContext to validate source image admissibility.
|
||||
func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel *image.UnparsedImage) (copiedManifest []byte, retErr error) {
|
||||
// Parse the list and get a copy of the original value after it's re-encoded.
|
||||
manifestList, manifestType, err := unparsedToplevel.Manifest(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading manifest list: %w", err)
|
||||
}
|
||||
originalList, err := internalManifest.ListFromBlob(manifestList, manifestType)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing manifest list %q: %w", string(manifestList), err)
|
||||
}
|
||||
updatedList := originalList.CloneInternal()
|
||||
|
||||
sigs, err := c.sourceSignatures(ctx, unparsedToplevel, options,
|
||||
"Getting image list signatures",
|
||||
"Checking if image list destination supports signatures")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If the destination is a digested reference, make a note of that, determine what digest value we're
|
||||
// expecting, and check that the source manifest matches it.
|
||||
destIsDigestedReference := false
|
||||
if named := c.dest.Reference().DockerReference(); named != nil {
|
||||
if digested, ok := named.(reference.Digested); ok {
|
||||
destIsDigestedReference = true
|
||||
matches, err := manifest.MatchesDigest(manifestList, digested.Digest())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("computing digest of source image's manifest: %w", err)
|
||||
}
|
||||
if !matches {
|
||||
return nil, errors.New("Digest of source image's manifest would not match destination reference")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Determine if we're allowed to modify the manifest list.
|
||||
// If we can, set to the empty string. If we can't, set to the reason why.
|
||||
// Compare, and perhaps keep in sync with, the version in copySingleImage.
|
||||
cannotModifyManifestListReason := ""
|
||||
if len(sigs) > 0 {
|
||||
cannotModifyManifestListReason = "Would invalidate signatures"
|
||||
}
|
||||
if destIsDigestedReference {
|
||||
cannotModifyManifestListReason = "Destination specifies a digest"
|
||||
}
|
||||
if options.PreserveDigests {
|
||||
cannotModifyManifestListReason = "Instructed to preserve digests"
|
||||
}
|
||||
|
||||
// Determine if we'll need to convert the manifest list to a different format.
|
||||
forceListMIMEType := options.ForceManifestMIMEType
|
||||
switch forceListMIMEType {
|
||||
case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType:
|
||||
forceListMIMEType = manifest.DockerV2ListMediaType
|
||||
case imgspecv1.MediaTypeImageManifest:
|
||||
forceListMIMEType = imgspecv1.MediaTypeImageIndex
|
||||
}
|
||||
selectedListType, otherManifestMIMETypeCandidates, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("determining manifest list type to write to destination: %w", err)
|
||||
}
|
||||
if selectedListType != originalList.MIMEType() {
|
||||
if cannotModifyManifestListReason != "" {
|
||||
return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", selectedListType, cannotModifyManifestListReason)
|
||||
}
|
||||
}
|
||||
|
||||
// Copy each image, or just the ones we want to copy, in turn.
|
||||
instanceDigests := updatedList.Instances()
|
||||
instanceEdits := []internalManifest.ListEdit{}
|
||||
instanceCopyList := prepareInstanceCopies(instanceDigests, options)
|
||||
c.Printf("Copying %d of %d images in list\n", len(instanceCopyList), len(instanceDigests))
|
||||
for i, instance := range instanceCopyList {
|
||||
// Update instances to be edited by their `ListOperation` and
|
||||
// populate necessary fields.
|
||||
switch instance.op {
|
||||
case instanceCopyCopy:
|
||||
logrus.Debugf("Copying instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList))
|
||||
c.Printf("Copying image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList))
|
||||
unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest)
|
||||
updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceCopyList[i].sourceDigest)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err)
|
||||
}
|
||||
// Record the result of a possible conversion here.
|
||||
instanceEdits = append(instanceEdits, internalManifest.ListEdit{
|
||||
ListOperation: internalManifest.ListOpUpdate,
|
||||
UpdateOldDigest: instance.sourceDigest,
|
||||
UpdateDigest: updatedManifestDigest,
|
||||
UpdateSize: int64(len(updatedManifest)),
|
||||
UpdateMediaType: updatedManifestType})
|
||||
default:
|
||||
return nil, fmt.Errorf("copying image: invalid copy operation %d", instance.op)
|
||||
}
|
||||
}
|
||||
|
||||
// Now reset the digest/size/types of the manifests in the list to account for any conversions that we made.
|
||||
if err = updatedList.EditInstances(instanceEdits); err != nil {
|
||||
return nil, fmt.Errorf("updating manifest list: %w", err)
|
||||
}
|
||||
|
||||
// Iterate through supported list types, preferred format first.
|
||||
c.Printf("Writing manifest list to image destination\n")
|
||||
var errs []string
|
||||
for _, thisListType := range append([]string{selectedListType}, otherManifestMIMETypeCandidates...) {
|
||||
var attemptedList internalManifest.ListPublic = updatedList
|
||||
|
||||
logrus.Debugf("Trying to use manifest list type %s…", thisListType)
|
||||
|
||||
// Perform the list conversion, if we need one.
|
||||
if thisListType != updatedList.MIMEType() {
|
||||
attemptedList, err = updatedList.ConvertToMIMEType(thisListType)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("converting manifest list to list with MIME type %q: %w", thisListType, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the updates or a type conversion meaningfully changed the list of images
|
||||
// by serializing them both so that we can compare them.
|
||||
attemptedManifestList, err := attemptedList.Serialize()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("encoding updated manifest list (%q: %#v): %w", updatedList.MIMEType(), updatedList.Instances(), err)
|
||||
}
|
||||
originalManifestList, err := originalList.Serialize()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("encoding original manifest list for comparison (%q: %#v): %w", originalList.MIMEType(), originalList.Instances(), err)
|
||||
}
|
||||
|
||||
// If we can't just use the original value, but we have to change it, flag an error.
|
||||
if !bytes.Equal(attemptedManifestList, originalManifestList) {
|
||||
if cannotModifyManifestListReason != "" {
|
||||
return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", thisListType, cannotModifyManifestListReason)
|
||||
}
|
||||
logrus.Debugf("Manifest list has been updated")
|
||||
} else {
|
||||
// We can just use the original value, so use it instead of the one we just rebuilt, so that we don't change the digest.
|
||||
attemptedManifestList = manifestList
|
||||
}
|
||||
|
||||
// Save the manifest list.
|
||||
err = c.dest.PutManifest(ctx, attemptedManifestList, nil)
|
||||
if err != nil {
|
||||
logrus.Debugf("Upload of manifest list type %s failed: %v", thisListType, err)
|
||||
errs = append(errs, fmt.Sprintf("%s(%v)", thisListType, err))
|
||||
continue
|
||||
}
|
||||
errs = nil
|
||||
manifestList = attemptedManifestList
|
||||
break
|
||||
}
|
||||
if errs != nil {
|
||||
return nil, fmt.Errorf("Uploading manifest list failed, attempted the following formats: %s", strings.Join(errs, ", "))
|
||||
}
|
||||
|
||||
// Sign the manifest list.
|
||||
newSigs, err := c.createSignatures(ctx, manifestList, options.SignIdentity)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sigs = append(sigs, newSigs...)
|
||||
|
||||
c.Printf("Storing list signatures\n")
|
||||
if err := c.dest.PutSignaturesWithFormat(ctx, sigs, nil); err != nil {
|
||||
return nil, fmt.Errorf("writing signatures: %w", err)
|
||||
}
|
||||
|
||||
return manifestList, nil
|
||||
}
|
||||
6
vendor/github.com/containers/image/v5/copy/progress_bars.go
generated
vendored
6
vendor/github.com/containers/image/v5/copy/progress_bars.go
generated
vendored
|
|
@ -7,8 +7,8 @@ import (
|
|||
|
||||
"github.com/containers/image/v5/internal/private"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/vbauerster/mpb/v7"
|
||||
"github.com/vbauerster/mpb/v7/decor"
|
||||
"github.com/vbauerster/mpb/v8"
|
||||
"github.com/vbauerster/mpb/v8/decor"
|
||||
)
|
||||
|
||||
// newProgressPool creates a *mpb.Progress.
|
||||
|
|
@ -120,7 +120,7 @@ func (bar *progressBar) mark100PercentComplete() {
|
|||
bar.SetCurrent(bar.originalSize) // This triggers the completion condition.
|
||||
} else {
|
||||
// -1 = unknown size
|
||||
// 0 is somewhat of a a special case: Unlike c/image, where 0 is a definite known
|
||||
// 0 is somewhat of a special case: Unlike c/image, where 0 is a definite known
|
||||
// size (possible at least in theory), in mpb, zero-sized progress bars are treated
|
||||
// as unknown size, in particular they are not configured to be marked as
|
||||
// complete on bar.Current() reaching bar.total (because that would happen already
|
||||
|
|
|
|||
98
vendor/github.com/containers/image/v5/copy/sign.go
generated
vendored
98
vendor/github.com/containers/image/v5/copy/sign.go
generated
vendored
|
|
@ -7,11 +7,49 @@ import (
|
|||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/internal/private"
|
||||
internalsig "github.com/containers/image/v5/internal/signature"
|
||||
"github.com/containers/image/v5/signature"
|
||||
internalSigner "github.com/containers/image/v5/internal/signer"
|
||||
"github.com/containers/image/v5/signature/sigstore"
|
||||
"github.com/containers/image/v5/signature/simplesigning"
|
||||
"github.com/containers/image/v5/transports"
|
||||
)
|
||||
|
||||
// setupSigners initializes c.signers based on options.
|
||||
func (c *copier) setupSigners(options *Options) error {
|
||||
c.signers = append(c.signers, options.Signers...)
|
||||
// c.signersToClose is intentionally not updated with options.Signers.
|
||||
|
||||
// We immediately append created signers to c.signers, and we rely on c.close() to clean them up; so we don’t need
|
||||
// to clean up any created signers on failure.
|
||||
|
||||
if options.SignBy != "" {
|
||||
opts := []simplesigning.Option{
|
||||
simplesigning.WithKeyFingerprint(options.SignBy),
|
||||
}
|
||||
if options.SignPassphrase != "" {
|
||||
opts = append(opts, simplesigning.WithPassphrase(options.SignPassphrase))
|
||||
}
|
||||
signer, err := simplesigning.NewSigner(opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.signers = append(c.signers, signer)
|
||||
c.signersToClose = append(c.signersToClose, signer)
|
||||
}
|
||||
|
||||
if options.SignBySigstorePrivateKeyFile != "" {
|
||||
signer, err := sigstore.NewSigner(
|
||||
sigstore.WithPrivateKeyFile(options.SignBySigstorePrivateKeyFile, options.SignSigstorePrivateKeyPassphrase),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.signers = append(c.signers, signer)
|
||||
c.signersToClose = append(c.signersToClose, signer)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// sourceSignatures returns signatures from unparsedSource based on options,
|
||||
// and verifies that they can be used (to avoid copying a large image when we
|
||||
// can tell in advance that it would ultimately fail)
|
||||
|
|
@ -37,38 +75,13 @@ func (c *copier) sourceSignatures(ctx context.Context, unparsed private.Unparsed
|
|||
return sigs, nil
|
||||
}
|
||||
|
||||
// createSignature creates a new signature of manifest using keyIdentity.
|
||||
func (c *copier) createSignature(manifest []byte, keyIdentity string, passphrase string, identity reference.Named) (internalsig.Signature, error) {
|
||||
mech, err := signature.NewGPGSigningMechanism()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("initializing GPG: %w", err)
|
||||
}
|
||||
defer mech.Close()
|
||||
if err := mech.SupportsSigning(); err != nil {
|
||||
return nil, fmt.Errorf("Signing not supported: %w", err)
|
||||
// createSignatures creates signatures for manifest and an optional identity.
|
||||
func (c *copier) createSignatures(ctx context.Context, manifest []byte, identity reference.Named) ([]internalsig.Signature, error) {
|
||||
if len(c.signers) == 0 {
|
||||
// We must exit early here, otherwise copies with no Docker reference wouldn’t be possible.
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if identity != nil {
|
||||
if reference.IsNameOnly(identity) {
|
||||
return nil, fmt.Errorf("Sign identity must be a fully specified reference %s", identity)
|
||||
}
|
||||
} else {
|
||||
identity = c.dest.Reference().DockerReference()
|
||||
if identity == nil {
|
||||
return nil, fmt.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference()))
|
||||
}
|
||||
}
|
||||
|
||||
c.Printf("Signing manifest using simple signing\n")
|
||||
newSig, err := signature.SignDockerManifestWithOptions(manifest, identity.String(), mech, keyIdentity, &signature.SignOptions{Passphrase: passphrase})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating signature: %w", err)
|
||||
}
|
||||
return internalsig.SimpleSigningFromBlob(newSig), nil
|
||||
}
|
||||
|
||||
// createSigstoreSignature creates a new sigstore signature of manifest using privateKeyFile and identity.
|
||||
func (c *copier) createSigstoreSignature(manifest []byte, privateKeyFile string, passphrase []byte, identity reference.Named) (internalsig.Signature, error) {
|
||||
if identity != nil {
|
||||
if reference.IsNameOnly(identity) {
|
||||
return nil, fmt.Errorf("Sign identity must be a fully specified reference %s", identity.String())
|
||||
|
|
@ -80,10 +93,23 @@ func (c *copier) createSigstoreSignature(manifest []byte, privateKeyFile string,
|
|||
}
|
||||
}
|
||||
|
||||
c.Printf("Signing manifest using a sigstore signature\n")
|
||||
newSig, err := sigstore.SignDockerManifestWithPrivateKeyFileUnstable(manifest, identity, privateKeyFile, passphrase)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating signature: %w", err)
|
||||
res := make([]internalsig.Signature, 0, len(c.signers))
|
||||
for signerIndex, signer := range c.signers {
|
||||
msg := internalSigner.ProgressMessage(signer)
|
||||
if len(c.signers) == 1 {
|
||||
c.Printf("Creating signature: %s\n", msg)
|
||||
} else {
|
||||
c.Printf("Creating signature %d: %s\n", signerIndex+1, msg)
|
||||
}
|
||||
newSig, err := internalSigner.SignImageManifest(ctx, signer, manifest, identity)
|
||||
if err != nil {
|
||||
if len(c.signers) == 1 {
|
||||
return nil, fmt.Errorf("creating signature: %w", err)
|
||||
} else {
|
||||
return nil, fmt.Errorf("creating signature %d: %w", signerIndex, err)
|
||||
}
|
||||
}
|
||||
res = append(res, newSig)
|
||||
}
|
||||
return newSig, nil
|
||||
return res, nil
|
||||
}
|
||||
|
|
|
|||
820
vendor/github.com/containers/image/v5/copy/single.go
generated
vendored
Normal file
820
vendor/github.com/containers/image/v5/copy/single.go
generated
vendored
Normal file
|
|
@ -0,0 +1,820 @@
|
|||
package copy
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/internal/image"
|
||||
"github.com/containers/image/v5/internal/pkg/platform"
|
||||
"github.com/containers/image/v5/internal/private"
|
||||
"github.com/containers/image/v5/internal/set"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/pkg/compression"
|
||||
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
|
||||
"github.com/containers/image/v5/signature"
|
||||
"github.com/containers/image/v5/transports"
|
||||
"github.com/containers/image/v5/types"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/vbauerster/mpb/v8"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
|
||||
type imageCopier struct {
|
||||
c *copier
|
||||
manifestUpdates *types.ManifestUpdateOptions
|
||||
src *image.SourcedImage
|
||||
diffIDsAreNeeded bool
|
||||
cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
|
||||
canSubstituteBlobs bool
|
||||
compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil.
|
||||
compressionLevel *int
|
||||
ociEncryptLayers *[]int
|
||||
}
|
||||
|
||||
// copySingleImage copies a single (non-manifest-list) image unparsedImage, using policyContext to validate
|
||||
// source image admissibility.
|
||||
func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel, unparsedImage *image.UnparsedImage, targetInstance *digest.Digest) (retManifest []byte, retManifestType string, retManifestDigest digest.Digest, retErr error) {
|
||||
// The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list.
|
||||
// Make sure we fail cleanly in such cases.
|
||||
multiImage, err := isMultiImage(ctx, unparsedImage)
|
||||
if err != nil {
|
||||
// FIXME FIXME: How to name a reference for the sub-image?
|
||||
return nil, "", "", fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(unparsedImage.Reference()), err)
|
||||
}
|
||||
if multiImage {
|
||||
return nil, "", "", fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
|
||||
}
|
||||
|
||||
// Please keep this policy check BEFORE reading any other information about the image.
|
||||
// (The multiImage check above only matches the MIME type, which we have received anyway.
|
||||
// Actual parsing of anything should be deferred.)
|
||||
if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
|
||||
return nil, "", "", fmt.Errorf("Source image rejected: %w", err)
|
||||
}
|
||||
src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage)
|
||||
if err != nil {
|
||||
return nil, "", "", fmt.Errorf("initializing image from source %s: %w", transports.ImageName(c.rawSource.Reference()), err)
|
||||
}
|
||||
|
||||
// If the destination is a digested reference, make a note of that, determine what digest value we're
|
||||
// expecting, and check that the source manifest matches it. If the source manifest doesn't, but it's
|
||||
// one item from a manifest list that matches it, accept that as a match.
|
||||
destIsDigestedReference := false
|
||||
if named := c.dest.Reference().DockerReference(); named != nil {
|
||||
if digested, ok := named.(reference.Digested); ok {
|
||||
destIsDigestedReference = true
|
||||
matches, err := manifest.MatchesDigest(src.ManifestBlob, digested.Digest())
|
||||
if err != nil {
|
||||
return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err)
|
||||
}
|
||||
if !matches {
|
||||
manifestList, _, err := unparsedToplevel.Manifest(ctx)
|
||||
if err != nil {
|
||||
return nil, "", "", fmt.Errorf("reading manifest from source image: %w", err)
|
||||
}
|
||||
matches, err = manifest.MatchesDigest(manifestList, digested.Digest())
|
||||
if err != nil {
|
||||
return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err)
|
||||
}
|
||||
if !matches {
|
||||
return nil, "", "", errors.New("Digest of source image's manifest would not match destination reference")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := checkImageDestinationForCurrentRuntime(ctx, options.DestinationCtx, src, c.dest); err != nil {
|
||||
return nil, "", "", err
|
||||
}
|
||||
|
||||
sigs, err := c.sourceSignatures(ctx, src, options,
|
||||
"Getting image source signatures",
|
||||
"Checking if image destination supports signatures")
|
||||
if err != nil {
|
||||
return nil, "", "", err
|
||||
}
|
||||
|
||||
// Determine if we're allowed to modify the manifest.
|
||||
// If we can, set to the empty string. If we can't, set to the reason why.
|
||||
// Compare, and perhaps keep in sync with, the version in copyMultipleImages.
|
||||
cannotModifyManifestReason := ""
|
||||
if len(sigs) > 0 {
|
||||
cannotModifyManifestReason = "Would invalidate signatures"
|
||||
}
|
||||
if destIsDigestedReference {
|
||||
cannotModifyManifestReason = "Destination specifies a digest"
|
||||
}
|
||||
if options.PreserveDigests {
|
||||
cannotModifyManifestReason = "Instructed to preserve digests"
|
||||
}
|
||||
|
||||
ic := imageCopier{
|
||||
c: c,
|
||||
manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}},
|
||||
src: src,
|
||||
// diffIDsAreNeeded is computed later
|
||||
cannotModifyManifestReason: cannotModifyManifestReason,
|
||||
ociEncryptLayers: options.OciEncryptLayers,
|
||||
}
|
||||
if options.DestinationCtx != nil {
|
||||
// Note that compressionFormat and compressionLevel can be nil.
|
||||
ic.compressionFormat = options.DestinationCtx.CompressionFormat
|
||||
ic.compressionLevel = options.DestinationCtx.CompressionLevel
|
||||
}
|
||||
// Decide whether we can substitute blobs with semantic equivalents:
|
||||
// - Don’t do that if we can’t modify the manifest at all
|
||||
// - Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
|
||||
// This may be too conservative, but for now, better safe than sorry, _especially_ on the len(c.signers) != 0 path:
|
||||
// The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
|
||||
// We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk
|
||||
// that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
|
||||
// and we would reuse and sign it.
|
||||
ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && len(c.signers) == 0
|
||||
|
||||
if err := ic.updateEmbeddedDockerReference(); err != nil {
|
||||
return nil, "", "", err
|
||||
}
|
||||
|
||||
destRequiresOciEncryption := (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || options.OciEncryptLayers != nil
|
||||
|
||||
manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{
|
||||
srcMIMEType: ic.src.ManifestMIMEType,
|
||||
destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(),
|
||||
forceManifestMIMEType: options.ForceManifestMIMEType,
|
||||
requiresOCIEncryption: destRequiresOciEncryption,
|
||||
cannotModifyManifestReason: ic.cannotModifyManifestReason,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, "", "", err
|
||||
}
|
||||
// We set up this part of ic.manifestUpdates quite early, not just around the
|
||||
// code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code
|
||||
// (e.g. the UpdatedImageNeedsLayerDiffIDs check just below) can make decisions based
|
||||
// on the expected destination format.
|
||||
if manifestConversionPlan.preferredMIMETypeNeedsConversion {
|
||||
ic.manifestUpdates.ManifestMIMEType = manifestConversionPlan.preferredMIMEType
|
||||
}
|
||||
|
||||
// If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
|
||||
ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)
|
||||
|
||||
// If enabled, fetch and compare the destination's manifest. And as an optimization skip updating the destination iff equal
|
||||
if options.OptimizeDestinationImageAlreadyExists {
|
||||
shouldUpdateSigs := len(sigs) > 0 || len(c.signers) != 0 // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible
|
||||
noPendingManifestUpdates := ic.noPendingManifestUpdates()
|
||||
|
||||
logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates)
|
||||
if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates {
|
||||
isSrcDestManifestEqual, retManifest, retManifestType, retManifestDigest, err := compareImageDestinationManifestEqual(ctx, options, src, targetInstance, c.dest)
|
||||
if err != nil {
|
||||
logrus.Warnf("Failed to compare destination image manifest: %v", err)
|
||||
return nil, "", "", err
|
||||
}
|
||||
|
||||
if isSrcDestManifestEqual {
|
||||
c.Printf("Skipping: image already present at destination\n")
|
||||
return retManifest, retManifestType, retManifestDigest, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := ic.copyLayers(ctx); err != nil {
|
||||
return nil, "", "", err
|
||||
}
|
||||
|
||||
// With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only;
|
||||
// and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support
|
||||
// without actually trying to upload something and getting a types.ManifestTypeRejectedError.
|
||||
// So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if
|
||||
// we're altering how they're compressed. If the process succeeds, fine…
|
||||
manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
|
||||
retManifestType = manifestConversionPlan.preferredMIMEType
|
||||
if err != nil {
|
||||
logrus.Debugf("Writing manifest using preferred type %s failed: %v", manifestConversionPlan.preferredMIMEType, err)
|
||||
// … if it fails, and the failure is either because the manifest is rejected by the registry, or
|
||||
// because we failed to create a manifest of the specified type because the specific manifest type
|
||||
// doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may
|
||||
// have other options available that could still succeed.
|
||||
var manifestTypeRejectedError types.ManifestTypeRejectedError
|
||||
var manifestLayerCompressionIncompatibilityError manifest.ManifestLayerCompressionIncompatibilityError
|
||||
isManifestRejected := errors.As(err, &manifestTypeRejectedError)
|
||||
isCompressionIncompatible := errors.As(err, &manifestLayerCompressionIncompatibilityError)
|
||||
if (!isManifestRejected && !isCompressionIncompatible) || len(manifestConversionPlan.otherMIMETypeCandidates) == 0 {
|
||||
// We don’t have other options.
|
||||
// In principle the code below would handle this as well, but the resulting error message is fairly ugly.
|
||||
// Don’t bother the user with MIME types if we have no choice.
|
||||
return nil, "", "", err
|
||||
}
|
||||
// If the original MIME type is acceptable, determineManifestConversion always uses it as manifestConversionPlan.preferredMIMEType.
|
||||
// So if we are here, we will definitely be trying to convert the manifest.
|
||||
// With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason,
|
||||
// so let’s bail out early and with a better error message.
|
||||
if ic.cannotModifyManifestReason != "" {
|
||||
return nil, "", "", fmt.Errorf("writing manifest failed and we cannot try conversions: %q: %w", cannotModifyManifestReason, err)
|
||||
}
|
||||
|
||||
// errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
|
||||
errs := []string{fmt.Sprintf("%s(%v)", manifestConversionPlan.preferredMIMEType, err)}
|
||||
for _, manifestMIMEType := range manifestConversionPlan.otherMIMETypeCandidates {
|
||||
logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType)
|
||||
ic.manifestUpdates.ManifestMIMEType = manifestMIMEType
|
||||
attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
|
||||
if err != nil {
|
||||
logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err)
|
||||
errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err))
|
||||
continue
|
||||
}
|
||||
|
||||
// We have successfully uploaded a manifest.
|
||||
manifestBytes = attemptedManifest
|
||||
retManifestDigest = attemptedManifestDigest
|
||||
retManifestType = manifestMIMEType
|
||||
errs = nil // Mark this as a success so that we don't abort below.
|
||||
break
|
||||
}
|
||||
if errs != nil {
|
||||
return nil, "", "", fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
|
||||
}
|
||||
}
|
||||
if targetInstance != nil {
|
||||
targetInstance = &retManifestDigest
|
||||
}
|
||||
|
||||
newSigs, err := c.createSignatures(ctx, manifestBytes, options.SignIdentity)
|
||||
if err != nil {
|
||||
return nil, "", "", err
|
||||
}
|
||||
sigs = append(sigs, newSigs...)
|
||||
|
||||
if len(sigs) > 0 {
|
||||
c.Printf("Storing signatures\n")
|
||||
if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil {
|
||||
return nil, "", "", fmt.Errorf("writing signatures: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return manifestBytes, retManifestType, retManifestDigest, nil
|
||||
}
|
||||
|
||||
// checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary.
|
||||
func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error {
|
||||
if dest.MustMatchRuntimeOS() {
|
||||
c, err := src.OCIConfig(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing image configuration: %w", err)
|
||||
}
|
||||
wantedPlatforms, err := platform.WantedPlatforms(sys)
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting current platform information %#v: %w", sys, err)
|
||||
}
|
||||
|
||||
options := newOrderedSet()
|
||||
match := false
|
||||
for _, wantedPlatform := range wantedPlatforms {
|
||||
// Waiting for https://github.com/opencontainers/image-spec/pull/777 :
|
||||
// This currently can’t use image.MatchesPlatform because we don’t know what to use
|
||||
// for image.Variant.
|
||||
if wantedPlatform.OS == c.OS && wantedPlatform.Architecture == c.Architecture {
|
||||
match = true
|
||||
break
|
||||
}
|
||||
options.append(fmt.Sprintf("%s+%s", wantedPlatform.OS, wantedPlatform.Architecture))
|
||||
}
|
||||
if !match {
|
||||
logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q, expecting one of %q",
|
||||
c.OS, c.Architecture, strings.Join(options.list, ", "))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests.
|
||||
func (ic *imageCopier) updateEmbeddedDockerReference() error {
|
||||
if ic.c.dest.IgnoresEmbeddedDockerReference() {
|
||||
return nil // Destination would prefer us not to update the embedded reference.
|
||||
}
|
||||
destRef := ic.c.dest.Reference().DockerReference()
|
||||
if destRef == nil {
|
||||
return nil // Destination does not care about Docker references
|
||||
}
|
||||
if !ic.src.EmbeddedDockerReferenceConflicts(destRef) {
|
||||
return nil // No reference embedded in the manifest, or it matches destRef already.
|
||||
}
|
||||
|
||||
if ic.cannotModifyManifestReason != "" {
|
||||
return fmt.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which we cannot do: %q",
|
||||
transports.ImageName(ic.c.dest.Reference()), destRef.String(), ic.cannotModifyManifestReason)
|
||||
}
|
||||
ic.manifestUpdates.EmbeddedDockerReference = destRef
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ic *imageCopier) noPendingManifestUpdates() bool {
|
||||
return reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly})
|
||||
}
|
||||
|
||||
// compareImageDestinationManifestEqual compares the `src` and `dest` image manifests (reading the manifest from the
|
||||
// (possibly remote) destination). Returning true and the destination's manifest, type and digest if they compare equal.
|
||||
func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src *image.SourcedImage, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) {
|
||||
srcManifestDigest, err := manifest.Digest(src.ManifestBlob)
|
||||
if err != nil {
|
||||
return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err)
|
||||
}
|
||||
|
||||
destImageSource, err := dest.Reference().NewImageSource(ctx, options.DestinationCtx)
|
||||
if err != nil {
|
||||
logrus.Debugf("Unable to create destination image %s source: %v", dest.Reference(), err)
|
||||
return false, nil, "", "", nil
|
||||
}
|
||||
|
||||
destManifest, destManifestType, err := destImageSource.GetManifest(ctx, targetInstance)
|
||||
if err != nil {
|
||||
logrus.Debugf("Unable to get destination image %s/%s manifest: %v", destImageSource, targetInstance, err)
|
||||
return false, nil, "", "", nil
|
||||
}
|
||||
|
||||
destManifestDigest, err := manifest.Digest(destManifest)
|
||||
if err != nil {
|
||||
return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err)
|
||||
}
|
||||
|
||||
logrus.Debugf("Comparing source and destination manifest digests: %v vs. %v", srcManifestDigest, destManifestDigest)
|
||||
if srcManifestDigest != destManifestDigest {
|
||||
return false, nil, "", "", nil
|
||||
}
|
||||
|
||||
// Destination and source manifests, types and digests should all be equivalent
|
||||
return true, destManifest, destManifestType, destManifestDigest, nil
|
||||
}
|
||||
|
||||
// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.cannotModifyManifestReason == "".
//
// Layers are copied concurrently, bounded by ic.c.concurrentBlobCopiesSemaphore; each
// goroutine writes its result into a fixed-size slice at its own index, so the manifest
// updates below observe the results in original layer order.
func (ic *imageCopier) copyLayers(ctx context.Context) error {
	srcInfos := ic.src.LayerInfos()
	numLayers := len(srcInfos)
	// The source may request substituted layer representations; honoring that requires
	// permission to edit the manifest.
	updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx)
	if err != nil {
		return err
	}
	srcInfosUpdated := false
	if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) {
		if ic.cannotModifyManifestReason != "" {
			return fmt.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason)
		}
		srcInfos = updatedSrcInfos
		srcInfosUpdated = true
	}

	// Per-layer outcome; exactly one goroutine writes data[index], so no locking is needed.
	type copyLayerData struct {
		destInfo types.BlobInfo
		diffID   digest.Digest
		err      error
	}

	// The manifest is used to extract the information whether a given
	// layer is empty.
	man, err := manifest.FromBlob(ic.src.ManifestBlob, ic.src.ManifestMIMEType)
	if err != nil {
		return err
	}
	manifestLayerInfos := man.LayerInfos()

	// copyGroup is used to determine if all layers are copied
	copyGroup := sync.WaitGroup{}

	data := make([]copyLayerData, numLayers)
	// copyLayerHelper runs in its own goroutine; it releases the semaphore slot
	// acquired by the spawning loop below, and marks itself done on copyGroup.
	copyLayerHelper := func(index int, srcLayer types.BlobInfo, toEncrypt bool, pool *mpb.Progress, srcRef reference.Named) {
		defer ic.c.concurrentBlobCopiesSemaphore.Release(1)
		defer copyGroup.Done()
		cld := copyLayerData{}
		if !ic.c.downloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
			// DiffIDs are, currently, needed only when converting from schema1.
			// In which case src.LayerInfos will not have URLs because schema1
			// does not support them.
			if ic.diffIDsAreNeeded {
				cld.err = errors.New("getting DiffID for foreign layers is unimplemented")
			} else {
				// Foreign layers are not copied; the destination keeps the URLs as-is.
				cld.destInfo = srcLayer
				logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
			}
		} else {
			cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, toEncrypt, pool, index, srcRef, manifestLayerInfos[index].EmptyLayer)
		}
		data[index] = cld
	}

	// Decide which layers to encrypt
	layersToEncrypt := set.New[int]()
	var encryptAll bool
	if ic.ociEncryptLayers != nil {
		// An empty (but non-nil) list means "encrypt everything".
		encryptAll = len(*ic.ociEncryptLayers) == 0
		totalLayers := len(srcInfos)
		for _, l := range *ic.ociEncryptLayers {
			// if layer is negative, it is reverse indexed.
			layersToEncrypt.Add((totalLayers + l) % totalLayers)
		}

		if encryptAll {
			for i := 0; i < len(srcInfos); i++ {
				layersToEncrypt.Add(i)
			}
		}
	}

	if err := func() error { // A scope for defer
		progressPool := ic.c.newProgressPool()
		defer progressPool.Wait()

		// Ensure we wait for all layers to be copied. progressPool.Wait() must not be called while any of the copyLayerHelpers interact with the progressPool.
		// (Deferred after progressPool.Wait() above, so by LIFO ordering it runs first.)
		defer copyGroup.Wait()

		for i, srcLayer := range srcInfos {
			err = ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1)
			if err != nil {
				// This can only fail with ctx.Err(), so no need to blame acquiring the semaphore.
				return fmt.Errorf("copying layer: %w", err)
			}
			copyGroup.Add(1)
			go copyLayerHelper(i, srcLayer, layersToEncrypt.Contains(i), progressPool, ic.c.rawSource.Reference().DockerReference())
		}

		// A call to copyGroup.Wait() is done at this point by the defer above.
		return nil
	}(); err != nil {
		return err
	}

	// Collect results; the first layer error aborts the whole copy.
	destInfos := make([]types.BlobInfo, numLayers)
	diffIDs := make([]digest.Digest, numLayers)
	for i, cld := range data {
		if cld.err != nil {
			return cld.err
		}
		destInfos[i] = cld.destInfo
		diffIDs[i] = cld.diffID
	}

	// WARNING: If you are adding new reasons to change ic.manifestUpdates, also update the
	// OptimizeDestinationImageAlreadyExists short-circuit conditions
	ic.manifestUpdates.InformationOnly.LayerInfos = destInfos
	if ic.diffIDsAreNeeded {
		ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
	}
	if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) {
		ic.manifestUpdates.LayerInfos = destInfos
	}
	return nil
}
|
||||
|
||||
// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields)
|
||||
func layerDigestsDiffer(a, b []types.BlobInfo) bool {
|
||||
return !slices.EqualFunc(a, b, func(a, b types.BlobInfo) bool {
|
||||
return a.Digest == b.Digest
|
||||
})
|
||||
}
|
||||
|
||||
// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary,
// stores the resulting config and manifest to the destination, and returns the stored manifest
// and its digest.
//
// instanceDigest, if non-nil, identifies the manifest-list instance being written; it is
// replaced by the digest of the (possibly updated) manifest before PutManifest is called.
func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) {
	var pendingImage types.Image = ic.src
	if !ic.noPendingManifestUpdates() {
		if ic.cannotModifyManifestReason != "" {
			// Updates were queued even though editing is forbidden — a logic error upstream of us.
			return nil, "", fmt.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason)
		}
		if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) {
			// We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion.
			// So, this can only happen if we are trying to upload using one of the other MIME type candidates.
			// Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise
			// when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2.
			// Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now.
			// If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates.
			return nil, "", fmt.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType)
		}
		pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates)
		if err != nil {
			return nil, "", fmt.Errorf("creating an updated image manifest: %w", err)
		}
		pendingImage = pi
	}
	man, _, err := pendingImage.Manifest(ctx)
	if err != nil {
		return nil, "", fmt.Errorf("reading manifest: %w", err)
	}

	// The config must be present in the destination before the manifest referencing it.
	if err := ic.copyConfig(ctx, pendingImage); err != nil {
		return nil, "", err
	}

	ic.c.Printf("Writing manifest to image destination\n")
	manifestDigest, err := manifest.Digest(man)
	if err != nil {
		return nil, "", err
	}
	if instanceDigest != nil {
		// Point the instance at the manifest we are actually writing, whose digest
		// may differ from the original if updates were applied above.
		instanceDigest = &manifestDigest
	}
	if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil {
		logrus.Debugf("Error %v while writing manifest %q", err, string(man))
		return nil, "", fmt.Errorf("writing manifest: %w", err)
	}
	return man, manifestDigest, nil
}
|
||||
|
||||
// copyConfig copies config.json, if any, from src to dest.
//
// A config digest of "" (e.g. for schema1 images, which have no config blob — TODO
// confirm that is the only such case) makes this a no-op. The copy is uncompressed,
// so the stored blob must keep exactly the source digest; a mismatch is an internal error.
func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error {
	srcInfo := src.ConfigInfo()
	if srcInfo.Digest != "" {
		// The config copy counts against the same concurrency budget as layer copies.
		if err := ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
			// This can only fail with ctx.Err(), so no need to blame acquiring the semaphore.
			return fmt.Errorf("copying config: %w", err)
		}
		defer ic.c.concurrentBlobCopiesSemaphore.Release(1)

		destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
			progressPool := ic.c.newProgressPool()
			defer progressPool.Wait()
			bar := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done")
			defer bar.Abort(false)
			ic.c.printCopyInfo("config", srcInfo)

			configBlob, err := src.ConfigBlob(ctx)
			if err != nil {
				return types.BlobInfo{}, fmt.Errorf("reading config blob %s: %w", srcInfo.Digest, err)
			}

			// Stream the in-memory config through the generic blob-copy path
			// (no DiffID recorder, not a layer: layerIndex -1, not empty).
			destInfo, err := ic.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, true, false, bar, -1, false)
			if err != nil {
				return types.BlobInfo{}, err
			}

			bar.mark100PercentComplete()
			return destInfo, nil
		}()
		if err != nil {
			return err
		}
		if destInfo.Digest != srcInfo.Digest {
			return fmt.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
		}
	}
	return nil
}
|
||||
|
||||
// diffIDResult contains both a digest value and an error from diffIDComputationGoroutine.
// We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation.
type diffIDResult struct {
	digest digest.Digest // the computed DiffID; "" when err is non-nil
	err    error         // non-nil if the computation failed (including a panic in the goroutine)
}
|
||||
|
||||
// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it,
|
||||
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
|
||||
// srcRef can be used as an additional hint to the destination during checking whether a layer can be reused but srcRef can be nil.
|
||||
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress, layerIndex int, srcRef reference.Named, emptyLayer bool) (types.BlobInfo, digest.Digest, error) {
|
||||
// If the srcInfo doesn't contain compression information, try to compute it from the
|
||||
// MediaType, which was either read from a manifest by way of LayerInfos() or constructed
|
||||
// by LayerInfosForCopy(), if it was supplied at all. If we succeed in copying the blob,
|
||||
// the BlobInfo we return will be passed to UpdatedImage() and then to UpdateLayerInfos(),
|
||||
// which uses the compression information to compute the updated MediaType values.
|
||||
// (Sadly UpdatedImage() is documented to not update MediaTypes from
|
||||
// ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.)
|
||||
//
|
||||
// This MIME type → compression mapping belongs in manifest-specific code in our manifest
|
||||
// package (but we should preferably replace/change UpdatedImage instead of productizing
|
||||
// this workaround).
|
||||
if srcInfo.CompressionAlgorithm == nil {
|
||||
switch srcInfo.MediaType {
|
||||
case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip:
|
||||
srcInfo.CompressionAlgorithm = &compression.Gzip
|
||||
case imgspecv1.MediaTypeImageLayerZstd:
|
||||
srcInfo.CompressionAlgorithm = &compression.Zstd
|
||||
}
|
||||
}
|
||||
|
||||
ic.c.printCopyInfo("blob", srcInfo)
|
||||
|
||||
cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
|
||||
diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
|
||||
// When encrypting to decrypting, only use the simple code path. We might be able to optimize more
|
||||
// (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again),
|
||||
// but it’s not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let’s not.
|
||||
encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil)
|
||||
canAvoidProcessingCompleteLayer := !diffIDIsNeeded && !encryptingOrDecrypting
|
||||
|
||||
// Don’t read the layer from the source if we already have the blob, and optimizations are acceptable.
|
||||
if canAvoidProcessingCompleteLayer {
|
||||
canChangeLayerCompression := ic.src.CanChangeLayerCompression(srcInfo.MediaType)
|
||||
logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v",
|
||||
srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression)
|
||||
canSubstitute := ic.canSubstituteBlobs && ic.src.CanChangeLayerCompression(srcInfo.MediaType)
|
||||
// TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm
|
||||
// that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing
|
||||
// a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause
|
||||
// a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob.
|
||||
// Fixing that will probably require passing more information to TryReusingBlob() than the current version of
|
||||
// the ImageDestination interface lets us pass in.
|
||||
reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
|
||||
Cache: ic.c.blobInfoCache,
|
||||
CanSubstitute: canSubstitute,
|
||||
EmptyLayer: emptyLayer,
|
||||
LayerIndex: &layerIndex,
|
||||
SrcRef: srcRef,
|
||||
})
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err)
|
||||
}
|
||||
if reused {
|
||||
logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
|
||||
func() { // A scope for defer
|
||||
bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", "skipped: already exists")
|
||||
defer bar.Abort(false)
|
||||
bar.mark100PercentComplete()
|
||||
}()
|
||||
|
||||
// Throw an event that the layer has been skipped
|
||||
if ic.c.progress != nil && ic.c.progressInterval > 0 {
|
||||
ic.c.progress <- types.ProgressProperties{
|
||||
Event: types.ProgressEventSkipped,
|
||||
Artifact: srcInfo,
|
||||
}
|
||||
}
|
||||
|
||||
return updatedBlobInfoFromReuse(srcInfo, reusedBlob), cachedDiffID, nil
|
||||
}
|
||||
}
|
||||
|
||||
// A partial pull is managed by the destination storage, that decides what portions
|
||||
// of the source file are not known yet and must be fetched.
|
||||
// Attempt a partial only when the source allows to retrieve a blob partially and
|
||||
// the destination has support for it.
|
||||
if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() {
|
||||
if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer
|
||||
bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
|
||||
hideProgressBar := true
|
||||
defer func() { // Note that this is not the same as defer bar.Abort(hideProgressBar); we need hideProgressBar to be evaluated lazily.
|
||||
bar.Abort(hideProgressBar)
|
||||
}()
|
||||
|
||||
proxy := blobChunkAccessorProxy{
|
||||
wrapped: ic.c.rawSource,
|
||||
bar: bar,
|
||||
}
|
||||
uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache)
|
||||
if err == nil {
|
||||
if srcInfo.Size != -1 {
|
||||
bar.SetRefill(srcInfo.Size - bar.Current())
|
||||
}
|
||||
bar.mark100PercentComplete()
|
||||
hideProgressBar = false
|
||||
logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest)
|
||||
return true, updatedBlobInfoFromUpload(srcInfo, uploadedBlob)
|
||||
}
|
||||
logrus.Debugf("Failed to retrieve partial blob: %v", err)
|
||||
return false, types.BlobInfo{}
|
||||
}(); reused {
|
||||
return blobInfo, cachedDiffID, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback: copy the layer, computing the diffID if we need to do so
|
||||
return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer
|
||||
bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done")
|
||||
defer bar.Abort(false)
|
||||
|
||||
srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, "", fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err)
|
||||
}
|
||||
defer srcStream.Close()
|
||||
|
||||
blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, "", err
|
||||
}
|
||||
|
||||
diffID := cachedDiffID
|
||||
if diffIDIsNeeded {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return types.BlobInfo{}, "", ctx.Err()
|
||||
case diffIDResult := <-diffIDChan:
|
||||
if diffIDResult.err != nil {
|
||||
return types.BlobInfo{}, "", fmt.Errorf("computing layer DiffID: %w", diffIDResult.err)
|
||||
}
|
||||
logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
|
||||
// Don’t record any associations that involve encrypted data. This is a bit crude,
|
||||
// some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
|
||||
// might be safe, but it’s not trivially obvious, so let’s be conservative for now.
|
||||
// This crude approach also means we don’t need to record whether a blob is encrypted
|
||||
// in the blob info cache (which would probably be necessary for any more complex logic),
|
||||
// and the simplicity is attractive.
|
||||
if !encryptingOrDecrypting {
|
||||
// This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
|
||||
// we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
|
||||
ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
|
||||
}
|
||||
diffID = diffIDResult.digest
|
||||
}
|
||||
}
|
||||
|
||||
bar.mark100PercentComplete()
|
||||
return blobInfo, diffID, nil
|
||||
}()
|
||||
}
|
||||
|
||||
// updatedBlobInfoFromReuse returns inputInfo updated with reusedBlob which was created based on inputInfo.
|
||||
func updatedBlobInfoFromReuse(inputInfo types.BlobInfo, reusedBlob private.ReusedBlob) types.BlobInfo {
|
||||
// The transport is only tasked with finding the blob, determining its size if necessary, and returning the right
|
||||
// compression format if the blob was substituted.
|
||||
// Handling of compression, encryption, and the related MIME types and the like are all the responsibility
|
||||
// of the generic code in this package.
|
||||
res := types.BlobInfo{
|
||||
Digest: reusedBlob.Digest,
|
||||
Size: reusedBlob.Size,
|
||||
URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior.
|
||||
Annotations: inputInfo.Annotations, // FIXME: This should remove zstd:chunked annotations (but those annotations being left with incorrect values should not break pulls)
|
||||
MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation.
|
||||
CompressionOperation: reusedBlob.CompressionOperation,
|
||||
CompressionAlgorithm: reusedBlob.CompressionAlgorithm,
|
||||
CryptoOperation: inputInfo.CryptoOperation, // Expected to be unset anyway.
|
||||
}
|
||||
// The transport is only expected to fill CompressionOperation and CompressionAlgorithm
|
||||
// if the blob was substituted; otherwise, fill it in based
|
||||
// on what we know from the srcInfos we were given.
|
||||
if reusedBlob.Digest == inputInfo.Digest {
|
||||
res.CompressionOperation = inputInfo.CompressionOperation
|
||||
res.CompressionAlgorithm = inputInfo.CompressionAlgorithm
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
// it copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
// perhaps (de/re/)compressing the stream,
// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
	diffIDIsNeeded bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) {
	var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil
	var diffIDChan chan diffIDResult

	// err is pre-set to a sentinel so that the deferred CloseWithError reports a failure
	// even if we exit via a panic before copyBlobFromStream assigns it.
	err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below
	if diffIDIsNeeded {
		diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block.
		pipeReader, pipeWriter := io.Pipe()
		defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily.
			_ = pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
		}()

		getDiffIDRecorder = func(decompressor compressiontypes.DecompressorFunc) io.Writer {
			// If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further
			// reading from the pipe has failed, we don’t really care.
			// We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it,
			// the return value includes an error indication, which we do check.
			//
			// If this gets never called, pipeReader will not be used anywhere, but pipeWriter will only be
			// closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC.
			go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader
			return pipeWriter
		}
	}

	blobInfo, err := ic.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success
	return blobInfo, diffIDChan, err
	// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
}
|
||||
|
||||
// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest.
func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compressiontypes.DecompressorFunc) {
	// Pre-fill the result with a sentinel error so that if computeDiffID panics, the
	// deferred send still delivers a failure instead of a bogus success.
	result := diffIDResult{
		digest: "",
		err:    errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"),
	}
	// Deferred first, so by LIFO ordering it runs last — after layerStream is closed.
	defer func() { dest <- result }()
	defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead.

	result.digest, result.err = computeDiffID(layerStream, decompressor)
}
|
||||
|
||||
// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest.
|
||||
func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorFunc) (digest.Digest, error) {
|
||||
if decompressor != nil {
|
||||
s, err := decompressor(stream)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer s.Close()
|
||||
stream = s
|
||||
}
|
||||
|
||||
return digest.Canonical.FromReader(stream)
|
||||
}
|
||||
2
vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
generated
vendored
2
vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
generated
vendored
|
|
@ -9,7 +9,7 @@ import (
|
|||
// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path.
|
||||
// To do so, all elements of the input path must exist; as a special case, the final component may be
|
||||
// a non-existent name (but not a symlink pointing to a non-existent name)
|
||||
// This is intended as a a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc.
|
||||
// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc.
|
||||
func ResolvePathToFullyExplicit(path string) (string, error) {
|
||||
switch _, err := os.Lstat(path); {
|
||||
case err == nil:
|
||||
|
|
|
|||
39
vendor/github.com/containers/image/v5/docker/archive/dest.go
generated
vendored
39
vendor/github.com/containers/image/v5/docker/archive/dest.go
generated
vendored
|
|
@ -3,7 +3,6 @@ package archive
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/containers/image/v5/docker/internal/tarfile"
|
||||
"github.com/containers/image/v5/internal/private"
|
||||
|
|
@ -13,8 +12,8 @@ import (
|
|||
type archiveImageDestination struct {
|
||||
*tarfile.Destination // Implements most of types.ImageDestination
|
||||
ref archiveReference
|
||||
archive *tarfile.Writer // Should only be closed if writer != nil
|
||||
writer io.Closer // May be nil if the archive is shared
|
||||
writer *Writer // Should be closed if closeWriter
|
||||
closeWriter bool
|
||||
}
|
||||
|
||||
func newImageDestination(sys *types.SystemContext, ref archiveReference) (private.ImageDestination, error) {
|
||||
|
|
@ -22,29 +21,28 @@ func newImageDestination(sys *types.SystemContext, ref archiveReference) (privat
|
|||
return nil, fmt.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex)
|
||||
}
|
||||
|
||||
var archive *tarfile.Writer
|
||||
var writer io.Closer
|
||||
if ref.archiveWriter != nil {
|
||||
archive = ref.archiveWriter
|
||||
writer = nil
|
||||
var writer *Writer
|
||||
var closeWriter bool
|
||||
if ref.writer != nil {
|
||||
writer = ref.writer
|
||||
closeWriter = false
|
||||
} else {
|
||||
fh, err := openArchiveForWriting(ref.path)
|
||||
w, err := NewWriter(sys, ref.path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
archive = tarfile.NewWriter(fh)
|
||||
writer = fh
|
||||
writer = w
|
||||
closeWriter = true
|
||||
}
|
||||
tarDest := tarfile.NewDestination(sys, archive, ref.Transport().Name(), ref.ref)
|
||||
tarDest := tarfile.NewDestination(sys, writer.archive, ref.Transport().Name(), ref.ref)
|
||||
if sys != nil && sys.DockerArchiveAdditionalTags != nil {
|
||||
tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags)
|
||||
}
|
||||
return &archiveImageDestination{
|
||||
Destination: tarDest,
|
||||
ref: ref,
|
||||
archive: archive,
|
||||
writer: writer,
|
||||
closeWriter: closeWriter,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
@ -56,7 +54,7 @@ func (d *archiveImageDestination) Reference() types.ImageReference {
|
|||
|
||||
// Close removes resources associated with an initialized ImageDestination, if any.
|
||||
func (d *archiveImageDestination) Close() error {
|
||||
if d.writer != nil {
|
||||
if d.closeWriter {
|
||||
return d.writer.Close()
|
||||
}
|
||||
return nil
|
||||
|
|
@ -70,8 +68,15 @@ func (d *archiveImageDestination) Close() error {
|
|||
// - Uploaded data MAY be visible to others before Commit() is called
|
||||
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
|
||||
func (d *archiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
|
||||
if d.writer != nil {
|
||||
return d.archive.Close()
|
||||
d.writer.imageCommitted()
|
||||
if d.closeWriter {
|
||||
// We could do this only in .Close(), but failures in .Close() are much more likely to be
|
||||
// ignored by callers that use defer. So, in single-image destinations, try to complete
|
||||
// the archive here.
|
||||
// But if Commit() is never called, let .Close() clean up.
|
||||
err := d.writer.Close()
|
||||
d.closeWriter = false
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
4
vendor/github.com/containers/image/v5/docker/archive/src.go
generated
vendored
4
vendor/github.com/containers/image/v5/docker/archive/src.go
generated
vendored
|
|
@ -1,8 +1,6 @@
|
|||
package archive
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/containers/image/v5/docker/internal/tarfile"
|
||||
"github.com/containers/image/v5/internal/private"
|
||||
"github.com/containers/image/v5/types"
|
||||
|
|
@ -15,7 +13,7 @@ type archiveImageSource struct {
|
|||
|
||||
// newImageSource returns a types.ImageSource for the specified image reference.
|
||||
// The caller must call .Close() on the returned ImageSource.
|
||||
func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveReference) (private.ImageSource, error) {
|
||||
func newImageSource(sys *types.SystemContext, ref archiveReference) (private.ImageSource, error) {
|
||||
var archive *tarfile.Reader
|
||||
var closeArchive bool
|
||||
if ref.archiveReader != nil {
|
||||
|
|
|
|||
23
vendor/github.com/containers/image/v5/docker/archive/transport.go
generated
vendored
23
vendor/github.com/containers/image/v5/docker/archive/transport.go
generated
vendored
|
|
@ -53,7 +53,7 @@ type archiveReference struct {
|
|||
// file, not necessarily path precisely).
|
||||
archiveReader *tarfile.Reader
|
||||
// If not nil, must have been created for path
|
||||
archiveWriter *tarfile.Writer
|
||||
writer *Writer
|
||||
}
|
||||
|
||||
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference.
|
||||
|
|
@ -62,24 +62,23 @@ func ParseReference(refString string) (types.ImageReference, error) {
|
|||
return nil, fmt.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString)
|
||||
}
|
||||
|
||||
parts := strings.SplitN(refString, ":", 2)
|
||||
path := parts[0]
|
||||
path, tagOrIndex, gotTagOrIndex := strings.Cut(refString, ":")
|
||||
var nt reference.NamedTagged
|
||||
sourceIndex := -1
|
||||
|
||||
if len(parts) == 2 {
|
||||
if gotTagOrIndex {
|
||||
// A :tag or :@index was specified.
|
||||
if len(parts[1]) > 0 && parts[1][0] == '@' {
|
||||
i, err := strconv.Atoi(parts[1][1:])
|
||||
if len(tagOrIndex) > 0 && tagOrIndex[0] == '@' {
|
||||
i, err := strconv.Atoi(tagOrIndex[1:])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Invalid source index %s: %w", parts[1], err)
|
||||
return nil, fmt.Errorf("Invalid source index %s: %w", tagOrIndex, err)
|
||||
}
|
||||
if i < 0 {
|
||||
return nil, fmt.Errorf("Invalid source index @%d: must not be negative", i)
|
||||
}
|
||||
sourceIndex = i
|
||||
} else {
|
||||
ref, err := reference.ParseNormalizedNamed(parts[1])
|
||||
ref, err := reference.ParseNormalizedNamed(tagOrIndex)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("docker-archive parsing reference: %w", err)
|
||||
}
|
||||
|
|
@ -108,7 +107,7 @@ func NewIndexReference(path string, sourceIndex int) (types.ImageReference, erro
|
|||
// newReference returns a docker archive reference for a path, an optional reference or sourceIndex,
|
||||
// and optionally a tarfile.Reader and/or a tarfile.Writer matching path.
|
||||
func newReference(path string, ref reference.NamedTagged, sourceIndex int,
|
||||
archiveReader *tarfile.Reader, archiveWriter *tarfile.Writer) (types.ImageReference, error) {
|
||||
archiveReader *tarfile.Reader, writer *Writer) (types.ImageReference, error) {
|
||||
if strings.Contains(path, ":") {
|
||||
return nil, fmt.Errorf("Invalid docker-archive: reference: colon in path %q is not supported", path)
|
||||
}
|
||||
|
|
@ -126,7 +125,7 @@ func newReference(path string, ref reference.NamedTagged, sourceIndex int,
|
|||
ref: ref,
|
||||
sourceIndex: sourceIndex,
|
||||
archiveReader: archiveReader,
|
||||
archiveWriter: archiveWriter,
|
||||
writer: writer,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
@ -137,7 +136,7 @@ func (ref archiveReference) Transport() types.ImageTransport {
|
|||
// StringWithinTransport returns a string representation of the reference, which MUST be such that
|
||||
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
|
||||
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
|
||||
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
|
||||
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
|
||||
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
|
||||
func (ref archiveReference) StringWithinTransport() string {
|
||||
switch {
|
||||
|
|
@ -191,7 +190,7 @@ func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemConte
|
|||
// NewImageSource returns a types.ImageSource for this reference.
|
||||
// The caller must call .Close() on the returned ImageSource.
|
||||
func (ref archiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
|
||||
return newImageSource(ctx, sys, ref)
|
||||
return newImageSource(sys, ref)
|
||||
}
|
||||
|
||||
// NewImageDestination returns a types.ImageDestination for this reference.
|
||||
|
|
|
|||
98
vendor/github.com/containers/image/v5/docker/archive/writer.go
generated
vendored
98
vendor/github.com/containers/image/v5/docker/archive/writer.go
generated
vendored
|
|
@ -5,6 +5,7 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/containers/image/v5/docker/internal/tarfile"
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
|
|
@ -13,47 +14,19 @@ import (
|
|||
|
||||
// Writer manages a single in-progress Docker archive and allows adding images to it.
|
||||
type Writer struct {
|
||||
path string // The original, user-specified path; not the maintained temporary file, if any
|
||||
archive *tarfile.Writer
|
||||
writer io.Closer
|
||||
path string // The original, user-specified path; not the maintained temporary file, if any
|
||||
regularFile bool // path refers to a regular file (e.g. not a pipe)
|
||||
archive *tarfile.Writer
|
||||
writer io.Closer
|
||||
|
||||
// The following state can only be accessed with the mutex held.
|
||||
mutex sync.Mutex
|
||||
hadCommit bool // At least one successful commit has happened
|
||||
}
|
||||
|
||||
// NewWriter returns a Writer for path.
|
||||
// The caller should call .Close() on the returned object.
|
||||
func NewWriter(sys *types.SystemContext, path string) (*Writer, error) {
|
||||
fh, err := openArchiveForWriting(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
archive := tarfile.NewWriter(fh)
|
||||
|
||||
return &Writer{
|
||||
path: path,
|
||||
archive: archive,
|
||||
writer: fh,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close writes all outstanding data about images to the archive, and
|
||||
// releases state associated with the Writer, if any.
|
||||
// No more images can be added after this is called.
|
||||
func (w *Writer) Close() error {
|
||||
err := w.archive.Close()
|
||||
if err2 := w.writer.Close(); err2 != nil && err == nil {
|
||||
err = err2
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// NewReference returns an ImageReference that allows adding an image to Writer,
|
||||
// with an optional reference.
|
||||
func (w *Writer) NewReference(destinationRef reference.NamedTagged) (types.ImageReference, error) {
|
||||
return newReference(w.path, destinationRef, -1, nil, w.archive)
|
||||
}
|
||||
|
||||
// openArchiveForWriting opens path for writing a tar archive,
|
||||
// making a few sanity checks.
|
||||
func openArchiveForWriting(path string) (*os.File, error) {
|
||||
// path can be either a pipe or a regular file
|
||||
// in the case of a pipe, we require that we can open it for write
|
||||
// in the case of a regular file, we don't want to overwrite any pre-existing file
|
||||
|
|
@ -69,15 +42,62 @@ func openArchiveForWriting(path string) (*os.File, error) {
|
|||
fh.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
fhStat, err := fh.Stat()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("statting file %q: %w", path, err)
|
||||
}
|
||||
|
||||
if fhStat.Mode().IsRegular() && fhStat.Size() != 0 {
|
||||
regularFile := fhStat.Mode().IsRegular()
|
||||
if regularFile && fhStat.Size() != 0 {
|
||||
return nil, errors.New("docker-archive doesn't support modifying existing images")
|
||||
}
|
||||
|
||||
archive := tarfile.NewWriter(fh)
|
||||
|
||||
succeeded = true
|
||||
return fh, nil
|
||||
return &Writer{
|
||||
path: path,
|
||||
regularFile: regularFile,
|
||||
archive: archive,
|
||||
writer: fh,
|
||||
hadCommit: false,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// imageCommitted notifies the Writer that at least one image was successfully committed to the stream.
|
||||
func (w *Writer) imageCommitted() {
|
||||
w.mutex.Lock()
|
||||
defer w.mutex.Unlock()
|
||||
w.hadCommit = true
|
||||
}
|
||||
|
||||
// Close writes all outstanding data about images to the archive, and
|
||||
// releases state associated with the Writer, if any.
|
||||
// No more images can be added after this is called.
|
||||
func (w *Writer) Close() error {
|
||||
err := w.archive.Close()
|
||||
if err2 := w.writer.Close(); err2 != nil && err == nil {
|
||||
err = err2
|
||||
}
|
||||
if err == nil && w.regularFile && !w.hadCommit {
|
||||
// Writing to the destination never had a success; delete the destination if we created it.
|
||||
// This is done primarily because we don’t implement adding another image to a pre-existing image, so if we
|
||||
// left a partial archive around (notably because reading from the _source_ has failed), we couldn’t retry without
|
||||
// the caller manually deleting the partial archive. So, delete it instead.
|
||||
//
|
||||
// Archives with at least one successfully created image are left around; they might still be valuable.
|
||||
//
|
||||
// Note a corner case: If there _originally_ was an empty file (which is not a valid archive anyway), this deletes it.
|
||||
// Ideally, if w.regularFile, we should write the full contents to a temporary file and use os.Rename here, only on success.
|
||||
if err2 := os.Remove(w.path); err2 != nil {
|
||||
err = err2
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// NewReference returns an ImageReference that allows adding an image to Writer,
|
||||
// with an optional reference.
|
||||
func (w *Writer) NewReference(destinationRef reference.NamedTagged) (types.ImageReference, error) {
|
||||
return newReference(w.path, destinationRef, -1, nil, w)
|
||||
}
|
||||
|
|
|
|||
253
vendor/github.com/containers/image/v5/docker/body_reader.go
generated
vendored
Normal file
253
vendor/github.com/containers/image/v5/docker/body_reader.go
generated
vendored
Normal file
|
|
@ -0,0 +1,253 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
// bodyReaderMinimumProgress is the minimum progress we consider a good reason to retry
|
||||
bodyReaderMinimumProgress = 1 * 1024 * 1024
|
||||
// bodyReaderMSSinceLastRetry is the minimum time since a last retry we consider a good reason to retry
|
||||
bodyReaderMSSinceLastRetry = 60 * 1_000
|
||||
)
|
||||
|
||||
// bodyReader is an io.ReadCloser returned by dockerImageSource.GetBlob,
|
||||
// which can transparently resume some (very limited) kinds of aborted connections.
|
||||
type bodyReader struct {
|
||||
ctx context.Context
|
||||
c *dockerClient
|
||||
path string // path to pass to makeRequest to retry
|
||||
logURL *url.URL // a string to use in error messages
|
||||
firstConnectionTime time.Time
|
||||
|
||||
body io.ReadCloser // The currently open connection we use to read data, or nil if there is nothing to read from / close.
|
||||
lastRetryOffset int64 // -1 if N/A
|
||||
lastRetryTime time.Time // time.Time{} if N/A
|
||||
offset int64 // Current offset within the blob
|
||||
lastSuccessTime time.Time // time.Time{} if N/A
|
||||
}
|
||||
|
||||
// newBodyReader creates a bodyReader for request path in c.
|
||||
// firstBody is an already correctly opened body for the blob, returning the full blob from the start.
|
||||
// If reading from firstBody fails, bodyReader may heuristically decide to resume.
|
||||
func newBodyReader(ctx context.Context, c *dockerClient, path string, firstBody io.ReadCloser) (io.ReadCloser, error) {
|
||||
logURL, err := c.resolveRequestURL(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res := &bodyReader{
|
||||
ctx: ctx,
|
||||
c: c,
|
||||
path: path,
|
||||
logURL: logURL,
|
||||
firstConnectionTime: time.Now(),
|
||||
|
||||
body: firstBody,
|
||||
lastRetryOffset: -1,
|
||||
lastRetryTime: time.Time{},
|
||||
offset: 0,
|
||||
lastSuccessTime: time.Time{},
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// parseDecimalInString ensures that s[start:] starts with a non-negative decimal number, and returns that number and the offset after the number.
|
||||
func parseDecimalInString(s string, start int) (int64, int, error) {
|
||||
i := start
|
||||
for i < len(s) && s[i] >= '0' && s[i] <= '9' {
|
||||
i++
|
||||
}
|
||||
if i == start {
|
||||
return -1, -1, errors.New("missing decimal number")
|
||||
}
|
||||
v, err := strconv.ParseInt(s[start:i], 10, 64)
|
||||
if err != nil {
|
||||
return -1, -1, fmt.Errorf("parsing number: %w", err)
|
||||
}
|
||||
return v, i, nil
|
||||
}
|
||||
|
||||
// parseExpectedChar ensures that s[pos] is the expected byte, and returns the offset after it.
|
||||
func parseExpectedChar(s string, pos int, expected byte) (int, error) {
|
||||
if pos == len(s) || s[pos] != expected {
|
||||
return -1, fmt.Errorf("missing expected %q", expected)
|
||||
}
|
||||
return pos + 1, nil
|
||||
}
|
||||
|
||||
// parseContentRange ensures that res contains a Content-Range header with a byte range, and returns (first, last, completeLength) on success. Size can be -1.
|
||||
func parseContentRange(res *http.Response) (int64, int64, int64, error) {
|
||||
hdrs := res.Header.Values("Content-Range")
|
||||
switch len(hdrs) {
|
||||
case 0:
|
||||
return -1, -1, -1, errors.New("missing Content-Range: header")
|
||||
case 1:
|
||||
break
|
||||
default:
|
||||
return -1, -1, -1, fmt.Errorf("ambiguous Content-Range:, %d header values", len(hdrs))
|
||||
}
|
||||
hdr := hdrs[0]
|
||||
expectedPrefix := "bytes "
|
||||
if !strings.HasPrefix(hdr, expectedPrefix) {
|
||||
return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, missing prefix %q", hdr, expectedPrefix)
|
||||
}
|
||||
first, pos, err := parseDecimalInString(hdr, len(expectedPrefix))
|
||||
if err != nil {
|
||||
return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing first-pos: %w", hdr, err)
|
||||
}
|
||||
pos, err = parseExpectedChar(hdr, pos, '-')
|
||||
if err != nil {
|
||||
return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q: %w", hdr, err)
|
||||
}
|
||||
last, pos, err := parseDecimalInString(hdr, pos)
|
||||
if err != nil {
|
||||
return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing last-pos: %w", hdr, err)
|
||||
}
|
||||
pos, err = parseExpectedChar(hdr, pos, '/')
|
||||
if err != nil {
|
||||
return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q: %w", hdr, err)
|
||||
}
|
||||
completeLength := int64(-1)
|
||||
if pos < len(hdr) && hdr[pos] == '*' {
|
||||
pos++
|
||||
} else {
|
||||
completeLength, pos, err = parseDecimalInString(hdr, pos)
|
||||
if err != nil {
|
||||
return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing complete-length: %w", hdr, err)
|
||||
}
|
||||
}
|
||||
if pos < len(hdr) {
|
||||
return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, unexpected trailing content", hdr)
|
||||
}
|
||||
return first, last, completeLength, nil
|
||||
}
|
||||
|
||||
// Read implements io.ReadCloser
|
||||
func (br *bodyReader) Read(p []byte) (int, error) {
|
||||
if br.body == nil {
|
||||
return 0, fmt.Errorf("internal error: bodyReader.Read called on a closed object for %s", br.logURL.Redacted())
|
||||
}
|
||||
n, err := br.body.Read(p)
|
||||
br.offset += int64(n)
|
||||
switch {
|
||||
case err == nil || err == io.EOF:
|
||||
br.lastSuccessTime = time.Now()
|
||||
return n, err // Unlike the default: case, don’t log anything.
|
||||
|
||||
case errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, syscall.ECONNRESET):
|
||||
originalErr := err
|
||||
redactedURL := br.logURL.Redacted()
|
||||
if err := br.errorIfNotReconnecting(originalErr, redactedURL); err != nil {
|
||||
return n, err
|
||||
}
|
||||
|
||||
if err := br.body.Close(); err != nil {
|
||||
logrus.Debugf("Error closing blob body: %v", err) // … and ignore err otherwise
|
||||
}
|
||||
br.body = nil
|
||||
time.Sleep(1*time.Second + time.Duration(rand.Intn(100_000))*time.Microsecond) // Some jitter so that a failure blip doesn’t cause a deterministic stampede
|
||||
|
||||
headers := map[string][]string{
|
||||
"Range": {fmt.Sprintf("bytes=%d-", br.offset)},
|
||||
}
|
||||
res, err := br.c.makeRequest(br.ctx, http.MethodGet, br.path, headers, nil, v2Auth, nil)
|
||||
if err != nil {
|
||||
return n, fmt.Errorf("%w (while reconnecting: %v)", originalErr, err)
|
||||
}
|
||||
consumedBody := false
|
||||
defer func() {
|
||||
if !consumedBody {
|
||||
res.Body.Close()
|
||||
}
|
||||
}()
|
||||
switch res.StatusCode {
|
||||
case http.StatusPartialContent: // OK
|
||||
// A client MUST inspect a 206 response's Content-Type and Content-Range field(s) to determine what parts are enclosed and whether additional requests are needed.
|
||||
// The recipient of an invalid Content-Range MUST NOT attempt to recombine the received content with a stored representation.
|
||||
first, last, completeLength, err := parseContentRange(res)
|
||||
if err != nil {
|
||||
return n, fmt.Errorf("%w (after reconnecting, invalid Content-Range header: %v)", originalErr, err)
|
||||
}
|
||||
// We don’t handle responses that start at an unrequested offset, nor responses that terminate before the end of the full blob.
|
||||
if first != br.offset || (completeLength != -1 && last+1 != completeLength) {
|
||||
return n, fmt.Errorf("%w (after reconnecting at offset %d, got unexpected Content-Range %d-%d/%d)", originalErr, br.offset, first, last, completeLength)
|
||||
}
|
||||
// Continue below
|
||||
case http.StatusOK:
|
||||
return n, fmt.Errorf("%w (after reconnecting, server did not process a Range: header, status %d)", originalErr, http.StatusOK)
|
||||
default:
|
||||
err := registryHTTPResponseToError(res)
|
||||
return n, fmt.Errorf("%w (after reconnecting, fetching blob: %v)", originalErr, err)
|
||||
}
|
||||
|
||||
logrus.Debugf("Successfully reconnected to %s", redactedURL)
|
||||
consumedBody = true
|
||||
br.body = res.Body
|
||||
br.lastRetryOffset = br.offset
|
||||
br.lastRetryTime = time.Time{}
|
||||
return n, nil
|
||||
|
||||
default:
|
||||
logrus.Debugf("Error reading blob body from %s: %#v", br.logURL.Redacted(), err)
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
|
||||
// millisecondsSinceOptional is like currentTime.Sub(tm).Milliseconds, but it returns a floating-point value.
|
||||
// If tm is time.Time{}, it returns math.NaN()
|
||||
func millisecondsSinceOptional(currentTime time.Time, tm time.Time) float64 {
|
||||
if tm == (time.Time{}) {
|
||||
return math.NaN()
|
||||
}
|
||||
return float64(currentTime.Sub(tm).Nanoseconds()) / 1_000_000.0
|
||||
}
|
||||
|
||||
// errorIfNotReconnecting makes a heuristic decision whether we should reconnect after err at redactedURL; if so, it returns nil,
|
||||
// otherwise it returns an appropriate error to return to the caller (possibly augmented with data about the heuristic)
|
||||
func (br *bodyReader) errorIfNotReconnecting(originalErr error, redactedURL string) error {
|
||||
currentTime := time.Now()
|
||||
msSinceFirstConnection := millisecondsSinceOptional(currentTime, br.firstConnectionTime)
|
||||
msSinceLastRetry := millisecondsSinceOptional(currentTime, br.lastRetryTime)
|
||||
msSinceLastSuccess := millisecondsSinceOptional(currentTime, br.lastSuccessTime)
|
||||
logrus.Debugf("Reading blob body from %s failed (%#v), decision inputs: total %d @%.3f ms, last retry %d @%.3f ms, last progress @%.3f ms",
|
||||
redactedURL, originalErr, br.offset, msSinceFirstConnection, br.lastRetryOffset, msSinceLastRetry, msSinceLastSuccess)
|
||||
progress := br.offset - br.lastRetryOffset
|
||||
if progress >= bodyReaderMinimumProgress {
|
||||
logrus.Infof("Reading blob body from %s failed (%v), reconnecting after %d bytes…", redactedURL, originalErr, progress)
|
||||
return nil
|
||||
}
|
||||
if br.lastRetryTime == (time.Time{}) {
|
||||
logrus.Infof("Reading blob body from %s failed (%v), reconnecting (first reconnection)…", redactedURL, originalErr)
|
||||
return nil
|
||||
}
|
||||
if msSinceLastRetry >= bodyReaderMSSinceLastRetry {
|
||||
logrus.Infof("Reading blob body from %s failed (%v), reconnecting after %.3f ms…", redactedURL, originalErr, msSinceLastRetry)
|
||||
return nil
|
||||
}
|
||||
logrus.Debugf("Not reconnecting to %s: insufficient progress %d / time since last retry %.3f ms", redactedURL, progress, msSinceLastRetry)
|
||||
return fmt.Errorf("(heuristic tuning data: total %d @%.3f ms, last retry %d @%.3f ms, last progress @ %.3f ms): %w",
|
||||
br.offset, msSinceFirstConnection, br.lastRetryOffset, msSinceLastRetry, msSinceLastSuccess, originalErr)
|
||||
}
|
||||
|
||||
// Close implements io.ReadCloser
|
||||
func (br *bodyReader) Close() error {
|
||||
if br.body == nil {
|
||||
return nil
|
||||
}
|
||||
err := br.body.Close()
|
||||
br.body = nil
|
||||
return err
|
||||
}
|
||||
147
vendor/github.com/containers/image/v5/docker/distribution_error.go
generated
vendored
Normal file
147
vendor/github.com/containers/image/v5/docker/distribution_error.go
generated
vendored
Normal file
|
|
@ -0,0 +1,147 @@
|
|||
// Code below is taken from https://github.com/distribution/distribution/blob/a4d9db5a884b70be0c96dd6a7a9dbef4f2798c51/registry/client/errors.go
|
||||
// Copyright 2022 github.com/distribution/distribution authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge"
|
||||
)
|
||||
|
||||
// errNoErrorsInBody is returned when an HTTP response body parses to an empty
|
||||
// errcode.Errors slice.
|
||||
var errNoErrorsInBody = errors.New("no error details found in HTTP response body")
|
||||
|
||||
// unexpectedHTTPStatusError is returned when an unexpected HTTP status is
|
||||
// returned when making a registry api call.
|
||||
type unexpectedHTTPStatusError struct {
|
||||
Status string
|
||||
}
|
||||
|
||||
func (e *unexpectedHTTPStatusError) Error() string {
|
||||
return fmt.Sprintf("received unexpected HTTP status: %s", e.Status)
|
||||
}
|
||||
|
||||
// unexpectedHTTPResponseError is returned when an expected HTTP status code
|
||||
// is returned, but the content was unexpected and failed to be parsed.
|
||||
type unexpectedHTTPResponseError struct {
|
||||
ParseErr error
|
||||
StatusCode int
|
||||
Response []byte
|
||||
}
|
||||
|
||||
func (e *unexpectedHTTPResponseError) Error() string {
|
||||
return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response))
|
||||
}
|
||||
|
||||
func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
|
||||
var errors errcode.Errors
|
||||
body, err := io.ReadAll(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// For backward compatibility, handle irregularly formatted
|
||||
// messages that contain a "details" field.
|
||||
var detailsErr struct {
|
||||
Details string `json:"details"`
|
||||
}
|
||||
err = json.Unmarshal(body, &detailsErr)
|
||||
if err == nil && detailsErr.Details != "" {
|
||||
switch statusCode {
|
||||
case http.StatusUnauthorized:
|
||||
return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
|
||||
case http.StatusTooManyRequests:
|
||||
return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
|
||||
default:
|
||||
return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
|
||||
}
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(body, &errors); err != nil {
|
||||
return &unexpectedHTTPResponseError{
|
||||
ParseErr: err,
|
||||
StatusCode: statusCode,
|
||||
Response: body,
|
||||
}
|
||||
}
|
||||
|
||||
if len(errors) == 0 {
|
||||
// If there was no error specified in the body, return
|
||||
// UnexpectedHTTPResponseError.
|
||||
return &unexpectedHTTPResponseError{
|
||||
ParseErr: errNoErrorsInBody,
|
||||
StatusCode: statusCode,
|
||||
Response: body,
|
||||
}
|
||||
}
|
||||
|
||||
return errors
|
||||
}
|
||||
|
||||
func makeErrorList(err error) []error {
|
||||
if errL, ok := err.(errcode.Errors); ok {
|
||||
return []error(errL)
|
||||
}
|
||||
return []error{err}
|
||||
}
|
||||
|
||||
func mergeErrors(err1, err2 error) error {
|
||||
return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
|
||||
}
|
||||
|
||||
// handleErrorResponse returns error parsed from HTTP response for an
|
||||
// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
|
||||
// UnexpectedHTTPStatusError returned for response code outside of expected
|
||||
// range.
|
||||
func handleErrorResponse(resp *http.Response) error {
|
||||
if resp.StatusCode >= 400 && resp.StatusCode < 500 {
|
||||
// Check for OAuth errors within the `WWW-Authenticate` header first
|
||||
// See https://tools.ietf.org/html/rfc6750#section-3
|
||||
for _, c := range dockerChallenge.ResponseChallenges(resp) {
|
||||
if c.Scheme == "bearer" {
|
||||
var err errcode.Error
|
||||
// codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
|
||||
switch c.Parameters["error"] {
|
||||
case "invalid_token":
|
||||
err.Code = errcode.ErrorCodeUnauthorized
|
||||
case "insufficient_scope":
|
||||
err.Code = errcode.ErrorCodeDenied
|
||||
default:
|
||||
continue
|
||||
}
|
||||
if description := c.Parameters["error_description"]; description != "" {
|
||||
err.Message = description
|
||||
} else {
|
||||
err.Message = err.Code.Message()
|
||||
}
|
||||
|
||||
return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
|
||||
}
|
||||
}
|
||||
err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
|
||||
if uErr, ok := err.(*unexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
|
||||
return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
|
||||
}
|
||||
return err
|
||||
}
|
||||
return &unexpectedHTTPStatusError{Status: resp.Status}
|
||||
}
|
||||
231
vendor/github.com/containers/image/v5/docker/docker_client.go
generated
vendored
231
vendor/github.com/containers/image/v5/docker/docker_client.go
generated
vendored
|
|
@ -1,6 +1,7 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
|
|
@ -18,16 +19,15 @@ import (
|
|||
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/internal/iolimits"
|
||||
"github.com/containers/image/v5/internal/useragent"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/pkg/docker/config"
|
||||
"github.com/containers/image/v5/pkg/sysregistriesv2"
|
||||
"github.com/containers/image/v5/pkg/tlsclientconfig"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/image/v5/version"
|
||||
"github.com/containers/storage/pkg/homedir"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
v2 "github.com/docker/distribution/registry/api/v2"
|
||||
clientLib "github.com/docker/distribution/registry/client"
|
||||
"github.com/docker/go-connections/tlsconfig"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
|
@ -68,8 +68,6 @@ var (
|
|||
{path: etcDir + "/containers/certs.d", absolute: true},
|
||||
{path: etcDir + "/docker/certs.d", absolute: true},
|
||||
}
|
||||
|
||||
defaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)"
|
||||
)
|
||||
|
||||
// extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go:
|
||||
|
|
@ -126,8 +124,9 @@ type dockerClient struct {
|
|||
}
|
||||
|
||||
type authScope struct {
|
||||
remoteName string
|
||||
actions string
|
||||
resourceType string
|
||||
remoteName string
|
||||
actions string
|
||||
}
|
||||
|
||||
// sendAuth determines whether we need authentication for v2 or v1 endpoint.
|
||||
|
|
@ -162,17 +161,6 @@ func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
|
|||
return token, nil
|
||||
}
|
||||
|
||||
// this is cloned from docker/go-connections because upstream docker has changed
|
||||
// it and make deps here fails otherwise.
|
||||
// We'll drop this once we upgrade to docker 1.13.x deps.
|
||||
func serverDefault() *tls.Config {
|
||||
return &tls.Config{
|
||||
// Avoid fallback to SSL protocols < TLS1.0
|
||||
MinVersion: tls.VersionTLS10,
|
||||
CipherSuites: tlsconfig.DefaultServerAcceptedCiphers,
|
||||
}
|
||||
}
|
||||
|
||||
// dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort.
|
||||
func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
|
||||
if sys != nil && sys.DockerCertPath != "" {
|
||||
|
|
@ -214,6 +202,7 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
|
|||
// newDockerClientFromRef returns a new dockerClient instance for refHostname (a host a specified in the Docker image reference, not canonicalized to dockerRegistry)
|
||||
// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
|
||||
// signatureBase is always set in the return value
|
||||
// The caller must call .Close() on the returned client when done.
|
||||
func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, registryConfig *registryConfiguration, write bool, actions string) (*dockerClient, error) {
|
||||
auth, err := config.GetCredentialsForRef(sys, ref.ref)
|
||||
if err != nil {
|
||||
|
|
@ -236,6 +225,7 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, regis
|
|||
}
|
||||
client.signatureBase = sigBase
|
||||
client.useSigstoreAttachments = registryConfig.useSigstoreAttachments(ref)
|
||||
client.scope.resourceType = "repository"
|
||||
client.scope.actions = actions
|
||||
client.scope.remoteName = reference.Path(ref.ref)
|
||||
return client, nil
|
||||
|
|
@ -247,12 +237,15 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, regis
|
|||
// (e.g., "registry.com[:5000][/some/namespace]/repo").
|
||||
// Please note that newDockerClient does not set all members of dockerClient
|
||||
// (e.g., username and password); those must be set by callers if necessary.
|
||||
// The caller must call .Close() on the returned client when done.
|
||||
func newDockerClient(sys *types.SystemContext, registry, reference string) (*dockerClient, error) {
|
||||
hostName := registry
|
||||
if registry == dockerHostname {
|
||||
registry = dockerRegistry
|
||||
}
|
||||
tlsClientConfig := serverDefault()
|
||||
tlsClientConfig := &tls.Config{
|
||||
CipherSuites: tlsconfig.DefaultServerAcceptedCiphers,
|
||||
}
|
||||
|
||||
// It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
|
||||
// because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible
|
||||
|
|
@ -282,7 +275,7 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
|
|||
}
|
||||
tlsClientConfig.InsecureSkipVerify = skipVerify
|
||||
|
||||
userAgent := defaultUserAgent
|
||||
userAgent := useragent.DefaultUserAgent
|
||||
if sys != nil && sys.DockerRegistryUserAgent != "" {
|
||||
userAgent = sys.DockerRegistryUserAgent
|
||||
}
|
||||
|
|
@ -302,6 +295,7 @@ func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password
|
|||
if err != nil {
|
||||
return fmt.Errorf("creating new docker client: %w", err)
|
||||
}
|
||||
defer client.Close()
|
||||
client.auth = types.DockerAuthConfig{
|
||||
Username: username,
|
||||
Password: password,
|
||||
|
|
@ -312,8 +306,14 @@ func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password
|
|||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return httpResponseToError(resp, "")
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
err := registryHTTPResponseToError(resp)
|
||||
if resp.StatusCode == http.StatusUnauthorized {
|
||||
err = ErrUnauthorizedForCredentials{Err: err}
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SearchResult holds the information of each matching image
|
||||
|
|
@ -365,6 +365,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("creating new docker client: %w", err)
|
||||
}
|
||||
defer client.Close()
|
||||
client.auth = auth
|
||||
if sys != nil {
|
||||
client.registryToken = sys.DockerBearerRegistryToken
|
||||
|
|
@ -410,7 +411,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
|
|||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
err := httpResponseToError(resp, "")
|
||||
err := registryHTTPResponseToError(resp)
|
||||
logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, err)
|
||||
return nil, fmt.Errorf("couldn't search registry %q: %w", registry, err)
|
||||
}
|
||||
|
|
@ -442,8 +443,8 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
|
|||
if link == "" {
|
||||
break
|
||||
}
|
||||
linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>")
|
||||
linkURL, err := url.Parse(linkURLStr)
|
||||
linkURLPart, _, _ := strings.Cut(link, ";")
|
||||
linkURL, err := url.Parse(strings.Trim(linkURLPart, "<>"))
|
||||
if err != nil {
|
||||
return searchRes, err
|
||||
}
|
||||
|
|
@ -466,12 +467,49 @@ func (c *dockerClient) makeRequest(ctx context.Context, method, path string, hea
|
|||
return nil, err
|
||||
}
|
||||
|
||||
urlString := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
|
||||
url, err := url.Parse(urlString)
|
||||
requestURL, err := c.resolveRequestURL(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope)
|
||||
return c.makeRequestToResolvedURL(ctx, method, requestURL, headers, stream, -1, auth, extraScope)
|
||||
}
|
||||
|
||||
// resolveRequestURL turns a path for c.makeRequest into a full URL.
|
||||
// Most users should call makeRequest directly, this exists basically to make the URL available for debug logs.
|
||||
func (c *dockerClient) resolveRequestURL(path string) (*url.URL, error) {
|
||||
urlString := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
|
||||
res, err := url.Parse(urlString)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Checks if the auth headers in the response contain an indication of a failed
|
||||
// authorizdation because of an "insufficient_scope" error. If that's the case,
|
||||
// returns the required scope to be used for fetching a new token.
|
||||
func needsRetryWithUpdatedScope(err error, res *http.Response) (bool, *authScope) {
|
||||
if err == nil && res.StatusCode == http.StatusUnauthorized {
|
||||
challenges := parseAuthHeader(res.Header)
|
||||
for _, challenge := range challenges {
|
||||
if challenge.Scheme == "bearer" {
|
||||
if errmsg, ok := challenge.Parameters["error"]; ok && errmsg == "insufficient_scope" {
|
||||
if scope, ok := challenge.Parameters["scope"]; ok && scope != "" {
|
||||
if newScope, err := parseAuthScope(scope); err == nil {
|
||||
return true, newScope
|
||||
} else {
|
||||
logrus.WithFields(logrus.Fields{
|
||||
"error": err,
|
||||
"scope": scope,
|
||||
"challenge": challenge,
|
||||
}).Error("Failed to parse the authentication scope from the given challenge")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// parseRetryAfter determines the delay required by the "Retry-After" header in res and returns it,
|
||||
|
|
@ -487,9 +525,8 @@ func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Durat
|
|||
return time.Duration(num) * time.Second
|
||||
}
|
||||
// Second, check if we have an HTTP date.
|
||||
// If the delta between the date and now is positive, use it.
|
||||
// Otherwise, fall back to using the default exponential back off.
|
||||
if t, err := http.ParseTime(after); err == nil {
|
||||
// If the delta between the date and now is positive, use it.
|
||||
delta := time.Until(t)
|
||||
if delta > 0 {
|
||||
return delta
|
||||
|
|
@ -497,7 +534,6 @@ func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Durat
|
|||
logrus.Debugf("Retry-After date in the past, ignoring it")
|
||||
return fallbackDelay
|
||||
}
|
||||
// If the header contents are bogus, fall back to using the default exponential back off.
|
||||
logrus.Debugf("Invalid Retry-After format, ignoring it")
|
||||
return fallbackDelay
|
||||
}
|
||||
|
|
@ -507,12 +543,35 @@ func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Durat
|
|||
// makeRequest should generally be preferred.
|
||||
// In case of an HTTP 429 status code in the response, it may automatically retry a few times.
|
||||
// TODO(runcom): too many arguments here, use a struct
|
||||
func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method string, url *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
|
||||
func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method string, requestURL *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
|
||||
delay := backoffInitialDelay
|
||||
attempts := 0
|
||||
for {
|
||||
res, err := c.makeRequestToResolvedURLOnce(ctx, method, url, headers, stream, streamLen, auth, extraScope)
|
||||
res, err := c.makeRequestToResolvedURLOnce(ctx, method, requestURL, headers, stream, streamLen, auth, extraScope)
|
||||
attempts++
|
||||
|
||||
// By default we use pre-defined scopes per operation. In
|
||||
// certain cases, this can fail when our authentication is
|
||||
// insufficient, then we might be getting an error back with a
|
||||
// Www-Authenticate Header indicating an insufficient scope.
|
||||
//
|
||||
// Check for that and update the client challenges to retry after
|
||||
// requesting a new token
|
||||
//
|
||||
// We only try this on the first attempt, to not overload an
|
||||
// already struggling server.
|
||||
// We also cannot retry with a body (stream != nil) as stream
|
||||
// was already read
|
||||
if attempts == 1 && stream == nil && auth != noAuth {
|
||||
if retry, newScope := needsRetryWithUpdatedScope(err, res); retry {
|
||||
logrus.Debug("Detected insufficient_scope error, will retry request with updated scope")
|
||||
// Note: This retry ignores extraScope. That’s, strictly speaking, incorrect, but we don’t currently
|
||||
// expect the insufficient_scope errors to happen for those callers. If that changes, we can add support
|
||||
// for more than one extra scope.
|
||||
res, err = c.makeRequestToResolvedURLOnce(ctx, method, requestURL, headers, stream, streamLen, auth, newScope)
|
||||
extraScope = newScope
|
||||
}
|
||||
}
|
||||
if res == nil || res.StatusCode != http.StatusTooManyRequests || // Only retry on StatusTooManyRequests, success or other failure is returned to caller immediately
|
||||
stream != nil || // We can't retry with a body (which is not restartable in the general case)
|
||||
attempts == backoffNumIterations {
|
||||
|
|
@ -525,14 +584,14 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method stri
|
|||
if delay > backoffMaxDelay {
|
||||
delay = backoffMaxDelay
|
||||
}
|
||||
logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", url.Redacted(), delay.Seconds())
|
||||
logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", requestURL.Redacted(), delay.Seconds())
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
case <-time.After(delay):
|
||||
// Nothing
|
||||
}
|
||||
delay = delay * 2 // exponential back off
|
||||
delay *= 2 // If the registry does not specify a delay, back off exponentially.
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -540,8 +599,8 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method stri
|
|||
// streamLen, if not -1, specifies the length of the data expected on stream.
|
||||
// makeRequest should generally be preferred.
|
||||
// Note that no exponential back off is performed when receiving an http 429 status code.
|
||||
func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method string, url *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, method, url.String(), stream)
|
||||
func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method string, resolvedURL *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, method, resolvedURL.String(), stream)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -560,7 +619,7 @@ func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method
|
|||
return nil, err
|
||||
}
|
||||
}
|
||||
logrus.Debugf("%s %s", method, url.Redacted())
|
||||
logrus.Debugf("%s %s", method, resolvedURL.Redacted())
|
||||
res, err := c.client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -592,8 +651,18 @@ func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope
|
|||
cacheKey := ""
|
||||
scopes := []authScope{c.scope}
|
||||
if extraScope != nil {
|
||||
// Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons).
|
||||
cacheKey = fmt.Sprintf("%s:%s", extraScope.remoteName, extraScope.actions)
|
||||
// Using ':' as a separator here is unambiguous because getBearerToken below
|
||||
// uses the same separator when formatting a remote request (and because
|
||||
// repository names that we create can't contain colons, and extraScope values
|
||||
// coming from a server come from `parseAuthScope`, which also splits on colons).
|
||||
cacheKey = fmt.Sprintf("%s:%s:%s", extraScope.resourceType, extraScope.remoteName, extraScope.actions)
|
||||
if colonCount := strings.Count(cacheKey, ":"); colonCount != 2 {
|
||||
return fmt.Errorf(
|
||||
"Internal error: there must be exactly 2 colons in the cacheKey ('%s') but got %d",
|
||||
cacheKey,
|
||||
colonCount,
|
||||
)
|
||||
}
|
||||
scopes = append(scopes, *extraScope)
|
||||
}
|
||||
var token bearerToken
|
||||
|
|
@ -648,9 +717,10 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
|
|||
if service, ok := challenge.Parameters["service"]; ok && service != "" {
|
||||
params.Add("service", service)
|
||||
}
|
||||
|
||||
for _, scope := range scopes {
|
||||
if scope.remoteName != "" && scope.actions != "" {
|
||||
params.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
|
||||
if scope.resourceType != "" && scope.remoteName != "" && scope.actions != "" {
|
||||
params.Add("scope", fmt.Sprintf("%s:%s:%s", scope.resourceType, scope.remoteName, scope.actions))
|
||||
}
|
||||
}
|
||||
params.Add("grant_type", "refresh_token")
|
||||
|
|
@ -700,8 +770,8 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
|
|||
}
|
||||
|
||||
for _, scope := range scopes {
|
||||
if scope.remoteName != "" && scope.actions != "" {
|
||||
params.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
|
||||
if scope.resourceType != "" && scope.remoteName != "" && scope.actions != "" {
|
||||
params.Add("scope", fmt.Sprintf("%s:%s:%s", scope.resourceType, scope.remoteName, scope.actions))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -742,19 +812,19 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
|
|||
c.client = &http.Client{Transport: tr}
|
||||
|
||||
ping := func(scheme string) error {
|
||||
url, err := url.Parse(fmt.Sprintf(resolvedPingV2URL, scheme, c.registry))
|
||||
pingURL, err := url.Parse(fmt.Sprintf(resolvedPingV2URL, scheme, c.registry))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
|
||||
resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, pingURL, nil, nil, -1, noAuth, nil)
|
||||
if err != nil {
|
||||
logrus.Debugf("Ping %s err %s (%#v)", url.Redacted(), err.Error(), err)
|
||||
logrus.Debugf("Ping %s err %s (%#v)", pingURL.Redacted(), err.Error(), err)
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
logrus.Debugf("Ping %s status %d", url.Redacted(), resp.StatusCode)
|
||||
logrus.Debugf("Ping %s status %d", pingURL.Redacted(), resp.StatusCode)
|
||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
|
||||
return httpResponseToError(resp, "")
|
||||
return registryHTTPResponseToError(resp)
|
||||
}
|
||||
c.challenges = parseAuthHeader(resp.Header)
|
||||
c.scheme = scheme
|
||||
|
|
@ -772,17 +842,17 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
|
|||
}
|
||||
// best effort to understand if we're talking to a V1 registry
|
||||
pingV1 := func(scheme string) bool {
|
||||
url, err := url.Parse(fmt.Sprintf(resolvedPingV1URL, scheme, c.registry))
|
||||
pingURL, err := url.Parse(fmt.Sprintf(resolvedPingV1URL, scheme, c.registry))
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
|
||||
resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, pingURL, nil, nil, -1, noAuth, nil)
|
||||
if err != nil {
|
||||
logrus.Debugf("Ping %s err %s (%#v)", url.Redacted(), err.Error(), err)
|
||||
logrus.Debugf("Ping %s err %s (%#v)", pingURL.Redacted(), err.Error(), err)
|
||||
return false
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
logrus.Debugf("Ping %s status %d", url.Redacted(), resp.StatusCode)
|
||||
logrus.Debugf("Ping %s status %d", pingURL.Redacted(), resp.StatusCode)
|
||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
|
||||
return false
|
||||
}
|
||||
|
|
@ -840,14 +910,14 @@ func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.R
|
|||
return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
|
||||
}
|
||||
for _, u := range urls {
|
||||
url, err := url.Parse(u)
|
||||
if err != nil || (url.Scheme != "http" && url.Scheme != "https") {
|
||||
blobURL, err := url.Parse(u)
|
||||
if err != nil || (blobURL.Scheme != "http" && blobURL.Scheme != "https") {
|
||||
continue // unsupported url. skip this url.
|
||||
}
|
||||
// NOTE: we must not authenticate on additional URLs as those
|
||||
// can be abused to leak credentials or tokens. Please
|
||||
// refer to CVE-2020-15157 for more information.
|
||||
resp, err = c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
|
||||
resp, err = c.makeRequestToResolvedURL(ctx, http.MethodGet, blobURL, nil, nil, -1, noAuth, nil)
|
||||
if err == nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
err = fmt.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode))
|
||||
|
|
@ -894,15 +964,23 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty
|
|||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
if err := httpResponseToError(res, "Error fetching blob"); err != nil {
|
||||
if res.StatusCode != http.StatusOK {
|
||||
err := registryHTTPResponseToError(res)
|
||||
res.Body.Close()
|
||||
return nil, 0, fmt.Errorf("fetching blob: %w", err)
|
||||
}
|
||||
cache.RecordKnownLocation(ref.Transport(), bicTransportScope(ref), info.Digest, newBICLocationReference(ref))
|
||||
blobSize := getBlobSize(res)
|
||||
|
||||
reconnectingReader, err := newBodyReader(ctx, c, path, res.Body)
|
||||
if err != nil {
|
||||
res.Body.Close()
|
||||
return nil, 0, err
|
||||
}
|
||||
cache.RecordKnownLocation(ref.Transport(), bicTransportScope(ref), info.Digest, newBICLocationReference(ref))
|
||||
return res.Body, getBlobSize(res), nil
|
||||
return reconnectingReader, blobSize, nil
|
||||
}
|
||||
|
||||
// getOCIDescriptorContents returns the contents a blob spcified by descriptor in ref, which must fit within limit.
|
||||
// getOCIDescriptorContents returns the contents a blob specified by descriptor in ref, which must fit within limit.
|
||||
func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerReference, desc imgspecv1.Descriptor, maxSize int, cache types.BlobInfoCache) ([]byte, error) {
|
||||
// Note that this copies all kinds of attachments: attestations, and whatever else is there,
|
||||
// not just signatures. We leave the signature consumers to decide based on the MIME type.
|
||||
|
|
@ -911,7 +989,7 @@ func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerR
|
|||
return nil, err
|
||||
}
|
||||
defer reader.Close()
|
||||
payload, err := iolimits.ReadAtMost(reader, iolimits.MaxSignatureBodySize)
|
||||
payload, err := iolimits.ReadAtMost(reader, maxSize)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading blob %s in %s: %w", desc.Digest.String(), ref.ref.Name(), err)
|
||||
}
|
||||
|
|
@ -920,16 +998,22 @@ func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerR
|
|||
|
||||
// isManifestUnknownError returns true iff err from fetchManifest is a “manifest unknown” error.
|
||||
func isManifestUnknownError(err error) bool {
|
||||
var errs errcode.Errors
|
||||
if !errors.As(err, &errs) || len(errs) == 0 {
|
||||
return false
|
||||
// docker/distribution, and as defined in the spec
|
||||
var ec errcode.ErrorCoder
|
||||
if errors.As(err, &ec) && ec.ErrorCode() == v2.ErrorCodeManifestUnknown {
|
||||
return true
|
||||
}
|
||||
err = errs[0]
|
||||
ec, ok := err.(errcode.ErrorCoder)
|
||||
if !ok {
|
||||
return false
|
||||
// registry.redhat.io as of October 2022
|
||||
var e errcode.Error
|
||||
if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && e.Message == "Not Found" {
|
||||
return true
|
||||
}
|
||||
return ec.ErrorCode() == v2.ErrorCodeManifestUnknown
|
||||
// ALSO registry.redhat.io as of October 2022
|
||||
var unexpected *unexpectedHTTPResponseError
|
||||
if errors.As(err, &unexpected) && unexpected.StatusCode == http.StatusNotFound && bytes.Contains(unexpected.Response, []byte("Not found")) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// getSigstoreAttachmentManifest loads and parses the manifest for sigstore attachments for
|
||||
|
|
@ -975,9 +1059,8 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe
|
|||
return nil, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
if res.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("downloading signatures for %s in %s: %w", manifestDigest, ref.ref.Name(), clientLib.HandleErrorResponse(res))
|
||||
return nil, fmt.Errorf("downloading signatures for %s in %s: %w", manifestDigest, ref.ref.Name(), registryHTTPResponseToError(res))
|
||||
}
|
||||
|
||||
body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureListBodySize)
|
||||
|
|
@ -996,3 +1079,11 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe
|
|||
func sigstoreAttachmentTag(d digest.Digest) string {
|
||||
return strings.Replace(d.String(), ":", "-", 1) + ".sig"
|
||||
}
|
||||
|
||||
// Close removes resources associated with an initialized dockerClient, if any.
|
||||
func (c *dockerClient) Close() error {
|
||||
if c.client != nil {
|
||||
c.client.CloseIdleConnections()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
10
vendor/github.com/containers/image/v5/docker/docker_image.go
generated
vendored
10
vendor/github.com/containers/image/v5/docker/docker_image.go
generated
vendored
|
|
@ -68,6 +68,7 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create client: %w", err)
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
tags := make([]string, 0)
|
||||
|
||||
|
|
@ -77,8 +78,8 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
|
|||
return nil, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if err := httpResponseToError(res, "fetching tags list"); err != nil {
|
||||
return nil, err
|
||||
if res.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("fetching tags list: %w", registryHTTPResponseToError(res))
|
||||
}
|
||||
|
||||
var tagsHolder struct {
|
||||
|
|
@ -94,8 +95,8 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
|
|||
break
|
||||
}
|
||||
|
||||
linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>")
|
||||
linkURL, err := url.Parse(linkURLStr)
|
||||
linkURLPart, _, _ := strings.Cut(link, ";")
|
||||
linkURL, err := url.Parse(strings.Trim(linkURLPart, "<>"))
|
||||
if err != nil {
|
||||
return tags, err
|
||||
}
|
||||
|
|
@ -136,6 +137,7 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef
|
|||
if err != nil {
|
||||
return "", fmt.Errorf("failed to create client: %w", err)
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
path := fmt.Sprintf(manifestPath, reference.Path(dr.ref), tagOrDigest)
|
||||
headers := map[string][]string{
|
||||
|
|
|
|||
150
vendor/github.com/containers/image/v5/docker/docker_image_dest.go
generated
vendored
150
vendor/github.com/containers/image/v5/docker/docker_image_dest.go
generated
vendored
|
|
@ -21,6 +21,7 @@ import (
|
|||
"github.com/containers/image/v5/internal/iolimits"
|
||||
"github.com/containers/image/v5/internal/private"
|
||||
"github.com/containers/image/v5/internal/putblobdigest"
|
||||
"github.com/containers/image/v5/internal/set"
|
||||
"github.com/containers/image/v5/internal/signature"
|
||||
"github.com/containers/image/v5/internal/streamdigest"
|
||||
"github.com/containers/image/v5/internal/uploadreader"
|
||||
|
|
@ -32,6 +33,8 @@ import (
|
|||
"github.com/opencontainers/go-digest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
type dockerImageDestination struct {
|
||||
|
|
@ -90,7 +93,7 @@ func (d *dockerImageDestination) Reference() types.ImageReference {
|
|||
|
||||
// Close removes resources associated with an initialized ImageDestination, if any.
|
||||
func (d *dockerImageDestination) Close() error {
|
||||
return nil
|
||||
return d.c.Close()
|
||||
}
|
||||
|
||||
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
|
||||
|
|
@ -129,8 +132,8 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) {
|
|||
// inputInfo.MediaType describes the blob format, if known.
|
||||
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
|
||||
// to any other readers for download using the supplied digest.
|
||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
||||
func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
|
||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
|
||||
func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
|
||||
// If requested, precompute the blob digest to prevent uploading layers that already exist on the registry.
|
||||
// This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests,
|
||||
// the source blob is uncompressed, and the destination blob is being compressed "on the fly".
|
||||
|
|
@ -138,7 +141,7 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
|
|||
logrus.Debugf("Precomputing digest layer for %s", reference.Path(d.ref.ref))
|
||||
streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.c.sys, stream, &inputInfo)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
defer cleanup()
|
||||
stream = streamCopy
|
||||
|
|
@ -149,10 +152,10 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
|
|||
// Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
|
||||
haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, inputInfo, options.Cache)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
if haveBlob {
|
||||
return reusedInfo, nil
|
||||
return private.UploadedBlob{Digest: reusedInfo.Digest, Size: reusedInfo.Size}, nil
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -161,16 +164,16 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
|
|||
logrus.Debugf("Uploading %s", uploadPath)
|
||||
res, err := d.c.makeRequest(ctx, http.MethodPost, uploadPath, nil, nil, v2Auth, nil)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != http.StatusAccepted {
|
||||
logrus.Debugf("Error initiating layer upload, response %#v", *res)
|
||||
return types.BlobInfo{}, fmt.Errorf("initiating layer upload to %s in %s: %w", uploadPath, d.c.registry, registryHTTPResponseToError(res))
|
||||
return private.UploadedBlob{}, fmt.Errorf("initiating layer upload to %s in %s: %w", uploadPath, d.c.registry, registryHTTPResponseToError(res))
|
||||
}
|
||||
uploadLocation, err := res.Location()
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, fmt.Errorf("determining upload URL: %w", err)
|
||||
return private.UploadedBlob{}, fmt.Errorf("determining upload URL: %w", err)
|
||||
}
|
||||
|
||||
digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
|
||||
|
|
@ -198,7 +201,7 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
|
|||
return uploadLocation, nil
|
||||
}()
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
blobDigest := digester.Digest()
|
||||
|
||||
|
|
@ -209,17 +212,17 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
|
|||
uploadLocation.RawQuery = locationQuery.Encode()
|
||||
res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != http.StatusCreated {
|
||||
logrus.Debugf("Error uploading layer, response %#v", *res)
|
||||
return types.BlobInfo{}, fmt.Errorf("uploading layer to %s: %w", uploadLocation, registryHTTPResponseToError(res))
|
||||
return private.UploadedBlob{}, fmt.Errorf("uploading layer to %s: %w", uploadLocation, registryHTTPResponseToError(res))
|
||||
}
|
||||
|
||||
logrus.Debugf("Upload of layer %s complete", blobDigest)
|
||||
options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), blobDigest, newBICLocationReference(d.ref))
|
||||
return types.BlobInfo{Digest: blobDigest, Size: sizeCounter.size}, nil
|
||||
return private.UploadedBlob{Digest: blobDigest, Size: sizeCounter.size}, nil
|
||||
}
|
||||
|
||||
// blobExists returns true iff repo contains a blob with digest, and if so, also its size.
|
||||
|
|
@ -244,7 +247,7 @@ func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.
|
|||
logrus.Debugf("... not present")
|
||||
return false, -1, nil
|
||||
default:
|
||||
return false, -1, fmt.Errorf("failed to read from destination repository %s: %d (%s)", reference.Path(d.ref.ref), res.StatusCode, http.StatusText(res.StatusCode))
|
||||
return false, -1, fmt.Errorf("checking whether a blob %s exists in %s: %w", digest, repo.Name(), registryHTTPResponseToError(res))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -296,34 +299,32 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
|
|||
// tryReusingExactBlob is a subset of TryReusingBlob which _only_ looks for exactly the specified
|
||||
// blob in the current repository, with no cross-repo reuse or mounting; cache may be updated, it is not read.
|
||||
// The caller must ensure info.Digest is set.
|
||||
func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache blobinfocache.BlobInfoCache2) (bool, types.BlobInfo, error) {
|
||||
func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache blobinfocache.BlobInfoCache2) (bool, private.ReusedBlob, error) {
|
||||
exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
|
||||
if err != nil {
|
||||
return false, types.BlobInfo{}, err
|
||||
return false, private.ReusedBlob{}, err
|
||||
}
|
||||
if exists {
|
||||
cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
|
||||
return true, types.BlobInfo{Digest: info.Digest, MediaType: info.MediaType, Size: size}, nil
|
||||
return true, private.ReusedBlob{Digest: info.Digest, Size: size}, nil
|
||||
}
|
||||
return false, types.BlobInfo{}, nil
|
||||
return false, private.ReusedBlob{}, nil
|
||||
}
|
||||
|
||||
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
|
||||
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
|
||||
// info.Digest must not be empty.
|
||||
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
|
||||
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
|
||||
// reflected in the manifest that will be written.
|
||||
// If the blob has been successfully reused, returns (true, info, nil).
|
||||
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
|
||||
func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
|
||||
func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
|
||||
if info.Digest == "" {
|
||||
return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest")
|
||||
return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
|
||||
}
|
||||
|
||||
// First, check whether the blob happens to already exist at the destination.
|
||||
haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
|
||||
if err != nil {
|
||||
return false, types.BlobInfo{}, err
|
||||
return false, private.ReusedBlob{}, err
|
||||
}
|
||||
if haveBlob {
|
||||
return true, reusedInfo, nil
|
||||
|
|
@ -358,8 +359,9 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
|
|||
// Checking candidateRepo, and mounting from it, requires an
|
||||
// expanded token scope.
|
||||
extraScope := &authScope{
|
||||
remoteName: reference.Path(candidateRepo),
|
||||
actions: "pull",
|
||||
resourceType: "repository",
|
||||
remoteName: reference.Path(candidateRepo),
|
||||
actions: "pull",
|
||||
}
|
||||
// This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead.
|
||||
// But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel.
|
||||
|
|
@ -392,10 +394,14 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
|
|||
continue
|
||||
}
|
||||
|
||||
return true, types.BlobInfo{Digest: candidate.Digest, MediaType: info.MediaType, Size: size, CompressionOperation: compressionOperation, CompressionAlgorithm: compressionAlgorithm}, nil
|
||||
return true, private.ReusedBlob{
|
||||
Digest: candidate.Digest,
|
||||
Size: size,
|
||||
CompressionOperation: compressionOperation,
|
||||
CompressionAlgorithm: compressionAlgorithm}, nil
|
||||
}
|
||||
|
||||
return false, types.BlobInfo{}, nil
|
||||
return false, private.ReusedBlob{}, nil
|
||||
}
|
||||
|
||||
// PutManifest writes manifest to the destination.
|
||||
|
|
@ -406,7 +412,7 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
|
|||
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
|
||||
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
|
||||
func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
|
||||
refTail := ""
|
||||
var refTail string
|
||||
if instanceDigest != nil {
|
||||
// If the instanceDigest is provided, then use it as the refTail, because the reference,
|
||||
// whether it includes a tag or a digest, refers to the list as a whole, and not this
|
||||
|
|
@ -486,15 +492,10 @@ func successStatus(status int) bool {
|
|||
return status >= 200 && status <= 399
|
||||
}
|
||||
|
||||
// isManifestInvalidError returns true iff err from client.HandleErrorResponse is a “manifest invalid” error.
|
||||
// isManifestInvalidError returns true iff err from registryHTTPResponseToError is a “manifest invalid” error.
|
||||
func isManifestInvalidError(err error) bool {
|
||||
errors, ok := err.(errcode.Errors)
|
||||
if !ok || len(errors) == 0 {
|
||||
return false
|
||||
}
|
||||
err = errors[0]
|
||||
ec, ok := err.(errcode.ErrorCoder)
|
||||
if !ok {
|
||||
var ec errcode.ErrorCoder
|
||||
if ok := errors.As(err, &ec); !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
|
|
@ -584,8 +585,8 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures []signature
|
|||
|
||||
// NOTE: Keep this in sync with docs/signature-protocols.md!
|
||||
for i, signature := range signatures {
|
||||
url := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
|
||||
err := d.putOneSignature(url, signature)
|
||||
sigURL := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
|
||||
err := d.putOneSignature(sigURL, signature)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -596,8 +597,8 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures []signature
|
|||
// is enough for dockerImageSource to stop looking for other signatures, so that
|
||||
// is sufficient.
|
||||
for i := len(signatures); ; i++ {
|
||||
url := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
|
||||
missing, err := d.c.deleteOneSignature(url)
|
||||
sigURL := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
|
||||
missing, err := d.c.deleteOneSignature(sigURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -609,13 +610,13 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures []signature
|
|||
return nil
|
||||
}
|
||||
|
||||
// putOneSignature stores sig to url.
|
||||
// putOneSignature stores sig to sigURL.
|
||||
// NOTE: Keep this in sync with docs/signature-protocols.md!
|
||||
func (d *dockerImageDestination) putOneSignature(url *url.URL, sig signature.Signature) error {
|
||||
switch url.Scheme {
|
||||
func (d *dockerImageDestination) putOneSignature(sigURL *url.URL, sig signature.Signature) error {
|
||||
switch sigURL.Scheme {
|
||||
case "file":
|
||||
logrus.Debugf("Writing to %s", url.Path)
|
||||
err := os.MkdirAll(filepath.Dir(url.Path), 0755)
|
||||
logrus.Debugf("Writing to %s", sigURL.Path)
|
||||
err := os.MkdirAll(filepath.Dir(sigURL.Path), 0755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -623,16 +624,16 @@ func (d *dockerImageDestination) putOneSignature(url *url.URL, sig signature.Sig
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.WriteFile(url.Path, blob, 0644)
|
||||
err = os.WriteFile(sigURL.Path, blob, 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
case "http", "https":
|
||||
return fmt.Errorf("Writing directly to a %s lookaside %s is not supported. Configure a lookaside-staging: location", url.Scheme, url.Redacted())
|
||||
return fmt.Errorf("Writing directly to a %s lookaside %s is not supported. Configure a lookaside-staging: location", sigURL.Scheme, sigURL.Redacted())
|
||||
default:
|
||||
return fmt.Errorf("Unsupported scheme when writing signature to %s", url.Redacted())
|
||||
return fmt.Errorf("Unsupported scheme when writing signature to %s", sigURL.Redacted())
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -643,7 +644,7 @@ func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context.
|
|||
|
||||
ociManifest, err := d.c.getSigstoreAttachmentManifest(ctx, d.ref, manifestDigest)
|
||||
if err != nil {
|
||||
return nil
|
||||
return err
|
||||
}
|
||||
var ociConfig imgspecv1.Image // Most fields empty by default
|
||||
if ociManifest == nil {
|
||||
|
|
@ -652,6 +653,7 @@ func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context.
|
|||
Digest: "", // We will fill this in later.
|
||||
Size: 0,
|
||||
}, nil)
|
||||
ociConfig.RootFS.Type = "layers"
|
||||
} else {
|
||||
logrus.Debugf("Fetching sigstore attachment config %s", ociManifest.Config.Digest.String())
|
||||
// We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount configs.
|
||||
|
|
@ -714,13 +716,13 @@ func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context.
|
|||
LayerIndex: nil,
|
||||
})
|
||||
if err != nil {
|
||||
return nil
|
||||
return err
|
||||
}
|
||||
ociManifest.Config = configDesc
|
||||
|
||||
manifestBlob, err := ociManifest.Serialize()
|
||||
if err != nil {
|
||||
return nil
|
||||
return err
|
||||
}
|
||||
logrus.Debugf("Uploading sigstore attachment manifest")
|
||||
return d.uploadManifest(ctx, manifestBlob, sigstoreAttachmentTag(manifestDigest))
|
||||
|
|
@ -734,24 +736,15 @@ func layerMatchesSigstoreSignature(layer imgspecv1.Descriptor, mimeType string,
|
|||
// But right now we don’t want to deal with corner cases like bad digest formats
|
||||
// or unavailable algorithms; in the worst case we end up with duplicate signature
|
||||
// entries.
|
||||
layer.Digest.String() != digest.FromBytes(payloadBlob).String() {
|
||||
layer.Digest.String() != digest.FromBytes(payloadBlob).String() ||
|
||||
!maps.Equal(layer.Annotations, annotations) {
|
||||
return false
|
||||
}
|
||||
if len(layer.Annotations) != len(annotations) {
|
||||
return false
|
||||
}
|
||||
for k, v1 := range layer.Annotations {
|
||||
if v2, ok := annotations[k]; !ok || v1 != v2 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
// All annotations in layer exist in sig, and the number of annotations is the same, so all annotations
|
||||
// in sig also exist in layer.
|
||||
return true
|
||||
}
|
||||
|
||||
// putBlobBytesAsOCI uploads a blob with the specified contents, and returns an appropriate
|
||||
// OCI descriptior.
|
||||
// OCI descriptor.
|
||||
func (d *dockerImageDestination) putBlobBytesAsOCI(ctx context.Context, contents []byte, mimeType string, options private.PutBlobOptions) (imgspecv1.Descriptor, error) {
|
||||
blobDigest := digest.FromBytes(contents)
|
||||
info, err := d.PutBlobWithOptions(ctx, bytes.NewReader(contents),
|
||||
|
|
@ -770,23 +763,23 @@ func (d *dockerImageDestination) putBlobBytesAsOCI(ctx context.Context, contents
|
|||
}, nil
|
||||
}
|
||||
|
||||
// deleteOneSignature deletes a signature from url, if it exists.
|
||||
// deleteOneSignature deletes a signature from sigURL, if it exists.
|
||||
// If it successfully determines that the signature does not exist, returns (true, nil)
|
||||
// NOTE: Keep this in sync with docs/signature-protocols.md!
|
||||
func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error) {
|
||||
switch url.Scheme {
|
||||
func (c *dockerClient) deleteOneSignature(sigURL *url.URL) (missing bool, err error) {
|
||||
switch sigURL.Scheme {
|
||||
case "file":
|
||||
logrus.Debugf("Deleting %s", url.Path)
|
||||
err := os.Remove(url.Path)
|
||||
logrus.Debugf("Deleting %s", sigURL.Path)
|
||||
err := os.Remove(sigURL.Path)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
return true, nil
|
||||
}
|
||||
return false, err
|
||||
|
||||
case "http", "https":
|
||||
return false, fmt.Errorf("Writing directly to a %s lookaside %s is not supported. Configure a lookaside-staging: location", url.Scheme, url.Redacted())
|
||||
return false, fmt.Errorf("Writing directly to a %s lookaside %s is not supported. Configure a lookaside-staging: location", sigURL.Scheme, sigURL.Redacted())
|
||||
default:
|
||||
return false, fmt.Errorf("Unsupported scheme when deleting signature from %s", url.Redacted())
|
||||
return false, fmt.Errorf("Unsupported scheme when deleting signature from %s", sigURL.Redacted())
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -806,12 +799,11 @@ func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
existingSigNames := map[string]struct{}{}
|
||||
existingSigNames := set.New[string]()
|
||||
for _, sig := range existingSignatures.Signatures {
|
||||
existingSigNames[sig.Name] = struct{}{}
|
||||
existingSigNames.Add(sig.Name)
|
||||
}
|
||||
|
||||
sigExists:
|
||||
for _, newSigWithFormat := range signatures {
|
||||
newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning)
|
||||
if !ok {
|
||||
|
|
@ -819,10 +811,10 @@ sigExists:
|
|||
}
|
||||
newSig := newSigSimple.UntrustedSignature()
|
||||
|
||||
for _, existingSig := range existingSignatures.Signatures {
|
||||
if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
|
||||
continue sigExists
|
||||
}
|
||||
if slices.ContainsFunc(existingSignatures.Signatures, func(existingSig extensionSignature) bool {
|
||||
return existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig)
|
||||
}) {
|
||||
continue
|
||||
}
|
||||
|
||||
// The API expect us to invent a new unique name. This is racy, but hopefully good enough.
|
||||
|
|
@ -834,7 +826,7 @@ sigExists:
|
|||
return fmt.Errorf("generating random signature len %d: %w", n, err)
|
||||
}
|
||||
signatureName = fmt.Sprintf("%s@%032x", manifestDigest.String(), randBytes)
|
||||
if _, ok := existingSigNames[signatureName]; !ok {
|
||||
if !existingSigNames.Contains(signatureName) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
|
|
|||
87
vendor/github.com/containers/image/v5/docker/docker_image_src.go
generated
vendored
87
vendor/github.com/containers/image/v5/docker/docker_image_src.go
generated
vendored
|
|
@ -10,7 +10,6 @@ import (
|
|||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
|
|
@ -24,10 +23,15 @@ import (
|
|||
"github.com/containers/image/v5/pkg/blobinfocache/none"
|
||||
"github.com/containers/image/v5/pkg/sysregistriesv2"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage/pkg/regexp"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// maxLookasideSignatures is an arbitrary limit for the total number of signatures we would try to read from a lookaside server,
|
||||
// even if it were broken or malicious and it continued serving an enormous number of items.
|
||||
const maxLookasideSignatures = 128
|
||||
|
||||
type dockerImageSource struct {
|
||||
impl.Compat
|
||||
impl.PropertyMethodsInitialize
|
||||
|
|
@ -149,6 +153,7 @@ func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logica
|
|||
s.Compat = impl.AddCompat(s)
|
||||
|
||||
if err := s.ensureManifestIsLoaded(ctx); err != nil {
|
||||
client.Close()
|
||||
return nil, err
|
||||
}
|
||||
return s, nil
|
||||
|
|
@ -162,7 +167,7 @@ func (s *dockerImageSource) Reference() types.ImageReference {
|
|||
|
||||
// Close removes resources associated with an initialized ImageSource, if any.
|
||||
func (s *dockerImageSource) Close() error {
|
||||
return nil
|
||||
return s.c.Close()
|
||||
}
|
||||
|
||||
// simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1)
|
||||
|
|
@ -246,7 +251,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error,
|
|||
currentOffset += toSkip
|
||||
}
|
||||
s := signalCloseReader{
|
||||
closed: make(chan interface{}),
|
||||
closed: make(chan struct{}),
|
||||
stream: io.NopCloser(io.LimitReader(body, int64(c.Length))),
|
||||
consumeStream: true,
|
||||
}
|
||||
|
|
@ -288,7 +293,7 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read
|
|||
return
|
||||
}
|
||||
s := signalCloseReader{
|
||||
closed: make(chan interface{}),
|
||||
closed: make(chan struct{}),
|
||||
stream: p,
|
||||
}
|
||||
streams <- s
|
||||
|
|
@ -299,7 +304,7 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read
|
|||
}
|
||||
}
|
||||
|
||||
var multipartByteRangesRe = regexp.MustCompile("multipart/byteranges; boundary=([A-Za-z-0-9:]+)")
|
||||
var multipartByteRangesRe = regexp.Delayed("multipart/byteranges; boundary=([A-Za-z-0-9:]+)")
|
||||
|
||||
func parseMediaType(contentType string) (string, map[string]string, error) {
|
||||
mediaType, params, err := mime.ParseMediaType(contentType)
|
||||
|
|
@ -331,7 +336,7 @@ func parseMediaType(contentType string) (string, map[string]string, error) {
|
|||
func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
|
||||
headers := make(map[string][]string)
|
||||
|
||||
var rangeVals []string
|
||||
rangeVals := make([]string, 0, len(chunks))
|
||||
for _, c := range chunks {
|
||||
rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1))
|
||||
}
|
||||
|
|
@ -372,12 +377,9 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo,
|
|||
res.Body.Close()
|
||||
return nil, nil, private.BadPartialRequestError{Status: res.Status}
|
||||
default:
|
||||
err := httpResponseToError(res, "Error fetching partial blob")
|
||||
if err == nil {
|
||||
err = fmt.Errorf("invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
|
||||
}
|
||||
err := registryHTTPResponseToError(res)
|
||||
res.Body.Close()
|
||||
return nil, nil, err
|
||||
return nil, nil, fmt.Errorf("fetching partial blob: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -451,8 +453,12 @@ func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, inst
|
|||
// NOTE: Keep this in sync with docs/signature-protocols.md!
|
||||
signatures := []signature.Signature{}
|
||||
for i := 0; ; i++ {
|
||||
url := lookasideStorageURL(s.c.signatureBase, manifestDigest, i)
|
||||
signature, missing, err := s.getOneSignature(ctx, url)
|
||||
if i >= maxLookasideSignatures {
|
||||
return nil, fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures)
|
||||
}
|
||||
|
||||
sigURL := lookasideStorageURL(s.c.signatureBase, manifestDigest, i)
|
||||
signature, missing, err := s.getOneSignature(ctx, sigURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -464,14 +470,14 @@ func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, inst
|
|||
return signatures, nil
|
||||
}
|
||||
|
||||
// getOneSignature downloads one signature from url, and returns (signature, false, nil)
|
||||
// getOneSignature downloads one signature from sigURL, and returns (signature, false, nil)
|
||||
// If it successfully determines that the signature does not exist, returns (nil, true, nil).
|
||||
// NOTE: Keep this in sync with docs/signature-protocols.md!
|
||||
func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (signature.Signature, bool, error) {
|
||||
switch url.Scheme {
|
||||
func (s *dockerImageSource) getOneSignature(ctx context.Context, sigURL *url.URL) (signature.Signature, bool, error) {
|
||||
switch sigURL.Scheme {
|
||||
case "file":
|
||||
logrus.Debugf("Reading %s", url.Path)
|
||||
sigBlob, err := os.ReadFile(url.Path)
|
||||
logrus.Debugf("Reading %s", sigURL.Path)
|
||||
sigBlob, err := os.ReadFile(sigURL.Path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, true, nil
|
||||
|
|
@ -480,13 +486,13 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (
|
|||
}
|
||||
sig, err := signature.FromBlob(sigBlob)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("parsing signature %q: %w", url.Path, err)
|
||||
return nil, false, fmt.Errorf("parsing signature %q: %w", sigURL.Path, err)
|
||||
}
|
||||
return sig, false, nil
|
||||
|
||||
case "http", "https":
|
||||
logrus.Debugf("GET %s", url.Redacted())
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url.String(), nil)
|
||||
logrus.Debugf("GET %s", sigURL.Redacted())
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, sigURL.String(), nil)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
|
@ -496,22 +502,31 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (
|
|||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode == http.StatusNotFound {
|
||||
logrus.Debugf("... got status 404, as expected = end of signatures")
|
||||
return nil, true, nil
|
||||
} else if res.StatusCode != http.StatusOK {
|
||||
return nil, false, fmt.Errorf("reading signature from %s: status %d (%s)", url.Redacted(), res.StatusCode, http.StatusText(res.StatusCode))
|
||||
return nil, false, fmt.Errorf("reading signature from %s: status %d (%s)", sigURL.Redacted(), res.StatusCode, http.StatusText(res.StatusCode))
|
||||
}
|
||||
|
||||
contentType := res.Header.Get("Content-Type")
|
||||
if mimeType := simplifyContentType(contentType); mimeType == "text/html" {
|
||||
logrus.Warnf("Signature %q has Content-Type %q, unexpected for a signature", sigURL.Redacted(), contentType)
|
||||
// Don’t immediately fail; the lookaside spec does not place any requirements on Content-Type.
|
||||
// If the content really is HTML, it’s going to fail in signature.FromBlob.
|
||||
}
|
||||
|
||||
sigBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureBodySize)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
sig, err := signature.FromBlob(sigBlob)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("parsing signature %s: %w", url.Redacted(), err)
|
||||
return nil, false, fmt.Errorf("parsing signature %s: %w", sigURL.Redacted(), err)
|
||||
}
|
||||
return sig, false, nil
|
||||
|
||||
default:
|
||||
return nil, false, fmt.Errorf("Unsupported scheme when reading signature from %s", url.Redacted())
|
||||
return nil, false, fmt.Errorf("Unsupported scheme when reading signature from %s", sigURL.Redacted())
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -591,6 +606,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
headers := map[string][]string{
|
||||
"Accept": manifest.DefaultRequestedManifestMIMETypes,
|
||||
|
|
@ -605,16 +621,16 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
|
|||
return err
|
||||
}
|
||||
defer get.Body.Close()
|
||||
manifestBody, err := iolimits.ReadAtMost(get.Body, iolimits.MaxManifestBodySize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch get.StatusCode {
|
||||
case http.StatusOK:
|
||||
case http.StatusNotFound:
|
||||
return fmt.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref)
|
||||
default:
|
||||
return fmt.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status)
|
||||
return fmt.Errorf("deleting %v: %w", ref.ref, registryHTTPResponseToError(get))
|
||||
}
|
||||
manifestBody, err := iolimits.ReadAtMost(get.Body, iolimits.MaxManifestBodySize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
manifestDigest, err := manifest.Digest(manifestBody)
|
||||
|
|
@ -630,18 +646,13 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
|
|||
return err
|
||||
}
|
||||
defer delete.Body.Close()
|
||||
|
||||
body, err := iolimits.ReadAtMost(delete.Body, iolimits.MaxErrorBodySize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if delete.StatusCode != http.StatusAccepted {
|
||||
return fmt.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status)
|
||||
return fmt.Errorf("deleting %v: %w", ref.ref, registryHTTPResponseToError(delete))
|
||||
}
|
||||
|
||||
for i := 0; ; i++ {
|
||||
url := lookasideStorageURL(c.signatureBase, manifestDigest, i)
|
||||
missing, err := c.deleteOneSignature(url)
|
||||
sigURL := lookasideStorageURL(c.signatureBase, manifestDigest, i)
|
||||
missing, err := c.deleteOneSignature(sigURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -757,7 +768,7 @@ func makeBufferedNetworkReader(stream io.ReadCloser, nBuffers, bufferSize uint)
|
|||
}
|
||||
|
||||
type signalCloseReader struct {
|
||||
closed chan interface{}
|
||||
closed chan struct{}
|
||||
stream io.ReadCloser
|
||||
consumeStream bool
|
||||
}
|
||||
|
|
|
|||
2
vendor/github.com/containers/image/v5/docker/docker_transport.go
generated
vendored
2
vendor/github.com/containers/image/v5/docker/docker_transport.go
generated
vendored
|
|
@ -92,7 +92,7 @@ func (ref dockerReference) Transport() types.ImageTransport {
|
|||
// StringWithinTransport returns a string representation of the reference, which MUST be such that
|
||||
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
|
||||
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
|
||||
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
|
||||
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
|
||||
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
|
||||
func (ref dockerReference) StringWithinTransport() string {
|
||||
return "//" + reference.FamiliarString(ref.ref)
|
||||
|
|
|
|||
48
vendor/github.com/containers/image/v5/docker/errors.go
generated
vendored
48
vendor/github.com/containers/image/v5/docker/errors.go
generated
vendored
|
|
@ -5,7 +5,8 @@ import (
|
|||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/docker/distribution/registry/client"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
@ -35,11 +36,11 @@ func httpResponseToError(res *http.Response, context string) error {
|
|||
case http.StatusTooManyRequests:
|
||||
return ErrTooManyRequests
|
||||
case http.StatusUnauthorized:
|
||||
err := client.HandleErrorResponse(res)
|
||||
err := registryHTTPResponseToError(res)
|
||||
return ErrUnauthorizedForCredentials{Err: err}
|
||||
default:
|
||||
if context != "" {
|
||||
context = context + ": "
|
||||
context += ": "
|
||||
}
|
||||
return fmt.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode))
|
||||
}
|
||||
|
|
@ -48,13 +49,48 @@ func httpResponseToError(res *http.Response, context string) error {
|
|||
// registryHTTPResponseToError creates a Go error from an HTTP error response of a docker/distribution
|
||||
// registry
|
||||
func registryHTTPResponseToError(res *http.Response) error {
|
||||
err := client.HandleErrorResponse(res)
|
||||
if e, ok := err.(*client.UnexpectedHTTPResponseError); ok {
|
||||
err := handleErrorResponse(res)
|
||||
// len(errs) == 0 should never be returned by handleErrorResponse; if it does, we don't modify it and let the caller report it as is.
|
||||
if errs, ok := err.(errcode.Errors); ok && len(errs) > 0 {
|
||||
// The docker/distribution registry implementation almost never returns
|
||||
// more than one error in the HTTP body; it seems there is only one
|
||||
// possible instance, where the second error reports a cleanup failure
|
||||
// we don't really care about.
|
||||
//
|
||||
// The only _common_ case where a multi-element error is returned is
|
||||
// created by the handleErrorResponse parser when OAuth authorization fails:
|
||||
// the first element contains errors from a WWW-Authenticate header, the second
|
||||
// element contains errors from the response body.
|
||||
//
|
||||
// In that case the first one is currently _slightly_ more informative (ErrorCodeUnauthorized
|
||||
// for invalid tokens, ErrorCodeDenied for permission denied with a valid token
|
||||
// for the first error, vs. ErrorCodeUnauthorized for both cases for the second error.)
|
||||
//
|
||||
// Also, docker/docker similarly only logs the other errors and returns the
|
||||
// first one.
|
||||
if len(errs) > 1 {
|
||||
logrus.Debugf("Discarding non-primary errors:")
|
||||
for _, err := range errs[1:] {
|
||||
logrus.Debugf(" %s", err.Error())
|
||||
}
|
||||
}
|
||||
err = errs[0]
|
||||
}
|
||||
switch e := err.(type) {
|
||||
case *unexpectedHTTPResponseError:
|
||||
response := string(e.Response)
|
||||
if len(response) > 50 {
|
||||
response = response[:50] + "..."
|
||||
}
|
||||
err = fmt.Errorf("StatusCode: %d, %s", e.StatusCode, response)
|
||||
// %.0w makes e visible to error.Unwrap() without including any text
|
||||
err = fmt.Errorf("StatusCode: %d, %s%.0w", e.StatusCode, response, e)
|
||||
case errcode.Error:
|
||||
// e.Error() is fmt.Sprintf("%s: %s", e.Code.Error(), e.Message, which is usually
|
||||
// rather redundant. So reword it without using e.Code.Error() if e.Message is the default.
|
||||
if e.Message == e.Code.Message() {
|
||||
// %.0w makes e visible to error.Unwrap() without including any text
|
||||
err = fmt.Errorf("%s%.0w", e.Message, e)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
28
vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
generated
vendored
28
vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
generated
vendored
|
|
@ -76,15 +76,15 @@ func (d *Destination) AddRepoTags(tags []reference.NamedTagged) {
|
|||
// inputInfo.MediaType describes the blob format, if known.
|
||||
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
|
||||
// to any other readers for download using the supplied digest.
|
||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
||||
func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
|
||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
|
||||
func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
|
||||
// Ouch, we need to stream the blob into a temporary file just to determine the size.
|
||||
// When the layer is decompressed, we also have to generate the digest on uncompressed data.
|
||||
if inputInfo.Size == -1 || inputInfo.Digest == "" {
|
||||
logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
|
||||
streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.sysCtx, stream, &inputInfo)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
defer cleanup()
|
||||
stream = streamCopy
|
||||
|
|
@ -92,47 +92,45 @@ func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader,
|
|||
}
|
||||
|
||||
if err := d.archive.lock(); err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
defer d.archive.unlock()
|
||||
|
||||
// Maybe the blob has been already sent
|
||||
ok, reusedInfo, err := d.archive.tryReusingBlobLocked(inputInfo)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
if ok {
|
||||
return reusedInfo, nil
|
||||
return private.UploadedBlob{Digest: reusedInfo.Digest, Size: reusedInfo.Size}, nil
|
||||
}
|
||||
|
||||
if options.IsConfig {
|
||||
buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, fmt.Errorf("reading Config file stream: %w", err)
|
||||
return private.UploadedBlob{}, fmt.Errorf("reading Config file stream: %w", err)
|
||||
}
|
||||
d.config = buf
|
||||
if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil {
|
||||
return types.BlobInfo{}, fmt.Errorf("writing Config file: %w", err)
|
||||
return private.UploadedBlob{}, fmt.Errorf("writing Config file: %w", err)
|
||||
}
|
||||
} else {
|
||||
if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
}
|
||||
d.archive.recordBlobLocked(types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size})
|
||||
return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
|
||||
return private.UploadedBlob{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
|
||||
}
|
||||
|
||||
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
|
||||
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
|
||||
// info.Digest must not be empty.
|
||||
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
|
||||
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
|
||||
// reflected in the manifest that will be written.
|
||||
// If the blob has been successfully reused, returns (true, info, nil).
|
||||
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
|
||||
func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
|
||||
func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
|
||||
if err := d.archive.lock(); err != nil {
|
||||
return false, types.BlobInfo{}, err
|
||||
return false, private.ReusedBlob{}, err
|
||||
}
|
||||
defer d.archive.unlock()
|
||||
|
||||
|
|
|
|||
20
vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
generated
vendored
20
vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
generated
vendored
|
|
@ -34,15 +34,19 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
|
|||
}
|
||||
defer file.Close()
|
||||
|
||||
// If the file is already not compressed we can just return the file itself
|
||||
// If the file is seekable and already not compressed we can just return the file itself
|
||||
// as a source. Otherwise we pass the stream to NewReaderFromStream.
|
||||
stream, isCompressed, err := compression.AutoDecompress(file)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("detecting compression for file %q: %w", path, err)
|
||||
}
|
||||
defer stream.Close()
|
||||
if !isCompressed {
|
||||
return newReader(path, false)
|
||||
var stream io.Reader = file
|
||||
if _, err := file.Seek(0, io.SeekCurrent); err == nil { // seeking is possible
|
||||
decompressed, isCompressed, err := compression.AutoDecompress(file)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("detecting compression for file %q: %w", path, err)
|
||||
}
|
||||
defer decompressed.Close()
|
||||
stream = decompressed
|
||||
if !isCompressed {
|
||||
return newReader(path, false)
|
||||
}
|
||||
}
|
||||
return NewReaderFromStream(sys, stream)
|
||||
}
|
||||
|
|
|
|||
44
vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go
generated
vendored
44
vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go
generated
vendored
|
|
@ -13,10 +13,13 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/internal/private"
|
||||
"github.com/containers/image/v5/internal/set"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// Writer allows creating a (docker save)-formatted tar archive containing one or more images.
|
||||
|
|
@ -29,7 +32,7 @@ type Writer struct {
|
|||
// Other state.
|
||||
blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs
|
||||
repositories map[string]map[string]string
|
||||
legacyLayers map[string]struct{} // A set of IDs of legacy layers that have been already sent.
|
||||
legacyLayers *set.Set[string] // A set of IDs of legacy layers that have been already sent.
|
||||
manifest []ManifestItem
|
||||
manifestByConfig map[digest.Digest]int // A map from config digest to an entry index in manifest above.
|
||||
}
|
||||
|
|
@ -42,7 +45,7 @@ func NewWriter(dest io.Writer) *Writer {
|
|||
tar: tar.NewWriter(dest),
|
||||
blobs: make(map[digest.Digest]types.BlobInfo),
|
||||
repositories: map[string]map[string]string{},
|
||||
legacyLayers: map[string]struct{}{},
|
||||
legacyLayers: set.New[string](),
|
||||
manifestByConfig: map[digest.Digest]int{},
|
||||
}
|
||||
}
|
||||
|
|
@ -67,17 +70,17 @@ func (w *Writer) unlock() {
|
|||
|
||||
// tryReusingBlobLocked checks whether the transport already contains, a blob, and if so, returns its metadata.
|
||||
// info.Digest must not be empty.
|
||||
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
|
||||
// If the blob has been successfully reused, returns (true, info, nil).
|
||||
// If the transport can not reuse the requested blob, tryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
|
||||
// The caller must have locked the Writer.
|
||||
func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, types.BlobInfo, error) {
|
||||
func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, private.ReusedBlob, error) {
|
||||
if info.Digest == "" {
|
||||
return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest")
|
||||
return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
|
||||
}
|
||||
if blob, ok := w.blobs[info.Digest]; ok {
|
||||
return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
|
||||
return true, private.ReusedBlob{Digest: info.Digest, Size: blob.Size}, nil
|
||||
}
|
||||
return false, types.BlobInfo{}, nil
|
||||
return false, private.ReusedBlob{}, nil
|
||||
}
|
||||
|
||||
// recordBlob records metadata of a recorded blob, which must contain at least a digest and size.
|
||||
|
|
@ -89,7 +92,7 @@ func (w *Writer) recordBlobLocked(info types.BlobInfo) {
|
|||
// ensureSingleLegacyLayerLocked writes legacy VERSION and configuration files for a single layer
|
||||
// The caller must have locked the Writer.
|
||||
func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest digest.Digest, configBytes []byte) error {
|
||||
if _, ok := w.legacyLayers[layerID]; !ok {
|
||||
if !w.legacyLayers.Contains(layerID) {
|
||||
// Create a symlink for the legacy format, where there is one subdirectory per layer ("image").
|
||||
// See also the comment in physicalLayerPath.
|
||||
physicalLayerPath := w.physicalLayerPath(layerDigest)
|
||||
|
|
@ -106,7 +109,7 @@ func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest diges
|
|||
return fmt.Errorf("writing config json file: %w", err)
|
||||
}
|
||||
|
||||
w.legacyLayers[layerID] = struct{}{}
|
||||
w.legacyLayers.Add(layerID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -117,7 +120,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
|
|||
lastLayerID := ""
|
||||
for i, l := range layerDescriptors {
|
||||
// The legacy format requires a config file per layer
|
||||
layerConfig := make(map[string]interface{})
|
||||
layerConfig := make(map[string]any)
|
||||
|
||||
// The root layer doesn't have any parent
|
||||
if lastLayerID != "" {
|
||||
|
|
@ -188,14 +191,9 @@ func checkManifestItemsMatch(a, b *ManifestItem) error {
|
|||
if a.Config != b.Config {
|
||||
return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with configs %#v vs. %#v", a.Config, b.Config)
|
||||
}
|
||||
if len(a.Layers) != len(b.Layers) {
|
||||
if !slices.Equal(a.Layers, b.Layers) {
|
||||
return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers %#v vs. %#v", a.Layers, b.Layers)
|
||||
}
|
||||
for i := range a.Layers {
|
||||
if a.Layers[i] != b.Layers[i] {
|
||||
return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers[i] %#v vs. %#v", a.Layers[i], b.Layers[i])
|
||||
}
|
||||
}
|
||||
// Ignore RepoTags, that will be built later.
|
||||
// Ignore Parent and LayerSources, which we don’t set to anything meaningful.
|
||||
return nil
|
||||
|
|
@ -229,9 +227,9 @@ func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Des
|
|||
item = &w.manifest[i]
|
||||
}
|
||||
|
||||
knownRepoTags := map[string]struct{}{}
|
||||
knownRepoTags := set.New[string]()
|
||||
for _, repoTag := range item.RepoTags {
|
||||
knownRepoTags[repoTag] = struct{}{}
|
||||
knownRepoTags.Add(repoTag)
|
||||
}
|
||||
for _, tag := range repoTags {
|
||||
// For github.com/docker/docker consumers, this works just as well as
|
||||
|
|
@ -252,9 +250,9 @@ func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Des
|
|||
// analysis and explanation.
|
||||
refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag())
|
||||
|
||||
if _, ok := knownRepoTags[refString]; !ok {
|
||||
if !knownRepoTags.Contains(refString) {
|
||||
item.RepoTags = append(item.RepoTags, refString)
|
||||
knownRepoTags[refString] = struct{}{}
|
||||
knownRepoTags.Add(refString)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -337,7 +335,7 @@ func (t *tarFI) ModTime() time.Time {
|
|||
func (t *tarFI) IsDir() bool {
|
||||
return false
|
||||
}
|
||||
func (t *tarFI) Sys() interface{} {
|
||||
func (t *tarFI) Sys() any {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -346,7 +344,7 @@ func (t *tarFI) Sys() interface{} {
|
|||
func (w *Writer) sendSymlinkLocked(path string, target string) error {
|
||||
hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target)
|
||||
if err != nil {
|
||||
return nil
|
||||
return err
|
||||
}
|
||||
logrus.Debugf("Sending as tar link %s -> %s", path, target)
|
||||
return w.tar.WriteHeader(hdr)
|
||||
|
|
@ -363,7 +361,7 @@ func (w *Writer) sendBytesLocked(path string, b []byte) error {
|
|||
func (w *Writer) sendFileLocked(path string, expectedSize int64, stream io.Reader) error {
|
||||
hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
|
||||
if err != nil {
|
||||
return nil
|
||||
return err
|
||||
}
|
||||
logrus.Debugf("Sending as tar file %s", path)
|
||||
if err := w.tar.WriteHeader(hdr); err != nil {
|
||||
|
|
|
|||
2
vendor/github.com/containers/image/v5/docker/reference/normalize.go
generated
vendored
2
vendor/github.com/containers/image/v5/docker/reference/normalize.go
generated
vendored
|
|
@ -104,7 +104,7 @@ func splitDockerDomain(name string) (domain, remainder string) {
|
|||
}
|
||||
|
||||
// familiarizeName returns a shortened version of the name familiar
|
||||
// to to the Docker UI. Familiar names have the default domain
|
||||
// to the Docker UI. Familiar names have the default domain
|
||||
// "docker.io" and "library/" repository prefix removed.
|
||||
// For example, "docker.io/library/redis" will have the familiar
|
||||
// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
|
||||
|
|
|
|||
8
vendor/github.com/containers/image/v5/docker/reference/reference.go
generated
vendored
8
vendor/github.com/containers/image/v5/docker/reference/reference.go
generated
vendored
|
|
@ -3,13 +3,13 @@
|
|||
//
|
||||
// Grammar
|
||||
//
|
||||
// reference := name [ ":" tag ] [ "@" digest ]
|
||||
// reference := name [ ":" tag ] [ "@" digest ]
|
||||
// name := [domain '/'] path-component ['/' path-component]*
|
||||
// domain := domain-component ['.' domain-component]* [':' port-number]
|
||||
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
|
||||
// port-number := /[0-9]+/
|
||||
// path-component := alpha-numeric [separator alpha-numeric]*
|
||||
// alpha-numeric := /[a-z0-9]+/
|
||||
// path-component := alphanumeric [separator alphanumeric]*
|
||||
// alphanumeric := /[a-z0-9]+/
|
||||
// separator := /[_.]|__|[-]*/
|
||||
//
|
||||
// tag := /[\w][\w.-]{0,127}/
|
||||
|
|
@ -175,7 +175,7 @@ func splitDomain(name string) (string, string) {
|
|||
// hostname and name string. If no valid hostname is
|
||||
// found, the hostname is empty and the full value
|
||||
// is returned as name
|
||||
// DEPRECATED: Use Domain or Path
|
||||
// Deprecated: Use Domain or Path
|
||||
func SplitHostname(named Named) (string, string) {
|
||||
if r, ok := named.(namedRepository); ok {
|
||||
return r.Domain(), r.Path()
|
||||
|
|
|
|||
153
vendor/github.com/containers/image/v5/docker/reference/regexp.go
generated
vendored
153
vendor/github.com/containers/image/v5/docker/reference/regexp.go
generated
vendored
|
|
@ -1,143 +1,156 @@
|
|||
package reference
|
||||
|
||||
import "regexp"
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
var (
|
||||
// alphaNumericRegexp defines the alpha numeric atom, typically a
|
||||
storageRegexp "github.com/containers/storage/pkg/regexp"
|
||||
)
|
||||
|
||||
const (
|
||||
// alphaNumeric defines the alpha numeric atom, typically a
|
||||
// component of names. This only allows lower case characters and digits.
|
||||
alphaNumericRegexp = match(`[a-z0-9]+`)
|
||||
alphaNumeric = `[a-z0-9]+`
|
||||
|
||||
// separatorRegexp defines the separators allowed to be embedded in name
|
||||
// separator defines the separators allowed to be embedded in name
|
||||
// components. This allow one period, one or two underscore and multiple
|
||||
// dashes.
|
||||
separatorRegexp = match(`(?:[._]|__|[-]*)`)
|
||||
// dashes. Repeated dashes and underscores are intentionally treated
|
||||
// differently. In order to support valid hostnames as name components,
|
||||
// supporting repeated dash was added. Additionally double underscore is
|
||||
// now allowed as a separator to loosen the restriction for previously
|
||||
// supported names.
|
||||
separator = `(?:[._]|__|[-]*)`
|
||||
|
||||
// nameComponentRegexp restricts registry path component names to start
|
||||
// with at least one letter or number, with following parts able to be
|
||||
// separated by one period, one or two underscore and multiple dashes.
|
||||
nameComponentRegexp = expression(
|
||||
alphaNumericRegexp,
|
||||
optional(repeated(separatorRegexp, alphaNumericRegexp)))
|
||||
|
||||
// domainComponentRegexp restricts the registry domain component of a
|
||||
// repository name to start with a component as defined by DomainRegexp
|
||||
// and followed by an optional port.
|
||||
domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
|
||||
domainComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`
|
||||
|
||||
// The string counterpart for TagRegexp.
|
||||
tag = `[\w][\w.-]{0,127}`
|
||||
|
||||
// The string counterpart for DigestRegexp.
|
||||
digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`
|
||||
|
||||
// The string counterpart for IdentifierRegexp.
|
||||
identifier = `([a-f0-9]{64})`
|
||||
|
||||
// The string counterpart for ShortIdentifierRegexp.
|
||||
shortIdentifier = `([a-f0-9]{6,64})`
|
||||
)
|
||||
|
||||
var (
|
||||
// nameComponent restricts registry path component names to start
|
||||
// with at least one letter or number, with following parts able to be
|
||||
// separated by one period, one or two underscore and multiple dashes.
|
||||
nameComponent = expression(
|
||||
alphaNumeric,
|
||||
optional(repeated(separator, alphaNumeric)))
|
||||
|
||||
domain = expression(
|
||||
domainComponent,
|
||||
optional(repeated(literal(`.`), domainComponent)),
|
||||
optional(literal(`:`), `[0-9]+`))
|
||||
// DomainRegexp defines the structure of potential domain components
|
||||
// that may be part of image names. This is purposely a subset of what is
|
||||
// allowed by DNS to ensure backwards compatibility with Docker image
|
||||
// names.
|
||||
DomainRegexp = expression(
|
||||
domainComponentRegexp,
|
||||
optional(repeated(literal(`.`), domainComponentRegexp)),
|
||||
optional(literal(`:`), match(`[0-9]+`)))
|
||||
DomainRegexp = re(domain)
|
||||
|
||||
// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
|
||||
TagRegexp = match(`[\w][\w.-]{0,127}`)
|
||||
TagRegexp = re(tag)
|
||||
|
||||
anchoredTag = anchored(tag)
|
||||
// anchoredTagRegexp matches valid tag names, anchored at the start and
|
||||
// end of the matched string.
|
||||
anchoredTagRegexp = anchored(TagRegexp)
|
||||
anchoredTagRegexp = storageRegexp.Delayed(anchoredTag)
|
||||
|
||||
// DigestRegexp matches valid digests.
|
||||
DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
|
||||
DigestRegexp = re(digestPat)
|
||||
|
||||
anchoredDigest = anchored(digestPat)
|
||||
// anchoredDigestRegexp matches valid digests, anchored at the start and
|
||||
// end of the matched string.
|
||||
anchoredDigestRegexp = anchored(DigestRegexp)
|
||||
anchoredDigestRegexp = storageRegexp.Delayed(anchoredDigest)
|
||||
|
||||
namePat = expression(
|
||||
optional(domain, literal(`/`)),
|
||||
nameComponent,
|
||||
optional(repeated(literal(`/`), nameComponent)))
|
||||
// NameRegexp is the format for the name component of references. The
|
||||
// regexp has capturing groups for the domain and name part omitting
|
||||
// the separating forward slash from either.
|
||||
NameRegexp = expression(
|
||||
optional(DomainRegexp, literal(`/`)),
|
||||
nameComponentRegexp,
|
||||
optional(repeated(literal(`/`), nameComponentRegexp)))
|
||||
NameRegexp = re(namePat)
|
||||
|
||||
anchoredName = anchored(
|
||||
optional(capture(domain), literal(`/`)),
|
||||
capture(nameComponent,
|
||||
optional(repeated(literal(`/`), nameComponent))))
|
||||
// anchoredNameRegexp is used to parse a name value, capturing the
|
||||
// domain and trailing components.
|
||||
anchoredNameRegexp = anchored(
|
||||
optional(capture(DomainRegexp), literal(`/`)),
|
||||
capture(nameComponentRegexp,
|
||||
optional(repeated(literal(`/`), nameComponentRegexp))))
|
||||
anchoredNameRegexp = storageRegexp.Delayed(anchoredName)
|
||||
|
||||
referencePat = anchored(capture(namePat),
|
||||
optional(literal(":"), capture(tag)),
|
||||
optional(literal("@"), capture(digestPat)))
|
||||
// ReferenceRegexp is the full supported format of a reference. The regexp
|
||||
// is anchored and has capturing groups for name, tag, and digest
|
||||
// components.
|
||||
ReferenceRegexp = anchored(capture(NameRegexp),
|
||||
optional(literal(":"), capture(TagRegexp)),
|
||||
optional(literal("@"), capture(DigestRegexp)))
|
||||
ReferenceRegexp = re(referencePat)
|
||||
|
||||
// IdentifierRegexp is the format for string identifier used as a
|
||||
// content addressable identifier using sha256. These identifiers
|
||||
// are like digests without the algorithm, since sha256 is used.
|
||||
IdentifierRegexp = match(`([a-f0-9]{64})`)
|
||||
IdentifierRegexp = re(identifier)
|
||||
|
||||
// ShortIdentifierRegexp is the format used to represent a prefix
|
||||
// of an identifier. A prefix may be used to match a sha256 identifier
|
||||
// within a list of trusted identifiers.
|
||||
ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)
|
||||
ShortIdentifierRegexp = re(shortIdentifier)
|
||||
|
||||
anchoredIdentifier = anchored(identifier)
|
||||
// anchoredIdentifierRegexp is used to check or match an
|
||||
// identifier value, anchored at start and end of string.
|
||||
anchoredIdentifierRegexp = anchored(IdentifierRegexp)
|
||||
|
||||
// anchoredShortIdentifierRegexp is used to check if a value
|
||||
// is a possible identifier prefix, anchored at start and end
|
||||
// of string.
|
||||
anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
|
||||
anchoredIdentifierRegexp = storageRegexp.Delayed(anchoredIdentifier)
|
||||
)
|
||||
|
||||
// match compiles the string to a regular expression.
|
||||
var match = regexp.MustCompile
|
||||
// re compiles the string to a regular expression.
|
||||
var re = regexp.MustCompile
|
||||
|
||||
// literal compiles s into a literal regular expression, escaping any regexp
|
||||
// reserved characters.
|
||||
func literal(s string) *regexp.Regexp {
|
||||
re := match(regexp.QuoteMeta(s))
|
||||
|
||||
if _, complete := re.LiteralPrefix(); !complete {
|
||||
panic("must be a literal")
|
||||
}
|
||||
|
||||
return re
|
||||
func literal(s string) string {
|
||||
return regexp.QuoteMeta(s)
|
||||
}
|
||||
|
||||
// expression defines a full expression, where each regular expression must
|
||||
// follow the previous.
|
||||
func expression(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
var s string
|
||||
for _, re := range res {
|
||||
s += re.String()
|
||||
}
|
||||
|
||||
return match(s)
|
||||
func expression(res ...string) string {
|
||||
return strings.Join(res, "")
|
||||
}
|
||||
|
||||
// optional wraps the expression in a non-capturing group and makes the
|
||||
// production optional.
|
||||
func optional(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(group(expression(res...)).String() + `?`)
|
||||
func optional(res ...string) string {
|
||||
return group(expression(res...)) + `?`
|
||||
}
|
||||
|
||||
// repeated wraps the regexp in a non-capturing group to get one or more
|
||||
// matches.
|
||||
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(group(expression(res...)).String() + `+`)
|
||||
func repeated(res ...string) string {
|
||||
return group(expression(res...)) + `+`
|
||||
}
|
||||
|
||||
// group wraps the regexp in a non-capturing group.
|
||||
func group(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(`(?:` + expression(res...).String() + `)`)
|
||||
func group(res ...string) string {
|
||||
return `(?:` + expression(res...) + `)`
|
||||
}
|
||||
|
||||
// capture wraps the expression in a capturing group.
|
||||
func capture(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(`(` + expression(res...).String() + `)`)
|
||||
func capture(res ...string) string {
|
||||
return `(` + expression(res...) + `)`
|
||||
}
|
||||
|
||||
// anchored anchors the regular expression by adding start and end delimiters.
|
||||
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(`^` + expression(res...).String() + `$`)
|
||||
func anchored(res ...string) string {
|
||||
return `^` + expression(res...) + `$`
|
||||
}
|
||||
|
|
|
|||
46
vendor/github.com/containers/image/v5/docker/registries_d.go
generated
vendored
46
vendor/github.com/containers/image/v5/docker/registries_d.go
generated
vendored
|
|
@ -13,9 +13,9 @@ import (
|
|||
"github.com/containers/image/v5/internal/rootless"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage/pkg/homedir"
|
||||
"github.com/ghodss/yaml"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/sirupsen/logrus"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage.
|
||||
|
|
@ -39,18 +39,18 @@ var defaultDockerDir = "/var/lib/containers/sigstore"
|
|||
// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all.
|
||||
// NOTE: Keep this in sync with docs/registries.d.md!
|
||||
type registryConfiguration struct {
|
||||
DefaultDocker *registryNamespace `json:"default-docker"`
|
||||
DefaultDocker *registryNamespace `yaml:"default-docker"`
|
||||
// The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*),
|
||||
Docker map[string]registryNamespace `json:"docker"`
|
||||
Docker map[string]registryNamespace `yaml:"docker"`
|
||||
}
|
||||
|
||||
// registryNamespace defines lookaside locations for a single namespace.
|
||||
type registryNamespace struct {
|
||||
Lookaside string `json:"lookaside"` // For reading, and if LookasideStaging is not present, for writing.
|
||||
LookasideStaging string `json:"lookaside-staging"` // For writing only.
|
||||
SigStore string `json:"sigstore"` // For compatibility, deprecated in favor of Lookaside.
|
||||
SigStoreStaging string `json:"sigstore-staging"` // For compatibility, deprecated in favor of LookasideStaging.
|
||||
UseSigstoreAttachments *bool `json:"use-sigstore-attachments,omitempty"`
|
||||
Lookaside string `yaml:"lookaside"` // For reading, and if LookasideStaging is not present, for writing.
|
||||
LookasideStaging string `yaml:"lookaside-staging"` // For writing only.
|
||||
SigStore string `yaml:"sigstore"` // For compatibility, deprecated in favor of Lookaside.
|
||||
SigStoreStaging string `yaml:"sigstore-staging"` // For compatibility, deprecated in favor of LookasideStaging.
|
||||
UseSigstoreAttachments *bool `yaml:"use-sigstore-attachments,omitempty"`
|
||||
}
|
||||
|
||||
// lookasideStorageBase is an "opaque" type representing a lookaside Docker signature storage.
|
||||
|
|
@ -163,17 +163,17 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
|
|||
// the usage of the BaseURL is defined under docker/distribution registries—separate storage of docs/signature-protocols.md
|
||||
func (config *registryConfiguration) lookasideStorageBaseURL(dr dockerReference, write bool) (*url.URL, error) {
|
||||
topLevel := config.signatureTopLevel(dr, write)
|
||||
var url *url.URL
|
||||
var baseURL *url.URL
|
||||
if topLevel != "" {
|
||||
u, err := url.Parse(topLevel)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Invalid signature storage URL %s: %w", topLevel, err)
|
||||
}
|
||||
url = u
|
||||
baseURL = u
|
||||
} else {
|
||||
// returns default directory if no lookaside specified in configuration file
|
||||
url = builtinDefaultLookasideStorageDir(rootless.GetRootlessEUID())
|
||||
logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), url.Redacted())
|
||||
baseURL = builtinDefaultLookasideStorageDir(rootless.GetRootlessEUID())
|
||||
logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), baseURL.Redacted())
|
||||
}
|
||||
// NOTE: Keep this in sync with docs/signature-protocols.md!
|
||||
// FIXME? Restrict to explicitly supported schemes?
|
||||
|
|
@ -181,8 +181,8 @@ func (config *registryConfiguration) lookasideStorageBaseURL(dr dockerReference,
|
|||
if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references
|
||||
return nil, fmt.Errorf("Unexpected path elements in Docker reference %s for signature storage", dr.ref.String())
|
||||
}
|
||||
url.Path = url.Path + "/" + repo
|
||||
return url, nil
|
||||
baseURL.Path = baseURL.Path + "/" + repo
|
||||
return baseURL, nil
|
||||
}
|
||||
|
||||
// builtinDefaultLookasideStorageDir returns default signature storage URL as per euid
|
||||
|
|
@ -201,8 +201,8 @@ func (config *registryConfiguration) signatureTopLevel(ref dockerReference, writ
|
|||
identity := ref.PolicyConfigurationIdentity()
|
||||
if ns, ok := config.Docker[identity]; ok {
|
||||
logrus.Debugf(` Lookaside configuration: using "docker" namespace %s`, identity)
|
||||
if url := ns.signatureTopLevel(write); url != "" {
|
||||
return url
|
||||
if ret := ns.signatureTopLevel(write); ret != "" {
|
||||
return ret
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -210,8 +210,8 @@ func (config *registryConfiguration) signatureTopLevel(ref dockerReference, writ
|
|||
for _, name := range ref.PolicyConfigurationNamespaces() {
|
||||
if ns, ok := config.Docker[name]; ok {
|
||||
logrus.Debugf(` Lookaside configuration: using "docker" namespace %s`, name)
|
||||
if url := ns.signatureTopLevel(write); url != "" {
|
||||
return url
|
||||
if ret := ns.signatureTopLevel(write); ret != "" {
|
||||
return ret
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -219,8 +219,8 @@ func (config *registryConfiguration) signatureTopLevel(ref dockerReference, writ
|
|||
// Look for a default location
|
||||
if config.DefaultDocker != nil {
|
||||
logrus.Debugf(` Lookaside configuration: using "default-docker" configuration`)
|
||||
if url := config.DefaultDocker.signatureTopLevel(write); url != "" {
|
||||
return url
|
||||
if ret := config.DefaultDocker.signatureTopLevel(write); ret != "" {
|
||||
return ret
|
||||
}
|
||||
}
|
||||
return ""
|
||||
|
|
@ -287,7 +287,7 @@ func (ns registryNamespace) signatureTopLevel(write bool) string {
|
|||
// base is not nil from the caller
|
||||
// NOTE: Keep this in sync with docs/signature-protocols.md!
|
||||
func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) *url.URL {
|
||||
url := *base
|
||||
url.Path = fmt.Sprintf("%s@%s=%s/signature-%d", url.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
|
||||
return &url
|
||||
sigURL := *base
|
||||
sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
|
||||
return &sigURL
|
||||
}
|
||||
|
|
|
|||
15
vendor/github.com/containers/image/v5/docker/wwwauthenticate.go
generated
vendored
15
vendor/github.com/containers/image/v5/docker/wwwauthenticate.go
generated
vendored
|
|
@ -3,6 +3,7 @@ package docker
|
|||
// Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
|
@ -70,6 +71,18 @@ func parseAuthHeader(header http.Header) []challenge {
|
|||
return challenges
|
||||
}
|
||||
|
||||
// parseAuthScope parses an authentication scope string of the form `$resource:$remote:$actions`
|
||||
func parseAuthScope(scopeStr string) (*authScope, error) {
|
||||
if parts := strings.Split(scopeStr, ":"); len(parts) == 3 {
|
||||
return &authScope{
|
||||
resourceType: parts[0],
|
||||
remoteName: parts[1],
|
||||
actions: parts[2],
|
||||
}, nil
|
||||
}
|
||||
return nil, fmt.Errorf("error parsing auth scope: '%s'", scopeStr)
|
||||
}
|
||||
|
||||
// NOTE: This is not a fully compliant parser per RFC 7235:
|
||||
// Most notably it does not support more than one challenge within a single header
|
||||
// Some of the whitespace parsing also seems noncompliant.
|
||||
|
|
@ -136,7 +149,7 @@ func expectTokenOrQuoted(s string) (value string, rest string) {
|
|||
p := make([]byte, len(s)-1)
|
||||
j := copy(p, s[:i])
|
||||
escape := true
|
||||
for i = i + 1; i < len(s); i++ {
|
||||
for i++; i < len(s); i++ {
|
||||
b := s[i]
|
||||
switch {
|
||||
case escape:
|
||||
|
|
|
|||
2
vendor/github.com/containers/image/v5/internal/image/docker_list.go
generated
vendored
2
vendor/github.com/containers/image/v5/internal/image/docker_list.go
generated
vendored
|
|
@ -4,7 +4,7 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/internal/manifest"
|
||||
"github.com/containers/image/v5/types"
|
||||
)
|
||||
|
||||
|
|
|
|||
6
vendor/github.com/containers/image/v5/internal/image/docker_schema2.go
generated
vendored
6
vendor/github.com/containers/image/v5/internal/image/docker_schema2.go
generated
vendored
|
|
@ -226,9 +226,9 @@ func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.Ma
|
|||
layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx])
|
||||
switch m.m.LayersDescriptors[idx].MediaType {
|
||||
case manifest.DockerV2Schema2ForeignLayerMediaType:
|
||||
layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
|
||||
layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
|
||||
case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip:
|
||||
layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip
|
||||
layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
|
||||
case manifest.DockerV2SchemaLayerMediaTypeUncompressed:
|
||||
layers[idx].MediaType = imgspecv1.MediaTypeImageLayer
|
||||
case manifest.DockerV2Schema2LayerMediaType:
|
||||
|
|
@ -381,7 +381,7 @@ func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwawa
|
|||
delete(rawContents, "rootfs")
|
||||
delete(rawContents, "history")
|
||||
|
||||
updates := map[string]interface{}{"id": v1ID}
|
||||
updates := map[string]any{"id": v1ID}
|
||||
if parentV1ID != "" {
|
||||
updates["parent"] = parentV1ID
|
||||
}
|
||||
|
|
|
|||
6
vendor/github.com/containers/image/v5/internal/image/oci.go
generated
vendored
6
vendor/github.com/containers/image/v5/internal/image/oci.go
generated
vendored
|
|
@ -215,11 +215,11 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.Mani
|
|||
for idx := range layers {
|
||||
layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx])
|
||||
switch layers[idx].MediaType {
|
||||
case imgspecv1.MediaTypeImageLayerNonDistributable:
|
||||
case imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
|
||||
layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType
|
||||
case imgspecv1.MediaTypeImageLayerNonDistributableGzip:
|
||||
case imgspecv1.MediaTypeImageLayerNonDistributableGzip: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
|
||||
layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip
|
||||
case imgspecv1.MediaTypeImageLayerNonDistributableZstd:
|
||||
case imgspecv1.MediaTypeImageLayerNonDistributableZstd: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
|
||||
return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
|
||||
case imgspecv1.MediaTypeImageLayer:
|
||||
layers[idx].MediaType = manifest.DockerV2SchemaLayerMediaTypeUncompressed
|
||||
|
|
|
|||
2
vendor/github.com/containers/image/v5/internal/image/oci_index.go
generated
vendored
2
vendor/github.com/containers/image/v5/internal/image/oci_index.go
generated
vendored
|
|
@ -4,7 +4,7 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/internal/manifest"
|
||||
"github.com/containers/image/v5/types"
|
||||
)
|
||||
|
||||
|
|
|
|||
2
vendor/github.com/containers/image/v5/internal/image/sourced.go
generated
vendored
2
vendor/github.com/containers/image/v5/internal/image/sourced.go
generated
vendored
|
|
@ -10,7 +10,7 @@ import (
|
|||
)
|
||||
|
||||
// FromReference returns a types.ImageCloser implementation for the default instance reading from reference.
|
||||
// If reference poitns to a manifest list, .Manifest() still returns the manifest list,
|
||||
// If reference points to a manifest list, .Manifest() still returns the manifest list,
|
||||
// but other methods transparently return data from an appropriate image instance.
|
||||
//
|
||||
// The caller must call .Close() on the returned ImageCloser.
|
||||
|
|
|
|||
40
vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go
generated
vendored
40
vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go
generated
vendored
|
|
@ -22,13 +22,14 @@ type Compat struct {
|
|||
// for implementations of private.ImageDestination.
|
||||
//
|
||||
// Use it like this:
|
||||
// type yourDestination struct {
|
||||
// impl.Compat
|
||||
// …
|
||||
// }
|
||||
// dest := &yourDestination{…}
|
||||
// dest.Compat = impl.AddCompat(dest)
|
||||
//
|
||||
// type yourDestination struct {
|
||||
// impl.Compat
|
||||
// …
|
||||
// }
|
||||
//
|
||||
// dest := &yourDestination{…}
|
||||
// dest.Compat = impl.AddCompat(dest)
|
||||
func AddCompat(dest private.ImageDestinationInternalOnly) Compat {
|
||||
return Compat{dest}
|
||||
}
|
||||
|
|
@ -42,10 +43,17 @@ func AddCompat(dest private.ImageDestinationInternalOnly) Compat {
|
|||
// to any other readers for download using the supplied digest.
|
||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
||||
func (c *Compat) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
|
||||
return c.dest.PutBlobWithOptions(ctx, stream, inputInfo, private.PutBlobOptions{
|
||||
res, err := c.dest.PutBlobWithOptions(ctx, stream, inputInfo, private.PutBlobOptions{
|
||||
Cache: blobinfocache.FromBlobInfoCache(cache),
|
||||
IsConfig: isConfig,
|
||||
})
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
}
|
||||
return types.BlobInfo{
|
||||
Digest: res.Digest,
|
||||
Size: res.Size,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
|
||||
|
|
@ -58,10 +66,26 @@ func (c *Compat) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.
|
|||
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
|
||||
// May use and/or update cache.
|
||||
func (c *Compat) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
|
||||
return c.dest.TryReusingBlobWithOptions(ctx, info, private.TryReusingBlobOptions{
|
||||
reused, blob, err := c.dest.TryReusingBlobWithOptions(ctx, info, private.TryReusingBlobOptions{
|
||||
Cache: blobinfocache.FromBlobInfoCache(cache),
|
||||
CanSubstitute: canSubstitute,
|
||||
})
|
||||
if !reused || err != nil {
|
||||
return reused, types.BlobInfo{}, err
|
||||
}
|
||||
res := types.BlobInfo{
|
||||
Digest: blob.Digest,
|
||||
Size: blob.Size,
|
||||
CompressionOperation: blob.CompressionOperation,
|
||||
CompressionAlgorithm: blob.CompressionAlgorithm,
|
||||
}
|
||||
// This is probably not necessary; we preserve MediaType to decrease risks of breaking for external callers.
|
||||
// Some transports were not setting the MediaType field anyway, and others were setting the old value on substitution;
|
||||
// provide the value in cases where it is likely to be correct.
|
||||
if blob.Digest == info.Digest {
|
||||
res.MediaType = info.MediaType
|
||||
}
|
||||
return true, res, nil
|
||||
}
|
||||
|
||||
// PutSignatures writes a set of signatures to the destination.
|
||||
|
|
|
|||
|
|
@ -39,8 +39,8 @@ func (stub NoPutBlobPartialInitialize) SupportsPutBlobPartial() bool {
|
|||
// It is available only if SupportsPutBlobPartial().
|
||||
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
|
||||
// should fall back to PutBlobWithOptions.
|
||||
func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
|
||||
return types.BlobInfo{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName)
|
||||
func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
|
||||
return private.UploadedBlob{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName)
|
||||
}
|
||||
|
||||
// ImplementsPutBlobPartial implements SupportsPutBlobPartial() that returns true.
|
||||
|
|
|
|||
34
vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go
generated
vendored
34
vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go
generated
vendored
|
|
@ -3,23 +3,25 @@
|
|||
// Compare with imagedestination/impl, which might require non-trivial implementation work.
|
||||
//
|
||||
// There are two kinds of stubs:
|
||||
// - Pure stubs, like ImplementsPutBlobPartial. Those can just be included in an imageDestination
|
||||
// implementation:
|
||||
//
|
||||
// type yourDestination struct {
|
||||
// stubs.ImplementsPutBlobPartial
|
||||
// …
|
||||
// }
|
||||
// - Stubs with a constructor, like NoPutBlobPartialInitialize. The Initialize marker
|
||||
// means that a constructor must be called:
|
||||
// type yourDestination struct {
|
||||
// stubs.NoPutBlobPartialInitialize
|
||||
// …
|
||||
// }
|
||||
// First, there are pure stubs, like ImplementsPutBlobPartial. Those can just be included in an imageDestination
|
||||
// implementation:
|
||||
//
|
||||
// dest := &yourDestination{
|
||||
// …
|
||||
// NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
|
||||
// }
|
||||
// type yourDestination struct {
|
||||
// stubs.ImplementsPutBlobPartial
|
||||
// …
|
||||
// }
|
||||
//
|
||||
// Second, there are stubs with a constructor, like NoPutBlobPartialInitialize. The Initialize marker
|
||||
// means that a constructor must be called:
|
||||
//
|
||||
// type yourDestination struct {
|
||||
// stubs.NoPutBlobPartialInitialize
|
||||
// …
|
||||
// }
|
||||
//
|
||||
// dest := &yourDestination{
|
||||
// …
|
||||
// NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
|
||||
// }
|
||||
package stubs
|
||||
|
|
|
|||
30
vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go
generated
vendored
30
vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go
generated
vendored
|
|
@ -46,20 +46,34 @@ func FromPublic(dest types.ImageDestination) private.ImageDestination {
|
|||
// inputInfo.MediaType describes the blob format, if known.
|
||||
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
|
||||
// to any other readers for download using the supplied digest.
|
||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
||||
func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
|
||||
return w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig)
|
||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
|
||||
func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
|
||||
res, err := w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig)
|
||||
if err != nil {
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
return private.UploadedBlob{
|
||||
Digest: res.Digest,
|
||||
Size: res.Size,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
|
||||
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
|
||||
// info.Digest must not be empty.
|
||||
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
|
||||
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
|
||||
// reflected in the manifest that will be written.
|
||||
// If the blob has been successfully reused, returns (true, info, nil).
|
||||
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
|
||||
func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
|
||||
return w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute)
|
||||
func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
|
||||
reused, blob, err := w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute)
|
||||
if !reused || err != nil {
|
||||
return reused, private.ReusedBlob{}, err
|
||||
}
|
||||
return true, private.ReusedBlob{
|
||||
Digest: blob.Digest,
|
||||
Size: blob.Size,
|
||||
CompressionOperation: blob.CompressionOperation,
|
||||
CompressionAlgorithm: blob.CompressionAlgorithm,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// PutSignaturesWithFormat writes a set of signatures to the destination.
|
||||
|
|
|
|||
13
vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go
generated
vendored
13
vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go
generated
vendored
|
|
@ -19,13 +19,14 @@ type Compat struct {
|
|||
// for implementations of private.ImageSource.
|
||||
//
|
||||
// Use it like this:
|
||||
// type yourSource struct {
|
||||
// impl.Compat
|
||||
// …
|
||||
// }
|
||||
// src := &yourSource{…}
|
||||
// src.Compat = impl.AddCompat(src)
|
||||
//
|
||||
// type yourSource struct {
|
||||
// impl.Compat
|
||||
// …
|
||||
// }
|
||||
//
|
||||
// src := &yourSource{…}
|
||||
// src.Compat = impl.AddCompat(src)
|
||||
func AddCompat(src private.ImageSourceInternalOnly) Compat {
|
||||
return Compat{src}
|
||||
}
|
||||
|
|
|
|||
35
vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go
generated
vendored
35
vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go
generated
vendored
|
|
@ -3,23 +3,26 @@
|
|||
// Compare with imagesource/impl, which might require non-trivial implementation work.
|
||||
//
|
||||
// There are two kinds of stubs:
|
||||
// - Pure stubs, like ImplementsGetBlobAt. Those can just be included in an ImageSource
|
||||
// implementation:
|
||||
//
|
||||
// type yourSource struct {
|
||||
// stubs.ImplementsGetBlobAt
|
||||
// …
|
||||
// }
|
||||
// - Stubs with a constructor, like NoGetBlobAtInitialize. The Initialize marker
|
||||
// means that a constructor must be called:
|
||||
// type yourSource struct {
|
||||
// stubs.NoGetBlobAtInitialize
|
||||
// …
|
||||
// }
|
||||
// First, there are pure stubs, like ImplementsGetBlobAt. Those can just be included in an ImageSource
|
||||
//
|
||||
// dest := &yourSource{
|
||||
// …
|
||||
// NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
|
||||
// }
|
||||
// implementation:
|
||||
//
|
||||
// type yourSource struct {
|
||||
// stubs.ImplementsGetBlobAt
|
||||
// …
|
||||
// }
|
||||
//
|
||||
// Second, there are stubs with a constructor, like NoGetBlobAtInitialize. The Initialize marker
|
||||
// means that a constructor must be called:
|
||||
//
|
||||
// type yourSource struct {
|
||||
// stubs.NoGetBlobAtInitialize
|
||||
// …
|
||||
// }
|
||||
//
|
||||
// dest := &yourSource{
|
||||
// …
|
||||
// NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
|
||||
// }
|
||||
package stubs
|
||||
|
|
|
|||
72
vendor/github.com/containers/image/v5/internal/manifest/common.go
generated
vendored
Normal file
72
vendor/github.com/containers/image/v5/internal/manifest/common.go
generated
vendored
Normal file
|
|
@ -0,0 +1,72 @@
|
|||
package manifest
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// AllowedManifestFields is a bit mask of “essential” manifest fields that ValidateUnambiguousManifestFormat
|
||||
// can expect to be present.
|
||||
type AllowedManifestFields int
|
||||
|
||||
const (
|
||||
AllowedFieldConfig AllowedManifestFields = 1 << iota
|
||||
AllowedFieldFSLayers
|
||||
AllowedFieldHistory
|
||||
AllowedFieldLayers
|
||||
AllowedFieldManifests
|
||||
AllowedFieldFirstUnusedBit // Keep this at the end!
|
||||
)
|
||||
|
||||
// ValidateUnambiguousManifestFormat rejects manifests (incl. multi-arch) that look like more than
|
||||
// one kind we currently recognize, i.e. if they contain any of the known “essential” format fields
|
||||
// other than the ones the caller specifically allows.
|
||||
// expectedMIMEType is used only for diagnostics.
|
||||
// NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format
|
||||
// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous
|
||||
// data that just isn’t what was expected, as opposed to actually ambiguous data.
|
||||
func ValidateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string,
|
||||
allowed AllowedManifestFields) error {
|
||||
if allowed >= AllowedFieldFirstUnusedBit {
|
||||
return fmt.Errorf("internal error: invalid allowedManifestFields value %#v", allowed)
|
||||
}
|
||||
// Use a private type to decode, not just a map[string]any, because we want
|
||||
// to also reject case-insensitive matches (which would be used by Go when really decoding
|
||||
// the manifest).
|
||||
// (It is expected that as manifest formats are added or extended over time, more fields will be added
|
||||
// here.)
|
||||
detectedFields := struct {
|
||||
Config any `json:"config"`
|
||||
FSLayers any `json:"fsLayers"`
|
||||
History any `json:"history"`
|
||||
Layers any `json:"layers"`
|
||||
Manifests any `json:"manifests"`
|
||||
}{}
|
||||
if err := json.Unmarshal(manifest, &detectedFields); err != nil {
|
||||
// The caller was supposed to already validate version numbers, so this should not happen;
|
||||
// let’s not bother with making this error “nice”.
|
||||
return err
|
||||
}
|
||||
unexpected := []string{}
|
||||
// Sadly this isn’t easy to automate in Go, without reflection. So, copy&paste.
|
||||
if detectedFields.Config != nil && (allowed&AllowedFieldConfig) == 0 {
|
||||
unexpected = append(unexpected, "config")
|
||||
}
|
||||
if detectedFields.FSLayers != nil && (allowed&AllowedFieldFSLayers) == 0 {
|
||||
unexpected = append(unexpected, "fsLayers")
|
||||
}
|
||||
if detectedFields.History != nil && (allowed&AllowedFieldHistory) == 0 {
|
||||
unexpected = append(unexpected, "history")
|
||||
}
|
||||
if detectedFields.Layers != nil && (allowed&AllowedFieldLayers) == 0 {
|
||||
unexpected = append(unexpected, "layers")
|
||||
}
|
||||
if detectedFields.Manifests != nil && (allowed&AllowedFieldManifests) == 0 {
|
||||
unexpected = append(unexpected, "manifests")
|
||||
}
|
||||
if len(unexpected) != 0 {
|
||||
return fmt.Errorf(`rejecting ambiguous manifest, unexpected fields %#v in supposedly %s`,
|
||||
unexpected, expectedMIMEType)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
15
vendor/github.com/containers/image/v5/internal/manifest/docker_schema2.go
generated
vendored
Normal file
15
vendor/github.com/containers/image/v5/internal/manifest/docker_schema2.go
generated
vendored
Normal file
|
|
@ -0,0 +1,15 @@
|
|||
package manifest
|
||||
|
||||
import (
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// Schema2Descriptor is a “descriptor” in docker/distribution schema 2.
|
||||
//
|
||||
// This is publicly visible as c/image/manifest.Schema2Descriptor.
|
||||
type Schema2Descriptor struct {
|
||||
MediaType string `json:"mediaType"`
|
||||
Size int64 `json:"size"`
|
||||
Digest digest.Digest `json:"digest"`
|
||||
URLs []string `json:"urls,omitempty"`
|
||||
}
|
||||
304
vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go
generated
vendored
Normal file
304
vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go
generated
vendored
Normal file
|
|
@ -0,0 +1,304 @@
|
|||
package manifest
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
platform "github.com/containers/image/v5/internal/pkg/platform"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// Schema2PlatformSpec describes the platform which a particular manifest is
|
||||
// specialized for.
|
||||
// This is publicly visible as c/image/manifest.Schema2PlatformSpec.
|
||||
type Schema2PlatformSpec struct {
|
||||
Architecture string `json:"architecture"`
|
||||
OS string `json:"os"`
|
||||
OSVersion string `json:"os.version,omitempty"`
|
||||
OSFeatures []string `json:"os.features,omitempty"`
|
||||
Variant string `json:"variant,omitempty"`
|
||||
Features []string `json:"features,omitempty"` // removed in OCI
|
||||
}
|
||||
|
||||
// Schema2ManifestDescriptor references a platform-specific manifest.
|
||||
// This is publicly visible as c/image/manifest.Schema2ManifestDescriptor.
|
||||
type Schema2ManifestDescriptor struct {
|
||||
Schema2Descriptor
|
||||
Platform Schema2PlatformSpec `json:"platform"`
|
||||
}
|
||||
|
||||
// Schema2ListPublic is a list of platform-specific manifests.
|
||||
// This is publicly visible as c/image/manifest.Schema2List.
|
||||
// Internal users should usually use Schema2List instead.
|
||||
type Schema2ListPublic struct {
|
||||
SchemaVersion int `json:"schemaVersion"`
|
||||
MediaType string `json:"mediaType"`
|
||||
Manifests []Schema2ManifestDescriptor `json:"manifests"`
|
||||
}
|
||||
|
||||
// MIMEType returns the MIME type of this particular manifest list.
|
||||
func (list *Schema2ListPublic) MIMEType() string {
|
||||
return list.MediaType
|
||||
}
|
||||
|
||||
// Instances returns a slice of digests of the manifests that this list knows of.
|
||||
func (list *Schema2ListPublic) Instances() []digest.Digest {
|
||||
results := make([]digest.Digest, len(list.Manifests))
|
||||
for i, m := range list.Manifests {
|
||||
results[i] = m.Digest
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
// Instance returns the ListUpdate of a particular instance in the list.
|
||||
func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
|
||||
for _, manifest := range list.Manifests {
|
||||
if manifest.Digest == instanceDigest {
|
||||
return ListUpdate{
|
||||
Digest: manifest.Digest,
|
||||
Size: manifest.Size,
|
||||
MediaType: manifest.MediaType,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
return ListUpdate{}, fmt.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest)
|
||||
}
|
||||
|
||||
// UpdateInstances updates the sizes, digests, and media types of the manifests
|
||||
// which the list catalogs.
|
||||
func (index *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
|
||||
editInstances := []ListEdit{}
|
||||
for i, instance := range updates {
|
||||
editInstances = append(editInstances, ListEdit{
|
||||
UpdateOldDigest: index.Manifests[i].Digest,
|
||||
UpdateDigest: instance.Digest,
|
||||
UpdateSize: instance.Size,
|
||||
UpdateMediaType: instance.MediaType,
|
||||
ListOperation: ListOpUpdate})
|
||||
}
|
||||
return index.editInstances(editInstances)
|
||||
}
|
||||
|
||||
func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
|
||||
addedEntries := []Schema2ManifestDescriptor{}
|
||||
for i, editInstance := range editInstances {
|
||||
switch editInstance.ListOperation {
|
||||
case ListOpUpdate:
|
||||
if err := editInstance.UpdateOldDigest.Validate(); err != nil {
|
||||
return fmt.Errorf("Schema2List.EditInstances: Attempting to update %s which is an invalid digest: %w", editInstance.UpdateOldDigest, err)
|
||||
}
|
||||
if err := editInstance.UpdateDigest.Validate(); err != nil {
|
||||
return fmt.Errorf("Schema2List.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err)
|
||||
}
|
||||
targetIndex := slices.IndexFunc(index.Manifests, func(m Schema2ManifestDescriptor) bool {
|
||||
return m.Digest == editInstance.UpdateOldDigest
|
||||
})
|
||||
if targetIndex == -1 {
|
||||
return fmt.Errorf("Schema2List.EditInstances: digest %s not found", editInstance.UpdateOldDigest)
|
||||
}
|
||||
index.Manifests[targetIndex].Digest = editInstance.UpdateDigest
|
||||
if editInstance.UpdateSize < 0 {
|
||||
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize)
|
||||
}
|
||||
index.Manifests[targetIndex].Size = editInstance.UpdateSize
|
||||
if editInstance.UpdateMediaType == "" {
|
||||
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType)
|
||||
}
|
||||
index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
|
||||
case ListOpAdd:
|
||||
addInstance := Schema2ManifestDescriptor{
|
||||
Schema2Descriptor{Digest: editInstance.AddDigest, Size: editInstance.AddSize, MediaType: editInstance.AddMediaType},
|
||||
Schema2PlatformSpec{
|
||||
OS: editInstance.AddPlatform.OS,
|
||||
Architecture: editInstance.AddPlatform.Architecture,
|
||||
OSVersion: editInstance.AddPlatform.OSVersion,
|
||||
OSFeatures: editInstance.AddPlatform.OSFeatures,
|
||||
Variant: editInstance.AddPlatform.Variant,
|
||||
},
|
||||
}
|
||||
addedEntries = append(addedEntries, addInstance)
|
||||
default:
|
||||
return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation)
|
||||
}
|
||||
}
|
||||
if len(addedEntries) != 0 {
|
||||
index.Manifests = append(index.Manifests, addedEntries...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (index *Schema2List) EditInstances(editInstances []ListEdit) error {
|
||||
return index.editInstances(editInstances)
|
||||
}
|
||||
|
||||
func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
|
||||
// ChooseInstanceByCompression is same as ChooseInstance for schema2 manifest list.
|
||||
return list.ChooseInstance(ctx)
|
||||
}
|
||||
|
||||
// ChooseInstance parses blob as a schema2 manifest list, and returns the digest
|
||||
// of the image which is appropriate for the current environment.
|
||||
func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
|
||||
wantedPlatforms, err := platform.WantedPlatforms(ctx)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
|
||||
}
|
||||
for _, wantedPlatform := range wantedPlatforms {
|
||||
for _, d := range list.Manifests {
|
||||
imagePlatform := imgspecv1.Platform{
|
||||
Architecture: d.Platform.Architecture,
|
||||
OS: d.Platform.OS,
|
||||
OSVersion: d.Platform.OSVersion,
|
||||
OSFeatures: slices.Clone(d.Platform.OSFeatures),
|
||||
Variant: d.Platform.Variant,
|
||||
}
|
||||
if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
|
||||
return d.Digest, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
|
||||
}
|
||||
|
||||
// Serialize returns the list in a blob format.
|
||||
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
|
||||
func (list *Schema2ListPublic) Serialize() ([]byte, error) {
|
||||
buf, err := json.Marshal(list)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshaling Schema2List %#v: %w", list, err)
|
||||
}
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// Schema2ListPublicFromComponents creates a Schema2 manifest list instance from the
|
||||
// supplied data.
|
||||
// This is publicly visible as c/image/manifest.Schema2ListFromComponents.
|
||||
func Schema2ListPublicFromComponents(components []Schema2ManifestDescriptor) *Schema2ListPublic {
|
||||
list := Schema2ListPublic{
|
||||
SchemaVersion: 2,
|
||||
MediaType: DockerV2ListMediaType,
|
||||
Manifests: make([]Schema2ManifestDescriptor, len(components)),
|
||||
}
|
||||
for i, component := range components {
|
||||
m := Schema2ManifestDescriptor{
|
||||
Schema2Descriptor{
|
||||
MediaType: component.MediaType,
|
||||
Size: component.Size,
|
||||
Digest: component.Digest,
|
||||
URLs: slices.Clone(component.URLs),
|
||||
},
|
||||
Schema2PlatformSpec{
|
||||
Architecture: component.Platform.Architecture,
|
||||
OS: component.Platform.OS,
|
||||
OSVersion: component.Platform.OSVersion,
|
||||
OSFeatures: slices.Clone(component.Platform.OSFeatures),
|
||||
Variant: component.Platform.Variant,
|
||||
Features: slices.Clone(component.Platform.Features),
|
||||
},
|
||||
}
|
||||
list.Manifests[i] = m
|
||||
}
|
||||
return &list
|
||||
}
|
||||
|
||||
// Schema2ListPublicClone creates a deep copy of the passed-in list.
|
||||
// This is publicly visible as c/image/manifest.Schema2ListClone.
|
||||
func Schema2ListPublicClone(list *Schema2ListPublic) *Schema2ListPublic {
|
||||
return Schema2ListPublicFromComponents(list.Manifests)
|
||||
}
|
||||
|
||||
// ToOCI1Index returns the list encoded as an OCI1 index.
|
||||
func (list *Schema2ListPublic) ToOCI1Index() (*OCI1IndexPublic, error) {
|
||||
components := make([]imgspecv1.Descriptor, 0, len(list.Manifests))
|
||||
for _, manifest := range list.Manifests {
|
||||
converted := imgspecv1.Descriptor{
|
||||
MediaType: manifest.MediaType,
|
||||
Size: manifest.Size,
|
||||
Digest: manifest.Digest,
|
||||
URLs: slices.Clone(manifest.URLs),
|
||||
Platform: &imgspecv1.Platform{
|
||||
OS: manifest.Platform.OS,
|
||||
Architecture: manifest.Platform.Architecture,
|
||||
OSFeatures: slices.Clone(manifest.Platform.OSFeatures),
|
||||
OSVersion: manifest.Platform.OSVersion,
|
||||
Variant: manifest.Platform.Variant,
|
||||
},
|
||||
}
|
||||
components = append(components, converted)
|
||||
}
|
||||
oci := OCI1IndexPublicFromComponents(components, nil)
|
||||
return oci, nil
|
||||
}
|
||||
|
||||
// ToSchema2List returns the list encoded as a Schema2 list.
func (list *Schema2ListPublic) ToSchema2List() (*Schema2ListPublic, error) {
	// The list is already in Schema2 format; the error result is always nil here.
	return Schema2ListPublicClone(list), nil
}
|
||||
|
||||
// Schema2ListPublicFromManifest creates a Schema2 manifest list instance from marshalled
|
||||
// JSON, presumably generated by encoding a Schema2 manifest list.
|
||||
// This is publicly visible as c/image/manifest.Schema2ListFromManifest.
|
||||
func Schema2ListPublicFromManifest(manifest []byte) (*Schema2ListPublic, error) {
|
||||
list := Schema2ListPublic{
|
||||
Manifests: []Schema2ManifestDescriptor{},
|
||||
}
|
||||
if err := json.Unmarshal(manifest, &list); err != nil {
|
||||
return nil, fmt.Errorf("unmarshaling Schema2List %q: %w", string(manifest), err)
|
||||
}
|
||||
if err := ValidateUnambiguousManifestFormat(manifest, DockerV2ListMediaType,
|
||||
AllowedFieldManifests); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &list, nil
|
||||
}
|
||||
|
||||
// Clone returns a deep copy of this list and its contents.
func (list *Schema2ListPublic) Clone() ListPublic {
	return Schema2ListPublicClone(list)
}
|
||||
|
||||
// ConvertToMIMEType converts the passed-in manifest list to a manifest
|
||||
// list of the specified type.
|
||||
func (list *Schema2ListPublic) ConvertToMIMEType(manifestMIMEType string) (ListPublic, error) {
|
||||
switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
|
||||
case DockerV2ListMediaType:
|
||||
return list.Clone(), nil
|
||||
case imgspecv1.MediaTypeImageIndex:
|
||||
return list.ToOCI1Index()
|
||||
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
|
||||
return nil, fmt.Errorf("Can not convert manifest list to MIME type %q, which is not a list type", manifestMIMEType)
|
||||
default:
|
||||
// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
|
||||
return nil, fmt.Errorf("Unimplemented manifest list MIME type %s", manifestMIMEType)
|
||||
}
|
||||
}
|
||||
|
||||
// Schema2List is a list of platform-specific manifests.
type Schema2List struct {
	Schema2ListPublic
}

// schema2ListFromPublic wraps a public Schema2ListPublic into the internal Schema2List type.
func schema2ListFromPublic(public *Schema2ListPublic) *Schema2List {
	return &Schema2List{*public}
}

// CloneInternal returns a deep copy of this list and its contents, typed as the internal List.
func (index *Schema2List) CloneInternal() List {
	return schema2ListFromPublic(Schema2ListPublicClone(&index.Schema2ListPublic))
}

// Clone returns a deep copy of this list and its contents.
func (index *Schema2List) Clone() ListPublic {
	return index.CloneInternal()
}
|
||||
|
||||
// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled
|
||||
// JSON, presumably generated by encoding a Schema2 manifest list.
|
||||
func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) {
|
||||
public, err := Schema2ListPublicFromManifest(manifest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return schema2ListFromPublic(public), nil
|
||||
}
|
||||
8
vendor/github.com/containers/image/v5/internal/manifest/errors.go
generated
vendored
8
vendor/github.com/containers/image/v5/internal/manifest/errors.go
generated
vendored
|
|
@ -2,6 +2,10 @@ package manifest
|
|||
|
||||
import "fmt"
|
||||
|
||||
// FIXME: This is a duplicate of c/image/manifestDockerV2Schema2ConfigMediaType.
|
||||
// Deduplicate that, depending on outcome of https://github.com/containers/image/pull/1791 .
|
||||
const dockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
|
||||
|
||||
// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
|
||||
// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact)
|
||||
//
|
||||
|
|
@ -28,5 +32,9 @@ func NewNonImageArtifactError(mimeType string) error {
|
|||
}
|
||||
|
||||
func (e NonImageArtifactError) Error() string {
|
||||
// Special-case these invalid mixed images, which show up from time to time:
|
||||
if e.mimeType == dockerV2Schema2ConfigMediaType {
|
||||
return fmt.Sprintf("invalid mixed OCI image with Docker v2s2 config (%q)", e.mimeType)
|
||||
}
|
||||
return fmt.Sprintf("unsupported image-specific operation on artifact with type %q", e.mimeType)
|
||||
}
|
||||
|
|
|
|||
125
vendor/github.com/containers/image/v5/internal/manifest/list.go
generated
vendored
Normal file
125
vendor/github.com/containers/image/v5/internal/manifest/list.go
generated
vendored
Normal file
|
|
@ -0,0 +1,125 @@
|
|||
package manifest
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
compression "github.com/containers/image/v5/pkg/compression/types"
|
||||
"github.com/containers/image/v5/types"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// ListPublic is a subset of List which is a part of the public API;
// so no methods can be added, removed or changed.
//
// Internal users should usually use List instead.
type ListPublic interface {
	// MIMEType returns the MIME type of this particular manifest list.
	MIMEType() string

	// Instances returns a list of the manifests that this list knows of, other than its own.
	Instances() []digest.Digest

	// UpdateInstances updates information about the list's instances. The length of the
	// passed-in slice must match the length of the list of instances which the list already
	// contains, and every field must be specified.
	UpdateInstances([]ListUpdate) error

	// Instance returns the size and MIME type of a particular instance in the list.
	Instance(digest.Digest) (ListUpdate, error)

	// ChooseInstance selects which manifest is most appropriate for the platform described by the
	// SystemContext, or for the current platform if the SystemContext doesn't specify any details.
	ChooseInstance(ctx *types.SystemContext) (digest.Digest, error)

	// Serialize returns the list in a blob format.
	// NOTE: Serialize() does not in general reproduce the original blob this object was loaded
	// from, even if no modifications were made!
	Serialize() ([]byte, error)

	// ConvertToMIMEType returns the list rebuilt to the specified MIME type, or an error.
	ConvertToMIMEType(mimeType string) (ListPublic, error)

	// Clone returns a deep copy of this list and its contents.
	Clone() ListPublic
}
|
||||
|
||||
// List is an interface for parsing, modifying lists of image manifests.
// Callers can either use this abstract interface without understanding the details of the formats,
// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members
// directly.
type List interface {
	ListPublic
	// CloneInternal returns a deep copy of this list and its contents.
	CloneInternal() List
	// ChooseInstanceByCompression selects which manifest is most appropriate for the platform and
	// compression described by the SystemContext (or for the current platform if the SystemContext
	// doesn't specify any details). If preferGzip is OptionalBoolTrue, gzip-compressed instances are
	// preferred; if it is OptionalBoolFalse or left OptionalBoolUndefined, the best available
	// compression is chosen.
	ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error)
	// EditInstances edits information about the list's instances. It takes a slice of ListEdit where
	// each element is responsible for either modifying or adding a new instance to the manifest.
	// The operation is selected on the basis of the configured ListOperation field.
	EditInstances([]ListEdit) error
}
|
||||
|
||||
// ListUpdate includes the fields which a List's UpdateInstances() method will modify.
// This is publicly visible as c/image/manifest.ListUpdate.
type ListUpdate struct {
	Digest    digest.Digest // Digest of the instance.
	Size      int64         // Size of the instance, in bytes.
	MediaType string        // MIME type of the instance.
}
||||
|
||||
// ListOp identifies which operation a ListEdit performs on a manifest list.
type ListOp int

const (
	listOpInvalid ListOp = iota // Zero value: no operation selected; rejected by EditInstances.
	ListOpAdd                   // Append a new instance to the list.
	ListOpUpdate                // Modify an existing instance, identified by its old digest.
)
|
||||
|
||||
// ListEdit includes the fields which a List's EditInstances() method will modify.
type ListEdit struct {
	// ListOperation selects which of the Update*/Add* field groups below applies.
	ListOperation ListOp

	// Used if Op == ListOpUpdate (basically the previous UpdateInstances). All fields must be set.
	UpdateOldDigest digest.Digest
	UpdateDigest    digest.Digest
	UpdateSize      int64
	UpdateMediaType string
	// UpdateAffectAnnotations == true replaces the instance's annotations with UpdateAnnotations;
	// otherwise UpdateAnnotations is merged into the existing ones.
	UpdateAffectAnnotations     bool
	UpdateAnnotations           map[string]string
	UpdateCompressionAlgorithms []compression.Algorithm

	// Used if Op == ListOpAdd. All fields must be set.
	AddDigest                digest.Digest
	AddSize                  int64
	AddMediaType             string
	AddPlatform              *imgspecv1.Platform
	AddAnnotations           map[string]string
	AddCompressionAlgorithms []compression.Algorithm
}
|
||||
|
||||
// ListPublicFromBlob parses a list of manifests.
|
||||
// This is publicly visible as c/image/manifest.ListFromBlob.
|
||||
func ListPublicFromBlob(manifest []byte, manifestMIMEType string) (ListPublic, error) {
|
||||
list, err := ListFromBlob(manifest, manifestMIMEType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return list, nil
|
||||
}
|
||||
|
||||
// ListFromBlob parses a list of manifests.
|
||||
func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) {
|
||||
normalized := NormalizedMIMEType(manifestMIMEType)
|
||||
switch normalized {
|
||||
case DockerV2ListMediaType:
|
||||
return Schema2ListFromManifest(manifest)
|
||||
case imgspecv1.MediaTypeImageIndex:
|
||||
return OCI1IndexFromManifest(manifest)
|
||||
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
|
||||
return nil, fmt.Errorf("Treating single images as manifest lists is not implemented")
|
||||
}
|
||||
return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized)
|
||||
}
|
||||
167
vendor/github.com/containers/image/v5/internal/manifest/manifest.go
generated
vendored
Normal file
167
vendor/github.com/containers/image/v5/internal/manifest/manifest.go
generated
vendored
Normal file
|
|
@ -0,0 +1,167 @@
|
|||
package manifest
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/containers/libtrust"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// FIXME: Should we just use docker/distribution and docker/docker implementations directly?
|
||||
|
||||
// FIXME(runcom, mitr): should we have a mediatype pkg??
|
||||
const (
	// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
	DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
	// DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
	DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
	// DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
	DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
	// DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
	DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
	// DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
	DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
	// DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers.
	DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar"
	// DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
	DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
	// DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
	DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar"
	// DockerV2Schema2ForeignLayerMediaTypeGzip is the MIME type used for gzipped schema 2 foreign layers.
	DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
)
|
||||
|
||||
// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
// but we may not have such metadata available (e.g. when the manifest is a local file).
// This is publicly visible as c/image/manifest.GuessMIMEType.
func GuessMIMEType(manifest []byte) string {
	// A subset of manifest fields; the rest is silently ignored by json.Unmarshal.
	// Also docker/distribution/manifest.Versioned.
	meta := struct {
		MediaType     string `json:"mediaType"`
		SchemaVersion int    `json:"schemaVersion"`
		Signatures    any    `json:"signatures"`
	}{}
	if err := json.Unmarshal(manifest, &meta); err != nil {
		return ""
	}

	// Trust an explicit top-level mediaType when it is one we recognize.
	switch meta.MediaType {
	case DockerV2Schema2MediaType, DockerV2ListMediaType,
		imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: // A recognized type.
		return meta.MediaType
	}
	// this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest.
	switch meta.SchemaVersion {
	case 1:
		if meta.Signatures != nil {
			return DockerV2Schema1SignedMediaType
		}
		return DockerV2Schema1MediaType
	case 2:
		// Best effort to understand if this is an OCI image since mediaType
		// wasn't in the manifest for OCI image-spec < 1.0.2.
		// For docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
		ociMan := struct {
			Config struct {
				MediaType string `json:"mediaType"`
			} `json:"config"`
		}{}
		if err := json.Unmarshal(manifest, &ociMan); err != nil {
			return ""
		}
		switch ociMan.Config.MediaType {
		case imgspecv1.MediaTypeImageConfig:
			return imgspecv1.MediaTypeImageManifest
		case DockerV2Schema2ConfigMediaType:
			// This case should not happen since a Docker image
			// must declare a top-level media type and
			// `meta.MediaType` has already been checked.
			return DockerV2Schema2MediaType
		}
		// Maybe an image index or an OCI artifact.
		ociIndex := struct {
			Manifests []imgspecv1.Descriptor `json:"manifests"`
		}{}
		if err := json.Unmarshal(manifest, &ociIndex); err != nil {
			return ""
		}
		if len(ociIndex.Manifests) != 0 {
			if ociMan.Config.MediaType == "" {
				return imgspecv1.MediaTypeImageIndex
			}
			// FIXME: this is mixing media types of manifests and configs.
			return ociMan.Config.MediaType
		}
		// It's most likely an OCI artifact with a custom config media
		// type which is not (and cannot) be covered by the media-type
		// checks above.
		return imgspecv1.MediaTypeImageManifest
	}
	return ""
}
|
||||
|
||||
// Digest returns the a digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
|
||||
// This is publicly visible as c/image/manifest.Digest.
|
||||
func Digest(manifest []byte) (digest.Digest, error) {
|
||||
if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType {
|
||||
sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
manifest, err = sig.Payload()
|
||||
if err != nil {
|
||||
// Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string
|
||||
// that libtrust itself has josebase64UrlEncode()d
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
return digest.FromBytes(manifest), nil
|
||||
}
|
||||
|
||||
// MatchesDigest returns true iff the manifest matches expectedDigest.
|
||||
// Error may be set if this returns false.
|
||||
// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified,
|
||||
// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob.
|
||||
// This is publicly visible as c/image/manifest.MatchesDigest.
|
||||
func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) {
|
||||
// This should eventually support various digest types.
|
||||
actualDigest, err := Digest(manifest)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return expectedDigest == actualDigest, nil
|
||||
}
|
||||
|
||||
// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
// centralizing various workarounds.
// This is publicly visible as c/image/manifest.NormalizedMIMEType.
func NormalizedMIMEType(input string) string {
	switch input {
	// "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
	// This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
	// need to happen within the ImageSource.
	case "application/json":
		return DockerV2Schema1SignedMediaType
	case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType,
		imgspecv1.MediaTypeImageManifest,
		imgspecv1.MediaTypeImageIndex,
		DockerV2Schema2MediaType,
		DockerV2ListMediaType:
		return input
	default:
		// If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
		// to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
		// and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
		//
		// Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag.
		// This makes no real sense, but it happens because requests for manifests are redirected to a content distribution
		// network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
		return DockerV2Schema1SignedMediaType
	}
}
|
||||
403
vendor/github.com/containers/image/v5/internal/manifest/oci_index.go
generated
vendored
Normal file
403
vendor/github.com/containers/image/v5/internal/manifest/oci_index.go
generated
vendored
Normal file
|
|
@ -0,0 +1,403 @@
|
|||
package manifest
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"runtime"
|
||||
|
||||
platform "github.com/containers/image/v5/internal/pkg/platform"
|
||||
compression "github.com/containers/image/v5/pkg/compression/types"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
imgspec "github.com/opencontainers/image-spec/specs-go"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
const (
	// OCI1InstanceAnnotationCompressionZSTD is an annotation name that can be placed on a manifest descriptor in an OCI index.
	// The value of the annotation must be the string "true".
	// If this annotation is present on a manifest, consuming that image instance requires support for Zstd compression.
	// That also suggests that this instance benefits from Zstd compression, so it can be preferred by compatible consumers
	// over instances that use gzip, depending on their local policy.
	OCI1InstanceAnnotationCompressionZSTD = "io.github.containers.compression.zstd"
	// OCI1InstanceAnnotationCompressionZSTDValue is the only valid value of the
	// OCI1InstanceAnnotationCompressionZSTD annotation.
	OCI1InstanceAnnotationCompressionZSTDValue = "true"
)
|
||||
|
||||
// OCI1IndexPublic is just an alias for the OCI index type, but one which we can
// provide methods for.
// This is publicly visible as c/image/manifest.OCI1Index
// Internal users should usually use OCI1Index instead.
type OCI1IndexPublic struct {
	imgspecv1.Index
}
|
||||
|
||||
// MIMEType returns the MIME type of this particular manifest index.
func (index *OCI1IndexPublic) MIMEType() string {
	// OCI1IndexPublic always represents an OCI image index, so the type is constant.
	return imgspecv1.MediaTypeImageIndex
}
|
||||
|
||||
// Instances returns a slice of digests of the manifests that this index knows of.
|
||||
func (index *OCI1IndexPublic) Instances() []digest.Digest {
|
||||
results := make([]digest.Digest, len(index.Manifests))
|
||||
for i, m := range index.Manifests {
|
||||
results[i] = m.Digest
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
// Instance returns the ListUpdate of a particular instance in the index.
|
||||
func (index *OCI1IndexPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
|
||||
for _, manifest := range index.Manifests {
|
||||
if manifest.Digest == instanceDigest {
|
||||
return ListUpdate{
|
||||
Digest: manifest.Digest,
|
||||
Size: manifest.Size,
|
||||
MediaType: manifest.MediaType,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
return ListUpdate{}, fmt.Errorf("unable to find instance %s in OCI1Index", instanceDigest)
|
||||
}
|
||||
|
||||
// UpdateInstances updates the sizes, digests, and media types of the manifests
|
||||
// which the list catalogs.
|
||||
func (index *OCI1IndexPublic) UpdateInstances(updates []ListUpdate) error {
|
||||
editInstances := []ListEdit{}
|
||||
for i, instance := range updates {
|
||||
editInstances = append(editInstances, ListEdit{
|
||||
UpdateOldDigest: index.Manifests[i].Digest,
|
||||
UpdateDigest: instance.Digest,
|
||||
UpdateSize: instance.Size,
|
||||
UpdateMediaType: instance.MediaType,
|
||||
ListOperation: ListOpUpdate})
|
||||
}
|
||||
return index.editInstances(editInstances)
|
||||
}
|
||||
|
||||
func addCompressionAnnotations(compressionAlgorithms []compression.Algorithm, annotationsMap map[string]string) {
|
||||
// TODO: This should also delete the algorithm if map already contains an algorithm and compressionAlgorithm
|
||||
// list has a different algorithm. To do that, we would need to modify the callers to always provide a reliable
|
||||
// and full compressionAlghorithms list.
|
||||
for _, algo := range compressionAlgorithms {
|
||||
switch algo.Name() {
|
||||
case compression.ZstdAlgorithmName:
|
||||
annotationsMap[OCI1InstanceAnnotationCompressionZSTD] = OCI1InstanceAnnotationCompressionZSTDValue
|
||||
default:
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error {
|
||||
addedEntries := []imgspecv1.Descriptor{}
|
||||
updatedAnnotations := false
|
||||
for i, editInstance := range editInstances {
|
||||
switch editInstance.ListOperation {
|
||||
case ListOpUpdate:
|
||||
if err := editInstance.UpdateOldDigest.Validate(); err != nil {
|
||||
return fmt.Errorf("OCI1Index.EditInstances: Attempting to update %s which is an invalid digest: %w", editInstance.UpdateOldDigest, err)
|
||||
}
|
||||
if err := editInstance.UpdateDigest.Validate(); err != nil {
|
||||
return fmt.Errorf("OCI1Index.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err)
|
||||
}
|
||||
targetIndex := slices.IndexFunc(index.Manifests, func(m imgspecv1.Descriptor) bool {
|
||||
return m.Digest == editInstance.UpdateOldDigest
|
||||
})
|
||||
if targetIndex == -1 {
|
||||
return fmt.Errorf("OCI1Index.EditInstances: digest %s not found", editInstance.UpdateOldDigest)
|
||||
}
|
||||
index.Manifests[targetIndex].Digest = editInstance.UpdateDigest
|
||||
if editInstance.UpdateSize < 0 {
|
||||
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize)
|
||||
}
|
||||
index.Manifests[targetIndex].Size = editInstance.UpdateSize
|
||||
if editInstance.UpdateMediaType == "" {
|
||||
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType)
|
||||
}
|
||||
index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
|
||||
if editInstance.UpdateAnnotations != nil {
|
||||
updatedAnnotations = true
|
||||
if editInstance.UpdateAffectAnnotations {
|
||||
index.Manifests[targetIndex].Annotations = maps.Clone(editInstance.UpdateAnnotations)
|
||||
} else {
|
||||
if index.Manifests[targetIndex].Annotations == nil {
|
||||
index.Manifests[targetIndex].Annotations = map[string]string{}
|
||||
}
|
||||
maps.Copy(index.Manifests[targetIndex].Annotations, editInstance.UpdateAnnotations)
|
||||
}
|
||||
}
|
||||
addCompressionAnnotations(editInstance.UpdateCompressionAlgorithms, index.Manifests[targetIndex].Annotations)
|
||||
case ListOpAdd:
|
||||
annotations := map[string]string{}
|
||||
if editInstance.AddAnnotations != nil {
|
||||
annotations = maps.Clone(editInstance.AddAnnotations)
|
||||
}
|
||||
addCompressionAnnotations(editInstance.AddCompressionAlgorithms, annotations)
|
||||
addedEntries = append(addedEntries, imgspecv1.Descriptor{
|
||||
MediaType: editInstance.AddMediaType,
|
||||
Size: editInstance.AddSize,
|
||||
Digest: editInstance.AddDigest,
|
||||
Platform: editInstance.AddPlatform,
|
||||
Annotations: annotations})
|
||||
default:
|
||||
return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation)
|
||||
}
|
||||
}
|
||||
if len(addedEntries) != 0 {
|
||||
index.Manifests = append(index.Manifests, addedEntries...)
|
||||
}
|
||||
if len(addedEntries) != 0 || updatedAnnotations {
|
||||
slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) bool {
|
||||
return !instanceIsZstd(a) && instanceIsZstd(b)
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// EditInstances applies the given edits (updates and additions) to the index in place.
func (index *OCI1Index) EditInstances(editInstances []ListEdit) error {
	return index.editInstances(editInstances)
}
|
||||
|
||||
// instanceIsZstd returns true if instance is a zstd instance otherwise false.
|
||||
func instanceIsZstd(manifest imgspecv1.Descriptor) bool {
|
||||
if value, ok := manifest.Annotations[OCI1InstanceAnnotationCompressionZSTD]; ok && value == "true" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// instanceCandidate describes one manifest-list entry being considered by chooseInstance.
type instanceCandidate struct {
	platformIndex    int           // Index of the candidate in platform.WantedPlatforms: lower numbers are preferred; or math.MaxInt if the candidate doesn’t have a platform
	isZstd           bool          // tells if particular instance is a zstd instance
	manifestPosition int           // A zero-based index of the instance in the manifest list
	digest           digest.Digest // Instance digest
}

// isPreferredOver reports whether ic should be chosen over other. Candidates are ranked first by
// platform match quality, then by compression (preferGzip inverts the zstd preference), and
// finally by position in the manifest list.
func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip bool) bool {
	switch {
	case ic.platformIndex != other.platformIndex:
		// A better (lower-index) platform match always wins.
		return ic.platformIndex < other.platformIndex
	case ic.isZstd != other.isZstd:
		// Equal platforms: prefer zstd unless the caller asked for gzip.
		if !preferGzip {
			return ic.isZstd
		} else {
			return !ic.isZstd
		}
	case ic.manifestPosition != other.manifestPosition:
		// All else equal, earlier entries in the list win.
		return ic.manifestPosition < other.manifestPosition
	}
	panic("internal error: invalid comparision between two candidates") // This should not be reachable because in all calls we make, the two candidates differ at least in manifestPosition.
}
|
||||
|
||||
// chooseInstance is a private equivalent to ChooseInstanceByCompression,
// shared by ChooseInstance and ChooseInstanceByCompression.
func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
	// Only an explicit OptionalBoolTrue requests gzip; OptionalBoolFalse and
	// OptionalBoolUndefined both mean "prefer the best available compression" (zstd).
	didPreferGzip := false
	if preferGzip == types.OptionalBoolTrue {
		didPreferGzip = true
	}
	wantedPlatforms, err := platform.WantedPlatforms(ctx)
	if err != nil {
		return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
	}
	var bestMatch *instanceCandidate
	bestMatch = nil
	for manifestIndex, d := range index.Manifests {
		// platformIndex starts at math.MaxInt: an instance with no Platform field ranks below any
		// instance whose platform actually matched.
		candidate := instanceCandidate{platformIndex: math.MaxInt, manifestPosition: manifestIndex, isZstd: instanceIsZstd(d), digest: d.Digest}
		if d.Platform != nil {
			// Convert the descriptor's platform into the comparison type, deep-copying OSFeatures.
			imagePlatform := imgspecv1.Platform{
				Architecture: d.Platform.Architecture,
				OS:           d.Platform.OS,
				OSVersion:    d.Platform.OSVersion,
				OSFeatures:   slices.Clone(d.Platform.OSFeatures),
				Variant:      d.Platform.Variant,
			}
			platformIndex := slices.IndexFunc(wantedPlatforms, func(wantedPlatform imgspecv1.Platform) bool {
				return platform.MatchesPlatform(imagePlatform, wantedPlatform)
			})
			if platformIndex == -1 {
				// An instance that declares a platform we don't want is skipped entirely.
				continue
			}
			candidate.platformIndex = platformIndex
		}
		if bestMatch == nil || candidate.isPreferredOver(bestMatch, didPreferGzip) {
			bestMatch = &candidate
		}
	}
	if bestMatch != nil {
		return bestMatch.digest, nil
	}
	return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
}
|
||||
|
||||
// ChooseInstanceByCompression selects which manifest is most appropriate for the platform and
// compression described by the SystemContext; preferGzip == OptionalBoolTrue prefers gzip instances.
func (index *OCI1Index) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
	return index.chooseInstance(ctx, preferGzip)
}
|
||||
|
||||
// ChooseInstance parses blob as an oci v1 manifest index, and returns the digest
// of the image which is appropriate for the current environment.
func (index *OCI1IndexPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
	// preferGzip is OptionalBoolFalse here, so the best available compression is chosen.
	return index.chooseInstance(ctx, types.OptionalBoolFalse)
}
|
||||
|
||||
// Serialize returns the index in a blob format.
|
||||
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
|
||||
func (index *OCI1IndexPublic) Serialize() ([]byte, error) {
|
||||
buf, err := json.Marshal(index)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshaling OCI1Index %#v: %w", index, err)
|
||||
}
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// OCI1IndexPublicFromComponents creates an OCI1 image index instance from the
|
||||
// supplied data.
|
||||
// This is publicly visible as c/image/manifest.OCI1IndexFromComponents.
|
||||
func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1IndexPublic {
|
||||
index := OCI1IndexPublic{
|
||||
imgspecv1.Index{
|
||||
Versioned: imgspec.Versioned{SchemaVersion: 2},
|
||||
MediaType: imgspecv1.MediaTypeImageIndex,
|
||||
Manifests: make([]imgspecv1.Descriptor, len(components)),
|
||||
Annotations: maps.Clone(annotations),
|
||||
},
|
||||
}
|
||||
for i, component := range components {
|
||||
var platform *imgspecv1.Platform
|
||||
if component.Platform != nil {
|
||||
platform = &imgspecv1.Platform{
|
||||
Architecture: component.Platform.Architecture,
|
||||
OS: component.Platform.OS,
|
||||
OSVersion: component.Platform.OSVersion,
|
||||
OSFeatures: slices.Clone(component.Platform.OSFeatures),
|
||||
Variant: component.Platform.Variant,
|
||||
}
|
||||
}
|
||||
m := imgspecv1.Descriptor{
|
||||
MediaType: component.MediaType,
|
||||
Size: component.Size,
|
||||
Digest: component.Digest,
|
||||
URLs: slices.Clone(component.URLs),
|
||||
Annotations: maps.Clone(component.Annotations),
|
||||
Platform: platform,
|
||||
}
|
||||
index.Manifests[i] = m
|
||||
}
|
||||
return &index
|
||||
}
|
||||
|
||||
// OCI1IndexPublicClone creates a deep copy of the passed-in index.
// This is publicly visible as c/image/manifest.OCI1IndexClone.
func OCI1IndexPublicClone(index *OCI1IndexPublic) *OCI1IndexPublic {
	// OCI1IndexPublicFromComponents deep-copies the manifests and annotations.
	return OCI1IndexPublicFromComponents(index.Manifests, index.Annotations)
}
|
||||
|
||||
// ToOCI1Index returns the index encoded as an OCI1 index.
// The index is already in OCI1 format, so this only makes a deep copy; it never fails.
func (index *OCI1IndexPublic) ToOCI1Index() (*OCI1IndexPublic, error) {
	return OCI1IndexPublicClone(index), nil
}
|
||||
|
||||
// ToSchema2List returns the index encoded as a Schema2 list.
|
||||
func (index *OCI1IndexPublic) ToSchema2List() (*Schema2ListPublic, error) {
|
||||
components := make([]Schema2ManifestDescriptor, 0, len(index.Manifests))
|
||||
for _, manifest := range index.Manifests {
|
||||
platform := manifest.Platform
|
||||
if platform == nil {
|
||||
platform = &imgspecv1.Platform{
|
||||
OS: runtime.GOOS,
|
||||
Architecture: runtime.GOARCH,
|
||||
}
|
||||
}
|
||||
converted := Schema2ManifestDescriptor{
|
||||
Schema2Descriptor{
|
||||
MediaType: manifest.MediaType,
|
||||
Size: manifest.Size,
|
||||
Digest: manifest.Digest,
|
||||
URLs: slices.Clone(manifest.URLs),
|
||||
},
|
||||
Schema2PlatformSpec{
|
||||
OS: platform.OS,
|
||||
Architecture: platform.Architecture,
|
||||
OSFeatures: slices.Clone(platform.OSFeatures),
|
||||
OSVersion: platform.OSVersion,
|
||||
Variant: platform.Variant,
|
||||
},
|
||||
}
|
||||
components = append(components, converted)
|
||||
}
|
||||
s2 := Schema2ListPublicFromComponents(components)
|
||||
return s2, nil
|
||||
}
|
||||
|
||||
// OCI1IndexPublicFromManifest creates an OCI1 manifest index instance from marshalled
|
||||
// JSON, presumably generated by encoding a OCI1 manifest index.
|
||||
// This is publicly visible as c/image/manifest.OCI1IndexFromManifest.
|
||||
func OCI1IndexPublicFromManifest(manifest []byte) (*OCI1IndexPublic, error) {
|
||||
index := OCI1IndexPublic{
|
||||
Index: imgspecv1.Index{
|
||||
Versioned: imgspec.Versioned{SchemaVersion: 2},
|
||||
MediaType: imgspecv1.MediaTypeImageIndex,
|
||||
Manifests: []imgspecv1.Descriptor{},
|
||||
Annotations: make(map[string]string),
|
||||
},
|
||||
}
|
||||
if err := json.Unmarshal(manifest, &index); err != nil {
|
||||
return nil, fmt.Errorf("unmarshaling OCI1Index %q: %w", string(manifest), err)
|
||||
}
|
||||
if err := ValidateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex,
|
||||
AllowedFieldManifests); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &index, nil
|
||||
}
|
||||
|
||||
// Clone returns a deep copy of this list and its contents.
func (index *OCI1IndexPublic) Clone() ListPublic {
	return OCI1IndexPublicClone(index)
}
|
||||
|
||||
// ConvertToMIMEType converts the passed-in image index to a manifest list of
|
||||
// the specified type.
|
||||
func (index *OCI1IndexPublic) ConvertToMIMEType(manifestMIMEType string) (ListPublic, error) {
|
||||
switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
|
||||
case DockerV2ListMediaType:
|
||||
return index.ToSchema2List()
|
||||
case imgspecv1.MediaTypeImageIndex:
|
||||
return index.Clone(), nil
|
||||
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
|
||||
return nil, fmt.Errorf("Can not convert image index to MIME type %q, which is not a list type", manifestMIMEType)
|
||||
default:
|
||||
// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
|
||||
return nil, fmt.Errorf("Unimplemented manifest MIME type %s", manifestMIMEType)
|
||||
}
|
||||
}
|
||||
|
||||
// OCI1Index is the internal (non-public) counterpart of OCI1IndexPublic,
// embedding it to also satisfy the internal List interface.
type OCI1Index struct {
	OCI1IndexPublic
}
|
||||
|
||||
// oci1IndexFromPublic wraps a public index into the internal OCI1Index type.
func oci1IndexFromPublic(public *OCI1IndexPublic) *OCI1Index {
	return &OCI1Index{*public}
}
|
||||
|
||||
// CloneInternal returns a deep copy of this list and its contents, as the internal List type.
func (index *OCI1Index) CloneInternal() List {
	return oci1IndexFromPublic(OCI1IndexPublicClone(&index.OCI1IndexPublic))
}
|
||||
|
||||
// Clone returns a deep copy of this list and its contents.
func (index *OCI1Index) Clone() ListPublic {
	return index.CloneInternal()
}
|
||||
|
||||
// OCI1IndexFromManifest creates a OCI1 manifest list instance from marshalled
// JSON, presumably generated by encoding a OCI1 manifest list.
func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) {
	// Parse via the public constructor, then wrap into the internal type.
	public, err := OCI1IndexPublicFromManifest(manifest)
	if err != nil {
		return nil, err
	}
	return oci1IndexFromPublic(public), nil
}
|
||||
11
vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
generated
vendored
11
vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
generated
vendored
|
|
@ -25,6 +25,7 @@ import (
|
|||
|
||||
"github.com/containers/image/v5/types"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// For Linux, the kernel has already detected the ABI, ISA and Features.
|
||||
|
|
@ -152,13 +153,9 @@ func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
|
|||
if wantedVariant != "" {
|
||||
// If the user requested a specific variant, we'll walk down
|
||||
// the list from most to least compatible.
|
||||
if compatibility[wantedArch] != nil {
|
||||
variantOrder := compatibility[wantedArch]
|
||||
for i, v := range variantOrder {
|
||||
if wantedVariant == v {
|
||||
variants = variantOrder[i:]
|
||||
break
|
||||
}
|
||||
if variantOrder := compatibility[wantedArch]; variantOrder != nil {
|
||||
if i := slices.Index(variantOrder, wantedVariant); i != -1 {
|
||||
variants = variantOrder[i:]
|
||||
}
|
||||
}
|
||||
if variants == nil {
|
||||
|
|
|
|||
31
vendor/github.com/containers/image/v5/internal/private/private.go
generated
vendored
31
vendor/github.com/containers/image/v5/internal/private/private.go
generated
vendored
|
|
@ -7,6 +7,7 @@ import (
|
|||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/internal/blobinfocache"
|
||||
"github.com/containers/image/v5/internal/signature"
|
||||
compression "github.com/containers/image/v5/pkg/compression/types"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
|
@ -46,24 +47,22 @@ type ImageDestinationInternalOnly interface {
|
|||
// inputInfo.MediaType describes the blob format, if known.
|
||||
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
|
||||
// to any other readers for download using the supplied digest.
|
||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
||||
PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (types.BlobInfo, error)
|
||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
|
||||
PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (UploadedBlob, error)
|
||||
|
||||
// PutBlobPartial attempts to create a blob using the data that is already present
|
||||
// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
|
||||
// It is available only if SupportsPutBlobPartial().
|
||||
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
|
||||
// should fall back to PutBlobWithOptions.
|
||||
PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error)
|
||||
PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (UploadedBlob, error)
|
||||
|
||||
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
|
||||
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
|
||||
// info.Digest must not be empty.
|
||||
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
|
||||
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
|
||||
// reflected in the manifest that will be written.
|
||||
// If the blob has been successfully reused, returns (true, info, nil).
|
||||
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
|
||||
TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, types.BlobInfo, error)
|
||||
TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, ReusedBlob, error)
|
||||
|
||||
// PutSignaturesWithFormat writes a set of signatures to the destination.
|
||||
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
|
||||
|
|
@ -79,6 +78,13 @@ type ImageDestination interface {
|
|||
ImageDestinationInternalOnly
|
||||
}
|
||||
|
||||
// UploadedBlob is information about a blob written to a destination.
|
||||
// It is the subset of types.BlobInfo fields the transport is responsible for setting; all fields must be provided.
|
||||
type UploadedBlob struct {
|
||||
Digest digest.Digest
|
||||
Size int64
|
||||
}
|
||||
|
||||
// PutBlobOptions are used in PutBlobWithOptions.
|
||||
type PutBlobOptions struct {
|
||||
Cache blobinfocache.BlobInfoCache2 // Cache to optionally update with the uploaded bloblook up blob infos.
|
||||
|
|
@ -112,6 +118,17 @@ type TryReusingBlobOptions struct {
|
|||
SrcRef reference.Named // A reference to the source image that contains the input blob.
|
||||
}
|
||||
|
||||
// ReusedBlob is information about a blob reused in a destination.
|
||||
// It is the subset of types.BlobInfo fields the transport is responsible for setting.
|
||||
type ReusedBlob struct {
|
||||
Digest digest.Digest // Must be provided
|
||||
Size int64 // Must be provided
|
||||
// The following compression fields should be set when the reuse substitutes
|
||||
// a differently-compressed blob.
|
||||
CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A
|
||||
CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A
|
||||
}
|
||||
|
||||
// ImageSourceChunk is a portion of a blob.
|
||||
// This API is experimental and can be changed without bumping the major version number.
|
||||
type ImageSourceChunk struct {
|
||||
|
|
|
|||
46
vendor/github.com/containers/image/v5/internal/set/set.go
generated
vendored
Normal file
46
vendor/github.com/containers/image/v5/internal/set/set.go
generated
vendored
Normal file
|
|
@ -0,0 +1,46 @@
|
|||
package set

import "golang.org/x/exp/maps"

// FIXME:
// - This should be in a public library somewhere

// Set is an unordered collection of unique elements of a comparable type E.
// The zero value is not usable; create instances with New or NewWithValues.
type Set[E comparable] struct {
	m map[E]struct{}
}

// New returns a new, empty Set.
func New[E comparable]() *Set[E] {
	return &Set[E]{
		m: map[E]struct{}{},
	}
}

// NewWithValues returns a new Set containing the provided values.
func NewWithValues[E comparable](values ...E) *Set[E] {
	s := New[E]()
	for _, v := range values {
		s.Add(v)
	}
	return s
}

// Add inserts v into the set; adding an already-present value is a no-op.
func (s *Set[E]) Add(v E) {
	s.m[v] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
}

// Delete removes v from the set; deleting an absent value is a no-op.
func (s *Set[E]) Delete(v E) {
	delete(s.m, v)
}

// Contains reports whether v is a member of the set.
func (s *Set[E]) Contains(v E) bool {
	_, ok := s.m[v]
	return ok
}

// Empty reports whether the set contains no elements.
func (s *Set[E]) Empty() bool {
	return len(s.m) == 0
}

// Values returns the members of the set, in unspecified order.
func (s *Set[E]) Values() []E {
	return maps.Keys(s.m)
}
|
||||
17
vendor/github.com/containers/image/v5/internal/signature/signature.go
generated
vendored
17
vendor/github.com/containers/image/v5/internal/signature/signature.go
generated
vendored
|
|
@ -24,7 +24,7 @@ type Signature interface {
|
|||
blobChunk() ([]byte, error)
|
||||
}
|
||||
|
||||
// BlobChunk returns a representation of sig as a []byte, suitable for long-term storage.
|
||||
// Blob returns a representation of sig as a []byte, suitable for long-term storage.
|
||||
func Blob(sig Signature) ([]byte, error) {
|
||||
chunk, err := sig.blobChunk()
|
||||
if err != nil {
|
||||
|
|
@ -66,22 +66,20 @@ func FromBlob(blob []byte) (Signature, error) {
|
|||
// The newer format: binary 0, format name, newline, data
|
||||
case 0x00:
|
||||
blob = blob[1:]
|
||||
newline := bytes.IndexByte(blob, '\n')
|
||||
if newline == -1 {
|
||||
formatBytes, blobChunk, foundNewline := bytes.Cut(blob, []byte{'\n'})
|
||||
if !foundNewline {
|
||||
return nil, fmt.Errorf("invalid signature format, missing newline")
|
||||
}
|
||||
formatBytes := blob[:newline]
|
||||
for _, b := range formatBytes {
|
||||
if b < 32 || b >= 0x7F {
|
||||
return nil, fmt.Errorf("invalid signature format, non-ASCII byte %#x", b)
|
||||
}
|
||||
}
|
||||
blobChunk := blob[newline+1:]
|
||||
switch {
|
||||
case bytes.Equal(formatBytes, []byte(SimpleSigningFormat)):
|
||||
return SimpleSigningFromBlob(blobChunk), nil
|
||||
case bytes.Equal(formatBytes, []byte(SigstoreFormat)):
|
||||
return SigstoreFromBlobChunk(blobChunk)
|
||||
return sigstoreFromBlobChunk(blobChunk)
|
||||
default:
|
||||
return nil, fmt.Errorf("unrecognized signature format %q", string(formatBytes))
|
||||
}
|
||||
|
|
@ -102,10 +100,3 @@ func UnsupportedFormatError(sig Signature) error {
|
|||
return fmt.Errorf("unsupported, and unrecognized, signature format %q", string(formatID))
|
||||
}
|
||||
}
|
||||
|
||||
// copyByteSlice returns a guaranteed-fresh copy of a byte slice
|
||||
// Use this to make sure the underlying data is not shared and can’t be unexpectedly modified.
|
||||
func copyByteSlice(s []byte) []byte {
|
||||
res := []byte{}
|
||||
return append(res, s...)
|
||||
}
|
||||
|
|
|
|||
33
vendor/github.com/containers/image/v5/internal/signature/sigstore.go
generated
vendored
33
vendor/github.com/containers/image/v5/internal/signature/sigstore.go
generated
vendored
|
|
@ -1,12 +1,23 @@
|
|||
package signature
|
||||
|
||||
import "encoding/json"
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
const (
|
||||
// from sigstore/cosign/pkg/types.SimpleSigningMediaType
|
||||
SigstoreSignatureMIMEType = "application/vnd.dev.cosign.simplesigning.v1+json"
|
||||
// from sigstore/cosign/pkg/oci/static.SignatureAnnotationKey
|
||||
SigstoreSignatureAnnotationKey = "dev.cosignproject.cosign/signature"
|
||||
// from sigstore/cosign/pkg/oci/static.BundleAnnotationKey
|
||||
SigstoreSETAnnotationKey = "dev.sigstore.cosign/bundle"
|
||||
// from sigstore/cosign/pkg/oci/static.CertificateAnnotationKey
|
||||
SigstoreCertificateAnnotationKey = "dev.sigstore.cosign/certificate"
|
||||
// from sigstore/cosign/pkg/oci/static.ChainAnnotationKey
|
||||
SigstoreIntermediateCertificateChainAnnotationKey = "dev.sigstore.cosign/chain"
|
||||
)
|
||||
|
||||
// Sigstore is a github.com/cosign/cosign signature.
|
||||
|
|
@ -34,13 +45,13 @@ type sigstoreJSONRepresentation struct {
|
|||
func SigstoreFromComponents(untrustedMimeType string, untrustedPayload []byte, untrustedAnnotations map[string]string) Sigstore {
|
||||
return Sigstore{
|
||||
untrustedMIMEType: untrustedMimeType,
|
||||
untrustedPayload: copyByteSlice(untrustedPayload),
|
||||
untrustedAnnotations: copyStringMap(untrustedAnnotations),
|
||||
untrustedPayload: slices.Clone(untrustedPayload),
|
||||
untrustedAnnotations: maps.Clone(untrustedAnnotations),
|
||||
}
|
||||
}
|
||||
|
||||
// SigstoreFromBlobChunk converts a Sigstore signature, as returned by Sigstore.blobChunk, into a Sigstore object.
|
||||
func SigstoreFromBlobChunk(blobChunk []byte) (Sigstore, error) {
|
||||
// sigstoreFromBlobChunk converts a Sigstore signature, as returned by Sigstore.blobChunk, into a Sigstore object.
|
||||
func sigstoreFromBlobChunk(blobChunk []byte) (Sigstore, error) {
|
||||
var v sigstoreJSONRepresentation
|
||||
if err := json.Unmarshal(blobChunk, &v); err != nil {
|
||||
return Sigstore{}, err
|
||||
|
|
@ -68,17 +79,9 @@ func (s Sigstore) UntrustedMIMEType() string {
|
|||
return s.untrustedMIMEType
|
||||
}
|
||||
func (s Sigstore) UntrustedPayload() []byte {
|
||||
return copyByteSlice(s.untrustedPayload)
|
||||
return slices.Clone(s.untrustedPayload)
|
||||
}
|
||||
|
||||
func (s Sigstore) UntrustedAnnotations() map[string]string {
|
||||
return copyStringMap(s.untrustedAnnotations)
|
||||
}
|
||||
|
||||
func copyStringMap(m map[string]string) map[string]string {
|
||||
res := map[string]string{}
|
||||
for k, v := range m {
|
||||
res[k] = v
|
||||
}
|
||||
return res
|
||||
return maps.Clone(s.untrustedAnnotations)
|
||||
}
|
||||
|
|
|
|||
8
vendor/github.com/containers/image/v5/internal/signature/simple.go
generated
vendored
8
vendor/github.com/containers/image/v5/internal/signature/simple.go
generated
vendored
|
|
@ -1,5 +1,7 @@
|
|||
package signature
|
||||
|
||||
import "golang.org/x/exp/slices"
|
||||
|
||||
// SimpleSigning is a “simple signing” signature.
|
||||
type SimpleSigning struct {
|
||||
untrustedSignature []byte
|
||||
|
|
@ -8,7 +10,7 @@ type SimpleSigning struct {
|
|||
// SimpleSigningFromBlob converts a “simple signing” signature into a SimpleSigning object.
|
||||
func SimpleSigningFromBlob(blobChunk []byte) SimpleSigning {
|
||||
return SimpleSigning{
|
||||
untrustedSignature: copyByteSlice(blobChunk),
|
||||
untrustedSignature: slices.Clone(blobChunk),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -19,9 +21,9 @@ func (s SimpleSigning) FormatID() FormatID {
|
|||
// blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
|
||||
// Almost everyone should use signature.Blob() instead.
|
||||
func (s SimpleSigning) blobChunk() ([]byte, error) {
|
||||
return copyByteSlice(s.untrustedSignature), nil
|
||||
return slices.Clone(s.untrustedSignature), nil
|
||||
}
|
||||
|
||||
func (s SimpleSigning) UntrustedSignature() []byte {
|
||||
return copyByteSlice(s.untrustedSignature)
|
||||
return slices.Clone(s.untrustedSignature)
|
||||
}
|
||||
|
|
|
|||
47
vendor/github.com/containers/image/v5/internal/signer/signer.go
generated
vendored
Normal file
47
vendor/github.com/containers/image/v5/internal/signer/signer.go
generated
vendored
Normal file
|
|
@ -0,0 +1,47 @@
|
|||
package signer

import (
	"context"

	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/internal/signature"
)

// Signer is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images.
// This type is visible to external callers, so it has no public fields or methods apart from Close().
//
// The owner of a Signer must call Close() when done.
type Signer struct {
	implementation SignerImplementation
}

// NewSigner creates a public Signer from a SignerImplementation
func NewSigner(impl SignerImplementation) *Signer {
	return &Signer{implementation: impl}
}

// Close releases any resources held by the underlying SignerImplementation.
func (s *Signer) Close() error {
	return s.implementation.Close()
}

// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature.
// Alternatively, should SignImageManifest be provided a logging writer of some kind?
func ProgressMessage(signer *Signer) string {
	return signer.implementation.ProgressMessage()
}

// SignImageManifest invokes a SignerImplementation.
// This is a function, not a method, so that it can only be called by code that is allowed to import this internal subpackage.
func SignImageManifest(ctx context.Context, signer *Signer, manifest []byte, dockerReference reference.Named) (signature.Signature, error) {
	return signer.implementation.SignImageManifest(ctx, manifest, dockerReference)
}

// SignerImplementation is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images.
// This interface is distinct from Signer so that implementations can be created outside of this package.
type SignerImplementation interface {
	// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature.
	ProgressMessage() string
	// SignImageManifest creates a new signature for manifest m as dockerReference.
	SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (signature.Signature, error)
	// Close releases any resources held by the implementation.
	Close() error
}
|
||||
2
vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go
generated
vendored
2
vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go
generated
vendored
|
|
@ -11,7 +11,7 @@ import (
|
|||
// The net/http package uses a separate goroutine to upload data to a HTTP connection,
|
||||
// and it is possible for the server to return a response (typically an error) before consuming
|
||||
// the full body of the request. In that case http.Client.Do can return with an error while
|
||||
// the body is still being read — regardless of of the cancellation, if any, of http.Request.Context().
|
||||
// the body is still being read — regardless of the cancellation, if any, of http.Request.Context().
|
||||
//
|
||||
// As a result, any data used/updated by the io.Reader() provided as the request body may be
|
||||
// used/updated even after http.Client.Do returns, causing races.
|
||||
|
|
|
|||
6
vendor/github.com/containers/image/v5/internal/useragent/useragent.go
generated
vendored
Normal file
6
vendor/github.com/containers/image/v5/internal/useragent/useragent.go
generated
vendored
Normal file
|
|
@ -0,0 +1,6 @@
|
|||
package useragent

import "github.com/containers/image/v5/version"

// DefaultUserAgent is a value that should be used by User-Agent headers, unless the user specifically instructs us otherwise.
// It identifies this library and its version, e.g. "containers/5.x.y (github.com/containers/image)".
var DefaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)"
|
||||
104
vendor/github.com/containers/image/v5/manifest/common.go
generated
vendored
104
vendor/github.com/containers/image/v5/manifest/common.go
generated
vendored
|
|
@ -1,7 +1,6 @@
|
|||
package manifest
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
|
||||
|
|
@ -9,96 +8,6 @@ import (
|
|||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// dupStringSlice returns a deep copy of a slice of strings, or nil if the
|
||||
// source slice is empty.
|
||||
func dupStringSlice(list []string) []string {
|
||||
if len(list) == 0 {
|
||||
return nil
|
||||
}
|
||||
dup := make([]string, len(list))
|
||||
copy(dup, list)
|
||||
return dup
|
||||
}
|
||||
|
||||
// dupStringStringMap returns a deep copy of a map[string]string, or nil if the
|
||||
// passed-in map is nil or has no keys.
|
||||
func dupStringStringMap(m map[string]string) map[string]string {
|
||||
if len(m) == 0 {
|
||||
return nil
|
||||
}
|
||||
result := make(map[string]string)
|
||||
for k, v := range m {
|
||||
result[k] = v
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// allowedManifestFields is a bit mask of “essential” manifest fields that validateUnambiguousManifestFormat
|
||||
// can expect to be present.
|
||||
type allowedManifestFields int
|
||||
|
||||
const (
|
||||
allowedFieldConfig allowedManifestFields = 1 << iota
|
||||
allowedFieldFSLayers
|
||||
allowedFieldHistory
|
||||
allowedFieldLayers
|
||||
allowedFieldManifests
|
||||
allowedFieldFirstUnusedBit // Keep this at the end!
|
||||
)
|
||||
|
||||
// validateUnambiguousManifestFormat rejects manifests (incl. multi-arch) that look like more than
|
||||
// one kind we currently recognize, i.e. if they contain any of the known “essential” format fields
|
||||
// other than the ones the caller specifically allows.
|
||||
// expectedMIMEType is used only for diagnostics.
|
||||
// NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format
|
||||
// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous
|
||||
// data that just isn’t what was expected, as opposed to actually ambiguous data.
|
||||
func validateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string,
|
||||
allowed allowedManifestFields) error {
|
||||
if allowed >= allowedFieldFirstUnusedBit {
|
||||
return fmt.Errorf("internal error: invalid allowedManifestFields value %#v", allowed)
|
||||
}
|
||||
// Use a private type to decode, not just a map[string]interface{}, because we want
|
||||
// to also reject case-insensitive matches (which would be used by Go when really decoding
|
||||
// the manifest).
|
||||
// (It is expected that as manifest formats are added or extended over time, more fields will be added
|
||||
// here.)
|
||||
detectedFields := struct {
|
||||
Config interface{} `json:"config"`
|
||||
FSLayers interface{} `json:"fsLayers"`
|
||||
History interface{} `json:"history"`
|
||||
Layers interface{} `json:"layers"`
|
||||
Manifests interface{} `json:"manifests"`
|
||||
}{}
|
||||
if err := json.Unmarshal(manifest, &detectedFields); err != nil {
|
||||
// The caller was supposed to already validate version numbers, so this should not happen;
|
||||
// let’s not bother with making this error “nice”.
|
||||
return err
|
||||
}
|
||||
unexpected := []string{}
|
||||
// Sadly this isn’t easy to automate in Go, without reflection. So, copy&paste.
|
||||
if detectedFields.Config != nil && (allowed&allowedFieldConfig) == 0 {
|
||||
unexpected = append(unexpected, "config")
|
||||
}
|
||||
if detectedFields.FSLayers != nil && (allowed&allowedFieldFSLayers) == 0 {
|
||||
unexpected = append(unexpected, "fsLayers")
|
||||
}
|
||||
if detectedFields.History != nil && (allowed&allowedFieldHistory) == 0 {
|
||||
unexpected = append(unexpected, "history")
|
||||
}
|
||||
if detectedFields.Layers != nil && (allowed&allowedFieldLayers) == 0 {
|
||||
unexpected = append(unexpected, "layers")
|
||||
}
|
||||
if detectedFields.Manifests != nil && (allowed&allowedFieldManifests) == 0 {
|
||||
unexpected = append(unexpected, "manifests")
|
||||
}
|
||||
if len(unexpected) != 0 {
|
||||
return fmt.Errorf(`rejecting ambiguous manifest, unexpected fields %#v in supposedly %s`,
|
||||
unexpected, expectedMIMEType)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
|
||||
// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
|
||||
func layerInfosToStrings(infos []LayerInfo) []string {
|
||||
|
|
@ -228,3 +137,16 @@ func compressionVariantsRecognizeMIMEType(variantTable []compressionMIMETypeSet,
|
|||
variants := findCompressionMIMETypeSet(variantTable, mimeType)
|
||||
return variants != nil // Alternatively, this could be len(variants) > 1, but really the caller should ask about a specific algorithm.
|
||||
}
|
||||
|
||||
// imgInspectLayersFromLayerInfos converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
|
||||
// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
|
||||
func imgInspectLayersFromLayerInfos(infos []LayerInfo) []types.ImageInspectLayer {
|
||||
layers := make([]types.ImageInspectLayer, len(infos))
|
||||
for i, info := range infos {
|
||||
layers[i].MIMEType = info.MediaType
|
||||
layers[i].Digest = info.Digest
|
||||
layers[i].Size = info.Size
|
||||
layers[i].Annotations = info.Annotations
|
||||
}
|
||||
return layers
|
||||
}
|
||||
|
|
|
|||
33
vendor/github.com/containers/image/v5/manifest/docker_schema1.go
generated
vendored
33
vendor/github.com/containers/image/v5/manifest/docker_schema1.go
generated
vendored
|
|
@ -4,14 +4,17 @@ import (
|
|||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/internal/manifest"
|
||||
"github.com/containers/image/v5/internal/set"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage/pkg/regexp"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1.
|
||||
|
|
@ -53,16 +56,16 @@ type Schema1V1Compatibility struct {
|
|||
// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob.
|
||||
// (NOTE: The instance is not necessary a literal representation of the original blob,
|
||||
// layers with duplicate IDs are eliminated.)
|
||||
func Schema1FromManifest(manifest []byte) (*Schema1, error) {
|
||||
func Schema1FromManifest(manifestBlob []byte) (*Schema1, error) {
|
||||
s1 := Schema1{}
|
||||
if err := json.Unmarshal(manifest, &s1); err != nil {
|
||||
if err := json.Unmarshal(manifestBlob, &s1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if s1.SchemaVersion != 1 {
|
||||
return nil, fmt.Errorf("unsupported schema version %d", s1.SchemaVersion)
|
||||
}
|
||||
if err := validateUnambiguousManifestFormat(manifest, DockerV2Schema1SignedMediaType,
|
||||
allowedFieldFSLayers|allowedFieldHistory); err != nil {
|
||||
if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, DockerV2Schema1SignedMediaType,
|
||||
manifest.AllowedFieldFSLayers|manifest.AllowedFieldHistory); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := s1.initialize(); err != nil {
|
||||
|
|
@ -183,22 +186,22 @@ func (m *Schema1) fixManifestLayers() error {
|
|||
return errors.New("Invalid parent ID in the base layer of the image")
|
||||
}
|
||||
// check general duplicates to error instead of a deadlock
|
||||
idmap := make(map[string]struct{})
|
||||
idmap := set.New[string]()
|
||||
var lastID string
|
||||
for _, img := range m.ExtractedV1Compatibility {
|
||||
// skip IDs that appear after each other, we handle those later
|
||||
if _, exists := idmap[img.ID]; img.ID != lastID && exists {
|
||||
if img.ID != lastID && idmap.Contains(img.ID) {
|
||||
return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
|
||||
}
|
||||
lastID = img.ID
|
||||
idmap[lastID] = struct{}{}
|
||||
idmap.Add(lastID)
|
||||
}
|
||||
// backwards loop so that we keep the remaining indexes after removing items
|
||||
for i := len(m.ExtractedV1Compatibility) - 2; i >= 0; i-- {
|
||||
if m.ExtractedV1Compatibility[i].ID == m.ExtractedV1Compatibility[i+1].ID { // repeated ID. remove and continue
|
||||
m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
|
||||
m.History = append(m.History[:i], m.History[i+1:]...)
|
||||
m.ExtractedV1Compatibility = append(m.ExtractedV1Compatibility[:i], m.ExtractedV1Compatibility[i+1:]...)
|
||||
m.FSLayers = slices.Delete(m.FSLayers, i, i+1)
|
||||
m.History = slices.Delete(m.History, i, i+1)
|
||||
m.ExtractedV1Compatibility = slices.Delete(m.ExtractedV1Compatibility, i, i+1)
|
||||
} else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID {
|
||||
return fmt.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent)
|
||||
}
|
||||
|
|
@ -206,7 +209,7 @@ func (m *Schema1) fixManifestLayers() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
|
||||
var validHex = regexp.Delayed(`^([a-f0-9]{64})$`)
|
||||
|
||||
func validateV1ID(id string) error {
|
||||
if ok := validHex.MatchString(id); !ok {
|
||||
|
|
@ -221,13 +224,17 @@ func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageI
|
|||
if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
layerInfos := m.LayerInfos()
|
||||
i := &types.ImageInspectInfo{
|
||||
Tag: m.Tag,
|
||||
Created: &s1.Created,
|
||||
DockerVersion: s1.DockerVersion,
|
||||
Architecture: s1.Architecture,
|
||||
Variant: s1.Variant,
|
||||
Os: s1.OS,
|
||||
Layers: layerInfosToStrings(m.LayerInfos()),
|
||||
Layers: layerInfosToStrings(layerInfos),
|
||||
LayersData: imgInspectLayersFromLayerInfos(layerInfos),
|
||||
Author: s1.Author,
|
||||
}
|
||||
if s1.Config != nil {
|
||||
i.Labels = s1.Config.Labels
|
||||
|
|
|
|||
21
vendor/github.com/containers/image/v5/manifest/docker_schema2.go
generated
vendored
21
vendor/github.com/containers/image/v5/manifest/docker_schema2.go
generated
vendored
|
|
@ -5,6 +5,7 @@ import (
|
|||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/v5/internal/manifest"
|
||||
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
|
||||
"github.com/containers/image/v5/pkg/strslice"
|
||||
"github.com/containers/image/v5/types"
|
||||
|
|
@ -12,12 +13,7 @@ import (
|
|||
)
|
||||
|
||||
// Schema2Descriptor is a “descriptor” in docker/distribution schema 2.
|
||||
type Schema2Descriptor struct {
|
||||
MediaType string `json:"mediaType"`
|
||||
Size int64 `json:"size"`
|
||||
Digest digest.Digest `json:"digest"`
|
||||
URLs []string `json:"urls,omitempty"`
|
||||
}
|
||||
type Schema2Descriptor = manifest.Schema2Descriptor
|
||||
|
||||
// BlobInfoFromSchema2Descriptor returns a types.BlobInfo based on the input schema 2 descriptor.
|
||||
func BlobInfoFromSchema2Descriptor(desc Schema2Descriptor) types.BlobInfo {
|
||||
|
|
@ -159,13 +155,13 @@ type Schema2Image struct {
|
|||
}
|
||||
|
||||
// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob.
|
||||
func Schema2FromManifest(manifest []byte) (*Schema2, error) {
|
||||
func Schema2FromManifest(manifestBlob []byte) (*Schema2, error) {
|
||||
s2 := Schema2{}
|
||||
if err := json.Unmarshal(manifest, &s2); err != nil {
|
||||
if err := json.Unmarshal(manifestBlob, &s2); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := validateUnambiguousManifestFormat(manifest, DockerV2Schema2MediaType,
|
||||
allowedFieldConfig|allowedFieldLayers); err != nil {
|
||||
if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, DockerV2Schema2MediaType,
|
||||
manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Check manifest's and layers' media types.
|
||||
|
|
@ -271,6 +267,7 @@ func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*t
|
|||
if err := json.Unmarshal(config, s2); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
layerInfos := m.LayerInfos()
|
||||
i := &types.ImageInspectInfo{
|
||||
Tag: "",
|
||||
Created: &s2.Created,
|
||||
|
|
@ -278,7 +275,9 @@ func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*t
|
|||
Architecture: s2.Architecture,
|
||||
Variant: s2.Variant,
|
||||
Os: s2.OS,
|
||||
Layers: layerInfosToStrings(m.LayerInfos()),
|
||||
Layers: layerInfosToStrings(layerInfos),
|
||||
LayersData: imgInspectLayersFromLayerInfos(layerInfos),
|
||||
Author: s2.Author,
|
||||
}
|
||||
if s2.Config != nil {
|
||||
i.Labels = s2.Config.Labels
|
||||
|
|
|
|||
204
vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go
generated
vendored
204
vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go
generated
vendored
|
|
@ -1,220 +1,32 @@
|
|||
package manifest
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
platform "github.com/containers/image/v5/internal/pkg/platform"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/containers/image/v5/internal/manifest"
|
||||
)
|
||||
|
||||
// Schema2PlatformSpec describes the platform which a particular manifest is
|
||||
// specialized for.
|
||||
type Schema2PlatformSpec struct {
|
||||
Architecture string `json:"architecture"`
|
||||
OS string `json:"os"`
|
||||
OSVersion string `json:"os.version,omitempty"`
|
||||
OSFeatures []string `json:"os.features,omitempty"`
|
||||
Variant string `json:"variant,omitempty"`
|
||||
Features []string `json:"features,omitempty"` // removed in OCI
|
||||
}
|
||||
type Schema2PlatformSpec = manifest.Schema2PlatformSpec
|
||||
|
||||
// Schema2ManifestDescriptor references a platform-specific manifest.
|
||||
type Schema2ManifestDescriptor struct {
|
||||
Schema2Descriptor
|
||||
Platform Schema2PlatformSpec `json:"platform"`
|
||||
}
|
||||
type Schema2ManifestDescriptor = manifest.Schema2ManifestDescriptor
|
||||
|
||||
// Schema2List is a list of platform-specific manifests.
|
||||
type Schema2List struct {
|
||||
SchemaVersion int `json:"schemaVersion"`
|
||||
MediaType string `json:"mediaType"`
|
||||
Manifests []Schema2ManifestDescriptor `json:"manifests"`
|
||||
}
|
||||
|
||||
// MIMEType returns the MIME type of this particular manifest list.
|
||||
func (list *Schema2List) MIMEType() string {
|
||||
return list.MediaType
|
||||
}
|
||||
|
||||
// Instances returns a slice of digests of the manifests that this list knows of.
|
||||
func (list *Schema2List) Instances() []digest.Digest {
|
||||
results := make([]digest.Digest, len(list.Manifests))
|
||||
for i, m := range list.Manifests {
|
||||
results[i] = m.Digest
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
// Instance returns the ListUpdate of a particular instance in the list.
|
||||
func (list *Schema2List) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
|
||||
for _, manifest := range list.Manifests {
|
||||
if manifest.Digest == instanceDigest {
|
||||
return ListUpdate{
|
||||
Digest: manifest.Digest,
|
||||
Size: manifest.Size,
|
||||
MediaType: manifest.MediaType,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
return ListUpdate{}, fmt.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest)
|
||||
}
|
||||
|
||||
// UpdateInstances updates the sizes, digests, and media types of the manifests
|
||||
// which the list catalogs.
|
||||
func (list *Schema2List) UpdateInstances(updates []ListUpdate) error {
|
||||
if len(updates) != len(list.Manifests) {
|
||||
return fmt.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates))
|
||||
}
|
||||
for i := range updates {
|
||||
if err := updates[i].Digest.Validate(); err != nil {
|
||||
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err)
|
||||
}
|
||||
list.Manifests[i].Digest = updates[i].Digest
|
||||
if updates[i].Size < 0 {
|
||||
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
|
||||
}
|
||||
list.Manifests[i].Size = updates[i].Size
|
||||
if updates[i].MediaType == "" {
|
||||
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType)
|
||||
}
|
||||
list.Manifests[i].MediaType = updates[i].MediaType
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ChooseInstance parses blob as a schema2 manifest list, and returns the digest
|
||||
// of the image which is appropriate for the current environment.
|
||||
func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
|
||||
wantedPlatforms, err := platform.WantedPlatforms(ctx)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
|
||||
}
|
||||
for _, wantedPlatform := range wantedPlatforms {
|
||||
for _, d := range list.Manifests {
|
||||
imagePlatform := imgspecv1.Platform{
|
||||
Architecture: d.Platform.Architecture,
|
||||
OS: d.Platform.OS,
|
||||
OSVersion: d.Platform.OSVersion,
|
||||
OSFeatures: dupStringSlice(d.Platform.OSFeatures),
|
||||
Variant: d.Platform.Variant,
|
||||
}
|
||||
if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
|
||||
return d.Digest, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
|
||||
}
|
||||
|
||||
// Serialize returns the list in a blob format.
|
||||
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
|
||||
func (list *Schema2List) Serialize() ([]byte, error) {
|
||||
buf, err := json.Marshal(list)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshaling Schema2List %#v: %w", list, err)
|
||||
}
|
||||
return buf, nil
|
||||
}
|
||||
type Schema2List = manifest.Schema2ListPublic
|
||||
|
||||
// Schema2ListFromComponents creates a Schema2 manifest list instance from the
|
||||
// supplied data.
|
||||
func Schema2ListFromComponents(components []Schema2ManifestDescriptor) *Schema2List {
|
||||
list := Schema2List{
|
||||
SchemaVersion: 2,
|
||||
MediaType: DockerV2ListMediaType,
|
||||
Manifests: make([]Schema2ManifestDescriptor, len(components)),
|
||||
}
|
||||
for i, component := range components {
|
||||
m := Schema2ManifestDescriptor{
|
||||
Schema2Descriptor{
|
||||
MediaType: component.MediaType,
|
||||
Size: component.Size,
|
||||
Digest: component.Digest,
|
||||
URLs: dupStringSlice(component.URLs),
|
||||
},
|
||||
Schema2PlatformSpec{
|
||||
Architecture: component.Platform.Architecture,
|
||||
OS: component.Platform.OS,
|
||||
OSVersion: component.Platform.OSVersion,
|
||||
OSFeatures: dupStringSlice(component.Platform.OSFeatures),
|
||||
Variant: component.Platform.Variant,
|
||||
Features: dupStringSlice(component.Platform.Features),
|
||||
},
|
||||
}
|
||||
list.Manifests[i] = m
|
||||
}
|
||||
return &list
|
||||
return manifest.Schema2ListPublicFromComponents(components)
|
||||
}
|
||||
|
||||
// Schema2ListClone creates a deep copy of the passed-in list.
|
||||
func Schema2ListClone(list *Schema2List) *Schema2List {
|
||||
return Schema2ListFromComponents(list.Manifests)
|
||||
}
|
||||
|
||||
// ToOCI1Index returns the list encoded as an OCI1 index.
|
||||
func (list *Schema2List) ToOCI1Index() (*OCI1Index, error) {
|
||||
components := make([]imgspecv1.Descriptor, 0, len(list.Manifests))
|
||||
for _, manifest := range list.Manifests {
|
||||
converted := imgspecv1.Descriptor{
|
||||
MediaType: manifest.MediaType,
|
||||
Size: manifest.Size,
|
||||
Digest: manifest.Digest,
|
||||
URLs: dupStringSlice(manifest.URLs),
|
||||
Platform: &imgspecv1.Platform{
|
||||
OS: manifest.Platform.OS,
|
||||
Architecture: manifest.Platform.Architecture,
|
||||
OSFeatures: dupStringSlice(manifest.Platform.OSFeatures),
|
||||
OSVersion: manifest.Platform.OSVersion,
|
||||
Variant: manifest.Platform.Variant,
|
||||
},
|
||||
}
|
||||
components = append(components, converted)
|
||||
}
|
||||
oci := OCI1IndexFromComponents(components, nil)
|
||||
return oci, nil
|
||||
}
|
||||
|
||||
// ToSchema2List returns the list encoded as a Schema2 list.
|
||||
func (list *Schema2List) ToSchema2List() (*Schema2List, error) {
|
||||
return Schema2ListClone(list), nil
|
||||
return manifest.Schema2ListPublicClone(list)
|
||||
}
|
||||
|
||||
// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled
|
||||
// JSON, presumably generated by encoding a Schema2 manifest list.
|
||||
func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) {
|
||||
list := Schema2List{
|
||||
Manifests: []Schema2ManifestDescriptor{},
|
||||
}
|
||||
if err := json.Unmarshal(manifest, &list); err != nil {
|
||||
return nil, fmt.Errorf("unmarshaling Schema2List %q: %w", string(manifest), err)
|
||||
}
|
||||
if err := validateUnambiguousManifestFormat(manifest, DockerV2ListMediaType,
|
||||
allowedFieldManifests); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &list, nil
|
||||
}
|
||||
|
||||
// Clone returns a deep copy of this list and its contents.
|
||||
func (list *Schema2List) Clone() List {
|
||||
return Schema2ListClone(list)
|
||||
}
|
||||
|
||||
// ConvertToMIMEType converts the passed-in manifest list to a manifest
|
||||
// list of the specified type.
|
||||
func (list *Schema2List) ConvertToMIMEType(manifestMIMEType string) (List, error) {
|
||||
switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
|
||||
case DockerV2ListMediaType:
|
||||
return list.Clone(), nil
|
||||
case imgspecv1.MediaTypeImageIndex:
|
||||
return list.ToOCI1Index()
|
||||
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
|
||||
return nil, fmt.Errorf("Can not convert manifest list to MIME type %q, which is not a list type", manifestMIMEType)
|
||||
default:
|
||||
// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
|
||||
return nil, fmt.Errorf("Unimplemented manifest list MIME type %s", manifestMIMEType)
|
||||
}
|
||||
func Schema2ListFromManifest(manifestBlob []byte) (*Schema2List, error) {
|
||||
return manifest.Schema2ListPublicFromManifest(manifestBlob)
|
||||
}
|
||||
|
|
|
|||
55
vendor/github.com/containers/image/v5/manifest/list.go
generated
vendored
55
vendor/github.com/containers/image/v5/manifest/list.go
generated
vendored
|
|
@ -1,10 +1,7 @@
|
|||
package manifest
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/containers/image/v5/types"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/containers/image/v5/internal/manifest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
|
|
@ -21,56 +18,14 @@ var (
|
|||
// Callers can either use this abstract interface without understanding the details of the formats,
|
||||
// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members
|
||||
// directly.
|
||||
type List interface {
|
||||
// MIMEType returns the MIME type of this particular manifest list.
|
||||
MIMEType() string
|
||||
|
||||
// Instances returns a list of the manifests that this list knows of, other than its own.
|
||||
Instances() []digest.Digest
|
||||
|
||||
// Update information about the list's instances. The length of the passed-in slice must
|
||||
// match the length of the list of instances which the list already contains, and every field
|
||||
// must be specified.
|
||||
UpdateInstances([]ListUpdate) error
|
||||
|
||||
// Instance returns the size and MIME type of a particular instance in the list.
|
||||
Instance(digest.Digest) (ListUpdate, error)
|
||||
|
||||
// ChooseInstance selects which manifest is most appropriate for the platform described by the
|
||||
// SystemContext, or for the current platform if the SystemContext doesn't specify any details.
|
||||
ChooseInstance(ctx *types.SystemContext) (digest.Digest, error)
|
||||
|
||||
// Serialize returns the list in a blob format.
|
||||
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded
|
||||
// from, even if no modifications were made!
|
||||
Serialize() ([]byte, error)
|
||||
|
||||
// ConvertToMIMEType returns the list rebuilt to the specified MIME type, or an error.
|
||||
ConvertToMIMEType(mimeType string) (List, error)
|
||||
|
||||
// Clone returns a deep copy of this list and its contents.
|
||||
Clone() List
|
||||
}
|
||||
type List = manifest.ListPublic
|
||||
|
||||
// ListUpdate includes the fields which a List's UpdateInstances() method will modify.
|
||||
type ListUpdate struct {
|
||||
Digest digest.Digest
|
||||
Size int64
|
||||
MediaType string
|
||||
}
|
||||
type ListUpdate = manifest.ListUpdate
|
||||
|
||||
// ListFromBlob parses a list of manifests.
|
||||
func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) {
|
||||
normalized := NormalizedMIMEType(manifestMIMEType)
|
||||
switch normalized {
|
||||
case DockerV2ListMediaType:
|
||||
return Schema2ListFromManifest(manifest)
|
||||
case imgspecv1.MediaTypeImageIndex:
|
||||
return OCI1IndexFromManifest(manifest)
|
||||
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
|
||||
return nil, fmt.Errorf("Treating single images as manifest lists is not implemented")
|
||||
}
|
||||
return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized)
|
||||
func ListFromBlob(manifestBlob []byte, manifestMIMEType string) (List, error) {
|
||||
return manifest.ListPublicFromBlob(manifestBlob, manifestMIMEType)
|
||||
}
|
||||
|
||||
// ConvertListToMIMEType converts the passed-in manifest list to a manifest
|
||||
|
|
|
|||
141
vendor/github.com/containers/image/v5/manifest/manifest.go
generated
vendored
141
vendor/github.com/containers/image/v5/manifest/manifest.go
generated
vendored
|
|
@ -1,10 +1,9 @@
|
|||
package manifest
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
internalManifest "github.com/containers/image/v5/internal/manifest"
|
||||
"github.com/containers/image/v5/internal/manifest"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/libtrust"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
|
|
@ -16,28 +15,28 @@ import (
|
|||
// FIXME(runcom, mitr): should we have a mediatype pkg??
|
||||
const (
|
||||
// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
|
||||
DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
|
||||
DockerV2Schema1MediaType = manifest.DockerV2Schema1MediaType
|
||||
// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 with a JWS signature
|
||||
DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
|
||||
DockerV2Schema1SignedMediaType = manifest.DockerV2Schema1SignedMediaType
|
||||
// DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
|
||||
DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
|
||||
DockerV2Schema2MediaType = manifest.DockerV2Schema2MediaType
|
||||
// DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
|
||||
DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
|
||||
DockerV2Schema2ConfigMediaType = manifest.DockerV2Schema2ConfigMediaType
|
||||
// DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
|
||||
DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
|
||||
DockerV2Schema2LayerMediaType = manifest.DockerV2Schema2LayerMediaType
|
||||
// DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers.
|
||||
DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar"
|
||||
DockerV2SchemaLayerMediaTypeUncompressed = manifest.DockerV2SchemaLayerMediaTypeUncompressed
|
||||
// DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
|
||||
DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
|
||||
DockerV2ListMediaType = manifest.DockerV2ListMediaType
|
||||
// DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
|
||||
DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar"
|
||||
DockerV2Schema2ForeignLayerMediaType = manifest.DockerV2Schema2ForeignLayerMediaType
|
||||
// DockerV2Schema2ForeignLayerMediaType is the MIME type used for gzipped schema 2 foreign layers.
|
||||
DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
|
||||
DockerV2Schema2ForeignLayerMediaTypeGzip = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip
|
||||
)
|
||||
|
||||
// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
|
||||
// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact)
|
||||
type NonImageArtifactError = internalManifest.NonImageArtifactError
|
||||
type NonImageArtifactError = manifest.NonImageArtifactError
|
||||
|
||||
// SupportedSchema2MediaType checks if the specified string is a supported Docker v2s2 media type.
|
||||
func SupportedSchema2MediaType(m string) error {
|
||||
|
|
@ -102,102 +101,21 @@ type LayerInfo struct {
|
|||
// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
|
||||
// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
|
||||
// but we may not have such metadata available (e.g. when the manifest is a local file).
|
||||
func GuessMIMEType(manifest []byte) string {
|
||||
// A subset of manifest fields; the rest is silently ignored by json.Unmarshal.
|
||||
// Also docker/distribution/manifest.Versioned.
|
||||
meta := struct {
|
||||
MediaType string `json:"mediaType"`
|
||||
SchemaVersion int `json:"schemaVersion"`
|
||||
Signatures interface{} `json:"signatures"`
|
||||
}{}
|
||||
if err := json.Unmarshal(manifest, &meta); err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
switch meta.MediaType {
|
||||
case DockerV2Schema2MediaType, DockerV2ListMediaType,
|
||||
imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: // A recognized type.
|
||||
return meta.MediaType
|
||||
}
|
||||
// this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest.
|
||||
switch meta.SchemaVersion {
|
||||
case 1:
|
||||
if meta.Signatures != nil {
|
||||
return DockerV2Schema1SignedMediaType
|
||||
}
|
||||
return DockerV2Schema1MediaType
|
||||
case 2:
|
||||
// Best effort to understand if this is an OCI image since mediaType
|
||||
// wasn't in the manifest for OCI image-spec < 1.0.2.
|
||||
// For docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
|
||||
ociMan := struct {
|
||||
Config struct {
|
||||
MediaType string `json:"mediaType"`
|
||||
} `json:"config"`
|
||||
}{}
|
||||
if err := json.Unmarshal(manifest, &ociMan); err != nil {
|
||||
return ""
|
||||
}
|
||||
switch ociMan.Config.MediaType {
|
||||
case imgspecv1.MediaTypeImageConfig:
|
||||
return imgspecv1.MediaTypeImageManifest
|
||||
case DockerV2Schema2ConfigMediaType:
|
||||
// This case should not happen since a Docker image
|
||||
// must declare a top-level media type and
|
||||
// `meta.MediaType` has already been checked.
|
||||
return DockerV2Schema2MediaType
|
||||
}
|
||||
// Maybe an image index or an OCI artifact.
|
||||
ociIndex := struct {
|
||||
Manifests []imgspecv1.Descriptor `json:"manifests"`
|
||||
}{}
|
||||
if err := json.Unmarshal(manifest, &ociIndex); err != nil {
|
||||
return ""
|
||||
}
|
||||
if len(ociIndex.Manifests) != 0 {
|
||||
if ociMan.Config.MediaType == "" {
|
||||
return imgspecv1.MediaTypeImageIndex
|
||||
}
|
||||
// FIXME: this is mixing media types of manifests and configs.
|
||||
return ociMan.Config.MediaType
|
||||
}
|
||||
// It's most likely an OCI artifact with a custom config media
|
||||
// type which is not (and cannot) be covered by the media-type
|
||||
// checks cabove.
|
||||
return imgspecv1.MediaTypeImageManifest
|
||||
}
|
||||
return ""
|
||||
func GuessMIMEType(manifestBlob []byte) string {
|
||||
return manifest.GuessMIMEType(manifestBlob)
|
||||
}
|
||||
|
||||
// Digest returns the a digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
|
||||
func Digest(manifest []byte) (digest.Digest, error) {
|
||||
if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType {
|
||||
sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
manifest, err = sig.Payload()
|
||||
if err != nil {
|
||||
// Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string
|
||||
// that libtrust itself has josebase64UrlEncode()d
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
return digest.FromBytes(manifest), nil
|
||||
func Digest(manifestBlob []byte) (digest.Digest, error) {
|
||||
return manifest.Digest(manifestBlob)
|
||||
}
|
||||
|
||||
// MatchesDigest returns true iff the manifest matches expectedDigest.
|
||||
// Error may be set if this returns false.
|
||||
// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified,
|
||||
// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob.
|
||||
func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) {
|
||||
// This should eventually support various digest types.
|
||||
actualDigest, err := Digest(manifest)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return expectedDigest == actualDigest, nil
|
||||
func MatchesDigest(manifestBlob []byte, expectedDigest digest.Digest) (bool, error) {
|
||||
return manifest.MatchesDigest(manifestBlob, expectedDigest)
|
||||
}
|
||||
|
||||
// AddDummyV2S1Signature adds an JWS signature with a temporary key (i.e. useless) to a v2s1 manifest.
|
||||
|
|
@ -231,30 +149,7 @@ func MIMETypeSupportsEncryption(mimeType string) bool {
|
|||
// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
|
||||
// centralizing various workarounds.
|
||||
func NormalizedMIMEType(input string) string {
|
||||
switch input {
|
||||
// "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
|
||||
// This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
|
||||
// need to happen within the ImageSource.
|
||||
case "application/json":
|
||||
return DockerV2Schema1SignedMediaType
|
||||
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType,
|
||||
imgspecv1.MediaTypeImageManifest,
|
||||
imgspecv1.MediaTypeImageIndex,
|
||||
DockerV2Schema2MediaType,
|
||||
DockerV2ListMediaType:
|
||||
return input
|
||||
default:
|
||||
// If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
|
||||
// to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
|
||||
// and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
|
||||
//
|
||||
// Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag.
|
||||
// This makes no real sense, but it happens
|
||||
// because requests for manifests are
|
||||
// redirected to a content distribution
|
||||
// network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
|
||||
return DockerV2Schema1SignedMediaType
|
||||
}
|
||||
return manifest.NormalizedMIMEType(input)
|
||||
}
|
||||
|
||||
// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type
|
||||
|
|
|
|||
45
vendor/github.com/containers/image/v5/manifest/oci.go
generated
vendored
45
vendor/github.com/containers/image/v5/manifest/oci.go
generated
vendored
|
|
@ -5,13 +5,14 @@ import (
|
|||
"fmt"
|
||||
"strings"
|
||||
|
||||
internalManifest "github.com/containers/image/v5/internal/manifest"
|
||||
"github.com/containers/image/v5/internal/manifest"
|
||||
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
|
||||
"github.com/containers/image/v5/types"
|
||||
ociencspec "github.com/containers/ocicrypt/spec"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/image-spec/specs-go"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor.
|
||||
|
|
@ -41,7 +42,12 @@ type OCI1 struct {
|
|||
// useful for validation anyway.
|
||||
func SupportedOCI1MediaType(m string) error {
|
||||
switch m {
|
||||
case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, imgspecv1.MediaTypeImageLayerZstd, imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeLayoutHeader, ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc:
|
||||
case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig,
|
||||
imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerZstd,
|
||||
imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
|
||||
imgspecv1.MediaTypeImageManifest,
|
||||
imgspecv1.MediaTypeLayoutHeader,
|
||||
ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc:
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("unsupported OCIv1 media type: %q", m)
|
||||
|
|
@ -49,13 +55,13 @@ func SupportedOCI1MediaType(m string) error {
|
|||
}
|
||||
|
||||
// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob.
|
||||
func OCI1FromManifest(manifest []byte) (*OCI1, error) {
|
||||
func OCI1FromManifest(manifestBlob []byte) (*OCI1, error) {
|
||||
oci1 := OCI1{}
|
||||
if err := json.Unmarshal(manifest, &oci1); err != nil {
|
||||
if err := json.Unmarshal(manifestBlob, &oci1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := validateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex,
|
||||
allowedFieldConfig|allowedFieldLayers); err != nil {
|
||||
if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageIndex,
|
||||
manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &oci1, nil
|
||||
|
|
@ -101,9 +107,9 @@ func (m *OCI1) LayerInfos() []LayerInfo {
|
|||
|
||||
var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{
|
||||
{
|
||||
mtsUncompressed: imgspecv1.MediaTypeImageLayerNonDistributable,
|
||||
compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableGzip,
|
||||
compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableZstd,
|
||||
mtsUncompressed: imgspecv1.MediaTypeImageLayerNonDistributable, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
|
||||
compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableGzip, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
|
||||
compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableZstd, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
|
||||
},
|
||||
{
|
||||
mtsUncompressed: imgspecv1.MediaTypeImageLayer,
|
||||
|
|
@ -160,14 +166,13 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
|
|||
// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return
|
||||
// an error if the mediatype does not support encryption
|
||||
func getEncryptedMediaType(mediatype string) (string, error) {
|
||||
for _, s := range strings.Split(mediatype, "+")[1:] {
|
||||
if s == "encrypted" {
|
||||
return "", fmt.Errorf("unsupported mediaType: %v already encrypted", mediatype)
|
||||
}
|
||||
if slices.Contains(strings.Split(mediatype, "+")[1:], "encrypted") {
|
||||
return "", fmt.Errorf("unsupported mediaType: %v already encrypted", mediatype)
|
||||
}
|
||||
unsuffixedMediatype := strings.Split(mediatype, "+")[0]
|
||||
switch unsuffixedMediatype {
|
||||
case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable:
|
||||
case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer,
|
||||
imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
|
||||
return mediatype + "+encrypted", nil
|
||||
}
|
||||
|
||||
|
|
@ -178,7 +183,7 @@ func getEncryptedMediaType(mediatype string) (string, error) {
|
|||
// an error if the mediatype does not support decryption
|
||||
func getDecryptedMediaType(mediatype string) (string, error) {
|
||||
if !strings.HasSuffix(mediatype, "+encrypted") {
|
||||
return "", fmt.Errorf("unsupported mediaType to decrypt %v:", mediatype)
|
||||
return "", fmt.Errorf("unsupported mediaType to decrypt: %v", mediatype)
|
||||
}
|
||||
|
||||
return strings.TrimSuffix(mediatype, "+encrypted"), nil
|
||||
|
|
@ -197,7 +202,7 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
|
|||
// Most software calling this without human intervention is going to expect the values to be realistic and relevant,
|
||||
// and is probably better served by failing; we can always re-visit that later if we fail now, but
|
||||
// if we started returning some data for OCI artifacts now, we couldn’t start failing in this function later.
|
||||
return nil, internalManifest.NewNonImageArtifactError(m.Config.MediaType)
|
||||
return nil, manifest.NewNonImageArtifactError(m.Config.MediaType)
|
||||
}
|
||||
|
||||
config, err := configGetter(m.ConfigInfo())
|
||||
|
|
@ -212,15 +217,19 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
|
|||
if err := json.Unmarshal(config, d1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
layerInfos := m.LayerInfos()
|
||||
i := &types.ImageInspectInfo{
|
||||
Tag: "",
|
||||
Created: v1.Created,
|
||||
DockerVersion: d1.DockerVersion,
|
||||
Labels: v1.Config.Labels,
|
||||
Architecture: v1.Architecture,
|
||||
Variant: v1.Variant,
|
||||
Os: v1.OS,
|
||||
Layers: layerInfosToStrings(m.LayerInfos()),
|
||||
Layers: layerInfosToStrings(layerInfos),
|
||||
LayersData: imgInspectLayersFromLayerInfos(layerInfos),
|
||||
Env: v1.Config.Env,
|
||||
Author: v1.Author,
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
|
@ -244,7 +253,7 @@ func (m *OCI1) ImageID([]digest.Digest) (string, error) {
|
|||
// (The only known caller of ImageID is storage/storageImageDestination.computeID,
|
||||
// which can’t work with non-image artifacts.)
|
||||
if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
|
||||
return "", internalManifest.NewNonImageArtifactError(m.Config.MediaType)
|
||||
return "", manifest.NewNonImageArtifactError(m.Config.MediaType)
|
||||
}
|
||||
|
||||
if err := m.Config.Digest.Validate(); err != nil {
|
||||
|
|
|
|||
217
vendor/github.com/containers/image/v5/manifest/oci_index.go
generated
vendored
217
vendor/github.com/containers/image/v5/manifest/oci_index.go
generated
vendored
|
|
@ -1,232 +1,27 @@
|
|||
package manifest
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"runtime"
|
||||
|
||||
platform "github.com/containers/image/v5/internal/pkg/platform"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
imgspec "github.com/opencontainers/image-spec/specs-go"
|
||||
"github.com/containers/image/v5/internal/manifest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// OCI1Index is just an alias for the OCI index type, but one which we can
|
||||
// provide methods for.
|
||||
type OCI1Index struct {
|
||||
imgspecv1.Index
|
||||
}
|
||||
|
||||
// MIMEType returns the MIME type of this particular manifest index.
|
||||
func (index *OCI1Index) MIMEType() string {
|
||||
return imgspecv1.MediaTypeImageIndex
|
||||
}
|
||||
|
||||
// Instances returns a slice of digests of the manifests that this index knows of.
|
||||
func (index *OCI1Index) Instances() []digest.Digest {
|
||||
results := make([]digest.Digest, len(index.Manifests))
|
||||
for i, m := range index.Manifests {
|
||||
results[i] = m.Digest
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
// Instance returns the ListUpdate of a particular instance in the index.
|
||||
func (index *OCI1Index) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
|
||||
for _, manifest := range index.Manifests {
|
||||
if manifest.Digest == instanceDigest {
|
||||
return ListUpdate{
|
||||
Digest: manifest.Digest,
|
||||
Size: manifest.Size,
|
||||
MediaType: manifest.MediaType,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
return ListUpdate{}, fmt.Errorf("unable to find instance %s in OCI1Index", instanceDigest)
|
||||
}
|
||||
|
||||
// UpdateInstances updates the sizes, digests, and media types of the manifests
|
||||
// which the list catalogs.
|
||||
func (index *OCI1Index) UpdateInstances(updates []ListUpdate) error {
|
||||
if len(updates) != len(index.Manifests) {
|
||||
return fmt.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates))
|
||||
}
|
||||
for i := range updates {
|
||||
if err := updates[i].Digest.Validate(); err != nil {
|
||||
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err)
|
||||
}
|
||||
index.Manifests[i].Digest = updates[i].Digest
|
||||
if updates[i].Size < 0 {
|
||||
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
|
||||
}
|
||||
index.Manifests[i].Size = updates[i].Size
|
||||
if updates[i].MediaType == "" {
|
||||
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType)
|
||||
}
|
||||
index.Manifests[i].MediaType = updates[i].MediaType
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ChooseInstance parses blob as an oci v1 manifest index, and returns the digest
|
||||
// of the image which is appropriate for the current environment.
|
||||
func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
|
||||
wantedPlatforms, err := platform.WantedPlatforms(ctx)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
|
||||
}
|
||||
for _, wantedPlatform := range wantedPlatforms {
|
||||
for _, d := range index.Manifests {
|
||||
if d.Platform == nil {
|
||||
continue
|
||||
}
|
||||
imagePlatform := imgspecv1.Platform{
|
||||
Architecture: d.Platform.Architecture,
|
||||
OS: d.Platform.OS,
|
||||
OSVersion: d.Platform.OSVersion,
|
||||
OSFeatures: dupStringSlice(d.Platform.OSFeatures),
|
||||
Variant: d.Platform.Variant,
|
||||
}
|
||||
if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
|
||||
return d.Digest, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, d := range index.Manifests {
|
||||
if d.Platform == nil {
|
||||
return d.Digest, nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
|
||||
}
|
||||
|
||||
// Serialize returns the index in a blob format.
|
||||
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
|
||||
func (index *OCI1Index) Serialize() ([]byte, error) {
|
||||
buf, err := json.Marshal(index)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshaling OCI1Index %#v: %w", index, err)
|
||||
}
|
||||
return buf, nil
|
||||
}
|
||||
type OCI1Index = manifest.OCI1IndexPublic
|
||||
|
||||
// OCI1IndexFromComponents creates an OCI1 image index instance from the
|
||||
// supplied data.
|
||||
func OCI1IndexFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1Index {
|
||||
index := OCI1Index{
|
||||
imgspecv1.Index{
|
||||
Versioned: imgspec.Versioned{SchemaVersion: 2},
|
||||
MediaType: imgspecv1.MediaTypeImageIndex,
|
||||
Manifests: make([]imgspecv1.Descriptor, len(components)),
|
||||
Annotations: dupStringStringMap(annotations),
|
||||
},
|
||||
}
|
||||
for i, component := range components {
|
||||
var platform *imgspecv1.Platform
|
||||
if component.Platform != nil {
|
||||
platform = &imgspecv1.Platform{
|
||||
Architecture: component.Platform.Architecture,
|
||||
OS: component.Platform.OS,
|
||||
OSVersion: component.Platform.OSVersion,
|
||||
OSFeatures: dupStringSlice(component.Platform.OSFeatures),
|
||||
Variant: component.Platform.Variant,
|
||||
}
|
||||
}
|
||||
m := imgspecv1.Descriptor{
|
||||
MediaType: component.MediaType,
|
||||
Size: component.Size,
|
||||
Digest: component.Digest,
|
||||
URLs: dupStringSlice(component.URLs),
|
||||
Annotations: dupStringStringMap(component.Annotations),
|
||||
Platform: platform,
|
||||
}
|
||||
index.Manifests[i] = m
|
||||
}
|
||||
return &index
|
||||
return manifest.OCI1IndexPublicFromComponents(components, annotations)
|
||||
}
|
||||
|
||||
// OCI1IndexClone creates a deep copy of the passed-in index.
|
||||
func OCI1IndexClone(index *OCI1Index) *OCI1Index {
|
||||
return OCI1IndexFromComponents(index.Manifests, index.Annotations)
|
||||
}
|
||||
|
||||
// ToOCI1Index returns the index encoded as an OCI1 index.
|
||||
func (index *OCI1Index) ToOCI1Index() (*OCI1Index, error) {
|
||||
return OCI1IndexClone(index), nil
|
||||
}
|
||||
|
||||
// ToSchema2List returns the index encoded as a Schema2 list.
|
||||
func (index *OCI1Index) ToSchema2List() (*Schema2List, error) {
|
||||
components := make([]Schema2ManifestDescriptor, 0, len(index.Manifests))
|
||||
for _, manifest := range index.Manifests {
|
||||
platform := manifest.Platform
|
||||
if platform == nil {
|
||||
platform = &imgspecv1.Platform{
|
||||
OS: runtime.GOOS,
|
||||
Architecture: runtime.GOARCH,
|
||||
}
|
||||
}
|
||||
converted := Schema2ManifestDescriptor{
|
||||
Schema2Descriptor{
|
||||
MediaType: manifest.MediaType,
|
||||
Size: manifest.Size,
|
||||
Digest: manifest.Digest,
|
||||
URLs: dupStringSlice(manifest.URLs),
|
||||
},
|
||||
Schema2PlatformSpec{
|
||||
OS: platform.OS,
|
||||
Architecture: platform.Architecture,
|
||||
OSFeatures: dupStringSlice(platform.OSFeatures),
|
||||
OSVersion: platform.OSVersion,
|
||||
Variant: platform.Variant,
|
||||
},
|
||||
}
|
||||
components = append(components, converted)
|
||||
}
|
||||
s2 := Schema2ListFromComponents(components)
|
||||
return s2, nil
|
||||
return manifest.OCI1IndexPublicClone(index)
|
||||
}
|
||||
|
||||
// OCI1IndexFromManifest creates an OCI1 manifest index instance from marshalled
|
||||
// JSON, presumably generated by encoding a OCI1 manifest index.
|
||||
func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) {
|
||||
index := OCI1Index{
|
||||
Index: imgspecv1.Index{
|
||||
Versioned: imgspec.Versioned{SchemaVersion: 2},
|
||||
MediaType: imgspecv1.MediaTypeImageIndex,
|
||||
Manifests: []imgspecv1.Descriptor{},
|
||||
Annotations: make(map[string]string),
|
||||
},
|
||||
}
|
||||
if err := json.Unmarshal(manifest, &index); err != nil {
|
||||
return nil, fmt.Errorf("unmarshaling OCI1Index %q: %w", string(manifest), err)
|
||||
}
|
||||
if err := validateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex,
|
||||
allowedFieldManifests); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &index, nil
|
||||
}
|
||||
|
||||
// Clone returns a deep copy of this list and its contents.
|
||||
func (index *OCI1Index) Clone() List {
|
||||
return OCI1IndexClone(index)
|
||||
}
|
||||
|
||||
// ConvertToMIMEType converts the passed-in image index to a manifest list of
|
||||
// the specified type.
|
||||
func (index *OCI1Index) ConvertToMIMEType(manifestMIMEType string) (List, error) {
|
||||
switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
|
||||
case DockerV2ListMediaType:
|
||||
return index.ToSchema2List()
|
||||
case imgspecv1.MediaTypeImageIndex:
|
||||
return index.Clone(), nil
|
||||
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
|
||||
return nil, fmt.Errorf("Can not convert image index to MIME type %q, which is not a list type", manifestMIMEType)
|
||||
default:
|
||||
// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
|
||||
return nil, fmt.Errorf("Unimplemented manifest MIME type %s", manifestMIMEType)
|
||||
}
|
||||
func OCI1IndexFromManifest(manifestBlob []byte) (*OCI1Index, error) {
|
||||
return manifest.OCI1IndexPublicFromManifest(manifestBlob)
|
||||
}
|
||||
|
|
|
|||
12
vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
generated
vendored
12
vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
generated
vendored
|
|
@ -109,8 +109,8 @@ func (d *ociArchiveImageDestination) SupportsPutBlobPartial() bool {
|
|||
// inputInfo.MediaType describes the blob format, if known.
|
||||
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
|
||||
// to any other readers for download using the supplied digest.
|
||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
||||
func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
|
||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
|
||||
func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
|
||||
return d.unpackedDest.PutBlobWithOptions(ctx, stream, inputInfo, options)
|
||||
}
|
||||
|
||||
|
|
@ -119,18 +119,16 @@ func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, str
|
|||
// It is available only if SupportsPutBlobPartial().
|
||||
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
|
||||
// should fall back to PutBlobWithOptions.
|
||||
func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
|
||||
func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
|
||||
return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
|
||||
}
|
||||
|
||||
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
|
||||
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
|
||||
// info.Digest must not be empty.
|
||||
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
|
||||
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
|
||||
// reflected in the manifest that will be written.
|
||||
// If the blob has been successfully reused, returns (true, info, nil).
|
||||
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
|
||||
func (d *ociArchiveImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
|
||||
func (d *ociArchiveImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
|
||||
return d.unpackedDest.TryReusingBlobWithOptions(ctx, info, options)
|
||||
}
|
||||
|
||||
|
|
|
|||
15
vendor/github.com/containers/image/v5/oci/archive/oci_src.go
generated
vendored
15
vendor/github.com/containers/image/v5/oci/archive/oci_src.go
generated
vendored
|
|
@ -17,6 +17,17 @@ import (
|
|||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ImageNotFoundError is used when the OCI structure, in principle, exists and seems valid enough,
|
||||
// but nothing matches the “image” part of the provided reference.
|
||||
type ImageNotFoundError struct {
|
||||
ref ociArchiveReference
|
||||
// We may make members public, or add methods, in the future.
|
||||
}
|
||||
|
||||
func (e ImageNotFoundError) Error() string {
|
||||
return fmt.Sprintf("no descriptor found for reference %q", e.ref.image)
|
||||
}
|
||||
|
||||
type ociArchiveImageSource struct {
|
||||
impl.Compat
|
||||
|
||||
|
|
@ -35,6 +46,10 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiv
|
|||
|
||||
unpackedSrc, err := tempDirRef.ociRefExtracted.NewImageSource(ctx, sys)
|
||||
if err != nil {
|
||||
var notFound ocilayout.ImageNotFoundError
|
||||
if errors.As(err, ¬Found) {
|
||||
err = ImageNotFoundError{ref: ref}
|
||||
}
|
||||
if err := tempDirRef.deleteTempDir(); err != nil {
|
||||
return nil, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err)
|
||||
}
|
||||
|
|
|
|||
8
vendor/github.com/containers/image/v5/oci/internal/oci_util.go
generated
vendored
8
vendor/github.com/containers/image/v5/oci/internal/oci_util.go
generated
vendored
|
|
@ -58,13 +58,7 @@ func splitPathAndImageWindows(reference string) (string, string) {
|
|||
}
|
||||
|
||||
func splitPathAndImageNonWindows(reference string) (string, string) {
|
||||
sep := strings.SplitN(reference, ":", 2)
|
||||
path := sep[0]
|
||||
|
||||
var image string
|
||||
if len(sep) == 2 {
|
||||
image = sep[1]
|
||||
}
|
||||
path, image, _ := strings.Cut(reference, ":") // image is set to "" if there is no ":"
|
||||
return path, image
|
||||
}
|
||||
|
||||
|
|
|
|||
40
vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
generated
vendored
40
vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
generated
vendored
|
|
@ -12,9 +12,9 @@ import (
|
|||
|
||||
"github.com/containers/image/v5/internal/imagedestination/impl"
|
||||
"github.com/containers/image/v5/internal/imagedestination/stubs"
|
||||
"github.com/containers/image/v5/internal/manifest"
|
||||
"github.com/containers/image/v5/internal/private"
|
||||
"github.com/containers/image/v5/internal/putblobdigest"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/types"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
imgspec "github.com/opencontainers/image-spec/specs-go"
|
||||
|
|
@ -107,11 +107,11 @@ func (d *ociImageDestination) Close() error {
|
|||
// inputInfo.MediaType describes the blob format, if known.
|
||||
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
|
||||
// to any other readers for download using the supplied digest.
|
||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
||||
func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
|
||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
|
||||
func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
|
||||
blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
succeeded := false
|
||||
explicitClosed := false
|
||||
|
|
@ -128,14 +128,14 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
|
|||
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
|
||||
size, err := io.Copy(blobFile, stream)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
blobDigest := digester.Digest()
|
||||
if inputInfo.Size != -1 && size != inputInfo.Size {
|
||||
return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
|
||||
return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
|
||||
}
|
||||
if err := blobFile.Sync(); err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
|
||||
// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
|
||||
|
|
@ -144,52 +144,50 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
|
|||
// always fails on Windows.
|
||||
if runtime.GOOS != "windows" {
|
||||
if err := blobFile.Chmod(0644); err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
}
|
||||
|
||||
blobPath, err := d.ref.blobPath(blobDigest, d.sharedBlobDir)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
if err := ensureParentDirectoryExists(blobPath); err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
|
||||
// need to explicitly close the file, since a rename won't otherwise not work on Windows
|
||||
blobFile.Close()
|
||||
explicitClosed = true
|
||||
if err := os.Rename(blobFile.Name(), blobPath); err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
succeeded = true
|
||||
return types.BlobInfo{Digest: blobDigest, Size: size}, nil
|
||||
return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
|
||||
}
|
||||
|
||||
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
|
||||
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
|
||||
// info.Digest must not be empty.
|
||||
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
|
||||
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
|
||||
// reflected in the manifest that will be written.
|
||||
// If the blob has been successfully reused, returns (true, info, nil).
|
||||
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
|
||||
func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
|
||||
func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
|
||||
if info.Digest == "" {
|
||||
return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest")
|
||||
return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
|
||||
}
|
||||
blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir)
|
||||
if err != nil {
|
||||
return false, types.BlobInfo{}, err
|
||||
return false, private.ReusedBlob{}, err
|
||||
}
|
||||
finfo, err := os.Stat(blobPath)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
return false, types.BlobInfo{}, nil
|
||||
return false, private.ReusedBlob{}, nil
|
||||
}
|
||||
if err != nil {
|
||||
return false, types.BlobInfo{}, err
|
||||
return false, private.ReusedBlob{}, err
|
||||
}
|
||||
|
||||
return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
|
||||
return true, private.ReusedBlob{Digest: info.Digest, Size: finfo.Size()}, nil
|
||||
}
|
||||
|
||||
// PutManifest writes a manifest to the destination. Per our list of supported manifest MIME types,
|
||||
|
|
|
|||
16
vendor/github.com/containers/image/v5/oci/layout/oci_src.go
generated
vendored
16
vendor/github.com/containers/image/v5/oci/layout/oci_src.go
generated
vendored
|
|
@ -12,8 +12,8 @@ import (
|
|||
|
||||
"github.com/containers/image/v5/internal/imagesource/impl"
|
||||
"github.com/containers/image/v5/internal/imagesource/stubs"
|
||||
"github.com/containers/image/v5/internal/manifest"
|
||||
"github.com/containers/image/v5/internal/private"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/pkg/tlsclientconfig"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/docker/go-connections/tlsconfig"
|
||||
|
|
@ -21,6 +21,17 @@ import (
|
|||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// ImageNotFoundError is used when the OCI structure, in principle, exists and seems valid enough,
|
||||
// but nothing matches the “image” part of the provided reference.
|
||||
type ImageNotFoundError struct {
|
||||
ref ociReference
|
||||
// We may make members public, or add methods, in the future.
|
||||
}
|
||||
|
||||
func (e ImageNotFoundError) Error() string {
|
||||
return fmt.Sprintf("no descriptor found for reference %q", e.ref.image)
|
||||
}
|
||||
|
||||
type ociImageSource struct {
|
||||
impl.Compat
|
||||
impl.PropertyMethodsInitialize
|
||||
|
|
@ -83,6 +94,7 @@ func (s *ociImageSource) Reference() types.ImageReference {
|
|||
|
||||
// Close removes resources associated with an initialized ImageSource, if any.
|
||||
func (s *ociImageSource) Close() error {
|
||||
s.client.CloseIdleConnections()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -96,7 +108,7 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest
|
|||
var err error
|
||||
|
||||
if instanceDigest == nil {
|
||||
dig = digest.Digest(s.descriptor.Digest)
|
||||
dig = s.descriptor.Digest
|
||||
mimeType = s.descriptor.MediaType
|
||||
} else {
|
||||
dig = *instanceDigest
|
||||
|
|
|
|||
32
vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
generated
vendored
32
vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
generated
vendored
|
|
@ -100,7 +100,7 @@ func (ref ociReference) Transport() types.ImageTransport {
|
|||
// StringWithinTransport returns a string representation of the reference, which MUST be such that
|
||||
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
|
||||
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
|
||||
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
|
||||
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
|
||||
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
|
||||
func (ref ociReference) StringWithinTransport() string {
|
||||
return fmt.Sprintf("%s:%s", ref.dir, ref.image)
|
||||
|
|
@ -179,35 +179,29 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) {
|
|||
return imgspecv1.Descriptor{}, err
|
||||
}
|
||||
|
||||
var d *imgspecv1.Descriptor
|
||||
if ref.image == "" {
|
||||
// return manifest if only one image is in the oci directory
|
||||
if len(index.Manifests) == 1 {
|
||||
d = &index.Manifests[0]
|
||||
} else {
|
||||
if len(index.Manifests) != 1 {
|
||||
// ask user to choose image when more than one image in the oci directory
|
||||
return imgspecv1.Descriptor{}, ErrMoreThanOneImage
|
||||
}
|
||||
return index.Manifests[0], nil
|
||||
} else {
|
||||
// if image specified, look through all manifests for a match
|
||||
var unsupportedMIMETypes []string
|
||||
for _, md := range index.Manifests {
|
||||
if md.MediaType != imgspecv1.MediaTypeImageManifest && md.MediaType != imgspecv1.MediaTypeImageIndex {
|
||||
continue
|
||||
}
|
||||
refName, ok := md.Annotations[imgspecv1.AnnotationRefName]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if refName == ref.image {
|
||||
d = &md
|
||||
break
|
||||
if refName, ok := md.Annotations[imgspecv1.AnnotationRefName]; ok && refName == ref.image {
|
||||
if md.MediaType == imgspecv1.MediaTypeImageManifest || md.MediaType == imgspecv1.MediaTypeImageIndex {
|
||||
return md, nil
|
||||
}
|
||||
unsupportedMIMETypes = append(unsupportedMIMETypes, md.MediaType)
|
||||
}
|
||||
}
|
||||
if len(unsupportedMIMETypes) != 0 {
|
||||
return imgspecv1.Descriptor{}, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes)
|
||||
}
|
||||
}
|
||||
if d == nil {
|
||||
return imgspecv1.Descriptor{}, fmt.Errorf("no descriptor found for reference %q", ref.image)
|
||||
}
|
||||
return *d, nil
|
||||
return imgspecv1.Descriptor{}, ImageNotFoundError{ref}
|
||||
}
|
||||
|
||||
// LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name
|
||||
|
|
|
|||
22
vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
generated
vendored
22
vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
generated
vendored
|
|
@ -6,6 +6,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/containers/image/v5/internal/blobinfocache"
|
||||
"github.com/containers/image/v5/internal/set"
|
||||
"github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
|
||||
"github.com/containers/image/v5/types"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
|
|
@ -19,12 +20,12 @@ type locationKey struct {
|
|||
blobDigest digest.Digest
|
||||
}
|
||||
|
||||
// cache implements an in-memory-only BlobInfoCache
|
||||
// cache implements an in-memory-only BlobInfoCache.
|
||||
type cache struct {
|
||||
mutex sync.Mutex
|
||||
// The following fields can only be accessed with mutex held.
|
||||
uncompressedDigests map[digest.Digest]digest.Digest
|
||||
digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest
|
||||
digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest
|
||||
knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
|
||||
compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Unknown, for each digest
|
||||
}
|
||||
|
|
@ -44,7 +45,7 @@ func New() types.BlobInfoCache {
|
|||
func new2() *cache {
|
||||
return &cache{
|
||||
uncompressedDigests: map[digest.Digest]digest.Digest{},
|
||||
digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
|
||||
digestsByUncompressed: map[digest.Digest]*set.Set[digest.Digest]{},
|
||||
knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
|
||||
compressors: map[digest.Digest]string{},
|
||||
}
|
||||
|
|
@ -67,7 +68,7 @@ func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Diges
|
|||
// Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest.
|
||||
// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
|
||||
// when we already record a (compressed, uncompressed) pair.
|
||||
if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 {
|
||||
if s, ok := mem.digestsByUncompressed[anyDigest]; ok && !s.Empty() {
|
||||
return anyDigest
|
||||
}
|
||||
return ""
|
||||
|
|
@ -88,10 +89,10 @@ func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre
|
|||
|
||||
anyDigestSet, ok := mem.digestsByUncompressed[uncompressed]
|
||||
if !ok {
|
||||
anyDigestSet = map[digest.Digest]struct{}{}
|
||||
anyDigestSet = set.New[digest.Digest]()
|
||||
mem.digestsByUncompressed[uncompressed] = anyDigestSet
|
||||
}
|
||||
anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
|
||||
anyDigestSet.Add(anyDigest)
|
||||
}
|
||||
|
||||
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
|
||||
|
|
@ -171,10 +172,11 @@ func (mem *cache) candidateLocations(transport types.ImageTransport, scope types
|
|||
var uncompressedDigest digest.Digest // = ""
|
||||
if canSubstitute {
|
||||
if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
|
||||
otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
|
||||
for d := range otherDigests {
|
||||
if d != primaryDigest && d != uncompressedDigest {
|
||||
res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo)
|
||||
if otherDigests, ok := mem.digestsByUncompressed[uncompressedDigest]; ok {
|
||||
for _, d := range otherDigests.Values() {
|
||||
if d != primaryDigest && d != uncompressedDigest {
|
||||
res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo)
|
||||
}
|
||||
}
|
||||
}
|
||||
if uncompressedDigest != primaryDigest {
|
||||
|
|
|
|||
2
vendor/github.com/containers/image/v5/pkg/compression/compression.go
generated
vendored
2
vendor/github.com/containers/image/v5/pkg/compression/compression.go
generated
vendored
|
|
@ -30,7 +30,7 @@ var (
|
|||
// Zstd compression.
|
||||
Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, types.ZstdAlgorithmName,
|
||||
[]byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor)
|
||||
// Zstd:chunked compression.
|
||||
// ZstdChunked is a Zstd compression with chunk metadta which allows random access to individual files.
|
||||
ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, /* Note: InternalUnstableUndocumentedMIMEQuestionMark is not ZstdChunkedAlgorithmName */
|
||||
nil, ZstdDecompressor, compressor.ZstdCompressor)
|
||||
|
||||
|
|
|
|||
6
vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
generated
vendored
6
vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
generated
vendored
|
|
@ -44,21 +44,21 @@ func (c Algorithm) InternalUnstableUndocumentedMIMEQuestionMark() string {
|
|||
}
|
||||
|
||||
// AlgorithmCompressor returns the compressor field of algo.
|
||||
// This is a function instead of a public method so that it is only callable from by code
|
||||
// This is a function instead of a public method so that it is only callable by code
|
||||
// that is allowed to import this internal subpackage.
|
||||
func AlgorithmCompressor(algo Algorithm) CompressorFunc {
|
||||
return algo.compressor
|
||||
}
|
||||
|
||||
// AlgorithmDecompressor returns the decompressor field of algo.
|
||||
// This is a function instead of a public method so that it is only callable from by code
|
||||
// This is a function instead of a public method so that it is only callable by code
|
||||
// that is allowed to import this internal subpackage.
|
||||
func AlgorithmDecompressor(algo Algorithm) DecompressorFunc {
|
||||
return algo.decompressor
|
||||
}
|
||||
|
||||
// AlgorithmPrefix returns the prefix field of algo.
|
||||
// This is a function instead of a public method so that it is only callable from by code
|
||||
// This is a function instead of a public method so that it is only callable by code
|
||||
// that is allowed to import this internal subpackage.
|
||||
func AlgorithmPrefix(algo Algorithm) []byte {
|
||||
return algo.prefix
|
||||
|
|
|
|||
323
vendor/github.com/containers/image/v5/pkg/docker/config/config.go
generated
vendored
323
vendor/github.com/containers/image/v5/pkg/docker/config/config.go
generated
vendored
|
|
@ -12,6 +12,7 @@ import (
|
|||
"strings"
|
||||
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/internal/set"
|
||||
"github.com/containers/image/v5/pkg/sysregistriesv2"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage/pkg/homedir"
|
||||
|
|
@ -32,11 +33,6 @@ type dockerConfigFile struct {
|
|||
CredHelpers map[string]string `json:"credHelpers,omitempty"`
|
||||
}
|
||||
|
||||
type authPath struct {
|
||||
path string
|
||||
legacyFormat bool
|
||||
}
|
||||
|
||||
var (
|
||||
defaultPerUIDPathFormat = filepath.FromSlash("/run/containers/%d/auth.json")
|
||||
xdgConfigHomePath = filepath.FromSlash("containers/auth.json")
|
||||
|
|
@ -52,11 +48,24 @@ var (
|
|||
ErrNotSupported = errors.New("not supported")
|
||||
)
|
||||
|
||||
// authPath combines a path to a file with container registry credentials,
|
||||
// along with expected properties of that path (currently just whether it's
|
||||
// legacy format or not).
|
||||
type authPath struct {
|
||||
path string
|
||||
legacyFormat bool
|
||||
}
|
||||
|
||||
// newAuthPathDefault constructs an authPath in non-legacy format.
|
||||
func newAuthPathDefault(path string) authPath {
|
||||
return authPath{path: path, legacyFormat: false}
|
||||
}
|
||||
|
||||
// SetCredentials stores the username and password in a location
|
||||
// appropriate for sys and the users’ configuration.
|
||||
// A valid key is a repository, a namespace within a registry, or a registry hostname;
|
||||
// using forms other than just a registry may fail depending on configuration.
|
||||
// Returns a human-redable description of the location that was updated.
|
||||
// Returns a human-readable description of the location that was updated.
|
||||
// NOTE: The return value is only intended to be read by humans; its form is not an API,
|
||||
// it may change (or new forms can be added) any time.
|
||||
func SetCredentials(sys *types.SystemContext, key, username, password string) (string, error) {
|
||||
|
|
@ -78,25 +87,28 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s
|
|||
switch helper {
|
||||
// Special-case the built-in helpers for auth files.
|
||||
case sysregistriesv2.AuthenticationFileHelper:
|
||||
desc, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
|
||||
if ch, exists := auths.CredHelpers[key]; exists {
|
||||
desc, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
|
||||
if ch, exists := fileContents.CredHelpers[key]; exists {
|
||||
if isNamespaced {
|
||||
return false, unsupportedNamespaceErr(ch)
|
||||
return false, "", unsupportedNamespaceErr(ch)
|
||||
}
|
||||
return false, setAuthToCredHelper(ch, key, username, password)
|
||||
desc, err := setCredsInCredHelper(ch, key, username, password)
|
||||
if err != nil {
|
||||
return false, "", err
|
||||
}
|
||||
return false, desc, nil
|
||||
}
|
||||
creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
|
||||
newCreds := dockerAuthConfig{Auth: creds}
|
||||
auths.AuthConfigs[key] = newCreds
|
||||
return true, nil
|
||||
fileContents.AuthConfigs[key] = newCreds
|
||||
return true, "", nil
|
||||
})
|
||||
// External helpers.
|
||||
default:
|
||||
if isNamespaced {
|
||||
err = unsupportedNamespaceErr(helper)
|
||||
} else {
|
||||
desc = fmt.Sprintf("credential helper: %s", helper)
|
||||
err = setAuthToCredHelper(helper, key, username, password)
|
||||
desc, err = setCredsInCredHelper(helper, key, username, password)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
|
|
@ -128,10 +140,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
|
|||
// possible sources, and then call `GetCredentials` on them. That
|
||||
// prevents us from having to reverse engineer the logic in
|
||||
// `GetCredentials`.
|
||||
allKeys := make(map[string]bool)
|
||||
addKey := func(s string) {
|
||||
allKeys[s] = true
|
||||
}
|
||||
allKeys := set.New[string]()
|
||||
|
||||
// To use GetCredentials, we must at least convert the URL forms into host names.
|
||||
// While we're at it, we’ll also canonicalize docker.io to the standard format.
|
||||
|
|
@ -146,28 +155,28 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
|
|||
// Special-case the built-in helper for auth files.
|
||||
case sysregistriesv2.AuthenticationFileHelper:
|
||||
for _, path := range getAuthFilePaths(sys, homedir.Get()) {
|
||||
// readJSONFile returns an empty map in case the path doesn't exist.
|
||||
auths, err := readJSONFile(path.path, path.legacyFormat)
|
||||
// parse returns an empty map in case the path doesn't exist.
|
||||
fileContents, err := path.parse()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading JSON file %q: %w", path.path, err)
|
||||
}
|
||||
// Credential helpers in the auth file have a
|
||||
// direct mapping to a registry, so we can just
|
||||
// walk the map.
|
||||
for registry := range auths.CredHelpers {
|
||||
addKey(registry)
|
||||
for registry := range fileContents.CredHelpers {
|
||||
allKeys.Add(registry)
|
||||
}
|
||||
for key := range auths.AuthConfigs {
|
||||
for key := range fileContents.AuthConfigs {
|
||||
key := normalizeAuthFileKey(key, path.legacyFormat)
|
||||
if key == normalizedDockerIORegistry {
|
||||
key = "docker.io"
|
||||
}
|
||||
addKey(key)
|
||||
allKeys.Add(key)
|
||||
}
|
||||
}
|
||||
// External helpers.
|
||||
default:
|
||||
creds, err := listAuthsFromCredHelper(helper)
|
||||
creds, err := listCredsInCredHelper(helper)
|
||||
if err != nil {
|
||||
logrus.Debugf("Error listing credentials stored in credential helper %s: %v", helper, err)
|
||||
if errors.Is(err, exec.ErrNotFound) {
|
||||
|
|
@ -177,26 +186,26 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
|
|||
}
|
||||
}
|
||||
for registry := range creds {
|
||||
addKey(registry)
|
||||
allKeys.Add(registry)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Now use `GetCredentials` to the specific auth configs for each
|
||||
// previously listed registry.
|
||||
authConfigs := make(map[string]types.DockerAuthConfig)
|
||||
for key := range allKeys {
|
||||
authConf, err := GetCredentials(sys, key)
|
||||
allCreds := make(map[string]types.DockerAuthConfig)
|
||||
for _, key := range allKeys.Values() {
|
||||
creds, err := GetCredentials(sys, key)
|
||||
if err != nil {
|
||||
// Note: we rely on the logging in `GetCredentials`.
|
||||
return nil, err
|
||||
}
|
||||
if authConf != (types.DockerAuthConfig{}) {
|
||||
authConfigs[key] = authConf
|
||||
if creds != (types.DockerAuthConfig{}) {
|
||||
allCreds[key] = creds
|
||||
}
|
||||
}
|
||||
|
||||
return authConfigs, nil
|
||||
return allCreds, nil
|
||||
}
|
||||
|
||||
// getAuthFilePaths returns a slice of authPaths based on the system context
|
||||
|
|
@ -205,32 +214,32 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
|
|||
// by tests.
|
||||
func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath {
|
||||
paths := []authPath{}
|
||||
pathToAuth, lf, err := getPathToAuth(sys)
|
||||
pathToAuth, userSpecifiedPath, err := getPathToAuth(sys)
|
||||
if err == nil {
|
||||
paths = append(paths, authPath{path: pathToAuth, legacyFormat: lf})
|
||||
paths = append(paths, pathToAuth)
|
||||
} else {
|
||||
// Error means that the path set for XDG_RUNTIME_DIR does not exist
|
||||
// but we don't want to completely fail in the case that the user is pulling a public image
|
||||
// Logging the error as a warning instead and moving on to pulling the image
|
||||
logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err)
|
||||
}
|
||||
xdgCfgHome := os.Getenv("XDG_CONFIG_HOME")
|
||||
if xdgCfgHome == "" {
|
||||
xdgCfgHome = filepath.Join(homeDir, ".config")
|
||||
}
|
||||
paths = append(paths, authPath{path: filepath.Join(xdgCfgHome, xdgConfigHomePath), legacyFormat: false})
|
||||
if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" {
|
||||
if !userSpecifiedPath {
|
||||
xdgCfgHome := os.Getenv("XDG_CONFIG_HOME")
|
||||
if xdgCfgHome == "" {
|
||||
xdgCfgHome = filepath.Join(homeDir, ".config")
|
||||
}
|
||||
paths = append(paths, newAuthPathDefault(filepath.Join(xdgCfgHome, xdgConfigHomePath)))
|
||||
if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" {
|
||||
paths = append(paths, newAuthPathDefault(filepath.Join(dockerConfig, "config.json")))
|
||||
} else {
|
||||
paths = append(paths,
|
||||
newAuthPathDefault(filepath.Join(homeDir, dockerHomePath)),
|
||||
)
|
||||
}
|
||||
paths = append(paths,
|
||||
authPath{path: filepath.Join(dockerConfig, "config.json"), legacyFormat: false},
|
||||
)
|
||||
} else {
|
||||
paths = append(paths,
|
||||
authPath{path: filepath.Join(homeDir, dockerHomePath), legacyFormat: false},
|
||||
authPath{path: filepath.Join(homeDir, dockerLegacyHomePath), legacyFormat: true},
|
||||
)
|
||||
}
|
||||
paths = append(paths,
|
||||
authPath{path: filepath.Join(homeDir, dockerLegacyHomePath), legacyFormat: true},
|
||||
)
|
||||
return paths
|
||||
}
|
||||
|
||||
|
|
@ -276,13 +285,13 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (t
|
|||
// Anonymous function to query credentials from auth files.
|
||||
getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, string, error) {
|
||||
for _, path := range getAuthFilePaths(sys, homeDir) {
|
||||
authConfig, err := findCredentialsInFile(key, registry, path.path, path.legacyFormat)
|
||||
creds, err := findCredentialsInFile(key, registry, path)
|
||||
if err != nil {
|
||||
return types.DockerAuthConfig{}, "", err
|
||||
}
|
||||
|
||||
if authConfig != (types.DockerAuthConfig{}) {
|
||||
return authConfig, path.path, nil
|
||||
if creds != (types.DockerAuthConfig{}) {
|
||||
return creds, path.path, nil
|
||||
}
|
||||
}
|
||||
return types.DockerAuthConfig{}, "", nil
|
||||
|
|
@ -311,7 +320,7 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (t
|
|||
// This intentionally uses "registry", not "key"; we don't support namespaced
|
||||
// credentials in helpers, but a "registry" is a valid parent of "key".
|
||||
helperKey = registry
|
||||
creds, err = getAuthFromCredHelper(helper, registry)
|
||||
creds, err = getCredsFromCredHelper(helper, registry)
|
||||
}
|
||||
if err != nil {
|
||||
logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", helperKey, helper, err)
|
||||
|
|
@ -351,14 +360,14 @@ func GetAuthentication(sys *types.SystemContext, key string) (string, string, er
|
|||
// getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication,
|
||||
// it exists only to allow testing it with an artificial home directory.
|
||||
func getAuthenticationWithHomeDir(sys *types.SystemContext, key, homeDir string) (string, string, error) {
|
||||
auth, err := getCredentialsWithHomeDir(sys, key, homeDir)
|
||||
creds, err := getCredentialsWithHomeDir(sys, key, homeDir)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
if auth.IdentityToken != "" {
|
||||
if creds.IdentityToken != "" {
|
||||
return "", "", fmt.Errorf("non-empty identity token found and this API doesn't support it: %w", ErrNotSupported)
|
||||
}
|
||||
return auth.Username, auth.Password, nil
|
||||
return creds.Username, creds.Password, nil
|
||||
}
|
||||
|
||||
// RemoveAuthentication removes credentials for `key` from all possible
|
||||
|
|
@ -383,17 +392,16 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error {
|
|||
if isNamespaced {
|
||||
logrus.Debugf("Not removing credentials because namespaced keys are not supported for the credential helper: %s", helper)
|
||||
return
|
||||
} else {
|
||||
err := deleteAuthFromCredHelper(helper, key)
|
||||
if err == nil {
|
||||
logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper)
|
||||
isLoggedIn = true
|
||||
return
|
||||
}
|
||||
if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
|
||||
logrus.Debugf("Not logged in to %s with credential helper %s", key, helper)
|
||||
return
|
||||
}
|
||||
}
|
||||
err := deleteCredsFromCredHelper(helper, key)
|
||||
if err == nil {
|
||||
logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper)
|
||||
isLoggedIn = true
|
||||
return
|
||||
}
|
||||
if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
|
||||
logrus.Debugf("Not logged in to %s with credential helper %s", key, helper)
|
||||
return
|
||||
}
|
||||
multiErr = multierror.Append(multiErr, fmt.Errorf("removing credentials for %s from credential helper %s: %w", key, helper, err))
|
||||
}
|
||||
|
|
@ -403,15 +411,15 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error {
|
|||
switch helper {
|
||||
// Special-case the built-in helper for auth files.
|
||||
case sysregistriesv2.AuthenticationFileHelper:
|
||||
_, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
|
||||
if innerHelper, exists := auths.CredHelpers[key]; exists {
|
||||
_, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
|
||||
if innerHelper, exists := fileContents.CredHelpers[key]; exists {
|
||||
removeFromCredHelper(innerHelper)
|
||||
}
|
||||
if _, ok := auths.AuthConfigs[key]; ok {
|
||||
if _, ok := fileContents.AuthConfigs[key]; ok {
|
||||
isLoggedIn = true
|
||||
delete(auths.AuthConfigs, key)
|
||||
delete(fileContents.AuthConfigs, key)
|
||||
}
|
||||
return true, multiErr
|
||||
return true, "", multiErr
|
||||
})
|
||||
if err != nil {
|
||||
multiErr = multierror.Append(multiErr, err)
|
||||
|
|
@ -446,23 +454,23 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
|
|||
switch helper {
|
||||
// Special-case the built-in helper for auth files.
|
||||
case sysregistriesv2.AuthenticationFileHelper:
|
||||
_, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
|
||||
for registry, helper := range auths.CredHelpers {
|
||||
_, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
|
||||
for registry, helper := range fileContents.CredHelpers {
|
||||
// Helpers in auth files are expected
|
||||
// to exist, so no special treatment
|
||||
// for them.
|
||||
if err := deleteAuthFromCredHelper(helper, registry); err != nil {
|
||||
return false, err
|
||||
if err := deleteCredsFromCredHelper(helper, registry); err != nil {
|
||||
return false, "", err
|
||||
}
|
||||
}
|
||||
auths.CredHelpers = make(map[string]string)
|
||||
auths.AuthConfigs = make(map[string]dockerAuthConfig)
|
||||
return true, nil
|
||||
fileContents.CredHelpers = make(map[string]string)
|
||||
fileContents.AuthConfigs = make(map[string]dockerAuthConfig)
|
||||
return true, "", nil
|
||||
})
|
||||
// External helpers.
|
||||
default:
|
||||
var creds map[string]string
|
||||
creds, err = listAuthsFromCredHelper(helper)
|
||||
creds, err = listCredsInCredHelper(helper)
|
||||
if err != nil {
|
||||
if errors.Is(err, exec.ErrNotFound) {
|
||||
// It's okay if the helper doesn't exist.
|
||||
|
|
@ -472,7 +480,7 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
|
|||
}
|
||||
}
|
||||
for registry := range creds {
|
||||
err = deleteAuthFromCredHelper(helper, registry)
|
||||
err = deleteCredsFromCredHelper(helper, registry)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
|
@ -489,34 +497,34 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
|
|||
return multiErr
|
||||
}
|
||||
|
||||
func listAuthsFromCredHelper(credHelper string) (map[string]string, error) {
|
||||
func listCredsInCredHelper(credHelper string) (map[string]string, error) {
|
||||
helperName := fmt.Sprintf("docker-credential-%s", credHelper)
|
||||
p := helperclient.NewShellProgramFunc(helperName)
|
||||
return helperclient.List(p)
|
||||
}
|
||||
|
||||
// getPathToAuth gets the path of the auth.json file used for reading and writing credentials
|
||||
// returns the path, and a bool specifies whether the file is in legacy format
|
||||
func getPathToAuth(sys *types.SystemContext) (string, bool, error) {
|
||||
// getPathToAuth gets the path of the auth.json file used for reading and writing credentials,
|
||||
// and a boolean indicating whether the return value came from an explicit user choice (i.e. not defaults)
|
||||
func getPathToAuth(sys *types.SystemContext) (authPath, bool, error) {
|
||||
return getPathToAuthWithOS(sys, runtime.GOOS)
|
||||
}
|
||||
|
||||
// getPathToAuthWithOS is an internal implementation detail of getPathToAuth,
|
||||
// it exists only to allow testing it with an artificial runtime.GOOS.
|
||||
func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, error) {
|
||||
func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (authPath, bool, error) {
|
||||
if sys != nil {
|
||||
if sys.AuthFilePath != "" {
|
||||
return sys.AuthFilePath, false, nil
|
||||
return newAuthPathDefault(sys.AuthFilePath), true, nil
|
||||
}
|
||||
if sys.LegacyFormatAuthFilePath != "" {
|
||||
return sys.LegacyFormatAuthFilePath, true, nil
|
||||
return authPath{path: sys.LegacyFormatAuthFilePath, legacyFormat: true}, true, nil
|
||||
}
|
||||
if sys.RootForImplicitAbsolutePaths != "" {
|
||||
return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil
|
||||
return newAuthPathDefault(filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()))), false, nil
|
||||
}
|
||||
}
|
||||
if goOS == "windows" || goOS == "darwin" {
|
||||
return filepath.Join(homedir.Get(), nonLinuxAuthFilePath), false, nil
|
||||
return newAuthPathDefault(filepath.Join(homedir.Get(), nonLinuxAuthFilePath)), false, nil
|
||||
}
|
||||
|
||||
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
|
||||
|
|
@ -528,90 +536,96 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, e
|
|||
// This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory
|
||||
// or made a typo while setting the environment variable,
|
||||
// so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside.
|
||||
return "", false, fmt.Errorf("%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.: %w", runtimeDir, err)
|
||||
return authPath{}, false, fmt.Errorf("%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.: %w", runtimeDir, err)
|
||||
} // else ignore err and let the caller fail accessing xdgRuntimeDirPath.
|
||||
return filepath.Join(runtimeDir, xdgRuntimeDirPath), false, nil
|
||||
return newAuthPathDefault(filepath.Join(runtimeDir, xdgRuntimeDirPath)), false, nil
|
||||
}
|
||||
return fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()), false, nil
|
||||
return newAuthPathDefault(fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil
|
||||
}
|
||||
|
||||
// readJSONFile unmarshals the authentications stored in the auth.json file and returns it
|
||||
// parse unmarshals the credentials stored in the auth.json file and returns it
|
||||
// or returns an empty dockerConfigFile data structure if auth.json does not exist
|
||||
// if the file exists and is empty, readJSONFile returns an error
|
||||
func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
|
||||
var auths dockerConfigFile
|
||||
// if the file exists and is empty, this function returns an error.
|
||||
func (path authPath) parse() (dockerConfigFile, error) {
|
||||
var fileContents dockerConfigFile
|
||||
|
||||
raw, err := os.ReadFile(path)
|
||||
raw, err := os.ReadFile(path.path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
auths.AuthConfigs = map[string]dockerAuthConfig{}
|
||||
return auths, nil
|
||||
fileContents.AuthConfigs = map[string]dockerAuthConfig{}
|
||||
return fileContents, nil
|
||||
}
|
||||
return dockerConfigFile{}, err
|
||||
}
|
||||
|
||||
if legacyFormat {
|
||||
if err = json.Unmarshal(raw, &auths.AuthConfigs); err != nil {
|
||||
return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path, err)
|
||||
if path.legacyFormat {
|
||||
if err = json.Unmarshal(raw, &fileContents.AuthConfigs); err != nil {
|
||||
return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path.path, err)
|
||||
}
|
||||
return auths, nil
|
||||
return fileContents, nil
|
||||
}
|
||||
|
||||
if err = json.Unmarshal(raw, &auths); err != nil {
|
||||
return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path, err)
|
||||
if err = json.Unmarshal(raw, &fileContents); err != nil {
|
||||
return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path.path, err)
|
||||
}
|
||||
|
||||
if auths.AuthConfigs == nil {
|
||||
auths.AuthConfigs = map[string]dockerAuthConfig{}
|
||||
if fileContents.AuthConfigs == nil {
|
||||
fileContents.AuthConfigs = map[string]dockerAuthConfig{}
|
||||
}
|
||||
if auths.CredHelpers == nil {
|
||||
auths.CredHelpers = make(map[string]string)
|
||||
if fileContents.CredHelpers == nil {
|
||||
fileContents.CredHelpers = make(map[string]string)
|
||||
}
|
||||
|
||||
return auths, nil
|
||||
return fileContents, nil
|
||||
}
|
||||
|
||||
// modifyJSON finds an auth.json file, calls editor on the contents, and
|
||||
// writes it back if editor returns true.
|
||||
// Returns a human-redable description of the file, to be returned by SetCredentials.
|
||||
func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (bool, error)) (string, error) {
|
||||
path, legacyFormat, err := getPathToAuth(sys)
|
||||
// Returns a human-readable description of the file, to be returned by SetCredentials.
|
||||
//
|
||||
// The editor may also return a human-readable description of the updated location; if it is "",
|
||||
// the file itself is used.
|
||||
func modifyJSON(sys *types.SystemContext, editor func(fileContents *dockerConfigFile) (bool, string, error)) (string, error) {
|
||||
path, _, err := getPathToAuth(sys)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if legacyFormat {
|
||||
return "", fmt.Errorf("writes to %s using legacy format are not supported", path)
|
||||
if path.legacyFormat {
|
||||
return "", fmt.Errorf("writes to %s using legacy format are not supported", path.path)
|
||||
}
|
||||
|
||||
dir := filepath.Dir(path)
|
||||
dir := filepath.Dir(path.path)
|
||||
if err = os.MkdirAll(dir, 0700); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
auths, err := readJSONFile(path, false)
|
||||
fileContents, err := path.parse()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("reading JSON file %q: %w", path, err)
|
||||
return "", fmt.Errorf("reading JSON file %q: %w", path.path, err)
|
||||
}
|
||||
|
||||
updated, err := editor(&auths)
|
||||
updated, description, err := editor(&fileContents)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("updating %q: %w", path, err)
|
||||
return "", fmt.Errorf("updating %q: %w", path.path, err)
|
||||
}
|
||||
if updated {
|
||||
newData, err := json.MarshalIndent(auths, "", "\t")
|
||||
newData, err := json.MarshalIndent(fileContents, "", "\t")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("marshaling JSON %q: %w", path, err)
|
||||
return "", fmt.Errorf("marshaling JSON %q: %w", path.path, err)
|
||||
}
|
||||
|
||||
if err = ioutils.AtomicWriteFile(path, newData, 0600); err != nil {
|
||||
return "", fmt.Errorf("writing to file %q: %w", path, err)
|
||||
if err = ioutils.AtomicWriteFile(path.path, newData, 0600); err != nil {
|
||||
return "", fmt.Errorf("writing to file %q: %w", path.path, err)
|
||||
}
|
||||
}
|
||||
|
||||
return path, nil
|
||||
if description == "" {
|
||||
description = path.path
|
||||
}
|
||||
return description, nil
|
||||
}
|
||||
|
||||
func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) {
|
||||
func getCredsFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) {
|
||||
helperName := fmt.Sprintf("docker-credential-%s", credHelper)
|
||||
p := helperclient.NewShellProgramFunc(helperName)
|
||||
creds, err := helperclient.Get(p, registry)
|
||||
|
|
@ -636,7 +650,9 @@ func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig,
|
|||
}
|
||||
}
|
||||
|
||||
func setAuthToCredHelper(credHelper, registry, username, password string) error {
|
||||
// setCredsInCredHelper stores (username, password) for registry in credHelper.
|
||||
// Returns a human-readable description of the destination, to be returned by SetCredentials.
|
||||
func setCredsInCredHelper(credHelper, registry, username, password string) (string, error) {
|
||||
helperName := fmt.Sprintf("docker-credential-%s", credHelper)
|
||||
p := helperclient.NewShellProgramFunc(helperName)
|
||||
creds := &credentials.Credentials{
|
||||
|
|
@ -644,10 +660,13 @@ func setAuthToCredHelper(credHelper, registry, username, password string) error
|
|||
Username: username,
|
||||
Secret: password,
|
||||
}
|
||||
return helperclient.Store(p, creds)
|
||||
if err := helperclient.Store(p, creds); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return fmt.Sprintf("credential helper: %s", credHelper), nil
|
||||
}
|
||||
|
||||
func deleteAuthFromCredHelper(credHelper, registry string) error {
|
||||
func deleteCredsFromCredHelper(credHelper, registry string) error {
|
||||
helperName := fmt.Sprintf("docker-credential-%s", credHelper)
|
||||
p := helperclient.NewShellProgramFunc(helperName)
|
||||
return helperclient.Erase(p, registry)
|
||||
|
|
@ -655,25 +674,25 @@ func deleteAuthFromCredHelper(credHelper, registry string) error {
|
|||
|
||||
// findCredentialsInFile looks for credentials matching "key"
|
||||
// (which is "registry" or a namespace in "registry") in "path".
|
||||
func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) {
|
||||
auths, err := readJSONFile(path, legacyFormat)
|
||||
func findCredentialsInFile(key, registry string, path authPath) (types.DockerAuthConfig, error) {
|
||||
fileContents, err := path.parse()
|
||||
if err != nil {
|
||||
return types.DockerAuthConfig{}, fmt.Errorf("reading JSON file %q: %w", path, err)
|
||||
return types.DockerAuthConfig{}, fmt.Errorf("reading JSON file %q: %w", path.path, err)
|
||||
}
|
||||
|
||||
// First try cred helpers. They should always be normalized.
|
||||
// This intentionally uses "registry", not "key"; we don't support namespaced
|
||||
// credentials in helpers.
|
||||
if ch, exists := auths.CredHelpers[registry]; exists {
|
||||
logrus.Debugf("Looking up in credential helper %s based on credHelpers entry in %s", ch, path)
|
||||
return getAuthFromCredHelper(ch, registry)
|
||||
if ch, exists := fileContents.CredHelpers[registry]; exists {
|
||||
logrus.Debugf("Looking up in credential helper %s based on credHelpers entry in %s", ch, path.path)
|
||||
return getCredsFromCredHelper(ch, registry)
|
||||
}
|
||||
|
||||
// Support sub-registry namespaces in auth.
|
||||
// (This is not a feature of ~/.docker/config.json; we support it even for
|
||||
// those files as an extension.)
|
||||
var keys []string
|
||||
if !legacyFormat {
|
||||
if !path.legacyFormat {
|
||||
keys = authKeysForKey(key)
|
||||
} else {
|
||||
keys = []string{registry}
|
||||
|
|
@ -682,8 +701,8 @@ func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types
|
|||
// Repo or namespace keys are only supported as exact matches. For registry
|
||||
// keys we prefer exact matches as well.
|
||||
for _, key := range keys {
|
||||
if val, exists := auths.AuthConfigs[key]; exists {
|
||||
return decodeDockerAuth(val)
|
||||
if val, exists := fileContents.AuthConfigs[key]; exists {
|
||||
return decodeDockerAuth(path.path, key, val)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -696,15 +715,15 @@ func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types
|
|||
// The docker.io registry still uses the /v1/ key with a special host name,
|
||||
// so account for that as well.
|
||||
registry = normalizeRegistry(registry)
|
||||
for k, v := range auths.AuthConfigs {
|
||||
if normalizeAuthFileKey(k, legacyFormat) == registry {
|
||||
return decodeDockerAuth(v)
|
||||
for k, v := range fileContents.AuthConfigs {
|
||||
if normalizeAuthFileKey(k, path.legacyFormat) == registry {
|
||||
return decodeDockerAuth(path.path, k, v)
|
||||
}
|
||||
}
|
||||
|
||||
// Only log this if we found nothing; getCredentialsWithHomeDir logs the
|
||||
// source of found data.
|
||||
logrus.Debugf("No credentials matching %s found in %s", key, path)
|
||||
logrus.Debugf("No credentials matching %s found in %s", key, path.path)
|
||||
return types.DockerAuthConfig{}, nil
|
||||
}
|
||||
|
||||
|
|
@ -729,22 +748,26 @@ func authKeysForKey(key string) (res []string) {
|
|||
return res
|
||||
}
|
||||
|
||||
// decodeDockerAuth decodes the username and password, which is
|
||||
// encoded in base64.
|
||||
func decodeDockerAuth(conf dockerAuthConfig) (types.DockerAuthConfig, error) {
|
||||
// decodeDockerAuth decodes the username and password from conf,
|
||||
// which is entry key in path.
|
||||
func decodeDockerAuth(path, key string, conf dockerAuthConfig) (types.DockerAuthConfig, error) {
|
||||
decoded, err := base64.StdEncoding.DecodeString(conf.Auth)
|
||||
if err != nil {
|
||||
return types.DockerAuthConfig{}, err
|
||||
}
|
||||
|
||||
parts := strings.SplitN(string(decoded), ":", 2)
|
||||
if len(parts) != 2 {
|
||||
user, passwordPart, valid := strings.Cut(string(decoded), ":")
|
||||
if !valid {
|
||||
// if it's invalid just skip, as docker does
|
||||
if len(decoded) > 0 { // Docker writes "auths": { "$host": {} } entries if a credential helper is used, don’t warn about those
|
||||
logrus.Warnf(`Error parsing the "auth" field of a credential entry %q in %q, missing semicolon`, key, path) // Don’t include the text of decoded, because that might put secrets into a log.
|
||||
} else {
|
||||
logrus.Debugf("Found an empty credential entry %q in %q (an unhandled credential helper marker?), moving on", key, path)
|
||||
}
|
||||
return types.DockerAuthConfig{}, nil
|
||||
}
|
||||
|
||||
user := parts[0]
|
||||
password := strings.Trim(parts[1], "\x00")
|
||||
password := strings.Trim(passwordPart, "\x00")
|
||||
return types.DockerAuthConfig{
|
||||
Username: user,
|
||||
Password: password,
|
||||
|
|
@ -759,7 +782,7 @@ func normalizeAuthFileKey(key string, legacyFormat bool) string {
|
|||
stripped = strings.TrimPrefix(stripped, "https://")
|
||||
|
||||
if legacyFormat || stripped != key {
|
||||
stripped = strings.SplitN(stripped, "/", 2)[0]
|
||||
stripped, _, _ = strings.Cut(stripped, "/")
|
||||
}
|
||||
|
||||
return normalizeRegistry(stripped)
|
||||
|
|
|
|||
9
vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go
generated
vendored
9
vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go
generated
vendored
|
|
@ -14,6 +14,7 @@ import (
|
|||
"github.com/containers/storage/pkg/homedir"
|
||||
"github.com/containers/storage/pkg/lockfile"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
// defaultShortNameMode is the default mode of registries.conf files if the
|
||||
|
|
@ -308,9 +309,7 @@ func newShortNameAliasCache(path string, conf *shortNameAliasConf) (*shortNameAl
|
|||
// updateWithConfigurationFrom updates c with configuration from updates.
|
||||
// In case of conflict, updates is preferred.
|
||||
func (c *shortNameAliasCache) updateWithConfigurationFrom(updates *shortNameAliasCache) {
|
||||
for name, value := range updates.namedAliases {
|
||||
c.namedAliases[name] = value
|
||||
}
|
||||
maps.Copy(c.namedAliases, updates.namedAliases)
|
||||
}
|
||||
|
||||
func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAliasCache, error) {
|
||||
|
|
@ -335,7 +334,7 @@ func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAli
|
|||
return &conf, cache, nil
|
||||
}
|
||||
|
||||
func shortNameAliasesConfPathAndLock(ctx *types.SystemContext) (string, lockfile.Locker, error) {
|
||||
func shortNameAliasesConfPathAndLock(ctx *types.SystemContext) (string, *lockfile.LockFile, error) {
|
||||
shortNameAliasesConfPath, err := shortNameAliasesConfPath(ctx)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
|
|
@ -346,6 +345,6 @@ func shortNameAliasesConfPathAndLock(ctx *types.SystemContext) (string, lockfile
|
|||
}
|
||||
|
||||
lockPath := shortNameAliasesConfPath + ".lock"
|
||||
locker, err := lockfile.GetLockfile(lockPath)
|
||||
locker, err := lockfile.GetLockFile(lockPath)
|
||||
return shortNameAliasesConfPath, locker, err
|
||||
}
|
||||
|
|
|
|||
39
vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
generated
vendored
39
vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
generated
vendored
|
|
@ -6,7 +6,6 @@ import (
|
|||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
|
@ -15,7 +14,9 @@ import (
|
|||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage/pkg/homedir"
|
||||
"github.com/containers/storage/pkg/regexp"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
// systemRegistriesConfPath is the path to the system-wide registry
|
||||
|
|
@ -198,6 +199,7 @@ type V1RegistriesConf struct {
|
|||
}
|
||||
|
||||
// Nonempty returns true if config contains at least one configuration entry.
|
||||
// Empty arrays are treated as missing entries.
|
||||
func (config *V1RegistriesConf) Nonempty() bool {
|
||||
copy := *config // A shallow copy
|
||||
if copy.V1TOMLConfig.Search.Registries != nil && len(copy.V1TOMLConfig.Search.Registries) == 0 {
|
||||
|
|
@ -209,7 +211,15 @@ func (config *V1RegistriesConf) Nonempty() bool {
|
|||
if copy.V1TOMLConfig.Block.Registries != nil && len(copy.V1TOMLConfig.Block.Registries) == 0 {
|
||||
copy.V1TOMLConfig.Block.Registries = nil
|
||||
}
|
||||
return !reflect.DeepEqual(copy, V1RegistriesConf{})
|
||||
return copy.hasSetField()
|
||||
}
|
||||
|
||||
// hasSetField returns true if config contains at least one configuration entry.
|
||||
// This is useful because of a subtlety of the behavior of the TOML decoder, where a missing array field
|
||||
// is not modified while unmarshaling (in our case remains to nil), while an [] is unmarshaled
|
||||
// as a non-nil []string{}.
|
||||
func (config *V1RegistriesConf) hasSetField() bool {
|
||||
return !reflect.DeepEqual(*config, V1RegistriesConf{})
|
||||
}
|
||||
|
||||
// V2RegistriesConf is the sysregistries v2 configuration format.
|
||||
|
|
@ -257,7 +267,15 @@ func (config *V2RegistriesConf) Nonempty() bool {
|
|||
if !copy.shortNameAliasConf.nonempty() {
|
||||
copy.shortNameAliasConf = shortNameAliasConf{}
|
||||
}
|
||||
return !reflect.DeepEqual(copy, V2RegistriesConf{})
|
||||
return copy.hasSetField()
|
||||
}
|
||||
|
||||
// hasSetField returns true if config contains at least one configuration entry.
|
||||
// This is useful because of a subtlety of the behavior of the TOML decoder, where a missing array field
|
||||
// is not modified while unmarshaling (in our case remains to nil), while an [] is unmarshaled
|
||||
// as a non-nil []string{}.
|
||||
func (config *V2RegistriesConf) hasSetField() bool {
|
||||
return !reflect.DeepEqual(*config, V2RegistriesConf{})
|
||||
}
|
||||
|
||||
// parsedConfig is the result of parsing, and possibly merging, configuration files;
|
||||
|
|
@ -367,7 +385,7 @@ func (config *V1RegistriesConf) ConvertToV2() (*V2RegistriesConf, error) {
|
|||
}
|
||||
|
||||
// anchoredDomainRegexp is an internal implementation detail of postProcess, defining the valid values of elements of UnqualifiedSearchRegistries.
|
||||
var anchoredDomainRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "$")
|
||||
var anchoredDomainRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "$")
|
||||
|
||||
// postProcess checks the consistency of all the configuration, looks for conflicts,
|
||||
// and normalizes the configuration (e.g., sets the Prefix to Location if not set).
|
||||
|
|
@ -923,15 +941,15 @@ func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) {
|
|||
logrus.Debugf("Failed to decode keys %q from %q", keys, path)
|
||||
}
|
||||
|
||||
if combinedTOML.V1RegistriesConf.Nonempty() {
|
||||
if combinedTOML.V1RegistriesConf.hasSetField() {
|
||||
// Enforce the v2 format if requested.
|
||||
if forceV2 {
|
||||
return nil, &InvalidRegistries{s: "registry must be in v2 format but is in v1"}
|
||||
}
|
||||
|
||||
// Convert a v1 config into a v2 config.
|
||||
if combinedTOML.V2RegistriesConf.Nonempty() {
|
||||
return nil, &InvalidRegistries{s: "mixing sysregistry v1/v2 is not supported"}
|
||||
if combinedTOML.V2RegistriesConf.hasSetField() {
|
||||
return nil, &InvalidRegistries{s: fmt.Sprintf("mixing sysregistry v1/v2 is not supported: %#v", combinedTOML)}
|
||||
}
|
||||
converted, err := combinedTOML.V1RegistriesConf.ConvertToV2()
|
||||
if err != nil {
|
||||
|
|
@ -1002,12 +1020,9 @@ func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) {
|
|||
// Go maps have a non-deterministic order when iterating the keys, so
|
||||
// we dump them in a slice and sort it to enforce some order in
|
||||
// Registries slice. Some consumers of c/image (e.g., CRI-O) log the
|
||||
// the configuration where a non-deterministic order could easily cause
|
||||
// configuration where a non-deterministic order could easily cause
|
||||
// confusion.
|
||||
prefixes := []string{}
|
||||
for prefix := range registryMap {
|
||||
prefixes = append(prefixes, prefix)
|
||||
}
|
||||
prefixes := maps.Keys(registryMap)
|
||||
sort.Strings(prefixes)
|
||||
|
||||
c.partialV2.Registries = []Registry{}
|
||||
|
|
|
|||
23
vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
generated
vendored
23
vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
generated
vendored
|
|
@ -2,6 +2,7 @@ package tlsclientconfig
|
|||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
|
|
@ -10,9 +11,8 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/go-connections/sockets"
|
||||
"github.com/docker/go-connections/tlsconfig"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc
|
||||
|
|
@ -47,7 +47,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
|
|||
return err
|
||||
}
|
||||
if tlsc.RootCAs == nil {
|
||||
systemPool, err := tlsconfig.SystemCertPool()
|
||||
systemPool, err := x509.SystemCertPool()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to get system cert pool: %w", err)
|
||||
}
|
||||
|
|
@ -81,12 +81,9 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
|
|||
}
|
||||
|
||||
func hasFile(files []os.DirEntry, name string) bool {
|
||||
for _, f := range files {
|
||||
if f.Name() == name {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
return slices.ContainsFunc(files, func(f os.DirEntry) bool {
|
||||
return f.Name() == name
|
||||
})
|
||||
}
|
||||
|
||||
// NewTransport Creates a default transport
|
||||
|
|
@ -94,17 +91,13 @@ func NewTransport() *http.Transport {
|
|||
direct := &net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
DualStack: true,
|
||||
}
|
||||
tr := &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
DialContext: direct.DialContext,
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
// TODO(dmcgowan): Call close idle connections when complete and use keep alive
|
||||
DisableKeepAlives: true,
|
||||
}
|
||||
if _, err := sockets.DialerFromEnvironment(direct); err != nil {
|
||||
logrus.Debugf("Can't execute DialerFromEnvironment: %v", err)
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
MaxIdleConns: 100,
|
||||
}
|
||||
return tr
|
||||
}
|
||||
|
|
|
|||
22
vendor/github.com/containers/image/v5/signature/docker.go
generated
vendored
22
vendor/github.com/containers/image/v5/signature/docker.go
generated
vendored
|
|
@ -11,6 +11,7 @@ import (
|
|||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/signature/internal"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// SignOptions includes optional parameters for signing container images.
|
||||
|
|
@ -50,15 +51,26 @@ func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism,
|
|||
// using mech.
|
||||
func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte,
|
||||
expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) {
|
||||
sig, _, err := VerifyImageManifestSignatureUsingKeyIdentityList(unverifiedSignature, unverifiedManifest, expectedDockerReference, mech, []string{expectedKeyIdentity})
|
||||
return sig, err
|
||||
}
|
||||
|
||||
// VerifyImageManifestSignatureUsingKeyIdentityList checks that unverifiedSignature uses one of the expectedKeyIdentities
|
||||
// to sign unverifiedManifest as expectedDockerReference, using mech. Returns the verified signature and the key identity that
|
||||
// was used to verify it.
|
||||
func VerifyImageManifestSignatureUsingKeyIdentityList(unverifiedSignature, unverifiedManifest []byte,
|
||||
expectedDockerReference string, mech SigningMechanism, expectedKeyIdentities []string) (*Signature, string, error) {
|
||||
expectedRef, err := reference.ParseNormalizedNamed(expectedDockerReference)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, "", err
|
||||
}
|
||||
var matchedKeyIdentity string
|
||||
sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{
|
||||
validateKeyIdentity: func(keyIdentity string) error {
|
||||
if keyIdentity != expectedKeyIdentity {
|
||||
return internal.NewInvalidSignatureError(fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity))
|
||||
if !slices.Contains(expectedKeyIdentities, keyIdentity) {
|
||||
return internal.NewInvalidSignatureError(fmt.Sprintf("Signature by %s does not match expected fingerprints %v", keyIdentity, expectedKeyIdentities))
|
||||
}
|
||||
matchedKeyIdentity = keyIdentity
|
||||
return nil
|
||||
},
|
||||
validateSignedDockerReference: func(signedDockerReference string) error {
|
||||
|
|
@ -84,7 +96,7 @@ func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byt
|
|||
},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, "", err
|
||||
}
|
||||
return sig, nil
|
||||
return sig, matchedKeyIdentity, err
|
||||
}
|
||||
|
|
|
|||
204
vendor/github.com/containers/image/v5/signature/fulcio_cert.go
generated
vendored
Normal file
204
vendor/github.com/containers/image/v5/signature/fulcio_cert.go
generated
vendored
Normal file
|
|
@ -0,0 +1,204 @@
|
|||
package signature
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/x509"
|
||||
"encoding/asn1"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/v5/signature/internal"
|
||||
"github.com/sigstore/fulcio/pkg/certificate"
|
||||
"github.com/sigstore/sigstore/pkg/cryptoutils"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// fulcioTrustRoot contains policy allow validating Fulcio-issued certificates.
|
||||
// Users should call validate() on the policy before using it.
|
||||
type fulcioTrustRoot struct {
|
||||
caCertificates *x509.CertPool
|
||||
oidcIssuer string
|
||||
subjectEmail string
|
||||
}
|
||||
|
||||
func (f *fulcioTrustRoot) validate() error {
|
||||
if f.oidcIssuer == "" {
|
||||
return errors.New("Internal inconsistency: Fulcio use set up without OIDC issuer")
|
||||
}
|
||||
if f.subjectEmail == "" {
|
||||
return errors.New("Internal inconsistency: Fulcio use set up without subject email")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// fulcioIssuerInCertificate returns the OIDC issuer recorded by Fulcio in unutrustedCertificate;
|
||||
// it fails if the extension is not present in the certificate, or on any inconsistency.
|
||||
func fulcioIssuerInCertificate(untrustedCertificate *x509.Certificate) (string, error) {
|
||||
// == Validate the recorded OIDC issuer
|
||||
gotOIDCIssuer1 := false
|
||||
gotOIDCIssuer2 := false
|
||||
var oidcIssuer1, oidcIssuer2 string
|
||||
// certificate.ParseExtensions doesn’t reject duplicate extensions, and doesn’t detect inconsistencies
|
||||
// between certificate.OIDIssuer and certificate.OIDIssuerV2.
|
||||
// Go 1.19 rejects duplicate extensions universally; but until we can require Go 1.19,
|
||||
// reject duplicates manually.
|
||||
for _, untrustedExt := range untrustedCertificate.Extensions {
|
||||
if untrustedExt.Id.Equal(certificate.OIDIssuer) { //nolint:staticcheck // This is deprecated, but we must continue to accept it.
|
||||
if gotOIDCIssuer1 {
|
||||
// Coverage: This is unreachable in Go ≥1.19, which rejects certificates with duplicate extensions
|
||||
// already in ParseCertificate.
|
||||
return "", internal.NewInvalidSignatureError("Fulcio certificate has a duplicate OIDC issuer v1 extension")
|
||||
}
|
||||
oidcIssuer1 = string(untrustedExt.Value)
|
||||
gotOIDCIssuer1 = true
|
||||
} else if untrustedExt.Id.Equal(certificate.OIDIssuerV2) {
|
||||
if gotOIDCIssuer2 {
|
||||
// Coverage: This is unreachable in Go ≥1.19, which rejects certificates with duplicate extensions
|
||||
// already in ParseCertificate.
|
||||
return "", internal.NewInvalidSignatureError("Fulcio certificate has a duplicate OIDC issuer v2 extension")
|
||||
}
|
||||
rest, err := asn1.Unmarshal(untrustedExt.Value, &oidcIssuer2)
|
||||
if err != nil {
|
||||
return "", internal.NewInvalidSignatureError(fmt.Sprintf("invalid ASN.1 in OIDC issuer v2 extension: %v", err))
|
||||
}
|
||||
if len(rest) != 0 {
|
||||
return "", internal.NewInvalidSignatureError("invalid ASN.1 in OIDC issuer v2 extension, trailing data")
|
||||
}
|
||||
gotOIDCIssuer2 = true
|
||||
}
|
||||
}
|
||||
switch {
|
||||
case gotOIDCIssuer1 && gotOIDCIssuer2:
|
||||
if oidcIssuer1 != oidcIssuer2 {
|
||||
return "", internal.NewInvalidSignatureError(fmt.Sprintf("inconsistent OIDC issuer extension values: v1 %#v, v2 %#v",
|
||||
oidcIssuer1, oidcIssuer2))
|
||||
}
|
||||
return oidcIssuer1, nil
|
||||
case gotOIDCIssuer1:
|
||||
return oidcIssuer1, nil
|
||||
case gotOIDCIssuer2:
|
||||
return oidcIssuer2, nil
|
||||
default:
|
||||
return "", internal.NewInvalidSignatureError("Fulcio certificate is missing the issuer extension")
|
||||
}
|
||||
}
|
||||
|
||||
func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time, untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte) (crypto.PublicKey, error) {
|
||||
// == Verify the certificate is correctly signed
|
||||
var untrustedIntermediatePool *x509.CertPool // = nil
|
||||
// untrustedCertificateChainPool.AppendCertsFromPEM does something broadly similar,
|
||||
// but it seems to optimize for memory usage at the cost of larger CPU usage (i.e. to load
|
||||
// the hundreds of trusted CAs). Golang’s TLS code similarly calls individual AddCert
|
||||
// for intermediate certificates.
|
||||
if len(untrustedIntermediateChainBytes) > 0 {
|
||||
untrustedIntermediateChain, err := cryptoutils.UnmarshalCertificatesFromPEM(untrustedIntermediateChainBytes)
|
||||
if err != nil {
|
||||
return nil, internal.NewInvalidSignatureError(fmt.Sprintf("loading certificate chain: %v", err))
|
||||
}
|
||||
untrustedIntermediatePool = x509.NewCertPool()
|
||||
if len(untrustedIntermediateChain) > 1 {
|
||||
for _, untrustedIntermediateCert := range untrustedIntermediateChain[:len(untrustedIntermediateChain)-1] {
|
||||
untrustedIntermediatePool.AddCert(untrustedIntermediateCert)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
untrustedLeafCerts, err := cryptoutils.UnmarshalCertificatesFromPEM(untrustedCertificateBytes)
|
||||
if err != nil {
|
||||
return nil, internal.NewInvalidSignatureError(fmt.Sprintf("parsing leaf certificate: %v", err))
|
||||
}
|
||||
switch len(untrustedLeafCerts) {
|
||||
case 0:
|
||||
return nil, internal.NewInvalidSignatureError("no certificate found in signature certificate data")
|
||||
case 1:
|
||||
break // OK
|
||||
default:
|
||||
return nil, internal.NewInvalidSignatureError("unexpected multiple certificates present in signature certificate data")
|
||||
}
|
||||
untrustedCertificate := untrustedLeafCerts[0]
|
||||
|
||||
// Go rejects Subject Alternative Name that has no DNSNames, EmailAddresses, IPAddresses and URIs;
|
||||
// we match SAN ourselves, so override that.
|
||||
if len(untrustedCertificate.UnhandledCriticalExtensions) > 0 {
|
||||
var remaining []asn1.ObjectIdentifier
|
||||
for _, oid := range untrustedCertificate.UnhandledCriticalExtensions {
|
||||
if !oid.Equal(cryptoutils.SANOID) {
|
||||
remaining = append(remaining, oid)
|
||||
}
|
||||
}
|
||||
untrustedCertificate.UnhandledCriticalExtensions = remaining
|
||||
}
|
||||
|
||||
if _, err := untrustedCertificate.Verify(x509.VerifyOptions{
|
||||
Intermediates: untrustedIntermediatePool,
|
||||
Roots: f.caCertificates,
|
||||
// NOTE: Cosign uses untrustedCertificate.NotBefore here (i.e. uses _that_ time for intermediate certificate validation),
|
||||
// and validates the leaf certificate against relevantTime manually.
|
||||
// We verify the full certificate chain against relevantTime instead.
|
||||
// Assuming the certificate is fulcio-generated and very short-lived, that should make little difference.
|
||||
CurrentTime: relevantTime,
|
||||
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
|
||||
}); err != nil {
|
||||
return nil, internal.NewInvalidSignatureError(fmt.Sprintf("veryfing leaf certificate failed: %v", err))
|
||||
}
|
||||
|
||||
// Cosign verifies a SCT of the certificate (either embedded, or even, probably irrelevant, externally-supplied).
|
||||
//
|
||||
// We don’t currently do that.
|
||||
//
|
||||
// At the very least, with Fulcio we require Rekor SETs to prove Rekor contains a log of the signature, and that
|
||||
// already contains the full certificate; so a SCT of the certificate is superfluous (assuming Rekor allowed searching by
|
||||
// certificate subject, which, well…). That argument might go away if we add support for RFC 3161 timestamps instead of Rekor.
|
||||
//
|
||||
// Secondarily, assuming a trusted Fulcio server (which, to be fair, might not be the case for the public one) SCT is not clearly
|
||||
// better than the Fulcio server maintaining an audit log; a SCT can only reveal a misissuance if there is some other authoritative
|
||||
// log of approved Fulcio invocations, and it’s not clear where that would come from, especially human users manually
|
||||
// logging in using OpenID are not going to maintain a record of those actions.
|
||||
//
|
||||
// Also, the SCT does not help reveal _what_ was maliciously signed, nor does it protect against malicious signatures
|
||||
// by correctly-issued certificates.
|
||||
//
|
||||
// So, pragmatically, the ideal design seem to be to only do signatures from a trusted build system (which is, by definition,
|
||||
// the arbiter of desired vs. malicious signatures) that maintains an audit log of performed signature operations; and that seems to
|
||||
// make the SCT (and all of Rekor apart from the trusted timestamp) unnecessary.
|
||||
|
||||
// == Validate the recorded OIDC issuer
|
||||
oidcIssuer, err := fulcioIssuerInCertificate(untrustedCertificate)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if oidcIssuer != f.oidcIssuer {
|
||||
return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Unexpected Fulcio OIDC issuer %q", oidcIssuer))
|
||||
}
|
||||
|
||||
// == Validate the OIDC subject
|
||||
if !slices.Contains(untrustedCertificate.EmailAddresses, f.subjectEmail) {
|
||||
return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %s not found (got %#v)",
|
||||
f.subjectEmail,
|
||||
untrustedCertificate.EmailAddresses))
|
||||
}
|
||||
// FIXME: Match more subject types? Cosign does:
|
||||
// - .DNSNames (can’t be issued by Fulcio)
|
||||
// - .IPAddresses (can’t be issued by Fulcio)
|
||||
// - .URIs (CAN be issued by Fulcio)
|
||||
// - OtherName values in SAN (CAN be issued by Fulcio)
|
||||
// - Various values about GitHub workflows (CAN be issued by Fulcio)
|
||||
// What does it… mean to get an OAuth2 identity for an IP address?
|
||||
// FIXME: How far into Turing-completeness for the issuer/subject do we need to get? Simultaneously accepted alternatives, for
|
||||
// issuers and/or subjects and/or combinations? Regexps? More?
|
||||
|
||||
return untrustedCertificate.PublicKey, nil
|
||||
}
|
||||
|
||||
func verifyRekorFulcio(rekorPublicKey *ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
|
||||
untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string,
|
||||
untrustedPayloadBytes []byte) (crypto.PublicKey, error) {
|
||||
rekorSETTime, err := internal.VerifyRekorSET(rekorPublicKey, untrustedRekorSET, untrustedCertificateBytes,
|
||||
untrustedBase64Signature, untrustedPayloadBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return fulcioTrustRoot.verifyFulcioCertificateAtTime(rekorSETTime, untrustedCertificateBytes, untrustedIntermediateChainBytes)
|
||||
}
|
||||
20
vendor/github.com/containers/image/v5/signature/internal/json.go
generated
vendored
20
vendor/github.com/containers/image/v5/signature/internal/json.go
generated
vendored
|
|
@ -5,6 +5,8 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/containers/image/v5/internal/set"
|
||||
)
|
||||
|
||||
// JSONFormatError is returned when JSON does not match expected format.
|
||||
|
|
@ -20,8 +22,8 @@ func (err JSONFormatError) Error() string {
|
|||
//
|
||||
// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy,
|
||||
// we could use reflection to automate this. Later?
|
||||
func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error {
|
||||
seenKeys := map[string]struct{}{}
|
||||
func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) error {
|
||||
seenKeys := set.New[string]()
|
||||
|
||||
dec := json.NewDecoder(bytes.NewReader(data))
|
||||
t, err := dec.Token()
|
||||
|
|
@ -45,10 +47,10 @@ func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interfa
|
|||
// Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
|
||||
return JSONFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
|
||||
}
|
||||
if _, ok := seenKeys[key]; ok {
|
||||
if seenKeys.Contains(key) {
|
||||
return JSONFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
|
||||
}
|
||||
seenKeys[key] = struct{}{}
|
||||
seenKeys.Add(key)
|
||||
|
||||
valuePtr := fieldResolver(key)
|
||||
if valuePtr == nil {
|
||||
|
|
@ -68,11 +70,11 @@ func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interfa
|
|||
// ParanoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, but failing on the slightest unexpected aspect
|
||||
// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
|
||||
// must be present exactly once, and none other fields are accepted.
|
||||
func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error {
|
||||
seenKeys := map[string]struct{}{}
|
||||
if err := ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
|
||||
func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]any) error {
|
||||
seenKeys := set.New[string]()
|
||||
if err := ParanoidUnmarshalJSONObject(data, func(key string) any {
|
||||
if valuePtr, ok := exactFields[key]; ok {
|
||||
seenKeys[key] = struct{}{}
|
||||
seenKeys.Add(key)
|
||||
return valuePtr
|
||||
}
|
||||
return nil
|
||||
|
|
@ -80,7 +82,7 @@ func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]
|
|||
return err
|
||||
}
|
||||
for key := range exactFields {
|
||||
if _, ok := seenKeys[key]; !ok {
|
||||
if !seenKeys.Contains(key) {
|
||||
return JSONFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key))
|
||||
}
|
||||
}
|
||||
|
|
|
|||
237
vendor/github.com/containers/image/v5/signature/internal/rekor_set.go
generated
vendored
Normal file
237
vendor/github.com/containers/image/v5/signature/internal/rekor_set.go
generated
vendored
Normal file
|
|
@ -0,0 +1,237 @@
|
|||
package internal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/ecdsa"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer"
|
||||
"github.com/sigstore/rekor/pkg/generated/models"
|
||||
)
|
||||
|
||||
// This is the github.com/sigstore/rekor/pkg/generated/models.Hashedrekord.APIVersion for github.com/sigstore/rekor/pkg/generated/models.HashedrekordV001Schema.
|
||||
// We could alternatively use github.com/sigstore/rekor/pkg/types/hashedrekord.APIVERSION, but that subpackage adds too many dependencies.
|
||||
const HashedRekordV001APIVersion = "0.0.1"
|
||||
|
||||
// UntrustedRekorSET is a parsed content of the sigstore-signature Rekor SET
|
||||
// (note that this a signature-specific format, not a format directly used by the Rekor API).
|
||||
// This corresponds to github.com/sigstore/cosign/bundle.RekorBundle, but we impose a stricter decoder.
|
||||
type UntrustedRekorSET struct {
|
||||
UntrustedSignedEntryTimestamp []byte // A signature over some canonical JSON form of UntrustedPayload
|
||||
UntrustedPayload json.RawMessage
|
||||
}
|
||||
|
||||
type UntrustedRekorPayload struct {
|
||||
Body []byte // In cosign, this is an any, but only a string works
|
||||
IntegratedTime int64
|
||||
LogIndex int64
|
||||
LogID string
|
||||
}
|
||||
|
||||
// A compile-time check that UntrustedRekorSET implements json.Unmarshaler
|
||||
var _ json.Unmarshaler = (*UntrustedRekorSET)(nil)
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaler interface
|
||||
func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error {
|
||||
err := s.strictUnmarshalJSON(data)
|
||||
if err != nil {
|
||||
if formatErr, ok := err.(JSONFormatError); ok {
|
||||
err = NewInvalidSignatureError(formatErr.Error())
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
|
||||
// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller.
|
||||
func (s *UntrustedRekorSET) strictUnmarshalJSON(data []byte) error {
|
||||
return ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
|
||||
"SignedEntryTimestamp": &s.UntrustedSignedEntryTimestamp,
|
||||
"Payload": &s.UntrustedPayload,
|
||||
})
|
||||
}
|
||||
|
||||
// A compile-time check that UntrustedRekorSET and *UntrustedRekorSET implements json.Marshaler
|
||||
var _ json.Marshaler = UntrustedRekorSET{}
|
||||
var _ json.Marshaler = (*UntrustedRekorSET)(nil)
|
||||
|
||||
// MarshalJSON implements the json.Marshaler interface.
|
||||
func (s UntrustedRekorSET) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(map[string]any{
|
||||
"SignedEntryTimestamp": s.UntrustedSignedEntryTimestamp,
|
||||
"Payload": s.UntrustedPayload,
|
||||
})
|
||||
}
|
||||
|
||||
// A compile-time check that UntrustedRekorPayload implements json.Unmarshaler
|
||||
var _ json.Unmarshaler = (*UntrustedRekorPayload)(nil)
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaler interface
|
||||
func (p *UntrustedRekorPayload) UnmarshalJSON(data []byte) error {
|
||||
err := p.strictUnmarshalJSON(data)
|
||||
if err != nil {
|
||||
if formatErr, ok := err.(JSONFormatError); ok {
|
||||
err = NewInvalidSignatureError(formatErr.Error())
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
|
||||
// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller.
|
||||
func (p *UntrustedRekorPayload) strictUnmarshalJSON(data []byte) error {
|
||||
return ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
|
||||
"body": &p.Body,
|
||||
"integratedTime": &p.IntegratedTime,
|
||||
"logIndex": &p.LogIndex,
|
||||
"logID": &p.LogID,
|
||||
})
|
||||
}
|
||||
|
||||
// A compile-time check that UntrustedRekorPayload and *UntrustedRekorPayload implements json.Marshaler
|
||||
var _ json.Marshaler = UntrustedRekorPayload{}
|
||||
var _ json.Marshaler = (*UntrustedRekorPayload)(nil)
|
||||
|
||||
// MarshalJSON implements the json.Marshaler interface.
|
||||
func (p UntrustedRekorPayload) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(map[string]any{
|
||||
"body": p.Body,
|
||||
"integratedTime": p.IntegratedTime,
|
||||
"logIndex": p.LogIndex,
|
||||
"logID": p.LogID,
|
||||
})
|
||||
}
|
||||
|
||||
// VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data.
|
||||
// Returns bundle upload time on success.
|
||||
func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
|
||||
// FIXME: Should the publicKey parameter hard-code ecdsa?
|
||||
|
||||
// == Parse SET bytes
|
||||
var untrustedSET UntrustedRekorSET
|
||||
// Sadly. we need to parse and transform untrusted data before verifying a cryptographic signature...
|
||||
if err := json.Unmarshal(unverifiedRekorSET, &untrustedSET); err != nil {
|
||||
return time.Time{}, NewInvalidSignatureError(err.Error())
|
||||
}
|
||||
// == Verify SET signature
|
||||
// Cosign unmarshals and re-marshals UntrustedPayload; that seems unnecessary,
|
||||
// assuming jsoncanonicalizer is designed to operate on untrusted data.
|
||||
untrustedSETPayloadCanonicalBytes, err := jsoncanonicalizer.Transform(untrustedSET.UntrustedPayload)
|
||||
if err != nil {
|
||||
return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("canonicalizing Rekor SET JSON: %v", err))
|
||||
}
|
||||
untrustedSETPayloadHash := sha256.Sum256(untrustedSETPayloadCanonicalBytes)
|
||||
if !ecdsa.VerifyASN1(publicKey, untrustedSETPayloadHash[:], untrustedSET.UntrustedSignedEntryTimestamp) {
|
||||
return time.Time{}, NewInvalidSignatureError("cryptographic signature verification of Rekor SET failed")
|
||||
}
|
||||
|
||||
// == Parse SET payload
|
||||
// Parse the cryptographically-verified canonicalized variant, NOT the originally-delivered representation,
|
||||
// to decrease risk of exploiting the JSON parser. Note that if there were an arbitrary execution vulnerability, the attacker
|
||||
// could have exploited the parsing of unverifiedRekorSET above already; so this, at best, ensures more consistent processing
|
||||
// of the SET payload.
|
||||
var rekorPayload UntrustedRekorPayload
|
||||
if err := json.Unmarshal(untrustedSETPayloadCanonicalBytes, &rekorPayload); err != nil {
|
||||
return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("parsing Rekor SET payload: %v", err.Error()))
|
||||
}
|
||||
// FIXME: Use a different decoder implementation? The Swagger-generated code is kinda ridiculous, with the need to re-marshal
|
||||
// hashedRekor.Spec and so on.
|
||||
// Especially if we anticipate needing to decode different data formats…
|
||||
// That would also allow being much more strict about JSON.
|
||||
//
|
||||
// Alternatively, rely on the existing .Validate() methods instead of manually checking for nil all over the place.
|
||||
var hashedRekord models.Hashedrekord
|
||||
if err := json.Unmarshal(rekorPayload.Body, &hashedRekord); err != nil {
|
||||
return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding the body of a Rekor SET payload: %v", err))
|
||||
}
|
||||
// The decode of models.HashedRekord validates the "kind": "hashedrecord" field, which is otherwise invisible to us.
|
||||
if hashedRekord.APIVersion == nil {
|
||||
return time.Time{}, NewInvalidSignatureError("missing Rekor SET Payload API version")
|
||||
}
|
||||
if *hashedRekord.APIVersion != HashedRekordV001APIVersion {
|
||||
return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("unsupported Rekor SET Payload hashedrekord version %#v", hashedRekord.APIVersion))
|
||||
}
|
||||
hashedRekordV001Bytes, err := json.Marshal(hashedRekord.Spec)
|
||||
if err != nil {
|
||||
// Coverage: hashedRekord.Spec is an any that was just unmarshaled,
|
||||
// so this should never fail.
|
||||
return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("re-creating hashedrekord spec: %v", err))
|
||||
}
|
||||
var hashedRekordV001 models.HashedrekordV001Schema
|
||||
if err := json.Unmarshal(hashedRekordV001Bytes, &hashedRekordV001); err != nil {
|
||||
return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding hashedrekod spec: %v", err))
|
||||
}
|
||||
|
||||
// == Match unverifiedKeyOrCertBytes
|
||||
if hashedRekordV001.Signature == nil {
|
||||
return time.Time{}, NewInvalidSignatureError(`Missing "signature" field in hashedrekord`)
|
||||
}
|
||||
if hashedRekordV001.Signature.PublicKey == nil {
|
||||
return time.Time{}, NewInvalidSignatureError(`Missing "signature.publicKey" field in hashedrekord`)
|
||||
|
||||
}
|
||||
rekorKeyOrCertPEM, rest := pem.Decode(hashedRekordV001.Signature.PublicKey.Content)
|
||||
if rekorKeyOrCertPEM == nil {
|
||||
return time.Time{}, NewInvalidSignatureError("publicKey in Rekor SET is not in PEM format")
|
||||
}
|
||||
if len(rest) != 0 {
|
||||
return time.Time{}, NewInvalidSignatureError("publicKey in Rekor SET has trailing data")
|
||||
}
|
||||
// FIXME: For public keys, let the caller provide the DER-formatted blob instead
|
||||
// of round-tripping through PEM.
|
||||
unverifiedKeyOrCertPEM, rest := pem.Decode(unverifiedKeyOrCertBytes)
|
||||
if unverifiedKeyOrCertPEM == nil {
|
||||
return time.Time{}, NewInvalidSignatureError("public key or cert to be matched against publicKey in Rekor SET is not in PEM format")
|
||||
}
|
||||
if len(rest) != 0 {
|
||||
return time.Time{}, NewInvalidSignatureError("public key or cert to be matched against publicKey in Rekor SET has trailing data")
|
||||
}
|
||||
// NOTE: This compares the PEM payload, but not the object type or headers.
|
||||
if !bytes.Equal(rekorKeyOrCertPEM.Bytes, unverifiedKeyOrCertPEM.Bytes) {
|
||||
return time.Time{}, NewInvalidSignatureError("publicKey in Rekor SET does not match")
|
||||
}
|
||||
// == Match unverifiedSignatureBytes
|
||||
unverifiedSignatureBytes, err := base64.StdEncoding.DecodeString(unverifiedBase64Signature)
|
||||
if err != nil {
|
||||
return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding signature base64: %v", err))
|
||||
}
|
||||
if !bytes.Equal(hashedRekordV001.Signature.Content, unverifiedSignatureBytes) {
|
||||
return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("signature in Rekor SET does not match: %#v vs. %#v",
|
||||
string(hashedRekordV001.Signature.Content), string(unverifiedSignatureBytes)))
|
||||
}
|
||||
|
||||
// == Match unverifiedPayloadBytes
|
||||
if hashedRekordV001.Data == nil {
|
||||
return time.Time{}, NewInvalidSignatureError(`Missing "data" field in hashedrekord`)
|
||||
}
|
||||
if hashedRekordV001.Data.Hash == nil {
|
||||
return time.Time{}, NewInvalidSignatureError(`Missing "data.hash" field in hashedrekord`)
|
||||
}
|
||||
if hashedRekordV001.Data.Hash.Algorithm == nil {
|
||||
return time.Time{}, NewInvalidSignatureError(`Missing "data.hash.algorithm" field in hashedrekord`)
|
||||
}
|
||||
if *hashedRekordV001.Data.Hash.Algorithm != models.HashedrekordV001SchemaDataHashAlgorithmSha256 {
|
||||
return time.Time{}, NewInvalidSignatureError(fmt.Sprintf(`Unexpected "data.hash.algorithm" value %#v`, *hashedRekordV001.Data.Hash.Algorithm))
|
||||
}
|
||||
if hashedRekordV001.Data.Hash.Value == nil {
|
||||
return time.Time{}, NewInvalidSignatureError(`Missing "data.hash.value" field in hashedrekord`)
|
||||
}
|
||||
rekorPayloadHash, err := hex.DecodeString(*hashedRekordV001.Data.Hash.Value)
|
||||
if err != nil {
|
||||
return time.Time{}, NewInvalidSignatureError(fmt.Sprintf(`Invalid "data.hash.value" field in hashedrekord: %v`, err))
|
||||
|
||||
}
|
||||
unverifiedPayloadHash := sha256.Sum256(unverifiedPayloadBytes)
|
||||
if !bytes.Equal(rekorPayloadHash, unverifiedPayloadHash[:]) {
|
||||
return time.Time{}, NewInvalidSignatureError("payload in Rekor SET does not match")
|
||||
}
|
||||
|
||||
// == All OK; return the relevant time.
|
||||
return time.Unix(rekorPayload.IntegratedTime, 0), nil
|
||||
}
|
||||
63
vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go
generated
vendored
63
vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go
generated
vendored
|
|
@ -21,14 +21,14 @@ const (
|
|||
|
||||
// UntrustedSigstorePayload is a parsed content of a sigstore signature payload (not the full signature)
|
||||
type UntrustedSigstorePayload struct {
|
||||
UntrustedDockerManifestDigest digest.Digest
|
||||
UntrustedDockerReference string // FIXME: more precise type?
|
||||
UntrustedCreatorID *string
|
||||
untrustedDockerManifestDigest digest.Digest
|
||||
untrustedDockerReference string // FIXME: more precise type?
|
||||
untrustedCreatorID *string
|
||||
// This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision,
|
||||
// but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
|
||||
// So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
|
||||
// we would add another field, UntrustedTimestampNS int64.
|
||||
UntrustedTimestamp *int64
|
||||
untrustedTimestamp *int64
|
||||
}
|
||||
|
||||
// NewUntrustedSigstorePayload returns an UntrustedSigstorePayload object with
|
||||
|
|
@ -39,34 +39,35 @@ func NewUntrustedSigstorePayload(dockerManifestDigest digest.Digest, dockerRefer
|
|||
creatorID := "containers/image " + version.Version
|
||||
timestamp := time.Now().Unix()
|
||||
return UntrustedSigstorePayload{
|
||||
UntrustedDockerManifestDigest: dockerManifestDigest,
|
||||
UntrustedDockerReference: dockerReference,
|
||||
UntrustedCreatorID: &creatorID,
|
||||
UntrustedTimestamp: ×tamp,
|
||||
untrustedDockerManifestDigest: dockerManifestDigest,
|
||||
untrustedDockerReference: dockerReference,
|
||||
untrustedCreatorID: &creatorID,
|
||||
untrustedTimestamp: ×tamp,
|
||||
}
|
||||
}
|
||||
|
||||
// Compile-time check that UntrustedSigstorePayload implements json.Marshaler
|
||||
// A compile-time check that UntrustedSigstorePayload and *UntrustedSigstorePayload implements json.Marshaler
|
||||
var _ json.Marshaler = UntrustedSigstorePayload{}
|
||||
var _ json.Marshaler = (*UntrustedSigstorePayload)(nil)
|
||||
|
||||
// MarshalJSON implements the json.Marshaler interface.
|
||||
func (s UntrustedSigstorePayload) MarshalJSON() ([]byte, error) {
|
||||
if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" {
|
||||
if s.untrustedDockerManifestDigest == "" || s.untrustedDockerReference == "" {
|
||||
return nil, errors.New("Unexpected empty signature content")
|
||||
}
|
||||
critical := map[string]interface{}{
|
||||
critical := map[string]any{
|
||||
"type": sigstoreSignatureType,
|
||||
"image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()},
|
||||
"identity": map[string]string{"docker-reference": s.UntrustedDockerReference},
|
||||
"image": map[string]string{"docker-manifest-digest": s.untrustedDockerManifestDigest.String()},
|
||||
"identity": map[string]string{"docker-reference": s.untrustedDockerReference},
|
||||
}
|
||||
optional := map[string]interface{}{}
|
||||
if s.UntrustedCreatorID != nil {
|
||||
optional["creator"] = *s.UntrustedCreatorID
|
||||
optional := map[string]any{}
|
||||
if s.untrustedCreatorID != nil {
|
||||
optional["creator"] = *s.untrustedCreatorID
|
||||
}
|
||||
if s.UntrustedTimestamp != nil {
|
||||
optional["timestamp"] = *s.UntrustedTimestamp
|
||||
if s.untrustedTimestamp != nil {
|
||||
optional["timestamp"] = *s.untrustedTimestamp
|
||||
}
|
||||
signature := map[string]interface{}{
|
||||
signature := map[string]any{
|
||||
"critical": critical,
|
||||
"optional": optional,
|
||||
}
|
||||
|
|
@ -91,7 +92,7 @@ func (s *UntrustedSigstorePayload) UnmarshalJSON(data []byte) error {
|
|||
// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller.
|
||||
func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
|
||||
var critical, optional json.RawMessage
|
||||
if err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
|
||||
if err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
|
||||
"critical": &critical,
|
||||
"optional": &optional,
|
||||
}); err != nil {
|
||||
|
|
@ -103,7 +104,7 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
|
|||
var gotCreatorID, gotTimestamp = false, false
|
||||
// /usr/bin/cosign generates "optional": null if there are no user-specified annotations.
|
||||
if !bytes.Equal(optional, []byte("null")) {
|
||||
if err := ParanoidUnmarshalJSONObject(optional, func(key string) interface{} {
|
||||
if err := ParanoidUnmarshalJSONObject(optional, func(key string) any {
|
||||
switch key {
|
||||
case "creator":
|
||||
gotCreatorID = true
|
||||
|
|
@ -112,7 +113,7 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
|
|||
gotTimestamp = true
|
||||
return ×tamp
|
||||
default:
|
||||
var ignore interface{}
|
||||
var ignore any
|
||||
return &ignore
|
||||
}
|
||||
}); err != nil {
|
||||
|
|
@ -120,19 +121,19 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
|
|||
}
|
||||
}
|
||||
if gotCreatorID {
|
||||
s.UntrustedCreatorID = &creatorID
|
||||
s.untrustedCreatorID = &creatorID
|
||||
}
|
||||
if gotTimestamp {
|
||||
intTimestamp := int64(timestamp)
|
||||
if float64(intTimestamp) != timestamp {
|
||||
return NewInvalidSignatureError("Field optional.timestamp is not is not an integer")
|
||||
}
|
||||
s.UntrustedTimestamp = &intTimestamp
|
||||
s.untrustedTimestamp = &intTimestamp
|
||||
}
|
||||
|
||||
var t string
|
||||
var image, identity json.RawMessage
|
||||
if err := ParanoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
|
||||
if err := ParanoidUnmarshalJSONObjectExactFields(critical, map[string]any{
|
||||
"type": &t,
|
||||
"image": &image,
|
||||
"identity": &identity,
|
||||
|
|
@ -144,15 +145,15 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
|
|||
}
|
||||
|
||||
var digestString string
|
||||
if err := ParanoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
|
||||
if err := ParanoidUnmarshalJSONObjectExactFields(image, map[string]any{
|
||||
"docker-manifest-digest": &digestString,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
s.UntrustedDockerManifestDigest = digest.Digest(digestString)
|
||||
s.untrustedDockerManifestDigest = digest.Digest(digestString)
|
||||
|
||||
return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
|
||||
"docker-reference": &s.UntrustedDockerReference,
|
||||
return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
|
||||
"docker-reference": &s.untrustedDockerReference,
|
||||
})
|
||||
}
|
||||
|
||||
|
|
@ -190,10 +191,10 @@ func VerifySigstorePayload(publicKey crypto.PublicKey, unverifiedPayload []byte,
|
|||
if err := json.Unmarshal(unverifiedPayload, &unmatchedPayload); err != nil {
|
||||
return nil, NewInvalidSignatureError(err.Error())
|
||||
}
|
||||
if err := rules.ValidateSignedDockerManifestDigest(unmatchedPayload.UntrustedDockerManifestDigest); err != nil {
|
||||
if err := rules.ValidateSignedDockerManifestDigest(unmatchedPayload.untrustedDockerManifestDigest); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := rules.ValidateSignedDockerReference(unmatchedPayload.UntrustedDockerReference); err != nil {
|
||||
if err := rules.ValidateSignedDockerReference(unmatchedPayload.untrustedDockerReference); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// SigstorePayloadAcceptanceRules have accepted this value.
|
||||
|
|
|
|||
135
vendor/github.com/containers/image/v5/signature/policy_config.go
generated
vendored
135
vendor/github.com/containers/image/v5/signature/policy_config.go
generated
vendored
|
|
@ -19,13 +19,13 @@ import (
|
|||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/signature/internal"
|
||||
"github.com/containers/image/v5/transports"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage/pkg/homedir"
|
||||
"github.com/containers/storage/pkg/regexp"
|
||||
)
|
||||
|
||||
// systemDefaultPolicyPath is the policy path used for DefaultPolicy().
|
||||
|
|
@ -104,7 +104,7 @@ var _ json.Unmarshaler = (*Policy)(nil)
|
|||
func (p *Policy) UnmarshalJSON(data []byte) error {
|
||||
*p = Policy{}
|
||||
transports := policyTransportsMap{}
|
||||
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
|
||||
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
|
||||
switch key {
|
||||
case "default":
|
||||
return &p.Default
|
||||
|
|
@ -135,7 +135,7 @@ func (m *policyTransportsMap) UnmarshalJSON(data []byte) error {
|
|||
// We can't unmarshal directly into map values because it is not possible to take an address of a map value.
|
||||
// So, use a temporary map of pointers-to-slices and convert.
|
||||
tmpMap := map[string]*PolicyTransportScopes{}
|
||||
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
|
||||
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
|
||||
// transport can be nil
|
||||
transport := transports.Get(key)
|
||||
// internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
|
||||
|
|
@ -181,7 +181,7 @@ func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error {
|
|||
// We can't unmarshal directly into map values because it is not possible to take an address of a map value.
|
||||
// So, use a temporary map of pointers-to-slices and convert.
|
||||
tmpMap := map[string]*PolicyRequirements{}
|
||||
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
|
||||
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
|
||||
// internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
|
||||
if _, ok := tmpMap[key]; ok {
|
||||
return nil
|
||||
|
|
@ -271,7 +271,7 @@ var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil)
|
|||
func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error {
|
||||
*pr = prInsecureAcceptAnything{}
|
||||
var tmp prInsecureAcceptAnything
|
||||
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
|
||||
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
|
||||
"type": &tmp.Type,
|
||||
}); err != nil {
|
||||
return err
|
||||
|
|
@ -301,7 +301,7 @@ var _ json.Unmarshaler = (*prReject)(nil)
|
|||
func (pr *prReject) UnmarshalJSON(data []byte) error {
|
||||
*pr = prReject{}
|
||||
var tmp prReject
|
||||
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
|
||||
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
|
||||
"type": &tmp.Type,
|
||||
}); err != nil {
|
||||
return err
|
||||
|
|
@ -384,7 +384,7 @@ func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
|
|||
var tmp prSignedBy
|
||||
var gotKeyPath, gotKeyPaths, gotKeyData = false, false, false
|
||||
var signedIdentity json.RawMessage
|
||||
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
|
||||
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
|
||||
switch key {
|
||||
case "type":
|
||||
return &tmp.Type
|
||||
|
|
@ -495,7 +495,7 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
|
|||
*pr = prSignedBaseLayer{}
|
||||
var tmp prSignedBaseLayer
|
||||
var baseLayerIdentity json.RawMessage
|
||||
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
|
||||
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
|
||||
"type": &tmp.Type,
|
||||
"baseLayerIdentity": &baseLayerIdentity,
|
||||
}); err != nil {
|
||||
|
|
@ -518,107 +518,6 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// newPRSigstoreSigned returns a new prSigstoreSigned if parameters are valid.
|
||||
func newPRSigstoreSigned(keyPath string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSigstoreSigned, error) {
|
||||
if len(keyPath) > 0 && len(keyData) > 0 {
|
||||
return nil, InvalidPolicyFormatError("keyType and keyData cannot be used simultaneously")
|
||||
}
|
||||
if signedIdentity == nil {
|
||||
return nil, InvalidPolicyFormatError("signedIdentity not specified")
|
||||
}
|
||||
return &prSigstoreSigned{
|
||||
prCommon: prCommon{Type: prTypeSigstoreSigned},
|
||||
KeyPath: keyPath,
|
||||
KeyData: keyData,
|
||||
SignedIdentity: signedIdentity,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// newPRSigstoreSignedKeyPath is NewPRSigstoreSignedKeyPath, except it returns the private type.
|
||||
func newPRSigstoreSignedKeyPath(keyPath string, signedIdentity PolicyReferenceMatch) (*prSigstoreSigned, error) {
|
||||
return newPRSigstoreSigned(keyPath, nil, signedIdentity)
|
||||
}
|
||||
|
||||
// NewPRSigstoreSignedKeyPath returns a new "sigstoreSigned" PolicyRequirement using a KeyPath
|
||||
func NewPRSigstoreSignedKeyPath(keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
|
||||
return newPRSigstoreSignedKeyPath(keyPath, signedIdentity)
|
||||
}
|
||||
|
||||
// newPRSigstoreSignedKeyData is NewPRSigstoreSignedKeyData, except it returns the private type.
|
||||
func newPRSigstoreSignedKeyData(keyData []byte, signedIdentity PolicyReferenceMatch) (*prSigstoreSigned, error) {
|
||||
return newPRSigstoreSigned("", keyData, signedIdentity)
|
||||
}
|
||||
|
||||
// NewPRSigstoreSignedKeyData returns a new "sigstoreSigned" PolicyRequirement using a KeyData
|
||||
func NewPRSigstoreSignedKeyData(keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
|
||||
return newPRSigstoreSignedKeyData(keyData, signedIdentity)
|
||||
}
|
||||
|
||||
// Compile-time check that prSigstoreSigned implements json.Unmarshaler.
|
||||
var _ json.Unmarshaler = (*prSigstoreSigned)(nil)
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaler interface.
|
||||
func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
|
||||
*pr = prSigstoreSigned{}
|
||||
var tmp prSigstoreSigned
|
||||
var gotKeyPath, gotKeyData = false, false
|
||||
var signedIdentity json.RawMessage
|
||||
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
|
||||
switch key {
|
||||
case "type":
|
||||
return &tmp.Type
|
||||
case "keyPath":
|
||||
gotKeyPath = true
|
||||
return &tmp.KeyPath
|
||||
case "keyData":
|
||||
gotKeyData = true
|
||||
return &tmp.KeyData
|
||||
case "signedIdentity":
|
||||
return &signedIdentity
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if tmp.Type != prTypeSigstoreSigned {
|
||||
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
|
||||
}
|
||||
if signedIdentity == nil {
|
||||
tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()
|
||||
} else {
|
||||
si, err := newPolicyReferenceMatchFromJSON(signedIdentity)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tmp.SignedIdentity = si
|
||||
}
|
||||
|
||||
var res *prSigstoreSigned
|
||||
var err error
|
||||
switch {
|
||||
case gotKeyPath && gotKeyData:
|
||||
return InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
|
||||
case gotKeyPath && !gotKeyData:
|
||||
res, err = newPRSigstoreSignedKeyPath(tmp.KeyPath, tmp.SignedIdentity)
|
||||
case !gotKeyPath && gotKeyData:
|
||||
res, err = newPRSigstoreSignedKeyData(tmp.KeyData, tmp.SignedIdentity)
|
||||
case !gotKeyPath && !gotKeyData:
|
||||
return InvalidPolicyFormatError("At least one of keyPath and keyData must be specified")
|
||||
default: // Coverage: This should never happen
|
||||
return fmt.Errorf("Impossible keyPath/keyData presence combination!?")
|
||||
}
|
||||
if err != nil {
|
||||
// Coverage: This cannot currently happen, creating a prSigstoreSigned only fails
|
||||
// if signedIdentity is nil, which we replace with a default above.
|
||||
return err
|
||||
}
|
||||
*pr = *res
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation.
|
||||
func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) {
|
||||
var typeField prmCommon
|
||||
|
|
@ -665,7 +564,7 @@ var _ json.Unmarshaler = (*prmMatchExact)(nil)
|
|||
func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
|
||||
*prm = prmMatchExact{}
|
||||
var tmp prmMatchExact
|
||||
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
|
||||
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
|
||||
"type": &tmp.Type,
|
||||
}); err != nil {
|
||||
return err
|
||||
|
|
@ -695,7 +594,7 @@ var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil)
|
|||
func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
|
||||
*prm = prmMatchRepoDigestOrExact{}
|
||||
var tmp prmMatchRepoDigestOrExact
|
||||
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
|
||||
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
|
||||
"type": &tmp.Type,
|
||||
}); err != nil {
|
||||
return err
|
||||
|
|
@ -725,7 +624,7 @@ var _ json.Unmarshaler = (*prmMatchRepository)(nil)
|
|||
func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
|
||||
*prm = prmMatchRepository{}
|
||||
var tmp prmMatchRepository
|
||||
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
|
||||
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
|
||||
"type": &tmp.Type,
|
||||
}); err != nil {
|
||||
return err
|
||||
|
|
@ -765,7 +664,7 @@ var _ json.Unmarshaler = (*prmExactReference)(nil)
|
|||
func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
|
||||
*prm = prmExactReference{}
|
||||
var tmp prmExactReference
|
||||
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
|
||||
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
|
||||
"type": &tmp.Type,
|
||||
"dockerReference": &tmp.DockerReference,
|
||||
}); err != nil {
|
||||
|
|
@ -807,7 +706,7 @@ var _ json.Unmarshaler = (*prmExactRepository)(nil)
|
|||
func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
|
||||
*prm = prmExactRepository{}
|
||||
var tmp prmExactRepository
|
||||
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
|
||||
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
|
||||
"type": &tmp.Type,
|
||||
"dockerRepository": &tmp.DockerRepository,
|
||||
}); err != nil {
|
||||
|
|
@ -829,12 +728,12 @@ func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
|
|||
// Private objects for validateIdentityRemappingPrefix
|
||||
var (
|
||||
// remapIdentityDomainRegexp matches exactly a reference domain (name[:port])
|
||||
remapIdentityDomainRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "$")
|
||||
remapIdentityDomainRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "$")
|
||||
// remapIdentityDomainPrefixRegexp matches a reference that starts with a domain;
|
||||
// we need this because reference.NameRegexp accepts short names with docker.io implied.
|
||||
remapIdentityDomainPrefixRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "/")
|
||||
remapIdentityDomainPrefixRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "/")
|
||||
// remapIdentityNameRegexp matches exactly a reference.Named name (possibly unnormalized)
|
||||
remapIdentityNameRegexp = regexp.MustCompile("^" + reference.NameRegexp.String() + "$")
|
||||
remapIdentityNameRegexp = regexp.Delayed("^" + reference.NameRegexp.String() + "$")
|
||||
)
|
||||
|
||||
// validateIdentityRemappingPrefix returns an InvalidPolicyFormatError if s is detected to be invalid
|
||||
|
|
@ -879,7 +778,7 @@ var _ json.Unmarshaler = (*prmRemapIdentity)(nil)
|
|||
func (prm *prmRemapIdentity) UnmarshalJSON(data []byte) error {
|
||||
*prm = prmRemapIdentity{}
|
||||
var tmp prmRemapIdentity
|
||||
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
|
||||
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
|
||||
"type": &tmp.Type,
|
||||
"prefix": &tmp.Prefix,
|
||||
"signedPrefix": &tmp.SignedPrefix,
|
||||
|
|
|
|||
343
vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go
generated
vendored
Normal file
343
vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go
generated
vendored
Normal file
|
|
@ -0,0 +1,343 @@
|
|||
package signature
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/containers/image/v5/signature/internal"
|
||||
)
|
||||
|
||||
// PRSigstoreSignedOption is way to pass values to NewPRSigstoreSigned
|
||||
type PRSigstoreSignedOption func(*prSigstoreSigned) error
|
||||
|
||||
// PRSigstoreSignedWithKeyPath specifies a value for the "keyPath" field when calling NewPRSigstoreSigned.
|
||||
func PRSigstoreSignedWithKeyPath(keyPath string) PRSigstoreSignedOption {
|
||||
return func(pr *prSigstoreSigned) error {
|
||||
if pr.KeyPath != "" {
|
||||
return errors.New(`"keyPath" already specified`)
|
||||
}
|
||||
pr.KeyPath = keyPath
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// PRSigstoreSignedWithKeyData specifies a value for the "keyData" field when calling NewPRSigstoreSigned.
|
||||
func PRSigstoreSignedWithKeyData(keyData []byte) PRSigstoreSignedOption {
|
||||
return func(pr *prSigstoreSigned) error {
|
||||
if pr.KeyData != nil {
|
||||
return errors.New(`"keyData" already specified`)
|
||||
}
|
||||
pr.KeyData = keyData
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// PRSigstoreSignedWithFulcio specifies a value for the "fulcio" field when calling NewPRSigstoreSigned.
|
||||
func PRSigstoreSignedWithFulcio(fulcio PRSigstoreSignedFulcio) PRSigstoreSignedOption {
|
||||
return func(pr *prSigstoreSigned) error {
|
||||
if pr.Fulcio != nil {
|
||||
return errors.New(`"fulcio" already specified`)
|
||||
}
|
||||
pr.Fulcio = fulcio
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// PRSigstoreSignedWithRekorPublicKeyPath specifies a value for the "rekorPublicKeyPath" field when calling NewPRSigstoreSigned.
|
||||
func PRSigstoreSignedWithRekorPublicKeyPath(rekorPublicKeyPath string) PRSigstoreSignedOption {
|
||||
return func(pr *prSigstoreSigned) error {
|
||||
if pr.RekorPublicKeyPath != "" {
|
||||
return errors.New(`"rekorPublicKeyPath" already specified`)
|
||||
}
|
||||
pr.RekorPublicKeyPath = rekorPublicKeyPath
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// PRSigstoreSignedWithRekorPublicKeyData specifies a value for the "rekorPublicKeyData" field when calling NewPRSigstoreSigned.
|
||||
func PRSigstoreSignedWithRekorPublicKeyData(rekorPublicKeyData []byte) PRSigstoreSignedOption {
|
||||
return func(pr *prSigstoreSigned) error {
|
||||
if pr.RekorPublicKeyData != nil {
|
||||
return errors.New(`"rekorPublicKeyData" already specified`)
|
||||
}
|
||||
pr.RekorPublicKeyData = rekorPublicKeyData
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// PRSigstoreSignedWithSignedIdentity specifies a value for the "signedIdentity" field when calling NewPRSigstoreSigned.
|
||||
func PRSigstoreSignedWithSignedIdentity(signedIdentity PolicyReferenceMatch) PRSigstoreSignedOption {
|
||||
return func(pr *prSigstoreSigned) error {
|
||||
if pr.SignedIdentity != nil {
|
||||
return errors.New(`"signedIdentity" already specified`)
|
||||
}
|
||||
pr.SignedIdentity = signedIdentity
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// newPRSigstoreSigned is NewPRSigstoreSigned, except it returns the private type.
|
||||
func newPRSigstoreSigned(options ...PRSigstoreSignedOption) (*prSigstoreSigned, error) {
|
||||
res := prSigstoreSigned{
|
||||
prCommon: prCommon{Type: prTypeSigstoreSigned},
|
||||
}
|
||||
for _, o := range options {
|
||||
if err := o(&res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
keySources := 0
|
||||
if res.KeyPath != "" {
|
||||
keySources++
|
||||
}
|
||||
if res.KeyData != nil {
|
||||
keySources++
|
||||
}
|
||||
if res.Fulcio != nil {
|
||||
keySources++
|
||||
}
|
||||
if keySources != 1 {
|
||||
return nil, InvalidPolicyFormatError("exactly one of keyPath, keyData and fulcio must be specified")
|
||||
}
|
||||
|
||||
if res.RekorPublicKeyPath != "" && res.RekorPublicKeyData != nil {
|
||||
return nil, InvalidPolicyFormatError("rekorPublickeyType and rekorPublickeyData cannot be used simultaneously")
|
||||
}
|
||||
if res.Fulcio != nil && res.RekorPublicKeyPath == "" && res.RekorPublicKeyData == nil {
|
||||
return nil, InvalidPolicyFormatError("At least one of RekorPublickeyPath and RekorPublickeyData must be specified if fulcio is used")
|
||||
}
|
||||
|
||||
if res.SignedIdentity == nil {
|
||||
return nil, InvalidPolicyFormatError("signedIdentity not specified")
|
||||
}
|
||||
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
// NewPRSigstoreSigned returns a new "sigstoreSigned" PolicyRequirement based on options.
|
||||
func NewPRSigstoreSigned(options ...PRSigstoreSignedOption) (PolicyRequirement, error) {
|
||||
return newPRSigstoreSigned(options...)
|
||||
}
|
||||
|
||||
// NewPRSigstoreSignedKeyPath returns a new "sigstoreSigned" PolicyRequirement using a KeyPath
|
||||
func NewPRSigstoreSignedKeyPath(keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
|
||||
return NewPRSigstoreSigned(
|
||||
PRSigstoreSignedWithKeyPath(keyPath),
|
||||
PRSigstoreSignedWithSignedIdentity(signedIdentity),
|
||||
)
|
||||
}
|
||||
|
||||
// NewPRSigstoreSignedKeyData returns a new "sigstoreSigned" PolicyRequirement using a KeyData
|
||||
func NewPRSigstoreSignedKeyData(keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
|
||||
return NewPRSigstoreSigned(
|
||||
PRSigstoreSignedWithKeyData(keyData),
|
||||
PRSigstoreSignedWithSignedIdentity(signedIdentity),
|
||||
)
|
||||
}
|
||||
|
||||
// Compile-time check that prSigstoreSigned implements json.Unmarshaler.
|
||||
var _ json.Unmarshaler = (*prSigstoreSigned)(nil)
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaler interface.
|
||||
func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
|
||||
*pr = prSigstoreSigned{}
|
||||
var tmp prSigstoreSigned
|
||||
var gotKeyPath, gotKeyData, gotFulcio, gotRekorPublicKeyPath, gotRekorPublicKeyData bool
|
||||
var fulcio prSigstoreSignedFulcio
|
||||
var signedIdentity json.RawMessage
|
||||
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
|
||||
switch key {
|
||||
case "type":
|
||||
return &tmp.Type
|
||||
case "keyPath":
|
||||
gotKeyPath = true
|
||||
return &tmp.KeyPath
|
||||
case "keyData":
|
||||
gotKeyData = true
|
||||
return &tmp.KeyData
|
||||
case "fulcio":
|
||||
gotFulcio = true
|
||||
return &fulcio
|
||||
case "rekorPublicKeyPath":
|
||||
gotRekorPublicKeyPath = true
|
||||
return &tmp.RekorPublicKeyPath
|
||||
case "rekorPublicKeyData":
|
||||
gotRekorPublicKeyData = true
|
||||
return &tmp.RekorPublicKeyData
|
||||
case "signedIdentity":
|
||||
return &signedIdentity
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if tmp.Type != prTypeSigstoreSigned {
|
||||
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
|
||||
}
|
||||
if signedIdentity == nil {
|
||||
tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()
|
||||
} else {
|
||||
si, err := newPolicyReferenceMatchFromJSON(signedIdentity)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tmp.SignedIdentity = si
|
||||
}
|
||||
|
||||
var opts []PRSigstoreSignedOption
|
||||
if gotKeyPath {
|
||||
opts = append(opts, PRSigstoreSignedWithKeyPath(tmp.KeyPath))
|
||||
}
|
||||
if gotKeyData {
|
||||
opts = append(opts, PRSigstoreSignedWithKeyData(tmp.KeyData))
|
||||
}
|
||||
if gotFulcio {
|
||||
opts = append(opts, PRSigstoreSignedWithFulcio(&fulcio))
|
||||
}
|
||||
if gotRekorPublicKeyPath {
|
||||
opts = append(opts, PRSigstoreSignedWithRekorPublicKeyPath(tmp.RekorPublicKeyPath))
|
||||
}
|
||||
if gotRekorPublicKeyData {
|
||||
opts = append(opts, PRSigstoreSignedWithRekorPublicKeyData(tmp.RekorPublicKeyData))
|
||||
}
|
||||
opts = append(opts, PRSigstoreSignedWithSignedIdentity(tmp.SignedIdentity))
|
||||
|
||||
res, err := newPRSigstoreSigned(opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*pr = *res
|
||||
return nil
|
||||
}
|
||||
|
||||
// PRSigstoreSignedFulcioOption is a way to pass values to NewPRSigstoreSignedFulcio
|
||||
type PRSigstoreSignedFulcioOption func(*prSigstoreSignedFulcio) error
|
||||
|
||||
// PRSigstoreSignedFulcioWithCAPath specifies a value for the "caPath" field when calling NewPRSigstoreSignedFulcio
|
||||
func PRSigstoreSignedFulcioWithCAPath(caPath string) PRSigstoreSignedFulcioOption {
|
||||
return func(f *prSigstoreSignedFulcio) error {
|
||||
if f.CAPath != "" {
|
||||
return errors.New(`"caPath" already specified`)
|
||||
}
|
||||
f.CAPath = caPath
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// PRSigstoreSignedFulcioWithCAData specifies a value for the "caData" field when calling NewPRSigstoreSignedFulcio
|
||||
func PRSigstoreSignedFulcioWithCAData(caData []byte) PRSigstoreSignedFulcioOption {
|
||||
return func(f *prSigstoreSignedFulcio) error {
|
||||
if f.CAData != nil {
|
||||
return errors.New(`"caData" already specified`)
|
||||
}
|
||||
f.CAData = caData
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// PRSigstoreSignedFulcioWithOIDCIssuer specifies a value for the "oidcIssuer" field when calling NewPRSigstoreSignedFulcio
|
||||
func PRSigstoreSignedFulcioWithOIDCIssuer(oidcIssuer string) PRSigstoreSignedFulcioOption {
|
||||
return func(f *prSigstoreSignedFulcio) error {
|
||||
if f.OIDCIssuer != "" {
|
||||
return errors.New(`"oidcIssuer" already specified`)
|
||||
}
|
||||
f.OIDCIssuer = oidcIssuer
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// PRSigstoreSignedFulcioWithSubjectEmail specifies a value for the "subjectEmail" field when calling NewPRSigstoreSignedFulcio
|
||||
func PRSigstoreSignedFulcioWithSubjectEmail(subjectEmail string) PRSigstoreSignedFulcioOption {
|
||||
return func(f *prSigstoreSignedFulcio) error {
|
||||
if f.SubjectEmail != "" {
|
||||
return errors.New(`"subjectEmail" already specified`)
|
||||
}
|
||||
f.SubjectEmail = subjectEmail
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// newPRSigstoreSignedFulcio is NewPRSigstoreSignedFulcio, except it returns the private type
|
||||
func newPRSigstoreSignedFulcio(options ...PRSigstoreSignedFulcioOption) (*prSigstoreSignedFulcio, error) {
|
||||
res := prSigstoreSignedFulcio{}
|
||||
for _, o := range options {
|
||||
if err := o(&res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if res.CAPath != "" && res.CAData != nil {
|
||||
return nil, InvalidPolicyFormatError("caPath and caData cannot be used simultaneously")
|
||||
}
|
||||
if res.CAPath == "" && res.CAData == nil {
|
||||
return nil, InvalidPolicyFormatError("At least one of caPath and caData must be specified")
|
||||
}
|
||||
if res.OIDCIssuer == "" {
|
||||
return nil, InvalidPolicyFormatError("oidcIssuer not specified")
|
||||
}
|
||||
if res.SubjectEmail == "" {
|
||||
return nil, InvalidPolicyFormatError("subjectEmail not specified")
|
||||
}
|
||||
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
// NewPRSigstoreSignedFulcio returns a PRSigstoreSignedFulcio based on options.
|
||||
func NewPRSigstoreSignedFulcio(options ...PRSigstoreSignedFulcioOption) (PRSigstoreSignedFulcio, error) {
|
||||
return newPRSigstoreSignedFulcio(options...)
|
||||
}
|
||||
|
||||
// Compile-time check that prSigstoreSignedFulcio implements json.Unmarshaler.
|
||||
var _ json.Unmarshaler = (*prSigstoreSignedFulcio)(nil)
|
||||
|
||||
func (f *prSigstoreSignedFulcio) UnmarshalJSON(data []byte) error {
|
||||
*f = prSigstoreSignedFulcio{}
|
||||
var tmp prSigstoreSignedFulcio
|
||||
var gotCAPath, gotCAData, gotOIDCIssuer, gotSubjectEmail bool // = false...
|
||||
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
|
||||
switch key {
|
||||
case "caPath":
|
||||
gotCAPath = true
|
||||
return &tmp.CAPath
|
||||
case "caData":
|
||||
gotCAData = true
|
||||
return &tmp.CAData
|
||||
case "oidcIssuer":
|
||||
gotOIDCIssuer = true
|
||||
return &tmp.OIDCIssuer
|
||||
case "subjectEmail":
|
||||
gotSubjectEmail = true
|
||||
return &tmp.SubjectEmail
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var opts []PRSigstoreSignedFulcioOption
|
||||
if gotCAPath {
|
||||
opts = append(opts, PRSigstoreSignedFulcioWithCAPath(tmp.CAPath))
|
||||
}
|
||||
if gotCAData {
|
||||
opts = append(opts, PRSigstoreSignedFulcioWithCAData(tmp.CAData))
|
||||
}
|
||||
if gotOIDCIssuer {
|
||||
opts = append(opts, PRSigstoreSignedFulcioWithOIDCIssuer(tmp.OIDCIssuer))
|
||||
}
|
||||
if gotSubjectEmail {
|
||||
opts = append(opts, PRSigstoreSignedFulcioWithSubjectEmail(tmp.SubjectEmail))
|
||||
}
|
||||
|
||||
res, err := newPRSigstoreSignedFulcio(opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*f = *res
|
||||
return nil
|
||||
}
|
||||
10
vendor/github.com/containers/image/v5/signature/policy_eval.go
generated
vendored
10
vendor/github.com/containers/image/v5/signature/policy_eval.go
generated
vendored
|
|
@ -46,7 +46,7 @@ type PolicyRequirement interface {
|
|||
// - sarRejected if the signature has not been verified;
|
||||
// in that case error must be non-nil, and should be an PolicyRequirementError if evaluation
|
||||
// succeeded but the result was rejection.
|
||||
// - sarUnknown if if this PolicyRequirement does not deal with signatures.
|
||||
// - sarUnknown if this PolicyRequirement does not deal with signatures.
|
||||
// NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
|
||||
// Returning sarUnknown and a non-nil error value is invalid.
|
||||
// WARNING: This makes the signature contents acceptable for further processing,
|
||||
|
|
@ -172,10 +172,10 @@ func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) Polic
|
|||
// but it does not necessarily mean that the contents of the signature are
|
||||
// consistent with local policy.
|
||||
// For example:
|
||||
// - Do not use a an existence of an accepted signature to determine whether to run
|
||||
// a container based on this image; use IsRunningImageAllowed instead.
|
||||
// - Just because a signature is accepted does not automatically mean the contents of the
|
||||
// signature are authorized to run code as root, or to affect system or cluster configuration.
|
||||
// - Do not use a an existence of an accepted signature to determine whether to run
|
||||
// a container based on this image; use IsRunningImageAllowed instead.
|
||||
// - Just because a signature is accepted does not automatically mean the contents of the
|
||||
// signature are authorized to run code as root, or to affect system or cluster configuration.
|
||||
func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, publicImage types.UnparsedImage) (sigs []*Signature, finalErr error) {
|
||||
if err := pc.changeState(pcReady, pcInUse); err != nil {
|
||||
return nil, err
|
||||
|
|
|
|||
7
vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go
generated
vendored
7
vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go
generated
vendored
|
|
@ -12,6 +12,7 @@ import (
|
|||
"github.com/containers/image/v5/internal/private"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
|
||||
|
|
@ -67,10 +68,8 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image priva
|
|||
|
||||
signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{
|
||||
validateKeyIdentity: func(keyIdentity string) error {
|
||||
for _, trustedIdentity := range trustedIdentities {
|
||||
if keyIdentity == trustedIdentity {
|
||||
return nil
|
||||
}
|
||||
if slices.Contains(trustedIdentities, keyIdentity) {
|
||||
return nil
|
||||
}
|
||||
// Coverage: We use a private GPG home directory and only import trusted keys, so this should
|
||||
// not be reachable.
|
||||
|
|
|
|||
175
vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go
generated
vendored
175
vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go
generated
vendored
|
|
@ -4,6 +4,9 @@ package signature
|
|||
|
||||
import (
|
||||
"context"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
|
|
@ -17,6 +20,100 @@ import (
|
|||
"github.com/sigstore/sigstore/pkg/cryptoutils"
|
||||
)
|
||||
|
||||
// loadBytesFromDataOrPath ensures there is at most one of ${prefix}Data and ${prefix}Path set,
|
||||
// and returns the referenced data, or nil if neither is set.
|
||||
func loadBytesFromDataOrPath(prefix string, data []byte, path string) ([]byte, error) {
|
||||
switch {
|
||||
case data != nil && path != "":
|
||||
return nil, fmt.Errorf(`Internal inconsistency: both "%sPath" and "%sData" specified`, prefix, prefix)
|
||||
case path != "":
|
||||
d, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d, nil
|
||||
case data != nil:
|
||||
return data, nil
|
||||
default: // Nothing
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
// prepareTrustRoot creates a fulcioTrustRoot from the input data.
|
||||
// (This also prevents external implementations of this interface, ensuring that prSigstoreSignedFulcio is the only one.)
|
||||
func (f *prSigstoreSignedFulcio) prepareTrustRoot() (*fulcioTrustRoot, error) {
|
||||
caCertBytes, err := loadBytesFromDataOrPath("fulcioCA", f.CAData, f.CAPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if caCertBytes == nil {
|
||||
return nil, errors.New(`Internal inconsistency: Fulcio specified with neither "caPath" nor "caData"`)
|
||||
}
|
||||
certs := x509.NewCertPool()
|
||||
if ok := certs.AppendCertsFromPEM(caCertBytes); !ok {
|
||||
return nil, errors.New("error loading Fulcio CA certificates")
|
||||
}
|
||||
fulcio := fulcioTrustRoot{
|
||||
caCertificates: certs,
|
||||
oidcIssuer: f.OIDCIssuer,
|
||||
subjectEmail: f.SubjectEmail,
|
||||
}
|
||||
if err := fulcio.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &fulcio, nil
|
||||
}
|
||||
|
||||
// sigstoreSignedTrustRoot contains an already parsed version of the prSigstoreSigned policy
|
||||
type sigstoreSignedTrustRoot struct {
|
||||
publicKey crypto.PublicKey
|
||||
fulcio *fulcioTrustRoot
|
||||
rekorPublicKey *ecdsa.PublicKey
|
||||
}
|
||||
|
||||
func (pr *prSigstoreSigned) prepareTrustRoot() (*sigstoreSignedTrustRoot, error) {
|
||||
res := sigstoreSignedTrustRoot{}
|
||||
|
||||
publicKeyPEM, err := loadBytesFromDataOrPath("key", pr.KeyData, pr.KeyPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if publicKeyPEM != nil {
|
||||
pk, err := cryptoutils.UnmarshalPEMToPublicKey(publicKeyPEM)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing public key: %w", err)
|
||||
}
|
||||
res.publicKey = pk
|
||||
}
|
||||
|
||||
if pr.Fulcio != nil {
|
||||
f, err := pr.Fulcio.prepareTrustRoot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res.fulcio = f
|
||||
}
|
||||
|
||||
rekorPublicKeyPEM, err := loadBytesFromDataOrPath("rekorPublicKey", pr.RekorPublicKeyData, pr.RekorPublicKeyPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if rekorPublicKeyPEM != nil {
|
||||
pk, err := cryptoutils.UnmarshalPEMToPublicKey(rekorPublicKeyPEM)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing Rekor public key: %w", err)
|
||||
}
|
||||
pkECDSA, ok := pk.(*ecdsa.PublicKey)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Rekor public key is not using ECDSA")
|
||||
|
||||
}
|
||||
res.rekorPublicKey = pkECDSA
|
||||
}
|
||||
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (pr *prSigstoreSigned) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
|
||||
// We don’t know of a single user of this API, and we might return unexpected values in Signature.
|
||||
// For now, just punt.
|
||||
|
|
@ -24,24 +121,10 @@ func (pr *prSigstoreSigned) isSignatureAuthorAccepted(ctx context.Context, image
|
|||
}
|
||||
|
||||
func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image private.UnparsedImage, sig signature.Sigstore) (signatureAcceptanceResult, error) {
|
||||
if pr.KeyPath != "" && pr.KeyData != nil {
|
||||
return sarRejected, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`)
|
||||
}
|
||||
// FIXME: move this to per-context initialization
|
||||
var publicKeyPEM []byte
|
||||
if pr.KeyData != nil {
|
||||
publicKeyPEM = pr.KeyData
|
||||
} else {
|
||||
d, err := os.ReadFile(pr.KeyPath)
|
||||
if err != nil {
|
||||
return sarRejected, err
|
||||
}
|
||||
publicKeyPEM = d
|
||||
}
|
||||
|
||||
publicKey, err := cryptoutils.UnmarshalPEMToPublicKey(publicKeyPEM)
|
||||
trustRoot, err := pr.prepareTrustRoot()
|
||||
if err != nil {
|
||||
return sarRejected, fmt.Errorf("parsing public key: %w", err)
|
||||
return sarRejected, err
|
||||
}
|
||||
|
||||
untrustedAnnotations := sig.UntrustedAnnotations()
|
||||
|
|
@ -49,8 +132,66 @@ func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image priva
|
|||
if !ok {
|
||||
return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSignatureAnnotationKey)
|
||||
}
|
||||
untrustedPayload := sig.UntrustedPayload()
|
||||
|
||||
signature, err := internal.VerifySigstorePayload(publicKey, sig.UntrustedPayload(), untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{
|
||||
var publicKey crypto.PublicKey
|
||||
switch {
|
||||
case trustRoot.publicKey != nil && trustRoot.fulcio != nil: // newPRSigstoreSigned rejects such combinations.
|
||||
return sarRejected, errors.New("Internal inconsistency: Both a public key and Fulcio CA specified")
|
||||
case trustRoot.publicKey == nil && trustRoot.fulcio == nil: // newPRSigstoreSigned rejects such combinations.
|
||||
return sarRejected, errors.New("Internal inconsistency: Neither a public key nor a Fulcio CA specified")
|
||||
|
||||
case trustRoot.publicKey != nil:
|
||||
if trustRoot.rekorPublicKey != nil {
|
||||
untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey]
|
||||
if !ok { // For user convenience; passing an empty []byte to VerifyRekorSet should work.
|
||||
return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSETAnnotationKey)
|
||||
}
|
||||
// We could use publicKeyPEM directly, but let’s re-marshal to avoid inconsistencies.
|
||||
// FIXME: We could just generate DER instead of the full PEM text
|
||||
recreatedPublicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(trustRoot.publicKey)
|
||||
if err != nil {
|
||||
// Coverage: The key was loaded from a PEM format, so it’s unclear how this could fail.
|
||||
// (PEM is not essential, MarshalPublicKeyToPEM can only fail if marshaling to ASN1.DER fails.)
|
||||
return sarRejected, fmt.Errorf("re-marshaling public key to PEM: %w", err)
|
||||
|
||||
}
|
||||
// We don’t care about the Rekor timestamp, just about log presence.
|
||||
if _, err := internal.VerifyRekorSET(trustRoot.rekorPublicKey, []byte(untrustedSET), recreatedPublicKeyPEM, untrustedBase64Signature, untrustedPayload); err != nil {
|
||||
return sarRejected, err
|
||||
}
|
||||
}
|
||||
publicKey = trustRoot.publicKey
|
||||
|
||||
case trustRoot.fulcio != nil:
|
||||
if trustRoot.rekorPublicKey == nil { // newPRSigstoreSigned rejects such combinations.
|
||||
return sarRejected, errors.New("Internal inconsistency: Fulcio CA specified without a Rekor public key")
|
||||
}
|
||||
untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey]
|
||||
if !ok { // For user convenience; passing an empty []byte to VerifyRekorSet should correctly reject it anyway.
|
||||
return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSETAnnotationKey)
|
||||
}
|
||||
untrustedCert, ok := untrustedAnnotations[signature.SigstoreCertificateAnnotationKey]
|
||||
if !ok { // For user convenience; passing an empty []byte to VerifyRekorSet should correctly reject it anyway.
|
||||
return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreCertificateAnnotationKey)
|
||||
}
|
||||
var untrustedIntermediateChainBytes []byte
|
||||
if untrustedIntermediateChain, ok := untrustedAnnotations[signature.SigstoreIntermediateCertificateChainAnnotationKey]; ok {
|
||||
untrustedIntermediateChainBytes = []byte(untrustedIntermediateChain)
|
||||
}
|
||||
pk, err := verifyRekorFulcio(trustRoot.rekorPublicKey, trustRoot.fulcio,
|
||||
[]byte(untrustedSET), []byte(untrustedCert), untrustedIntermediateChainBytes, untrustedBase64Signature, untrustedPayload)
|
||||
if err != nil {
|
||||
return sarRejected, err
|
||||
}
|
||||
publicKey = pk
|
||||
}
|
||||
|
||||
if publicKey == nil {
|
||||
// Coverage: This should never happen, we have already excluded the possibility in the switch above.
|
||||
return sarRejected, fmt.Errorf("Internal inconsistency: publicKey not set before verifying sigstore payload")
|
||||
}
|
||||
signature, err := internal.VerifySigstorePayload(publicKey, untrustedPayload, untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{
|
||||
ValidateSignedDockerReference: func(ref string) error {
|
||||
if !pr.SignedIdentity.matchesDockerReference(image, ref) {
|
||||
return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
|
||||
|
|
|
|||
37
vendor/github.com/containers/image/v5/signature/policy_types.go
generated
vendored
37
vendor/github.com/containers/image/v5/signature/policy_types.go
generated
vendored
|
|
@ -111,13 +111,24 @@ type prSignedBaseLayer struct {
|
|||
type prSigstoreSigned struct {
|
||||
prCommon
|
||||
|
||||
// KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath and KeyData must be specified.
|
||||
// KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath, KeyData, Fulcio must be specified.
|
||||
KeyPath string `json:"keyPath,omitempty"`
|
||||
// KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath and KeyData must be specified.
|
||||
// KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath, KeyData, Fulcio must be specified.
|
||||
KeyData []byte `json:"keyData,omitempty"`
|
||||
// FIXME: Multiple public keys?
|
||||
|
||||
// FIXME: Support fulcio+rekor as an alternative.
|
||||
// Fulcio specifies which Fulcio-generated certificates are accepted. Exactly one of KeyPath, KeyData, Fulcio must be specified.
|
||||
// If Fulcio is specified, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well.
|
||||
Fulcio PRSigstoreSignedFulcio `json:"fulcio,omitempty"`
|
||||
|
||||
// RekorPublicKeyPath is a pathname to local file containing a public key of a Rekor server which must record acceptable signatures.
|
||||
// If Fulcio is used, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well; otherwise it is optional
|
||||
// (and Rekor inclusion is not required if a Rekor public key is not specified).
|
||||
RekorPublicKeyPath string `json:"rekorPublicKeyPath,omitempty"`
|
||||
// RekorPublicKeyPath contain a base64-encoded public key of a Rekor server which must record acceptable signatures.
|
||||
// If Fulcio is used, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well; otherwise it is optional
|
||||
// (and Rekor inclusion is not required if a Rekor public key is not specified).
|
||||
RekorPublicKeyData []byte `json:"rekorPublicKeyData,omitempty"`
|
||||
|
||||
// SignedIdentity specifies what image identity the signature must be claiming about the image.
|
||||
// Defaults to "matchRepoDigestOrExact" if not specified.
|
||||
|
|
@ -125,6 +136,26 @@ type prSigstoreSigned struct {
|
|||
SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
|
||||
}
|
||||
|
||||
// PRSigstoreSignedFulcio contains Fulcio configuration options for a "sigstoreSigned" PolicyRequirement.
|
||||
// This is a public type with a single private implementation.
|
||||
type PRSigstoreSignedFulcio interface {
|
||||
// toFulcioTrustRoot creates a fulcioTrustRoot from the input data.
|
||||
// (This also prevents external implementations of this interface, ensuring that prSigstoreSignedFulcio is the only one.)
|
||||
prepareTrustRoot() (*fulcioTrustRoot, error)
|
||||
}
|
||||
|
||||
// prSigstoreSignedFulcio collects Fulcio configuration options for prSigstoreSigned
|
||||
type prSigstoreSignedFulcio struct {
|
||||
// CAPath a path to a file containing accepted CA root certificates, in PEM format. Exactly one of CAPath and CAData must be specified.
|
||||
CAPath string `json:"caPath,omitempty"`
|
||||
// CAData contains accepted CA root certificates in PEM format, all of that base64-encoded. Exactly one of CAPath and CAData must be specified.
|
||||
CAData []byte `json:"caData,omitempty"`
|
||||
// OIDCIssuer specifies the expected OIDC issuer, recorded by Fulcio into the generated certificates.
|
||||
OIDCIssuer string `json:"oidcIssuer,omitempty"`
|
||||
// SubjectEmail specifies the expected email address of the authenticated OIDC identity, recorded by Fulcio into the generated certificates.
|
||||
SubjectEmail string `json:"subjectEmail,omitempty"`
|
||||
}
|
||||
|
||||
// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
|
||||
// The type is public, but its implementation is private.
|
||||
|
||||
|
|
|
|||
9
vendor/github.com/containers/image/v5/signature/signer/signer.go
generated
vendored
Normal file
9
vendor/github.com/containers/image/v5/signature/signer/signer.go
generated
vendored
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
package signer
|
||||
|
||||
import "github.com/containers/image/v5/internal/signer"
|
||||
|
||||
// Signer is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images.
|
||||
// It can only be created from within the containers/image package; it can’t be implemented externally.
|
||||
//
|
||||
// The owner of a Signer must call Close() when done.
|
||||
type Signer = signer.Signer
|
||||
41
vendor/github.com/containers/image/v5/signature/sigstore/copied.go
generated
vendored
41
vendor/github.com/containers/image/v5/signature/sigstore/copied.go
generated
vendored
|
|
@ -10,6 +10,7 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/sigstore/sigstore/pkg/cryptoutils"
|
||||
"github.com/sigstore/sigstore/pkg/signature"
|
||||
"github.com/theupdateframework/go-tuf/encrypted"
|
||||
)
|
||||
|
|
@ -32,19 +33,21 @@ import (
|
|||
// limitations under the License.
|
||||
|
||||
const (
|
||||
// from sigstore/cosign/pkg/cosign.sigstorePrivateKeyPemType
|
||||
sigstorePrivateKeyPemType = "ENCRYPTED COSIGN PRIVATE KEY"
|
||||
// from sigstore/cosign/pkg/cosign.CosignPrivateKeyPemType.
|
||||
cosignPrivateKeyPemType = "ENCRYPTED COSIGN PRIVATE KEY"
|
||||
// from sigstore/cosign/pkg/cosign.SigstorePrivateKeyPemType.
|
||||
sigstorePrivateKeyPemType = "ENCRYPTED SIGSTORE PRIVATE KEY"
|
||||
)
|
||||
|
||||
// from sigstore/cosign/pkg/cosign.loadPrivateKey
|
||||
// FIXME: Do we need all of these key formats, and all of those
|
||||
// FIXME: Do we need all of these key formats?
|
||||
func loadPrivateKey(key []byte, pass []byte) (signature.SignerVerifier, error) {
|
||||
// Decrypt first
|
||||
p, _ := pem.Decode(key)
|
||||
if p == nil {
|
||||
return nil, errors.New("invalid pem block")
|
||||
}
|
||||
if p.Type != sigstorePrivateKeyPemType {
|
||||
if p.Type != sigstorePrivateKeyPemType && p.Type != cosignPrivateKeyPemType {
|
||||
return nil, fmt.Errorf("unsupported pem type: %s", p.Type)
|
||||
}
|
||||
|
||||
|
|
@ -68,3 +71,33 @@ func loadPrivateKey(key []byte, pass []byte) (signature.SignerVerifier, error) {
|
|||
return nil, errors.New("unsupported key type")
|
||||
}
|
||||
}
|
||||
|
||||
// simplified from sigstore/cosign/pkg/cosign.marshalKeyPair
|
||||
// loadPrivateKey always requires a encryption, so this always requires a passphrase.
|
||||
func marshalKeyPair(privateKey crypto.PrivateKey, publicKey crypto.PublicKey, password []byte) (_privateKey []byte, _publicKey []byte, err error) {
|
||||
x509Encoded, err := x509.MarshalPKCS8PrivateKey(privateKey)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("x509 encoding private key: %w", err)
|
||||
}
|
||||
|
||||
encBytes, err := encrypted.Encrypt(x509Encoded, password)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// store in PEM format
|
||||
privBytes := pem.EncodeToMemory(&pem.Block{
|
||||
Bytes: encBytes,
|
||||
// Use the older “COSIGN” type name; as of 2023-03-30 cosign’s main branch generates “SIGSTORE” types,
|
||||
// but a version of cosign that can accept them has not yet been released.
|
||||
Type: cosignPrivateKeyPemType,
|
||||
})
|
||||
|
||||
// Now do the public key
|
||||
pubBytes, err := cryptoutils.MarshalPublicKeyToPEM(publicKey)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return privBytes, pubBytes, nil
|
||||
}
|
||||
|
|
|
|||
35
vendor/github.com/containers/image/v5/signature/sigstore/generate.go
generated
vendored
Normal file
35
vendor/github.com/containers/image/v5/signature/sigstore/generate.go
generated
vendored
Normal file
|
|
@ -0,0 +1,35 @@
|
|||
package sigstore
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
)
|
||||
|
||||
// GenerateKeyPairResult is a struct to ensure the private and public parts can not be confused by the caller.
|
||||
type GenerateKeyPairResult struct {
|
||||
PublicKey []byte
|
||||
PrivateKey []byte
|
||||
}
|
||||
|
||||
// GenerateKeyPair generates a public/private key pair usable for signing images using the sigstore format,
|
||||
// and returns key representations suitable for storing in long-term files (with the private key encrypted using the provided passphrase).
|
||||
// The specific key kind (e.g. algorithm, size), as well as the file format, are unspecified by this API,
|
||||
// and can change with best practices over time.
|
||||
func GenerateKeyPair(passphrase []byte) (*GenerateKeyPairResult, error) {
|
||||
// https://github.com/sigstore/cosign/blob/main/specs/SIGNATURE_SPEC.md#signature-schemes
|
||||
// only requires ECDSA-P256 to be supported, so that’s what we must use.
|
||||
rawKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
// Coverage: This can fail only if the randomness source fails
|
||||
return nil, err
|
||||
}
|
||||
private, public, err := marshalKeyPair(rawKey, rawKey.Public(), passphrase)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &GenerateKeyPairResult{
|
||||
PublicKey: public,
|
||||
PrivateKey: private,
|
||||
}, nil
|
||||
}
|
||||
95
vendor/github.com/containers/image/v5/signature/sigstore/internal/signer.go
generated
vendored
Normal file
95
vendor/github.com/containers/image/v5/signature/sigstore/internal/signer.go
generated
vendored
Normal file
|
|
@ -0,0 +1,95 @@
|
|||
package internal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/internal/signature"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/signature/internal"
|
||||
sigstoreSignature "github.com/sigstore/sigstore/pkg/signature"
|
||||
)
|
||||
|
||||
type Option func(*SigstoreSigner) error
|
||||
|
||||
// SigstoreSigner is a signer.SignerImplementation implementation for sigstore signatures.
|
||||
// It is initialized using various closures that implement Option, sadly over several subpackages, to decrease the
|
||||
// dependency impact.
|
||||
type SigstoreSigner struct {
|
||||
PrivateKey sigstoreSignature.Signer // May be nil during initialization
|
||||
SigningKeyOrCert []byte // For possible Rekor upload; always initialized together with PrivateKey
|
||||
|
||||
// Fulcio results to include
|
||||
FulcioGeneratedCertificate []byte // Or nil
|
||||
FulcioGeneratedCertificateChain []byte // Or nil
|
||||
|
||||
// Rekor state
|
||||
RekorUploader func(ctx context.Context, keyOrCertBytes []byte, signatureBytes []byte, payloadBytes []byte) ([]byte, error) // Or nil
|
||||
}
|
||||
|
||||
// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature.
|
||||
func (s *SigstoreSigner) ProgressMessage() string {
|
||||
return "Signing image using a sigstore signature"
|
||||
}
|
||||
|
||||
// SignImageManifest creates a new signature for manifest m as dockerReference.
|
||||
func (s *SigstoreSigner) SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (signature.Signature, error) {
|
||||
if s.PrivateKey == nil {
|
||||
return nil, errors.New("internal error: nothing to sign with, should have been detected in NewSigner")
|
||||
}
|
||||
|
||||
if reference.IsNameOnly(dockerReference) {
|
||||
return nil, fmt.Errorf("reference %s can’t be signed, it has neither a tag nor a digest", dockerReference.String())
|
||||
}
|
||||
manifestDigest, err := manifest.Digest(m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// sigstore/cosign completely ignores dockerReference for actual policy decisions.
|
||||
// They record the repo (but NOT THE TAG) in the value; without the tag we can’t detect version rollbacks.
|
||||
// So, just do what simple signing does, and cosign won’t mind.
|
||||
payloadData := internal.NewUntrustedSigstorePayload(manifestDigest, dockerReference.String())
|
||||
payloadBytes, err := json.Marshal(payloadData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// github.com/sigstore/cosign/internal/pkg/cosign.payloadSigner uses signatureoptions.WithContext(),
|
||||
// which seems to be not used by anything. So we don’t bother.
|
||||
signatureBytes, err := s.PrivateKey.SignMessage(bytes.NewReader(payloadBytes))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating signature: %w", err)
|
||||
}
|
||||
base64Signature := base64.StdEncoding.EncodeToString(signatureBytes)
|
||||
var rekorSETBytes []byte // = nil
|
||||
if s.RekorUploader != nil {
|
||||
set, err := s.RekorUploader(ctx, s.SigningKeyOrCert, signatureBytes, payloadBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rekorSETBytes = set
|
||||
}
|
||||
|
||||
annotations := map[string]string{
|
||||
signature.SigstoreSignatureAnnotationKey: base64Signature,
|
||||
}
|
||||
if s.FulcioGeneratedCertificate != nil {
|
||||
annotations[signature.SigstoreCertificateAnnotationKey] = string(s.FulcioGeneratedCertificate)
|
||||
}
|
||||
if s.FulcioGeneratedCertificateChain != nil {
|
||||
annotations[signature.SigstoreIntermediateCertificateChainAnnotationKey] = string(s.FulcioGeneratedCertificateChain)
|
||||
}
|
||||
if rekorSETBytes != nil {
|
||||
annotations[signature.SigstoreSETAnnotationKey] = string(rekorSETBytes)
|
||||
}
|
||||
return signature.SigstoreFromComponents(signature.SigstoreSignatureMIMEType, payloadBytes, annotations), nil
|
||||
}
|
||||
|
||||
func (s *SigstoreSigner) Close() error {
|
||||
return nil
|
||||
}
|
||||
65
vendor/github.com/containers/image/v5/signature/sigstore/sign.go
generated
vendored
65
vendor/github.com/containers/image/v5/signature/sigstore/sign.go
generated
vendored
|
|
@ -1,65 +0,0 @@
|
|||
package sigstore
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/internal/signature"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/signature/internal"
|
||||
sigstoreSignature "github.com/sigstore/sigstore/pkg/signature"
|
||||
)
|
||||
|
||||
// SignDockerManifestWithPrivateKeyFileUnstable returns a signature for manifest as the specified dockerReference,
|
||||
// using a private key and an optional passphrase.
|
||||
//
|
||||
// Yes, this returns an internal type, and should currently not be used outside of c/image.
|
||||
// There is NO COMITTMENT TO STABLE API.
|
||||
func SignDockerManifestWithPrivateKeyFileUnstable(m []byte, dockerReference reference.Named, privateKeyFile string, passphrase []byte) (signature.Sigstore, error) {
|
||||
privateKeyPEM, err := os.ReadFile(privateKeyFile)
|
||||
if err != nil {
|
||||
return signature.Sigstore{}, fmt.Errorf("reading private key from %s: %w", privateKeyFile, err)
|
||||
}
|
||||
signer, err := loadPrivateKey(privateKeyPEM, passphrase)
|
||||
if err != nil {
|
||||
return signature.Sigstore{}, fmt.Errorf("initializing private key: %w", err)
|
||||
}
|
||||
|
||||
return signDockerManifest(m, dockerReference, signer)
|
||||
}
|
||||
|
||||
func signDockerManifest(m []byte, dockerReference reference.Named, signer sigstoreSignature.Signer) (signature.Sigstore, error) {
|
||||
if reference.IsNameOnly(dockerReference) {
|
||||
return signature.Sigstore{}, fmt.Errorf("reference %s can’t be signed, it has neither a tag nor a digest", dockerReference.String())
|
||||
}
|
||||
manifestDigest, err := manifest.Digest(m)
|
||||
if err != nil {
|
||||
return signature.Sigstore{}, err
|
||||
}
|
||||
// sigstore/cosign completely ignores dockerReference for actual policy decisions.
|
||||
// They record the repo (but NOT THE TAG) in the value; without the tag we can’t detect version rollbacks.
|
||||
// So, just do what simple signing does, and cosign won’t mind.
|
||||
payloadData := internal.NewUntrustedSigstorePayload(manifestDigest, dockerReference.String())
|
||||
payloadBytes, err := json.Marshal(payloadData)
|
||||
if err != nil {
|
||||
return signature.Sigstore{}, err
|
||||
}
|
||||
|
||||
// github.com/sigstore/cosign/internal/pkg/cosign.payloadSigner uses signatureoptions.WithContext(),
|
||||
// which seems to be not used by anything. So we don’t bother.
|
||||
signatureBytes, err := signer.SignMessage(bytes.NewReader(payloadBytes))
|
||||
if err != nil {
|
||||
return signature.Sigstore{}, fmt.Errorf("creating signature: %w", err)
|
||||
}
|
||||
base64Signature := base64.StdEncoding.EncodeToString(signatureBytes)
|
||||
|
||||
return signature.SigstoreFromComponents(signature.SigstoreSignatureMIMEType,
|
||||
payloadBytes,
|
||||
map[string]string{
|
||||
signature.SigstoreSignatureAnnotationKey: base64Signature,
|
||||
}), nil
|
||||
}
|
||||
60
vendor/github.com/containers/image/v5/signature/sigstore/signer.go
generated
vendored
Normal file
60
vendor/github.com/containers/image/v5/signature/sigstore/signer.go
generated
vendored
Normal file
|
|
@ -0,0 +1,60 @@
|
|||
package sigstore
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
internalSigner "github.com/containers/image/v5/internal/signer"
|
||||
"github.com/containers/image/v5/signature/signer"
|
||||
"github.com/containers/image/v5/signature/sigstore/internal"
|
||||
"github.com/sigstore/sigstore/pkg/cryptoutils"
|
||||
)
|
||||
|
||||
type Option = internal.Option
|
||||
|
||||
func WithPrivateKeyFile(file string, passphrase []byte) Option {
|
||||
return func(s *internal.SigstoreSigner) error {
|
||||
if s.PrivateKey != nil {
|
||||
return fmt.Errorf("multiple private key sources specified when preparing to create sigstore signatures")
|
||||
}
|
||||
|
||||
if passphrase == nil {
|
||||
return errors.New("private key passphrase not provided")
|
||||
}
|
||||
|
||||
privateKeyPEM, err := os.ReadFile(file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("reading private key from %s: %w", file, err)
|
||||
}
|
||||
signerVerifier, err := loadPrivateKey(privateKeyPEM, passphrase)
|
||||
if err != nil {
|
||||
return fmt.Errorf("initializing private key: %w", err)
|
||||
}
|
||||
publicKey, err := signerVerifier.PublicKey()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting public key from private key: %w", err)
|
||||
}
|
||||
publicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(publicKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("converting public key to PEM: %w", err)
|
||||
}
|
||||
s.PrivateKey = signerVerifier
|
||||
s.SigningKeyOrCert = publicKeyPEM
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func NewSigner(opts ...Option) (*signer.Signer, error) {
|
||||
s := internal.SigstoreSigner{}
|
||||
for _, o := range opts {
|
||||
if err := o(&s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if s.PrivateKey == nil {
|
||||
return nil, errors.New("no private key source provided (neither a private key nor Fulcio) when preparing to create sigstore signatures")
|
||||
}
|
||||
|
||||
return internalSigner.NewSigner(&s), nil
|
||||
}
|
||||
77
vendor/github.com/containers/image/v5/signature/simple.go
generated
vendored
77
vendor/github.com/containers/image/v5/signature/simple.go
generated
vendored
|
|
@ -31,14 +31,14 @@ type Signature struct {
|
|||
|
||||
// untrustedSignature is a parsed content of a signature.
|
||||
type untrustedSignature struct {
|
||||
UntrustedDockerManifestDigest digest.Digest
|
||||
UntrustedDockerReference string // FIXME: more precise type?
|
||||
UntrustedCreatorID *string
|
||||
untrustedDockerManifestDigest digest.Digest
|
||||
untrustedDockerReference string // FIXME: more precise type?
|
||||
untrustedCreatorID *string
|
||||
// This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision,
|
||||
// but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
|
||||
// So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
|
||||
// we would add another field, UntrustedTimestampNS int64.
|
||||
UntrustedTimestamp *int64
|
||||
untrustedTimestamp *int64
|
||||
}
|
||||
|
||||
// UntrustedSignatureInformation is information available in an untrusted signature.
|
||||
|
|
@ -65,34 +65,35 @@ func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference s
|
|||
creatorID := "atomic " + version.Version
|
||||
timestamp := time.Now().Unix()
|
||||
return untrustedSignature{
|
||||
UntrustedDockerManifestDigest: dockerManifestDigest,
|
||||
UntrustedDockerReference: dockerReference,
|
||||
UntrustedCreatorID: &creatorID,
|
||||
UntrustedTimestamp: ×tamp,
|
||||
untrustedDockerManifestDigest: dockerManifestDigest,
|
||||
untrustedDockerReference: dockerReference,
|
||||
untrustedCreatorID: &creatorID,
|
||||
untrustedTimestamp: ×tamp,
|
||||
}
|
||||
}
|
||||
|
||||
// Compile-time check that untrustedSignature implements json.Marshaler
|
||||
// A compile-time check that untrustedSignature and *untrustedSignature implements json.Marshaler
|
||||
var _ json.Marshaler = untrustedSignature{}
|
||||
var _ json.Marshaler = (*untrustedSignature)(nil)
|
||||
|
||||
// MarshalJSON implements the json.Marshaler interface.
|
||||
func (s untrustedSignature) MarshalJSON() ([]byte, error) {
|
||||
if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" {
|
||||
if s.untrustedDockerManifestDigest == "" || s.untrustedDockerReference == "" {
|
||||
return nil, errors.New("Unexpected empty signature content")
|
||||
}
|
||||
critical := map[string]interface{}{
|
||||
critical := map[string]any{
|
||||
"type": signatureType,
|
||||
"image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()},
|
||||
"identity": map[string]string{"docker-reference": s.UntrustedDockerReference},
|
||||
"image": map[string]string{"docker-manifest-digest": s.untrustedDockerManifestDigest.String()},
|
||||
"identity": map[string]string{"docker-reference": s.untrustedDockerReference},
|
||||
}
|
||||
optional := map[string]interface{}{}
|
||||
if s.UntrustedCreatorID != nil {
|
||||
optional["creator"] = *s.UntrustedCreatorID
|
||||
optional := map[string]any{}
|
||||
if s.untrustedCreatorID != nil {
|
||||
optional["creator"] = *s.untrustedCreatorID
|
||||
}
|
||||
if s.UntrustedTimestamp != nil {
|
||||
optional["timestamp"] = *s.UntrustedTimestamp
|
||||
if s.untrustedTimestamp != nil {
|
||||
optional["timestamp"] = *s.untrustedTimestamp
|
||||
}
|
||||
signature := map[string]interface{}{
|
||||
signature := map[string]any{
|
||||
"critical": critical,
|
||||
"optional": optional,
|
||||
}
|
||||
|
|
@ -117,7 +118,7 @@ func (s *untrustedSignature) UnmarshalJSON(data []byte) error {
|
|||
// Splitting it into a separate function allows us to do the internal.JSONFormatError → InvalidSignatureError in a single place, the caller.
|
||||
func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
|
||||
var critical, optional json.RawMessage
|
||||
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
|
||||
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
|
||||
"critical": &critical,
|
||||
"optional": &optional,
|
||||
}); err != nil {
|
||||
|
|
@ -127,7 +128,7 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
|
|||
var creatorID string
|
||||
var timestamp float64
|
||||
var gotCreatorID, gotTimestamp = false, false
|
||||
if err := internal.ParanoidUnmarshalJSONObject(optional, func(key string) interface{} {
|
||||
if err := internal.ParanoidUnmarshalJSONObject(optional, func(key string) any {
|
||||
switch key {
|
||||
case "creator":
|
||||
gotCreatorID = true
|
||||
|
|
@ -136,26 +137,26 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
|
|||
gotTimestamp = true
|
||||
return ×tamp
|
||||
default:
|
||||
var ignore interface{}
|
||||
var ignore any
|
||||
return &ignore
|
||||
}
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if gotCreatorID {
|
||||
s.UntrustedCreatorID = &creatorID
|
||||
s.untrustedCreatorID = &creatorID
|
||||
}
|
||||
if gotTimestamp {
|
||||
intTimestamp := int64(timestamp)
|
||||
if float64(intTimestamp) != timestamp {
|
||||
return internal.NewInvalidSignatureError("Field optional.timestamp is not is not an integer")
|
||||
}
|
||||
s.UntrustedTimestamp = &intTimestamp
|
||||
s.untrustedTimestamp = &intTimestamp
|
||||
}
|
||||
|
||||
var t string
|
||||
var image, identity json.RawMessage
|
||||
if err := internal.ParanoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
|
||||
if err := internal.ParanoidUnmarshalJSONObjectExactFields(critical, map[string]any{
|
||||
"type": &t,
|
||||
"image": &image,
|
||||
"identity": &identity,
|
||||
|
|
@ -167,15 +168,15 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
|
|||
}
|
||||
|
||||
var digestString string
|
||||
if err := internal.ParanoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
|
||||
if err := internal.ParanoidUnmarshalJSONObjectExactFields(image, map[string]any{
|
||||
"docker-manifest-digest": &digestString,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
s.UntrustedDockerManifestDigest = digest.Digest(digestString)
|
||||
s.untrustedDockerManifestDigest = digest.Digest(digestString)
|
||||
|
||||
return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
|
||||
"docker-reference": &s.UntrustedDockerReference,
|
||||
return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
|
||||
"docker-reference": &s.untrustedDockerReference,
|
||||
})
|
||||
}
|
||||
|
||||
|
|
@ -228,16 +229,16 @@ func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte
|
|||
if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
|
||||
return nil, internal.NewInvalidSignatureError(err.Error())
|
||||
}
|
||||
if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.UntrustedDockerManifestDigest); err != nil {
|
||||
if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.untrustedDockerManifestDigest); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := rules.validateSignedDockerReference(unmatchedSignature.UntrustedDockerReference); err != nil {
|
||||
if err := rules.validateSignedDockerReference(unmatchedSignature.untrustedDockerReference); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// signatureAcceptanceRules have accepted this value.
|
||||
return &Signature{
|
||||
DockerManifestDigest: unmatchedSignature.UntrustedDockerManifestDigest,
|
||||
DockerReference: unmatchedSignature.UntrustedDockerReference,
|
||||
DockerManifestDigest: unmatchedSignature.untrustedDockerManifestDigest,
|
||||
DockerReference: unmatchedSignature.untrustedDockerReference,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
@ -268,14 +269,14 @@ func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []
|
|||
}
|
||||
|
||||
var timestamp *time.Time // = nil
|
||||
if untrustedDecodedContents.UntrustedTimestamp != nil {
|
||||
ts := time.Unix(*untrustedDecodedContents.UntrustedTimestamp, 0)
|
||||
if untrustedDecodedContents.untrustedTimestamp != nil {
|
||||
ts := time.Unix(*untrustedDecodedContents.untrustedTimestamp, 0)
|
||||
timestamp = &ts
|
||||
}
|
||||
return &UntrustedSignatureInformation{
|
||||
UntrustedDockerManifestDigest: untrustedDecodedContents.UntrustedDockerManifestDigest,
|
||||
UntrustedDockerReference: untrustedDecodedContents.UntrustedDockerReference,
|
||||
UntrustedCreatorID: untrustedDecodedContents.UntrustedCreatorID,
|
||||
UntrustedDockerManifestDigest: untrustedDecodedContents.untrustedDockerManifestDigest,
|
||||
UntrustedDockerReference: untrustedDecodedContents.untrustedDockerReference,
|
||||
UntrustedCreatorID: untrustedDecodedContents.untrustedCreatorID,
|
||||
UntrustedTimestamp: timestamp,
|
||||
UntrustedShortKeyIdentifier: shortKeyIdentifier,
|
||||
}, nil
|
||||
|
|
|
|||
105
vendor/github.com/containers/image/v5/signature/simplesigning/signer.go
generated
vendored
Normal file
105
vendor/github.com/containers/image/v5/signature/simplesigning/signer.go
generated
vendored
Normal file
|
|
@ -0,0 +1,105 @@
|
|||
package simplesigning
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
internalSig "github.com/containers/image/v5/internal/signature"
|
||||
internalSigner "github.com/containers/image/v5/internal/signer"
|
||||
"github.com/containers/image/v5/signature"
|
||||
"github.com/containers/image/v5/signature/signer"
|
||||
)
|
||||
|
||||
// simpleSigner is a signer.SignerImplementation implementation for simple signing signatures.
|
||||
type simpleSigner struct {
|
||||
mech signature.SigningMechanism
|
||||
keyFingerprint string
|
||||
passphrase string // "" if not provided.
|
||||
}
|
||||
|
||||
type Option func(*simpleSigner) error
|
||||
|
||||
// WithKeyFingerprint returns an Option for NewSigner, specifying a key to sign with, using the provided GPG key fingerprint.
|
||||
func WithKeyFingerprint(keyFingerprint string) Option {
|
||||
return func(s *simpleSigner) error {
|
||||
s.keyFingerprint = keyFingerprint
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithPassphrase returns an Option for NewSigner, specifying a passphrase for the private key.
|
||||
// If this is not specified, the system may interactively prompt using a gpg-agent / pinentry.
|
||||
func WithPassphrase(passphrase string) Option {
|
||||
return func(s *simpleSigner) error {
|
||||
// The gpgme implementation can’t use passphrase with \n; reject it here for consistent behavior.
|
||||
if strings.Contains(passphrase, "\n") {
|
||||
return errors.New("invalid passphrase: must not contain a line break")
|
||||
}
|
||||
s.passphrase = passphrase
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// NewSigner returns a signature.Signer which creates “simple signing” signatures using the user’s default
|
||||
// GPG configuration ($GNUPGHOME / ~/.gnupg).
|
||||
//
|
||||
// The set of options must identify a key to sign with, probably using a WithKeyFingerprint.
|
||||
//
|
||||
// The caller must call Close() on the returned Signer.
|
||||
func NewSigner(opts ...Option) (*signer.Signer, error) {
|
||||
mech, err := signature.NewGPGSigningMechanism()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("initializing GPG: %w", err)
|
||||
}
|
||||
succeeded := false
|
||||
defer func() {
|
||||
if !succeeded {
|
||||
mech.Close()
|
||||
}
|
||||
}()
|
||||
if err := mech.SupportsSigning(); err != nil {
|
||||
return nil, fmt.Errorf("Signing not supported: %w", err)
|
||||
}
|
||||
|
||||
s := simpleSigner{
|
||||
mech: mech,
|
||||
}
|
||||
for _, o := range opts {
|
||||
if err := o(&s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if s.keyFingerprint == "" {
|
||||
return nil, errors.New("no key identity provided for simple signing")
|
||||
}
|
||||
// Ideally, we should look up (and unlock?) the key at this point already, but our current SigningMechanism API does not allow that.
|
||||
|
||||
succeeded = true
|
||||
return internalSigner.NewSigner(&s), nil
|
||||
}
|
||||
|
||||
// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature.
|
||||
func (s *simpleSigner) ProgressMessage() string {
|
||||
return "Signing image using simple signing"
|
||||
}
|
||||
|
||||
// SignImageManifest creates a new signature for manifest m as dockerReference.
|
||||
func (s *simpleSigner) SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (internalSig.Signature, error) {
|
||||
if reference.IsNameOnly(dockerReference) {
|
||||
return nil, fmt.Errorf("reference %s can’t be signed, it has neither a tag nor a digest", dockerReference.String())
|
||||
}
|
||||
simpleSig, err := signature.SignDockerManifestWithOptions(m, dockerReference.String(), s.mech, s.keyFingerprint, &signature.SignOptions{
|
||||
Passphrase: s.passphrase,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return internalSig.SimpleSigningFromBlob(simpleSig), nil
|
||||
}
|
||||
|
||||
func (s *simpleSigner) Close() error {
|
||||
return s.mech.Close()
|
||||
}
|
||||
10
vendor/github.com/containers/image/v5/transports/transports.go
generated
vendored
10
vendor/github.com/containers/image/v5/transports/transports.go
generated
vendored
|
|
@ -5,6 +5,7 @@ import (
|
|||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/containers/image/v5/internal/set"
|
||||
"github.com/containers/image/v5/types"
|
||||
)
|
||||
|
||||
|
|
@ -66,22 +67,21 @@ func Register(t types.ImageTransport) {
|
|||
// This is the generally recommended way to refer to images in the UI.
|
||||
//
|
||||
// NOTE: The returned string is not promised to be equal to the original input to ParseImageName;
|
||||
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
|
||||
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
|
||||
func ImageName(ref types.ImageReference) string {
|
||||
return ref.Transport().Name() + ":" + ref.StringWithinTransport()
|
||||
}
|
||||
|
||||
var deprecatedTransports = set.NewWithValues("atomic")
|
||||
|
||||
// ListNames returns a list of non deprecated transport names.
|
||||
// Deprecated transports can be used, but are not presented to users.
|
||||
func ListNames() []string {
|
||||
kt.mu.Lock()
|
||||
defer kt.mu.Unlock()
|
||||
deprecated := map[string]bool{
|
||||
"atomic": true,
|
||||
}
|
||||
var names []string
|
||||
for _, transport := range kt.transports {
|
||||
if !deprecated[transport.Name()] {
|
||||
if !deprecatedTransports.Contains(transport.Name()) {
|
||||
names = append(names, transport.Name())
|
||||
}
|
||||
}
|
||||
|
|
|
|||
59
vendor/github.com/containers/image/v5/types/types.go
generated
vendored
59
vendor/github.com/containers/image/v5/types/types.go
generated
vendored
|
|
@ -11,7 +11,7 @@ import (
|
|||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// ImageTransport is a top-level namespace for ways to to store/load an image.
|
||||
// ImageTransport is a top-level namespace for ways to store/load an image.
|
||||
// It should generally correspond to ImageSource/ImageDestination implementations.
|
||||
//
|
||||
// Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport.
|
||||
|
|
@ -48,7 +48,7 @@ type ImageReference interface {
|
|||
// StringWithinTransport returns a string representation of the reference, which MUST be such that
|
||||
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
|
||||
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
|
||||
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
|
||||
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
|
||||
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
|
||||
// instead, see transports.ImageName().
|
||||
StringWithinTransport() string
|
||||
|
|
@ -125,13 +125,20 @@ type BlobInfo struct {
|
|||
URLs []string
|
||||
Annotations map[string]string
|
||||
MediaType string
|
||||
|
||||
// NOTE: The following fields contain desired _edits_ to blob infos.
|
||||
// Conceptually then don't belong in the BlobInfo object at all;
|
||||
// the edits should be provided specifically as parameters to the edit implementation.
|
||||
// We can’t remove the fields without breaking compatibility, but don’t
|
||||
// add any more.
|
||||
|
||||
// CompressionOperation is used in Image.UpdateLayerInfos to instruct
|
||||
// whether the original layer's "compressed or not" should be preserved,
|
||||
// possibly while changing the compression algorithm from one to another,
|
||||
// or if it should be compressed or decompressed. The field defaults to
|
||||
// preserve the original layer's compressedness.
|
||||
// TODO: To remove together with CryptoOperation in re-design to remove
|
||||
// field out out of BlobInfo.
|
||||
// field out of BlobInfo.
|
||||
CompressionOperation LayerCompression
|
||||
// CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct
|
||||
// MIME type for compressed layers (e.g., gzip or zstd). This field MUST be
|
||||
|
|
@ -142,8 +149,9 @@ type BlobInfo struct {
|
|||
// CryptoOperation is used in Image.UpdateLayerInfos to instruct
|
||||
// whether the original layer was encrypted/decrypted
|
||||
// TODO: To remove together with CompressionOperation in re-design to
|
||||
// remove field out out of BlobInfo.
|
||||
// remove field out of BlobInfo.
|
||||
CryptoOperation LayerCrypto
|
||||
// Before adding any fields to this struct, read the NOTE above.
|
||||
}
|
||||
|
||||
// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present.
|
||||
|
|
@ -177,24 +185,25 @@ type BICReplacementCandidate struct {
|
|||
// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
|
||||
//
|
||||
// It records two kinds of data:
|
||||
// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
|
||||
// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
|
||||
// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
|
||||
// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload)/
|
||||
//
|
||||
// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
|
||||
// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
|
||||
// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
|
||||
// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
|
||||
// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
|
||||
// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload)/
|
||||
//
|
||||
// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
|
||||
// compress/decompress blobs for their own purposes.
|
||||
// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
|
||||
// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
|
||||
//
|
||||
// - Known blob locations, managed by individual transports:
|
||||
// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
|
||||
// recording transport-specific information that allows the transport to reuse the blob in the future;
|
||||
// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
|
||||
// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
|
||||
// compress/decompress blobs for their own purposes.
|
||||
//
|
||||
// Each transport defines its own “scopes” within which blob reuse is possible (e.g. in, the docker/distribution case, blobs
|
||||
// can be directly reused within a registry, or mounted across registries within a registry server.)
|
||||
// - Known blob locations, managed by individual transports:
|
||||
// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
|
||||
// recording transport-specific information that allows the transport to reuse the blob in the future;
|
||||
// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
|
||||
//
|
||||
// Each transport defines its own “scopes” within which blob reuse is possible (e.g. in, the docker/distribution case, blobs
|
||||
// can be directly reused within a registry, or mounted across registries within a registry server.)
|
||||
//
|
||||
// None of the methods return an error indication: errors when neither reading from, nor writing to, the cache, should be fatal;
|
||||
// users of the cache should just fall back to copying the blobs the usual way.
|
||||
|
|
@ -465,7 +474,17 @@ type ImageInspectInfo struct {
|
|||
Variant string
|
||||
Os string
|
||||
Layers []string
|
||||
LayersData []ImageInspectLayer
|
||||
Env []string
|
||||
Author string
|
||||
}
|
||||
|
||||
// ImageInspectLayer is a set of metadata describing an image layers' detail
|
||||
type ImageInspectLayer struct {
|
||||
MIMEType string // "" if unknown.
|
||||
Digest digest.Digest
|
||||
Size int64 // -1 if unknown.
|
||||
Annotations map[string]string
|
||||
}
|
||||
|
||||
// DockerAuthConfig contains authorization information for connecting to a registry.
|
||||
|
|
@ -566,9 +585,9 @@ type SystemContext struct {
|
|||
// resolving to Docker Hub in the Docker-compatible REST API of Podman; it should never be used outside this
|
||||
// specific context.
|
||||
PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub bool
|
||||
// If not "", overrides the default path for the authentication file, but only new format files
|
||||
// If not "", overrides the default path for the registry authentication file, but only new format files
|
||||
AuthFilePath string
|
||||
// if not "", overrides the default path for the authentication file, but with the legacy format;
|
||||
// if not "", overrides the default path for the registry authentication file, but with the legacy format;
|
||||
// the code currently will by default look for legacy format files like .dockercfg in the $HOME dir;
|
||||
// but in addition to the home dir, openshift may mount .dockercfg files (via secret mount)
|
||||
// in locations other than the home dir; openshift components should then set this field in those cases;
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue