go.mod: update to images@v0.117.0
This commit updates to images v0.117.0 so that the cross-distro.sh test works again: images removed fedora-39.json on its main branch, but this repository was still using the previous version of images, which includes fedora-39, so there was a mismatch. (We should look into whether there is a way to get github.com/osbuild/images@latest instead of main in the cross-arch test.) It also updates everything under vendor/ that got pulled in via the new images release (which is enormous). This update also requires bumping the Go version to 1.22.8.
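For reference, the new pins named above would appear in go.mod roughly as follows (a sketch showing only the two values stated in this message; the module's other requirements are unchanged and omitted):

    go 1.22.8

    require (
        github.com/osbuild/images v0.117.0
        // ... other dependencies elided ...
    )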
parent 886ddc0bcc
commit 409b4f6048
584 changed files with 60776 additions and 50181 deletions
vendor/github.com/containers/storage/pkg/chunked/cache_linux.go (generated, vendored): 12 lines changed

@@ -16,7 +16,7 @@ import (
 	storage "github.com/containers/storage"
 	graphdriver "github.com/containers/storage/drivers"
-	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
 	"github.com/containers/storage/pkg/ioutils"
 	"github.com/docker/go-units"
 	jsoniter "github.com/json-iterator/go"
@@ -710,7 +710,7 @@ func prepareCacheFile(manifest []byte, format graphdriver.DifferOutputFormat) ([
 	switch format {
 	case graphdriver.DifferOutputFormatDir:
 	case graphdriver.DifferOutputFormatFlat:
-		entries, err = makeEntriesFlat(entries)
+		entries, err = makeEntriesFlat(entries, nil)
 		if err != nil {
 			return nil, err
 		}
@@ -848,12 +848,12 @@ func (c *layersCache) findFileInOtherLayers(file *fileMetadata, useHardLinks boo
 	return "", "", nil
 }
 
-func (c *layersCache) findChunkInOtherLayers(chunk *internal.FileMetadata) (string, string, int64, error) {
+func (c *layersCache) findChunkInOtherLayers(chunk *minimal.FileMetadata) (string, string, int64, error) {
 	return c.findDigestInternal(chunk.ChunkDigest)
 }
 
-func unmarshalToc(manifest []byte) (*internal.TOC, error) {
-	var toc internal.TOC
+func unmarshalToc(manifest []byte) (*minimal.TOC, error) {
+	var toc minimal.TOC
 
 	iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
 
@@ -864,7 +864,7 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
 
 		case "entries":
 			for iter.ReadArray() {
-				var m internal.FileMetadata
+				var m minimal.FileMetadata
 				for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
 					switch strings.ToLower(field) {
 					case "type":
vendor/github.com/containers/storage/pkg/chunked/compression.go (generated, vendored): 18 lines changed

@@ -4,18 +4,18 @@ import (
 	"io"
 
 	"github.com/containers/storage/pkg/chunked/compressor"
-	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
 )
 
 const (
-	TypeReg     = internal.TypeReg
-	TypeChunk   = internal.TypeChunk
-	TypeLink    = internal.TypeLink
-	TypeChar    = internal.TypeChar
-	TypeBlock   = internal.TypeBlock
-	TypeDir     = internal.TypeDir
-	TypeFifo    = internal.TypeFifo
-	TypeSymlink = internal.TypeSymlink
+	TypeReg     = minimal.TypeReg
+	TypeChunk   = minimal.TypeChunk
+	TypeLink    = minimal.TypeLink
+	TypeChar    = minimal.TypeChar
+	TypeBlock   = minimal.TypeBlock
+	TypeDir     = minimal.TypeDir
+	TypeFifo    = minimal.TypeFifo
+	TypeSymlink = minimal.TypeSymlink
 )
 
 // ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
vendor/github.com/containers/storage/pkg/chunked/compression_linux.go (generated, vendored): 189 lines changed

@@ -10,7 +10,7 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
 	"github.com/klauspost/compress/zstd"
 	"github.com/klauspost/pgzip"
 	digest "github.com/opencontainers/go-digest"
@@ -20,6 +20,12 @@ import (
 	expMaps "golang.org/x/exp/maps"
 )
 
+const (
+	// maxTocSize is the maximum size of a blob that we will attempt to process.
+	// It is used to prevent DoS attacks from layers that embed a very large TOC file.
+	maxTocSize = (1 << 20) * 150
+)
+
 var typesToTar = map[string]byte{
 	TypeReg:  tar.TypeReg,
 	TypeLink: tar.TypeLink,
@@ -38,33 +44,35 @@ func typeToTarType(t string) (byte, error) {
 	return r, nil
 }
 
+// readEstargzChunkedManifest reads the estargz manifest from the seekable stream blobStream.
+// It may return an error matching ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert.
 func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, tocDigest digest.Digest) ([]byte, int64, error) {
 	// information on the format here https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md
 	footerSize := int64(51)
 	if blobSize <= footerSize {
 		return nil, 0, errors.New("blob too small")
 	}
-	chunk := ImageSourceChunk{
-		Offset: uint64(blobSize - footerSize),
-		Length: uint64(footerSize),
-	}
-	parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
-	if err != nil {
-		return nil, 0, err
-	}
-	var reader io.ReadCloser
-	select {
-	case r := <-parts:
-		reader = r
-	case err := <-errs:
-		return nil, 0, err
-	}
-	defer reader.Close()
 
 	footer := make([]byte, footerSize)
-	if _, err := io.ReadFull(reader, footer); err != nil {
+	streamsOrErrors, err := getBlobAt(blobStream, ImageSourceChunk{Offset: uint64(blobSize - footerSize), Length: uint64(footerSize)})
+	if err != nil {
+		var badRequestErr ErrBadRequest
+		if errors.As(err, &badRequestErr) {
+			err = errFallbackCanConvert{newErrFallbackToOrdinaryLayerDownload(err)}
+		}
 		return nil, 0, err
 	}
 
+	for soe := range streamsOrErrors {
+		if soe.stream != nil {
+			_, err = io.ReadFull(soe.stream, footer)
+			_ = soe.stream.Close()
+		}
+		if soe.err != nil && err == nil {
+			err = soe.err
+		}
+	}
+
 	/* Read the ToC offset:
 	   - 10 bytes gzip header
 	   - 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ"))
@@ -81,48 +89,59 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
 
 	size := int64(blobSize - footerSize - tocOffset)
-	// set a reasonable limit
-	if size > (1<<20)*50 {
-		return nil, 0, errors.New("manifest too big")
+	if size > maxTocSize {
+		// Not errFallbackCanConvert: we would still use too much memory.
+		return nil, 0, newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("estargz manifest too big to process in memory (%d bytes)", size))
 	}
 
-	chunk = ImageSourceChunk{
-		Offset: uint64(tocOffset),
-		Length: uint64(size),
-	}
-	parts, errs, err = blobStream.GetBlobAt([]ImageSourceChunk{chunk})
+	streamsOrErrors, err = getBlobAt(blobStream, ImageSourceChunk{Offset: uint64(tocOffset), Length: uint64(size)})
 	if err != nil {
+		var badRequestErr ErrBadRequest
+		if errors.As(err, &badRequestErr) {
+			err = errFallbackCanConvert{newErrFallbackToOrdinaryLayerDownload(err)}
+		}
 		return nil, 0, err
 	}
 
-	var tocReader io.ReadCloser
-	select {
-	case r := <-parts:
-		tocReader = r
-	case err := <-errs:
-		return nil, 0, err
-	}
-	defer tocReader.Close()
+	var manifestUncompressed []byte
 
-	r, err := pgzip.NewReader(tocReader)
-	if err != nil {
-		return nil, 0, err
-	}
-	defer r.Close()
+	for soe := range streamsOrErrors {
+		if soe.stream != nil {
+			err1 := func() error {
+				defer soe.stream.Close()
 
-	aTar := archivetar.NewReader(r)
+				r, err := pgzip.NewReader(soe.stream)
+				if err != nil {
+					return err
+				}
+				defer r.Close()
 
-	header, err := aTar.Next()
-	if err != nil {
-		return nil, 0, err
-	}
-	// set a reasonable limit
-	if header.Size > (1<<20)*50 {
-		return nil, 0, errors.New("manifest too big")
-	}
+				aTar := archivetar.NewReader(r)
 
-	manifestUncompressed := make([]byte, header.Size)
-	if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil {
-		return nil, 0, err
+				header, err := aTar.Next()
+				if err != nil {
+					return err
+				}
+				// set a reasonable limit
+				if header.Size > maxTocSize {
+					return errors.New("manifest too big")
+				}
+
+				manifestUncompressed = make([]byte, header.Size)
+				if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil {
+					return err
+				}
+				return nil
+			}()
+			if err == nil {
+				err = err1
+			}
+		} else if err == nil {
+			err = soe.err
+		}
+	}
+	if manifestUncompressed == nil {
+		return nil, 0, errors.New("manifest not found")
 	}
 
 	manifestDigester := digest.Canonical.Digester()
@@ -140,10 +159,11 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
 
 // readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream.
 // Returns (manifest blob, parsed manifest, tar-split blob or nil, manifest offset).
-func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) ([]byte, *internal.TOC, []byte, int64, error) {
-	offsetMetadata := annotations[internal.ManifestInfoKey]
+// It may return an error matching ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert.
+func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) (_ []byte, _ *minimal.TOC, _ []byte, _ int64, retErr error) {
+	offsetMetadata := annotations[minimal.ManifestInfoKey]
 	if offsetMetadata == "" {
-		return nil, nil, nil, 0, fmt.Errorf("%q annotation missing", internal.ManifestInfoKey)
+		return nil, nil, nil, 0, fmt.Errorf("%q annotation missing", minimal.ManifestInfoKey)
 	}
 	var manifestChunk ImageSourceChunk
 	var manifestLengthUncompressed, manifestType uint64
@@ -153,48 +173,59 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
 	// The tarSplit… values are valid if tarSplitChunk.Offset > 0
 	var tarSplitChunk ImageSourceChunk
 	var tarSplitLengthUncompressed uint64
-	if tarSplitInfoKeyAnnotation, found := annotations[internal.TarSplitInfoKey]; found {
+	if tarSplitInfoKeyAnnotation, found := annotations[minimal.TarSplitInfoKey]; found {
 		if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &tarSplitChunk.Offset, &tarSplitChunk.Length, &tarSplitLengthUncompressed); err != nil {
 			return nil, nil, nil, 0, err
 		}
 	}
 
-	if manifestType != internal.ManifestTypeCRFS {
+	if manifestType != minimal.ManifestTypeCRFS {
 		return nil, nil, nil, 0, errors.New("invalid manifest type")
 	}
 
-	// set a reasonable limit
-	if manifestChunk.Length > (1<<20)*50 {
-		return nil, nil, nil, 0, errors.New("manifest too big")
+	if manifestChunk.Length > maxTocSize {
+		// Not errFallbackCanConvert: we would still use too much memory.
+		return nil, nil, nil, 0, newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("zstd:chunked manifest too big to process in memory (%d bytes compressed)", manifestChunk.Length))
 	}
-	if manifestLengthUncompressed > (1<<20)*50 {
-		return nil, nil, nil, 0, errors.New("manifest too big")
+	if manifestLengthUncompressed > maxTocSize {
+		// Not errFallbackCanConvert: we would still use too much memory.
+		return nil, nil, nil, 0, newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("zstd:chunked manifest too big to process in memory (%d bytes uncompressed)", manifestLengthUncompressed))
 	}
 
 	chunks := []ImageSourceChunk{manifestChunk}
 	if tarSplitChunk.Offset > 0 {
 		chunks = append(chunks, tarSplitChunk)
 	}
-	parts, errs, err := blobStream.GetBlobAt(chunks)
+
+	streamsOrErrors, err := getBlobAt(blobStream, chunks...)
 	if err != nil {
+		var badRequestErr ErrBadRequest
+		if errors.As(err, &badRequestErr) {
+			err = errFallbackCanConvert{newErrFallbackToOrdinaryLayerDownload(err)}
+		}
 		return nil, nil, nil, 0, err
 	}
 
-	readBlob := func(len uint64) ([]byte, error) {
-		var reader io.ReadCloser
-		select {
-		case r := <-parts:
-			reader = r
-		case err := <-errs:
-			return nil, err
-		}
+	defer func() {
+		err := ensureAllBlobsDone(streamsOrErrors)
+		if retErr == nil {
+			retErr = err
+		}
+	}()
+
+	readBlob := func(len uint64) ([]byte, error) {
+		soe, ok := <-streamsOrErrors
+		if !ok {
+			return nil, errors.New("stream closed")
+		}
+		if soe.err != nil {
+			return nil, soe.err
+		}
+		defer soe.stream.Close()
 
 		blob := make([]byte, len)
-		if _, err := io.ReadFull(reader, blob); err != nil {
-			reader.Close()
-			return nil, err
-		}
-		if err := reader.Close(); err != nil {
+		if _, err := io.ReadFull(soe.stream, blob); err != nil {
 			return nil, err
 		}
 		return blob, nil
@@ -217,7 +248,7 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
 	var decodedTarSplit []byte = nil
 	if toc.TarSplitDigest != "" {
 		if tarSplitChunk.Offset <= 0 {
-			return nil, nil, nil, 0, fmt.Errorf("TOC requires a tar-split, but the %s annotation does not describe a position", internal.TarSplitInfoKey)
+			return nil, nil, nil, 0, fmt.Errorf("TOC requires a tar-split, but the %s annotation does not describe a position", minimal.TarSplitInfoKey)
 		}
 		tarSplit, err := readBlob(tarSplitChunk.Length)
 		if err != nil {
@@ -247,11 +278,11 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
 }
 
 // ensureTOCMatchesTarSplit validates that toc and tarSplit contain _exactly_ the same entries.
-func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error {
-	pendingFiles := map[string]*internal.FileMetadata{} // Name -> an entry in toc.Entries
+func ensureTOCMatchesTarSplit(toc *minimal.TOC, tarSplit []byte) error {
+	pendingFiles := map[string]*minimal.FileMetadata{} // Name -> an entry in toc.Entries
 	for i := range toc.Entries {
 		e := &toc.Entries[i]
-		if e.Type != internal.TypeChunk {
+		if e.Type != minimal.TypeChunk {
 			if _, ok := pendingFiles[e.Name]; ok {
 				return fmt.Errorf("TOC contains duplicate entries for path %q", e.Name)
 			}
@@ -266,7 +297,7 @@ func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error {
 			return fmt.Errorf("tar-split contains an entry for %q missing in TOC", hdr.Name)
 		}
 		delete(pendingFiles, hdr.Name)
-		expected, err := internal.NewFileMetadata(hdr)
+		expected, err := minimal.NewFileMetadata(hdr)
 		if err != nil {
 			return fmt.Errorf("determining expected metadata for %q: %w", hdr.Name, err)
 		}
@@ -347,8 +378,8 @@ func ensureTimePointersMatch(a, b *time.Time) error {
 
 // ensureFileMetadataAttributesMatch ensures that a and b match in file attributes (it ignores entries relevant to locating data
 // in the tar stream or matching contents)
-func ensureFileMetadataAttributesMatch(a, b *internal.FileMetadata) error {
-	// Keep this in sync with internal.FileMetadata!
+func ensureFileMetadataAttributesMatch(a, b *minimal.FileMetadata) error {
+	// Keep this in sync with minimal.FileMetadata!
 
 	if a.Type != b.Type {
 		return fmt.Errorf("mismatch of Type: %q != %q", a.Type, b.Type)
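A note on the new limit used throughout the hunks above: maxTocSize replaces the scattered inline (1<<20)*50 checks with one shared constant, and also raises the cap. In plain Go terms:

    const maxTocSize = (1 << 20) * 150 // 1 MiB * 150 = 157,286,400 bytes = 150 MiB (the old inline limit was 50 MiB)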
vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go (generated, vendored): 24 lines changed

@@ -9,7 +9,7 @@ import (
 	"bytes"
 	"io"
 
-	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
 	"github.com/containers/storage/pkg/ioutils"
 	"github.com/klauspost/compress/zstd"
 	"github.com/opencontainers/go-digest"
@@ -213,7 +213,7 @@ func newTarSplitData(level int) (*tarSplitData, error) {
 	compressed := bytes.NewBuffer(nil)
 	digester := digest.Canonical.Digester()
 
-	zstdWriter, err := internal.ZstdWriterWithLevel(io.MultiWriter(compressed, digester.Hash()), level)
+	zstdWriter, err := minimal.ZstdWriterWithLevel(io.MultiWriter(compressed, digester.Hash()), level)
 	if err != nil {
 		return nil, err
 	}
@@ -254,7 +254,7 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
 
 	buf := make([]byte, 4096)
 
-	zstdWriter, err := internal.ZstdWriterWithLevel(dest, level)
+	zstdWriter, err := minimal.ZstdWriterWithLevel(dest, level)
 	if err != nil {
 		return err
 	}
@@ -276,7 +276,7 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
 		return offset, nil
 	}
 
-	var metadata []internal.FileMetadata
+	var metadata []minimal.FileMetadata
 	for {
 		hdr, err := tr.Next()
 		if err != nil {
@@ -341,9 +341,9 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
 
 			chunkSize := rcReader.WrittenOut - lastChunkOffset
 			if chunkSize > 0 {
-				chunkType := internal.ChunkTypeData
+				chunkType := minimal.ChunkTypeData
 				if rcReader.IsLastChunkZeros {
-					chunkType = internal.ChunkTypeZeros
+					chunkType = minimal.ChunkTypeZeros
 				}
 
 				chunks = append(chunks, chunk{
@@ -368,17 +368,17 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
 			}
 		}
 
-		mainEntry, err := internal.NewFileMetadata(hdr)
+		mainEntry, err := minimal.NewFileMetadata(hdr)
 		if err != nil {
 			return err
 		}
 		mainEntry.Digest = checksum
 		mainEntry.Offset = startOffset
 		mainEntry.EndOffset = lastOffset
-		entries := []internal.FileMetadata{mainEntry}
+		entries := []minimal.FileMetadata{mainEntry}
 		for i := 1; i < len(chunks); i++ {
-			entries = append(entries, internal.FileMetadata{
-				Type:        internal.TypeChunk,
+			entries = append(entries, minimal.FileMetadata{
+				Type:        minimal.TypeChunk,
 				Name:        hdr.Name,
 				ChunkOffset: chunks[i].ChunkOffset,
 			})
@@ -424,13 +424,13 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
 	}
 	tarSplitData.zstd = nil
 
-	ts := internal.TarSplitData{
+	ts := minimal.TarSplitData{
 		Data:             tarSplitData.compressed.Bytes(),
 		Digest:           tarSplitData.digester.Digest(),
 		UncompressedSize: tarSplitData.uncompressedCounter.Count,
 	}
 
-	return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, level)
+	return minimal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, level)
 }
 
 type zstdChunkedWriter struct {
vendor/github.com/containers/storage/pkg/chunked/dump/dump.go (generated, vendored): 66 lines changed

@@ -9,10 +9,11 @@ import (
 	"io"
 	"path/filepath"
 	"reflect"
 	"strings"
 	"time"
 
-	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
+	storagePath "github.com/containers/storage/pkg/chunked/internal/path"
+	"github.com/opencontainers/go-digest"
 	"golang.org/x/sys/unix"
 )
 
@@ -85,17 +86,17 @@ func escapedOptional(val []byte, escape int) string {
 
 func getStMode(mode uint32, typ string) (uint32, error) {
 	switch typ {
-	case internal.TypeReg, internal.TypeLink:
+	case minimal.TypeReg, minimal.TypeLink:
 		mode |= unix.S_IFREG
-	case internal.TypeChar:
+	case minimal.TypeChar:
 		mode |= unix.S_IFCHR
-	case internal.TypeBlock:
+	case minimal.TypeBlock:
 		mode |= unix.S_IFBLK
-	case internal.TypeDir:
+	case minimal.TypeDir:
 		mode |= unix.S_IFDIR
-	case internal.TypeFifo:
+	case minimal.TypeFifo:
 		mode |= unix.S_IFIFO
-	case internal.TypeSymlink:
+	case minimal.TypeSymlink:
 		mode |= unix.S_IFLNK
 	default:
 		return 0, fmt.Errorf("unknown type %s", typ)
@@ -103,24 +104,14 @@ func getStMode(mode uint32, typ string) (uint32, error) {
 	return mode, nil
 }
 
-func sanitizeName(name string) string {
-	path := filepath.Clean(name)
-	if path == "." {
-		path = "/"
-	} else if path[0] != '/' {
-		path = "/" + path
-	}
-	return path
-}
-
-func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error {
-	path := sanitizeName(entry.Name)
+func dumpNode(out io.Writer, added map[string]*minimal.FileMetadata, links map[string]int, verityDigests map[string]string, entry *minimal.FileMetadata) error {
+	path := storagePath.CleanAbsPath(entry.Name)
 
 	parent := filepath.Dir(path)
 	if _, found := added[parent]; !found && path != "/" {
-		parentEntry := &internal.FileMetadata{
+		parentEntry := &minimal.FileMetadata{
 			Name: parent,
-			Type: internal.TypeDir,
+			Type: minimal.TypeDir,
 			Mode: 0o755,
 		}
 		if err := dumpNode(out, added, links, verityDigests, parentEntry); err != nil {
@@ -143,7 +134,7 @@ func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[
 
 	nlinks := links[entry.Name] + links[entry.Linkname] + 1
 	link := ""
-	if entry.Type == internal.TypeLink {
+	if entry.Type == minimal.TypeLink {
 		link = "@"
 	}
 
@@ -169,16 +160,21 @@ func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[
 
 	var payload string
 	if entry.Linkname != "" {
-		if entry.Type == internal.TypeSymlink {
+		if entry.Type == minimal.TypeSymlink {
 			payload = entry.Linkname
 		} else {
-			payload = sanitizeName(entry.Linkname)
+			payload = storagePath.CleanAbsPath(entry.Linkname)
 		}
-	} else {
-		if len(entry.Digest) > 10 {
-			d := strings.Replace(entry.Digest, "sha256:", "", 1)
-			payload = d[:2] + "/" + d[2:]
-		}
+	} else if entry.Digest != "" {
+		d, err := digest.Parse(entry.Digest)
+		if err != nil {
+			return fmt.Errorf("invalid digest %q for %q: %w", entry.Digest, entry.Name, err)
+		}
+		path, err := storagePath.RegularFilePathForValidatedDigest(d)
+		if err != nil {
+			return fmt.Errorf("determining physical file path for %q: %w", entry.Name, err)
+		}
+		payload = path
 	}
 
 	if _, err := fmt.Fprint(out, escapedOptional([]byte(payload), ESCAPE_LONE_DASH)); err != nil {
@@ -219,7 +215,7 @@ func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[
 
 // GenerateDump generates a dump of the TOC in the same format as `composefs-info dump`
 func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, error) {
-	toc, ok := tocI.(*internal.TOC)
+	toc, ok := tocI.(*minimal.TOC)
 	if !ok {
 		return nil, fmt.Errorf("invalid TOC type")
 	}
@@ -235,21 +231,21 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader,
 	}()
 
 	links := make(map[string]int)
-	added := make(map[string]*internal.FileMetadata)
+	added := make(map[string]*minimal.FileMetadata)
 	for _, e := range toc.Entries {
 		if e.Linkname == "" {
 			continue
 		}
-		if e.Type == internal.TypeSymlink {
+		if e.Type == minimal.TypeSymlink {
 			continue
 		}
 		links[e.Linkname] = links[e.Linkname] + 1
 	}
 
 	if len(toc.Entries) == 0 {
-		root := &internal.FileMetadata{
+		root := &minimal.FileMetadata{
 			Name: "/",
-			Type: internal.TypeDir,
+			Type: minimal.TypeDir,
 			Mode: 0o755,
 		}
 
@@ -261,7 +257,7 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader,
 	}
 
 	for _, e := range toc.Entries {
-		if e.Type == internal.TypeChunk {
+		if e.Type == minimal.TypeChunk {
 			continue
 		}
 		if err := dumpNode(w, added, links, verityDigests, &e); err != nil {
vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go (generated, vendored): 73 lines changed

@@ -15,7 +15,8 @@ import (
 
 	driversCopy "github.com/containers/storage/drivers/copy"
 	"github.com/containers/storage/pkg/archive"
-	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
+	storagePath "github.com/containers/storage/pkg/chunked/internal/path"
 	securejoin "github.com/cyphar/filepath-securejoin"
 	"github.com/vbatts/tar-split/archive/tar"
 	"golang.org/x/sys/unix"
@@ -34,14 +35,14 @@ func procPathForFd(fd int) string {
 	return fmt.Sprintf("/proc/self/fd/%d", fd)
 }
 
-// fileMetadata is a wrapper around internal.FileMetadata with additional private fields that
+// fileMetadata is a wrapper around minimal.FileMetadata with additional private fields that
 // are not part of the TOC document.
 // Type: TypeChunk entries are stored in Chunks, the primary [fileMetadata] entries never use TypeChunk.
 type fileMetadata struct {
-	internal.FileMetadata
+	minimal.FileMetadata
 
 	// chunks stores the TypeChunk entries relevant to this entry when FileMetadata.Type == TypeReg.
-	chunks []*internal.FileMetadata
+	chunks []*minimal.FileMetadata
 
 	// skipSetAttrs is set when the file attributes must not be
 	// modified, e.g. it is a hard link from a different source,
@@ -49,10 +50,37 @@ type fileMetadata struct {
 	skipSetAttrs bool
 }
 
+// splitPath takes a file path as input and returns two components: dir and base.
+// Differently than filepath.Split(), this function handles some edge cases.
+// If the path refers to a file in the root directory, the returned dir is "/".
+// The returned base value is never empty, it never contains any slash and the
+// value "..".
+func splitPath(path string) (string, string, error) {
+	path = storagePath.CleanAbsPath(path)
+	dir, base := filepath.Split(path)
+	if base == "" {
+		base = "."
+	}
+	// Remove trailing slashes from dir, but make sure that "/" is preserved.
+	dir = strings.TrimSuffix(dir, "/")
+	if dir == "" {
+		dir = "/"
+	}
+
+	if strings.Contains(base, "/") {
+		// This should never happen, but be safe as the base is passed to *at syscalls.
+		return "", "", fmt.Errorf("internal error: splitPath(%q) contains a slash", path)
+	}
+	return dir, base, nil
+}
+
 func doHardLink(dirfd, srcFd int, destFile string) error {
-	destDir, destBase := filepath.Split(destFile)
+	destDir, destBase, err := splitPath(destFile)
+	if err != nil {
+		return err
+	}
 	destDirFd := dirfd
-	if destDir != "" && destDir != "." {
+	if destDir != "/" {
 		f, err := openOrCreateDirUnderRoot(dirfd, destDir, 0)
 		if err != nil {
 			return err
@@ -72,7 +100,7 @@ func doHardLink(dirfd, srcFd int, destFile string) error {
 		return nil
 	}
 
-	err := doLink()
+	err = doLink()
 
 	// if the destination exists, unlink it first and try again
 	if err != nil && os.IsExist(err) {
@@ -281,8 +309,11 @@ func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.Fil
 	// If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the
 	// last component as the path to openat().
 	if hasNoFollow {
-		dirName, baseName := filepath.Split(name)
-		if dirName != "" && dirName != "." {
+		dirName, baseName, err := splitPath(name)
+		if err != nil {
+			return -1, err
+		}
+		if dirName != "/" {
 			newRoot, err := securejoin.SecureJoin(root, dirName)
 			if err != nil {
 				return -1, err
@@ -409,7 +440,8 @@ func openOrCreateDirUnderRoot(dirfd int, name string, mode os.FileMode) (*os.Fil
 
 	if errors.Is(err, unix.ENOENT) {
 		parent := filepath.Dir(name)
-		if parent != "" {
+		// do not create the root directory, it should always exist
+		if parent != name {
 			pDir, err2 := openOrCreateDirUnderRoot(dirfd, parent, mode)
 			if err2 != nil {
 				return nil, err
@@ -448,9 +480,12 @@ func appendHole(fd int, name string, size int64) error {
 }
 
 func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *fileMetadata, options *archive.TarOptions) error {
-	parent, base := filepath.Split(name)
+	parent, base, err := splitPath(name)
+	if err != nil {
+		return err
+	}
 	parentFd := dirfd
-	if parent != "" && parent != "." {
+	if parent != "/" {
 		parentFile, err := openOrCreateDirUnderRoot(dirfd, parent, 0)
 		if err != nil {
 			return err
@@ -506,9 +541,12 @@ func safeLink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *arch
 }
 
 func safeSymlink(dirfd int, metadata *fileMetadata) error {
-	destDir, destBase := filepath.Split(metadata.Name)
+	destDir, destBase, err := splitPath(metadata.Name)
+	if err != nil {
+		return err
+	}
 	destDirFd := dirfd
-	if destDir != "" && destDir != "." {
+	if destDir != "/" {
 		f, err := openOrCreateDirUnderRoot(dirfd, destDir, 0)
 		if err != nil {
 			return err
@@ -542,9 +580,12 @@ func (d whiteoutHandler) Setxattr(path, name string, value []byte) error {
 }
 
 func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error {
-	dir, base := filepath.Split(path)
+	dir, base, err := splitPath(path)
+	if err != nil {
+		return err
+	}
 	dirfd := d.Dirfd
-	if dir != "" && dir != "." {
+	if dir != "/" {
 		dir, err := openOrCreateDirUnderRoot(d.Dirfd, dir, 0)
 		if err != nil {
 			return err
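To make the behavior of the new splitPath helper above concrete, here is an illustrative sketch (not part of the diff) of the documented edge cases, derived directly from the implementation:

    // splitPath("a/b/c") -> ("/a/b", "c", nil)  // input is made absolute first via CleanAbsPath
    // splitPath("file")  -> ("/", "file", nil)  // a file in the root directory: dir is "/"
    // splitPath("/")     -> ("/", ".", nil)     // base is never empty
    // splitPath("../x")  -> ("/", "x", nil)     // ".." and "." are cleaned away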
(file header not captured; this hunk renames the package from internal to minimal)

@@ -1,4 +1,4 @@
-package internal
+package minimal
 
 // NOTE: This is used from github.com/containers/image by callers that
 // don't otherwise use containers/storage, so don't make this depend on any
vendor/github.com/containers/storage/pkg/chunked/internal/path/path.go (generated, vendored, new file): 27 lines changed

@@ -0,0 +1,27 @@
+package path
+
+import (
+	"fmt"
+	"path/filepath"
+
+	"github.com/opencontainers/go-digest"
+)
+
+// CleanAbsPath removes any ".." and "." from the path
+// and ensures it starts with a "/".  If the path refers to the root
+// directory, it returns "/".
+func CleanAbsPath(path string) string {
+	return filepath.Clean("/" + path)
+}
+
+// RegularFilePath returns the path used in the composefs backing store for a
+// regular file with the provided content digest.
+//
+// The caller MUST ensure d is a valid digest (in particular, that it contains no path separators or .. entries)
+func RegularFilePathForValidatedDigest(d digest.Digest) (string, error) {
+	if algo := d.Algorithm(); algo != digest.SHA256 {
+		return "", fmt.Errorf("unexpected digest algorithm %q", algo)
+	}
+	e := d.Encoded()
+	return e[0:2] + "/" + e[2:], nil
+}
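A short usage sketch for the two new helpers (illustrative only; the package is internal to containers/storage, so it can only be imported from inside that module, and the digest below is the well-known SHA-256 of empty input):

    d := digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")

    path.CleanAbsPath("../etc//passwd")        // "/etc/passwd"
    path.RegularFilePathForValidatedDigest(d)  // "e3/b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", nil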
vendor/github.com/containers/storage/pkg/chunked/storage_linux.go (generated, vendored): 614 lines changed

@@ -2,6 +2,7 @@ package chunked
 
 import (
 	archivetar "archive/tar"
+	"bytes"
 	"context"
 	"encoding/base64"
 	"errors"
@@ -22,17 +23,21 @@ import (
 	graphdriver "github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/chunked/compressor"
-	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
+	path "github.com/containers/storage/pkg/chunked/internal/path"
 	"github.com/containers/storage/pkg/chunked/toc"
 	"github.com/containers/storage/pkg/fsverity"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/system"
+	securejoin "github.com/cyphar/filepath-securejoin"
 	jsoniter "github.com/json-iterator/go"
 	"github.com/klauspost/compress/zstd"
 	"github.com/klauspost/pgzip"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/sirupsen/logrus"
 	"github.com/vbatts/tar-split/archive/tar"
+	"github.com/vbatts/tar-split/tar/asm"
+	tsStorage "github.com/vbatts/tar-split/tar/storage"
 	"golang.org/x/sys/unix"
 )
 
@@ -57,46 +62,53 @@ const (
 type compressedFileType int
 
 type chunkedDiffer struct {
+	// Initial parameters, used throughout and never modified
+	// ==========
+	pullOptions pullOptions
 	stream      ImageSourceSeekable
-	manifest    []byte
-	toc         *internal.TOC // The parsed contents of manifest, or nil if not yet available
-	tarSplit    []byte
-	layersCache *layersCache
-	tocOffset   int64
-	fileType    compressedFileType
-
-	copyBuffer []byte
-
-	gzipReader *pgzip.Reader
-	zstdReader *zstd.Decoder
-	rawReader  io.Reader
-
-	// tocDigest is the digest of the TOC document when the layer
-	// is partially pulled.
-	tocDigest digest.Digest
 	// blobDigest is the digest of the whole compressed layer.  It is used if
 	// convertToZstdChunked to validate a layer when it is converted since there
 	// is no TOC referenced by the manifest.
 	blobDigest digest.Digest
 	blobSize   int64
 
+	// Input format
+	// ==========
+	fileType compressedFileType
 	// convertedToZstdChunked is set to true if the layer needs to
 	// be converted to the zstd:chunked format before it can be
 	// handled.
 	convertToZstdChunked bool
 
+	// Chunked metadata
+	// This is usually set in GetDiffer, but if convertToZstdChunked, it is only computed in chunkedDiffer.ApplyDiff
+	// ==========
+	// tocDigest is the digest of the TOC document when the layer
+	// is partially pulled, or "" if not relevant to consumers.
+	tocDigest           digest.Digest
+	tocOffset           int64
+	manifest            []byte
+	toc                 *minimal.TOC // The parsed contents of manifest, or nil if not yet available
+	tarSplit            []byte
+	uncompressedTarSize int64 // -1 if unknown
 	// skipValidation is set to true if the individual files in
 	// the layer are trusted and should not be validated.
 	skipValidation bool
 
-	// blobDigest is the digest of the whole compressed layer.  It is used if
-	// convertToZstdChunked to validate a layer when it is converted since there
-	// is no TOC referenced by the manifest.
-	blobDigest digest.Digest
-
-	blobSize            int64
-	uncompressedTarSize int64 // -1 if unknown
-
-	pullOptions map[string]string
-
-	useFsVerity graphdriver.DifferFsVerity
+	// Long-term caches
+	// This is set in GetDiffer, when the caller must not hold any storage locks, and later consumed in .ApplyDiff()
+	// ==========
+	layersCache     *layersCache
+	copyBuffer      []byte
+	fsVerityMutex   sync.Mutex // protects fsVerityDigests
 	fsVerityDigests map[string]string
-	fsVerityMutex   sync.Mutex
+
+	// Private state of .ApplyDiff
+	// ==========
+	gzipReader  *pgzip.Reader
+	zstdReader  *zstd.Decoder
+	rawReader   io.Reader
+	useFsVerity graphdriver.DifferFsVerity
 }
 
 var xattrsToIgnore = map[string]interface{}{
@@ -108,6 +120,42 @@ type chunkedLayerData struct {
 	Format graphdriver.DifferOutputFormat `json:"format"`
 }
 
+// pullOptions contains parsed data from storage.Store.PullOptions.
+// TO DO: ideally this should be parsed along with the rest of the config file into StoreOptions directly
+// (and then storage.Store.PullOptions would need to be somehow simulated).
+type pullOptions struct {
+	enablePartialImages                     bool     // enable_partial_images
+	convertImages                           bool     // convert_images
+	useHardLinks                            bool     // use_hard_links
+	insecureAllowUnpredictableImageContents bool     // insecure_allow_unpredictable_image_contents
+	ostreeRepos                             []string // ostree_repos
+}
+
+func parsePullOptions(store storage.Store) pullOptions {
+	options := store.PullOptions()
+
+	res := pullOptions{}
+	for _, e := range []struct {
+		dest         *bool
+		name         string
+		defaultValue bool
+	}{
+		{&res.enablePartialImages, "enable_partial_images", false},
+		{&res.convertImages, "convert_images", false},
+		{&res.useHardLinks, "use_hard_links", false},
+		{&res.insecureAllowUnpredictableImageContents, "insecure_allow_unpredictable_image_contents", false},
+	} {
+		if value, ok := options[e.name]; ok {
+			*e.dest = strings.ToLower(value) == "true"
+		} else {
+			*e.dest = e.defaultValue
+		}
+	}
+	res.ostreeRepos = strings.Split(options["ostree_repos"], ":")
+
+	return res
+}
+
 func (c *chunkedDiffer) convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *seekableFile, digest.Digest, map[string]string, error) {
 	diff, err := archive.DecompressStream(payload)
 	if err != nil {
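The parsePullOptions helper above replaces the per-call parseBooleanPullOption lookups (removed further down) with one upfront parse. A sketch of the resulting mapping, using the key names from the code above (the example values are hypothetical):

    // store.PullOptions() == map[string]string{
    //     "enable_partial_images": "true",
    //     "ostree_repos":          "/repo1:/repo2",
    // }
    // parsePullOptions(store) == pullOptions{
    //     enablePartialImages: true,
    //     convertImages:       false, // booleans default to false when the key is absent
    //     useHardLinks:        false,
    //     insecureAllowUnpredictableImageContents: false,
    //     ostreeRepos:         []string{"/repo1", "/repo2"},
    // }
    // Note: strings.Split("", ":") returns [""], so an unset ostree_repos yields one empty entry.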
@@ -144,127 +192,160 @@ func (c *chunkedDiffer) convertTarToZstdChunked(destDirectory string, payload *o
 }
 
 // GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
-// If it returns an error that implements IsErrFallbackToOrdinaryLayerDownload, the caller can
+// If it returns an error that matches ErrFallbackToOrdinaryLayerDownload, the caller can
 // retry the operation with a different method.
 func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
-	pullOptions := store.PullOptions()
+	pullOptions := parsePullOptions(store)
 
-	if !parseBooleanPullOption(pullOptions, "enable_partial_images", false) {
-		// If convertImages is set, the two options disagree whether fallback is permissible.
+	if !pullOptions.enablePartialImages {
+		// If pullOptions.convertImages is set, the two options disagree whether fallback is permissible.
 		// Right now, we enable it, but that’s not a promise; rather, such a configuration should ideally be rejected.
 		return nil, newErrFallbackToOrdinaryLayerDownload(errors.New("partial images are disabled"))
 	}
-	// convertImages also serves as a “must not fallback to non-partial pull” option (?!)
-	convertImages := parseBooleanPullOption(pullOptions, "convert_images", false)
+	// pullOptions.convertImages also serves as a “must not fallback to non-partial pull” option (?!)
 
 	graphDriver, err := store.GraphDriver()
 	if err != nil {
 		return nil, err
 	}
 	if _, partialSupported := graphDriver.(graphdriver.DriverWithDiffer); !partialSupported {
-		if convertImages {
+		if pullOptions.convertImages {
 			return nil, fmt.Errorf("graph driver %s does not support partial pull but convert_images requires that", graphDriver.String())
 		}
 		return nil, newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("graph driver %s does not support partial pull", graphDriver.String()))
 	}
 
-	differ, canFallback, err := getProperDiffer(store, blobDigest, blobSize, annotations, iss, pullOptions)
+	differ, err := getProperDiffer(store, blobDigest, blobSize, annotations, iss, pullOptions)
 	if err != nil {
-		if !canFallback {
+		var fallbackErr ErrFallbackToOrdinaryLayerDownload
+		if !errors.As(err, &fallbackErr) {
 			return nil, err
 		}
-		// If convert_images is enabled, always attempt to convert it instead of returning an error or falling back to a different method.
-		if convertImages {
-			logrus.Debugf("Created differ to convert blob %q", blobDigest)
-			return makeConvertFromRawDiffer(store, blobDigest, blobSize, iss, pullOptions)
+		if !pullOptions.convertImages {
+			return nil, err
 		}
-		return nil, newErrFallbackToOrdinaryLayerDownload(err)
+		var canConvertErr errFallbackCanConvert
+		if !errors.As(err, &canConvertErr) {
+			// We are supposed to use makeConvertFromRawDiffer, but that would not work.
+			// Fail, and make sure the error does _not_ match ErrFallbackToOrdinaryLayerDownload: use only the error text,
+			// discard all type information.
+			return nil, fmt.Errorf("neither a partial pull nor convert_images is possible: %s", err.Error())
+		}
+		logrus.Debugf("Created differ to convert blob %q", blobDigest)
+		return makeConvertFromRawDiffer(store, blobDigest, blobSize, iss, pullOptions)
 	}
 
 	return differ, nil
 }
 
+// errFallbackCanConvert is an error type _accompanying_ ErrFallbackToOrdinaryLayerDownload
+// within getProperDiffer, to mark that using makeConvertFromRawDiffer makes sense.
+// This is used to distinguish between cases where the environment does not support partial pulls
+// (e.g. a registry does not support range requests) and convert_images is still possible,
+// from cases where the image content is unacceptable for partial pulls (e.g. exceeds memory limits)
+// and convert_images would not help.
+type errFallbackCanConvert struct {
+	err error
+}
+
+func (e errFallbackCanConvert) Error() string {
+	return e.err.Error()
+}
+
+func (e errFallbackCanConvert) Unwrap() error {
+	return e.err
+}
+
 // getProperDiffer is an implementation detail of GetDiffer.
 // It returns a “proper” differ (not a convert_images one) if possible.
-// On error, the second parameter is true if a fallback to an alternative (either the makeConverToRaw differ, or a non-partial pull)
-// is permissible.
-func getProperDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, pullOptions map[string]string) (graphdriver.Differ, bool, error) {
-	zstdChunkedTOCDigestString, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey]
+// May return an error matching ErrFallbackToOrdinaryLayerDownload if a fallback to an alternative
+// (either makeConvertFromRawDiffer, or a non-partial pull) is permissible.
+func getProperDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, pullOptions pullOptions) (graphdriver.Differ, error) {
+	zstdChunkedTOCDigestString, hasZstdChunkedTOC := annotations[minimal.ManifestChecksumKey]
 	estargzTOCDigestString, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation]
 
 	switch {
 	case hasZstdChunkedTOC && hasEstargzTOC:
-		return nil, false, errors.New("both zstd:chunked and eStargz TOC found")
+		return nil, errors.New("both zstd:chunked and eStargz TOC found")
 
 	case hasZstdChunkedTOC:
 		zstdChunkedTOCDigest, err := digest.Parse(zstdChunkedTOCDigestString)
 		if err != nil {
-			return nil, false, err
+			return nil, err
 		}
 		differ, err := makeZstdChunkedDiffer(store, blobSize, zstdChunkedTOCDigest, annotations, iss, pullOptions)
 		if err != nil {
 			logrus.Debugf("Could not create zstd:chunked differ for blob %q: %v", blobDigest, err)
-			// If the error is a bad request to the server, then signal to the caller that it can try a different method.
-			var badRequestErr ErrBadRequest
-			return nil, errors.As(err, &badRequestErr), err
+			return nil, err
 		}
 		logrus.Debugf("Created zstd:chunked differ for blob %q", blobDigest)
-		return differ, false, nil
+		return differ, nil
 
 	case hasEstargzTOC:
 		estargzTOCDigest, err := digest.Parse(estargzTOCDigestString)
 		if err != nil {
-			return nil, false, err
+			return nil, err
 		}
 		differ, err := makeEstargzChunkedDiffer(store, blobSize, estargzTOCDigest, iss, pullOptions)
 		if err != nil {
 			logrus.Debugf("Could not create estargz differ for blob %q: %v", blobDigest, err)
-			// If the error is a bad request to the server, then signal to the caller that it can try a different method.
-			var badRequestErr ErrBadRequest
-			return nil, errors.As(err, &badRequestErr), err
+			return nil, err
 		}
 		logrus.Debugf("Created eStargz differ for blob %q", blobDigest)
-		return differ, false, nil
+		return differ, nil
 
 	default: // no TOC
-		convertImages := parseBooleanPullOption(pullOptions, "convert_images", false)
-		if !convertImages {
-			return nil, true, errors.New("no TOC found and convert_images is not configured")
+		message := "no TOC found"
+		if !pullOptions.convertImages {
+			message = "no TOC found and convert_images is not configured"
 		}
-		return nil, true, errors.New("no TOC found")
+		return nil, errFallbackCanConvert{
+			newErrFallbackToOrdinaryLayerDownload(errors.New(message)),
+		}
 	}
 }
 
-func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
+func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, iss ImageSourceSeekable, pullOptions pullOptions) (*chunkedDiffer, error) {
 	layersCache, err := getLayersCache(store)
 	if err != nil {
 		return nil, err
 	}
 
 	return &chunkedDiffer{
-		fsVerityDigests:     make(map[string]string),
-		blobDigest:          blobDigest,
-		blobSize:            blobSize,
-		uncompressedTarSize: -1, // Will be computed later
-		pullOptions:         pullOptions,
-		stream:              iss,
+		pullOptions: pullOptions,
+		stream:      iss,
+		blobDigest:  blobDigest,
+		blobSize:    blobSize,
+
 		convertToZstdChunked: true,
-		copyBuffer:           makeCopyBuffer(),
-		layersCache:          layersCache,
+
+		uncompressedTarSize: -1, // Will be computed later
+
+		layersCache:     layersCache,
+		copyBuffer:      makeCopyBuffer(),
+		fsVerityDigests: make(map[string]string),
 	}, nil
 }
 
-func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
+// makeZstdChunkedDiffer sets up a chunkedDiffer for a zstd:chunked layer.
+// It may return an error matching ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert.
+func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, pullOptions pullOptions) (*chunkedDiffer, error) {
 	manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, tocDigest, annotations)
-	if err != nil {
+	if err != nil { // May be ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert
 		return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
 	}
 
 	var uncompressedTarSize int64 = -1
 	if tarSplit != nil {
 		uncompressedTarSize, err = tarSizeFromTarSplit(tarSplit)
 		if err != nil {
 			return nil, fmt.Errorf("computing size from tar-split: %w", err)
 		}
+	} else if !pullOptions.insecureAllowUnpredictableImageContents { // With no tar-split, we can't compute the traditional UncompressedDigest.
+		return nil, errFallbackCanConvert{
+			newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("zstd:chunked layers without tar-split data don't support partial pulls with guaranteed consistency with non-partial pulls")),
+		}
 	}
 
 	layersCache, err := getLayersCache(store)
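For callers, the reworked GetDiffer error contract above can be consumed roughly like this (a sketch; fetchWholeLayer is a hypothetical stand-in for the caller's ordinary, non-partial download path):

    differ, err := chunked.GetDiffer(ctx, store, blobDigest, blobSize, annotations, iss)
    if err != nil {
        var fallback chunked.ErrFallbackToOrdinaryLayerDownload
        if errors.As(err, &fallback) {
            // A partial pull is not possible here; retry with a full layer download.
            return fetchWholeLayer(ctx, blobDigest) // hypothetical helper
        }
        return err // hard failure: neither a partial pull nor conversion can work
    }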
@ -273,25 +354,36 @@ func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest
|
|||
}
|
||||
|
||||
return &chunkedDiffer{
|
||||
fsVerityDigests: make(map[string]string),
|
||||
blobSize: blobSize,
|
||||
uncompressedTarSize: uncompressedTarSize,
|
||||
pullOptions: pullOptions,
|
||||
stream: iss,
|
||||
blobSize: blobSize,
|
||||
|
||||
fileType: fileTypeZstdChunked,
|
||||
|
||||
tocDigest: tocDigest,
|
||||
copyBuffer: makeCopyBuffer(),
|
||||
fileType: fileTypeZstdChunked,
|
||||
layersCache: layersCache,
|
||||
tocOffset: tocOffset,
|
||||
manifest: manifest,
|
||||
toc: toc,
|
||||
pullOptions: pullOptions,
|
||||
stream: iss,
|
||||
tarSplit: tarSplit,
|
||||
tocOffset: tocOffset,
|
||||
uncompressedTarSize: uncompressedTarSize,
|
||||
|
||||
layersCache: layersCache,
|
||||
copyBuffer: makeCopyBuffer(),
|
||||
fsVerityDigests: make(map[string]string),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
|
||||
// makeEstargzChunkedDiffer sets up a chunkedDiffer for an estargz layer.
|
||||
// It may return an error matching ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert.
|
||||
func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, iss ImageSourceSeekable, pullOptions pullOptions) (*chunkedDiffer, error) {
|
||||
if !pullOptions.insecureAllowUnpredictableImageContents { // With no tar-split, we can't compute the traditional UncompressedDigest.
|
||||
return nil, errFallbackCanConvert{
|
||||
newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("estargz layers don't support partial pulls with guaranteed consistency with non-partial pulls")),
|
||||
}
|
||||
}
|
||||
|
||||
manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, tocDigest)
|
||||
if err != nil {
|
||||
if err != nil { // May be ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert
|
||||
return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
|
||||
}
|
||||
layersCache, err := getLayersCache(store)
|
||||
|
|
@ -300,17 +392,20 @@ func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest dig
|
|||
}
|
||||
|
||||
return &chunkedDiffer{
|
||||
fsVerityDigests: make(map[string]string),
|
||||
blobSize: blobSize,
|
||||
uncompressedTarSize: -1, // We would have to read and decompress the whole layer
|
||||
pullOptions: pullOptions,
|
||||
stream: iss,
|
||||
blobSize: blobSize,
|
||||
|
||||
fileType: fileTypeEstargz,
|
||||
|
||||
tocDigest: tocDigest,
|
||||
copyBuffer: makeCopyBuffer(),
|
||||
fileType: fileTypeEstargz,
|
||||
layersCache: layersCache,
|
||||
manifest: manifest,
|
||||
pullOptions: pullOptions,
|
||||
stream: iss,
|
||||
tocOffset: tocOffset,
|
||||
manifest: manifest,
|
||||
uncompressedTarSize: -1, // We would have to read and decompress the whole layer
|
||||
|
||||
layersCache: layersCache,
|
||||
copyBuffer: makeCopyBuffer(),
|
||||
fsVerityDigests: make(map[string]string),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
@ -391,7 +486,7 @@ func canDedupFileWithHardLink(file *fileMetadata, fd int, s os.FileInfo) bool {
|
|||
}
|
||||
// fill only the attributes used by canDedupMetadataWithHardLink.
|
||||
otherFile := fileMetadata{
|
||||
FileMetadata: internal.FileMetadata{
|
||||
FileMetadata: minimal.FileMetadata{
|
||||
UID: int(st.Uid),
|
||||
GID: int(st.Gid),
|
||||
Mode: int64(st.Mode),
|
||||
|
|
@ -735,7 +830,12 @@ func (d *destinationFile) Close() (Err error) {
|
|||
}
|
||||
}
|
||||
|
||||
return setFileAttrs(d.dirfd, d.file, os.FileMode(d.metadata.Mode), d.metadata, d.options, false)
|
||||
mode := os.FileMode(d.metadata.Mode)
|
||||
if d.options.ForceMask != nil {
|
||||
mode = *d.options.ForceMask
|
||||
}
|
||||
|
||||
return setFileAttrs(d.dirfd, d.file, mode, d.metadata, d.options, false)
|
||||
}
|
||||
|
||||
func closeDestinationFiles(files chan *destinationFile, errors chan error) {
|
||||
|
|
@ -1038,13 +1138,6 @@ type hardLinkToCreate struct {
|
|||
metadata *fileMetadata
|
||||
}
|
||||
|
||||
func parseBooleanPullOption(pullOptions map[string]string, name string, def bool) bool {
|
||||
if value, ok := pullOptions[name]; ok {
|
||||
return strings.ToLower(value) == "true"
|
||||
}
|
||||
return def
|
||||
}
|
||||
|
||||
type findAndCopyFileOptions struct {
|
||||
useHardLinks bool
|
||||
ostreeRepos []string
|
||||
|
|
@ -1111,10 +1204,13 @@ func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *fileMetadata, copyOptions
|
|||
return false, nil
|
||||
}
|
||||
|
||||
func makeEntriesFlat(mergedEntries []fileMetadata) ([]fileMetadata, error) {
|
||||
// makeEntriesFlat collects regular-file entries from mergedEntries, and produces a new list
|
||||
// where each file content is only represented once, and uses composefs.RegularFilePathForValidatedDigest for its name.
|
||||
// If flatPathNameMap is not nil, this function writes to it a mapping from filepath.Clean(originalName) to the composefs name.
|
||||
func makeEntriesFlat(mergedEntries []fileMetadata, flatPathNameMap map[string]string) ([]fileMetadata, error) {
|
||||
var new []fileMetadata
|
||||
|
||||
hashes := make(map[string]string)
|
||||
knownFlatPaths := make(map[string]struct{})
|
||||
for i := range mergedEntries {
|
||||
if mergedEntries[i].Type != TypeReg {
|
||||
continue
|
||||
|
|
@@ -1124,16 +1220,22 @@ func makeEntriesFlat(mergedEntries []fileMetadata) ([]fileMetadata, error) {
		}
		digest, err := digest.Parse(mergedEntries[i].Digest)
		if err != nil {
-			return nil, err
+			return nil, fmt.Errorf("invalid digest %q for %q: %w", mergedEntries[i].Digest, mergedEntries[i].Name, err)
		}
+		path, err := path.RegularFilePathForValidatedDigest(digest)
+		if err != nil {
+			return nil, fmt.Errorf("determining physical file path for %q: %w", mergedEntries[i].Name, err)
+		}
+		if flatPathNameMap != nil {
+			flatPathNameMap[filepath.Clean(mergedEntries[i].Name)] = path
+		}
-		d := digest.Encoded()

-		if hashes[d] != "" {
+		if _, known := knownFlatPaths[path]; known {
			continue
		}
-		hashes[d] = d
+		knownFlatPaths[path] = struct{}{}

-		mergedEntries[i].Name = fmt.Sprintf("%s/%s", d[0:2], d[2:])
+		mergedEntries[i].Name = path
		mergedEntries[i].skipSetAttrs = true

		new = append(new, mergedEntries[i])
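For orientation: the flat format stores each unique regular file once, under a path derived from its content digest, with the first two hex characters used as a directory name. A standalone sketch of that derivation, with a hand-rolled helper standing in for the vendored composefs path package:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// flatPathForDigest mimics the "<first two hex chars>/<rest>" layout that the
// flat differ output uses for deduplicated regular files. This is a simplified
// stand-in for composefs.RegularFilePathForValidatedDigest.
func flatPathForDigest(encoded string) (string, error) {
	if len(encoded) < 3 {
		return "", fmt.Errorf("invalid digest %q", encoded)
	}
	return encoded[0:2] + "/" + encoded[2:], nil
}

func main() {
	sum := sha256.Sum256([]byte("hello world"))
	encoded := hex.EncodeToString(sum[:])
	p, err := flatPathForDigest(encoded)
	if err != nil {
		panic(err)
	}
	// prints b9/4d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9
	fmt.Println(p)
}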
@@ -1141,44 +1243,140 @@ func makeEntriesFlat(mergedEntries []fileMetadata) ([]fileMetadata, error) {
	return new, nil
}

-func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, error) {
-	var payload io.ReadCloser
-	var streams chan io.ReadCloser
-	var errs chan error
-	var err error
+type streamOrErr struct {
+	stream io.ReadCloser
+	err    error
+}

-	chunksToRequest := []ImageSourceChunk{
-		{
-			Offset: 0,
-			Length: uint64(c.blobSize),
-		},
-	}
+// ensureAllBlobsDone ensures that all blobs are closed and returns the first error encountered.
+func ensureAllBlobsDone(streamsOrErrors chan streamOrErr) (retErr error) {
+	for soe := range streamsOrErrors {
+		if soe.stream != nil {
+			_ = soe.stream.Close()
+		} else if retErr == nil {
+			retErr = soe.err
+		}
+	}
+	return
+}

-	streams, errs, err = c.stream.GetBlobAt(chunksToRequest)
+// getBlobAtConverterGoroutine reads from the streams and errs channels, then sends
+// either a stream or an error to the stream channel. The streams channel is closed when
+// there are no more streams and errors to read.
+// It ensures that no more than maxStreams streams are returned, and that every item from the
+// streams and errs channels is consumed.
+func getBlobAtConverterGoroutine(stream chan streamOrErr, streams chan io.ReadCloser, errs chan error, maxStreams int) {
+	tooManyStreams := false
+	streamsSoFar := 0
+
+	err := errors.New("Unexpected error in getBlobAtGoroutine")
+
+	defer func() {
+		if err != nil {
+			stream <- streamOrErr{err: err}
+		}
+		close(stream)
+	}()
+
+loop:
+	for {
+		select {
+		case p, ok := <-streams:
+			if !ok {
+				streams = nil
+				break loop
+			}
+			if streamsSoFar >= maxStreams {
+				tooManyStreams = true
+				_ = p.Close()
+				continue
+			}
+			streamsSoFar++
+			stream <- streamOrErr{stream: p}
+		case err, ok := <-errs:
+			if !ok {
+				errs = nil
+				break loop
+			}
+			stream <- streamOrErr{err: err}
+		}
+	}
+	if streams != nil {
+		for p := range streams {
+			if streamsSoFar >= maxStreams {
+				tooManyStreams = true
+				_ = p.Close()
+				continue
+			}
+			streamsSoFar++
+			stream <- streamOrErr{stream: p}
+		}
+	}
+	if errs != nil {
+		for err := range errs {
+			stream <- streamOrErr{err: err}
+		}
+	}
+	if tooManyStreams {
+		stream <- streamOrErr{err: fmt.Errorf("too many streams returned, got more than %d", maxStreams)}
+	}
+	err = nil
+}
+
+// getBlobAt provides a much more convenient way to consume data returned by ImageSourceSeekable.GetBlobAt.
+// GetBlobAt returns two channels, forcing a caller to `select` on both of them — and in Go, reading a closed channel
+// always succeeds in select.
+// Instead, getBlobAt provides a single channel with all events, which can be consumed conveniently using `range`.
+func getBlobAt(is ImageSourceSeekable, chunksToRequest ...ImageSourceChunk) (chan streamOrErr, error) {
+	streams, errs, err := is.GetBlobAt(chunksToRequest)
+	if err != nil {
+		return nil, err
+	}
+	stream := make(chan streamOrErr)
+	go getBlobAtConverterGoroutine(stream, streams, errs, len(chunksToRequest))
+	return stream, nil
+}
+func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, error) {
+	streamsOrErrors, err := getBlobAt(c.stream, ImageSourceChunk{Offset: 0, Length: uint64(c.blobSize)})
	if err != nil {
		return "", err
	}
-	select {
-	case p := <-streams:
-		payload = p
-	case err := <-errs:
-		return "", err
-	}
-	if payload == nil {
-		return "", errors.New("invalid stream returned")
-	}
-	defer payload.Close()

	originalRawDigester := digest.Canonical.Digester()
+	for soe := range streamsOrErrors {
+		if soe.stream != nil {
+			r := io.TeeReader(soe.stream, originalRawDigester.Hash())
-
-	r := io.TeeReader(payload, originalRawDigester.Hash())
-
-	// copy the entire tarball and compute its digest
-	_, err = io.CopyBuffer(destination, r, c.copyBuffer)
+
+			// copy the entire tarball and compute its digest
+			_, err = io.CopyBuffer(destination, r, c.copyBuffer)
+			_ = soe.stream.Close()
+		}
+		if soe.err != nil && err == nil {
+			err = soe.err
+		}
+	}
	return originalRawDigester.Digest(), err
}
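The two-channel GetBlobAt API is easy to misuse: a select on a closed channel always succeeds, so a naive consumer can keep reading zero values forever. The converter goroutine above funnels both channels into one. A self-contained sketch of the same funneling idea, with a toy producer in place of ImageSourceSeekable and without the vendored maxStreams accounting (all names here are illustrative):

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

type event struct {
	stream io.ReadCloser
	err    error
}

// merge funnels a (streams, errs) channel pair into a single event channel,
// so callers can use one range loop instead of a two-channel select.
func merge(streams chan io.ReadCloser, errs chan error) chan event {
	out := make(chan event)
	go func() {
		defer close(out)
		for streams != nil || errs != nil {
			select {
			case s, ok := <-streams:
				if !ok {
					streams = nil // stop selecting on the closed channel
					continue
				}
				out <- event{stream: s}
			case e, ok := <-errs:
				if !ok {
					errs = nil
					continue
				}
				out <- event{err: e}
			}
		}
	}()
	return out
}

func main() {
	streams := make(chan io.ReadCloser, 2)
	errs := make(chan error, 1)
	streams <- io.NopCloser(strings.NewReader("chunk-1"))
	errs <- errors.New("one chunk failed")
	close(streams)
	close(errs)

	for ev := range merge(streams, errs) {
		if ev.err != nil {
			fmt.Println("error:", ev.err)
			continue
		}
		data, _ := io.ReadAll(ev.stream)
		ev.stream.Close()
		fmt.Println("data:", string(data))
	}
}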
+func typeToOsMode(typ string) (os.FileMode, error) {
+	switch typ {
+	case TypeReg, TypeLink:
+		return 0, nil
+	case TypeSymlink:
+		return os.ModeSymlink, nil
+	case TypeDir:
+		return os.ModeDir, nil
+	case TypeChar:
+		return os.ModeDevice | os.ModeCharDevice, nil
+	case TypeBlock:
+		return os.ModeDevice, nil
+	case TypeFifo:
+		return os.ModeNamedPipe, nil
+	}
+	return 0, fmt.Errorf("unknown file type %q", typ)
+}

func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, differOpts *graphdriver.DifferOptions) (graphdriver.DriverWithDifferOutput, error) {
	defer c.layersCache.release()
	defer func() {
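typeToOsMode maps the TOC's string file types onto os.FileMode type bits so they can be folded into the override xattr below. A standalone sketch of the same mapping keyed on archive/tar type flags instead of the vendored string constants, using only the standard library:

package main

import (
	"archive/tar"
	"fmt"
	"os"
)

// tarTypeToOsMode mirrors the idea behind typeToOsMode, but keyed on
// archive/tar type flags rather than the chunked TOC's string types.
func tarTypeToOsMode(typeflag byte) (os.FileMode, error) {
	switch typeflag {
	case tar.TypeReg, tar.TypeLink:
		return 0, nil // regular files and hard links carry no type bits
	case tar.TypeSymlink:
		return os.ModeSymlink, nil
	case tar.TypeDir:
		return os.ModeDir, nil
	case tar.TypeChar:
		return os.ModeDevice | os.ModeCharDevice, nil
	case tar.TypeBlock:
		return os.ModeDevice, nil
	case tar.TypeFifo:
		return os.ModeNamedPipe, nil
	}
	return 0, fmt.Errorf("unknown tar type %q", typeflag)
}

func main() {
	m, _ := tarTypeToOsMode(tar.TypeChar)
	// Combine type bits with permission bits, as the override xattr does.
	fmt.Println(m | 0o644)
}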
@@ -1298,13 +1496,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
		Size: c.uncompressedTarSize,
	}

-	// When the hard links deduplication is used, file attributes are ignored because setting them
-	// modifies the source file as well.
-	useHardLinks := parseBooleanPullOption(c.pullOptions, "use_hard_links", false)
-
-	// List of OSTree repositories to use for deduplication
-	ostreeRepos := strings.Split(c.pullOptions["ostree_repos"], ":")
-
	whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)

	var missingParts []missingPart
@@ -1325,7 +1516,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
		if err == nil {
			value := idtools.Stat{
				IDs:  idtools.IDPair{UID: int(uid), GID: int(gid)},
-				Mode: os.FileMode(mode),
+				Mode: os.ModeDir | os.FileMode(mode),
			}
			if err := idtools.SetContainersOverrideXattr(dest, value); err != nil {
				return output, err
@@ -1337,16 +1528,20 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
	if err != nil {
		return output, &fs.PathError{Op: "open", Path: dest, Err: err}
	}
-	defer unix.Close(dirfd)
+	dirFile := os.NewFile(uintptr(dirfd), dest)
+	defer dirFile.Close()

+	var flatPathNameMap map[string]string // = nil
	if differOpts != nil && differOpts.Format == graphdriver.DifferOutputFormatFlat {
-		mergedEntries, err = makeEntriesFlat(mergedEntries)
+		flatPathNameMap = map[string]string{}
+		mergedEntries, err = makeEntriesFlat(mergedEntries, flatPathNameMap)
		if err != nil {
			return output, err
		}
		createdDirs := make(map[string]struct{})
		for _, e := range mergedEntries {
-			d := e.Name[0:2]
+			// This hard-codes an assumption that RegularFilePathForValidatedDigest creates paths with exactly one directory component.
+			d := filepath.Dir(e.Name)
			if _, found := createdDirs[d]; !found {
				if err := unix.Mkdirat(dirfd, d, 0o755); err != nil {
					return output, &fs.PathError{Op: "mkdirat", Path: d, Err: err}
@@ -1363,8 +1558,10 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
	missingPartsSize, totalChunksSize := int64(0), int64(0)

	copyOptions := findAndCopyFileOptions{
-		useHardLinks: useHardLinks,
-		ostreeRepos:  ostreeRepos,
+		// When the hard links deduplication is used, file attributes are ignored because setting them
+		// modifies the source file as well.
+		useHardLinks: c.pullOptions.useHardLinks,
+		ostreeRepos:  c.pullOptions.ostreeRepos, // List of OSTree repositories to use for deduplication
		options:      options,
	}

@@ -1408,13 +1605,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
	filesToWaitFor := 0
	for i := range mergedEntries {
		r := &mergedEntries[i]
-		if options.ForceMask != nil {
-			value := idtools.FormatContainersOverrideXattr(r.UID, r.GID, int(r.Mode))
-			if r.Xattrs == nil {
-				r.Xattrs = make(map[string]string)
-			}
-			r.Xattrs[idtools.ContainersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value))
-		}

		mode := os.FileMode(r.Mode)
@@ -1423,10 +1613,37 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
			return output, err
		}

-		r.Name = filepath.Clean(r.Name)
+		size := r.Size
+
+		// update also the implementation of ForceMask in pkg/archive
+		if options.ForceMask != nil {
+			mode = *options.ForceMask
+
+			// special files will be stored as regular files
+			if t != tar.TypeDir && t != tar.TypeSymlink && t != tar.TypeReg && t != tar.TypeLink {
+				t = tar.TypeReg
+				size = 0
+			}
+
+			// if the entry will be stored as a directory or a regular file, store in a xattr the original
+			// owner and mode.
+			if t == tar.TypeDir || t == tar.TypeReg {
+				typeMode, err := typeToOsMode(r.Type)
+				if err != nil {
+					return output, err
+				}
+				value := idtools.FormatContainersOverrideXattrDevice(r.UID, r.GID, typeMode|fs.FileMode(r.Mode), int(r.Devmajor), int(r.Devminor))
+				if r.Xattrs == nil {
+					r.Xattrs = make(map[string]string)
+				}
+				r.Xattrs[idtools.ContainersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value))
+			}
+		}
+
+		r.Name = path.CleanAbsPath(r.Name)
		// do not modify the value of symlinks
		if r.Linkname != "" && t != tar.TypeSymlink {
-			r.Linkname = filepath.Clean(r.Linkname)
+			r.Linkname = path.CleanAbsPath(r.Linkname)
		}

		if whiteoutConverter != nil {
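When ForceMask flattens on-disk modes, the original owner, type, and mode survive in the containers override xattr so tools running inside the user namespace can reconstruct them. A minimal sketch of that encoding idea; the helper below is a hypothetical stand-in, it does not reproduce the exact format emitted by idtools.FormatContainersOverrideXattrDevice:

package main

import (
	"encoding/base64"
	"fmt"
	"io/fs"
)

// formatOverrideStat is a hypothetical stand-in for the idtools helper: it
// packs the original ownership and permission bits into a small string value.
func formatOverrideStat(uid, gid int, mode fs.FileMode) string {
	return fmt.Sprintf("%d:%d:%04o", uid, gid, mode.Perm())
}

func main() {
	value := formatOverrideStat(1000, 1000, 0o644)
	// The differ stores the value base64-encoded in the entry's Xattrs map.
	encoded := base64.StdEncoding.EncodeToString([]byte(value))
	fmt.Println(value, "->", encoded)
}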
@@ -1434,8 +1651,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
			Typeflag: t,
			Name:     r.Name,
			Linkname: r.Linkname,
-			Size:     r.Size,
-			Mode:     r.Mode,
+			Size:     size,
+			Mode:     int64(mode),
			Uid:      r.UID,
			Gid:      r.GID,
		}
@@ -1454,7 +1671,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
		switch t {
		case tar.TypeReg:
			// Create directly empty files.
-			if r.Size == 0 {
+			if size == 0 {
				// Used to have a scope for cleanup.
				createEmptyFile := func() error {
					file, err := openFileUnderRoot(dirfd, r.Name, newFileFlags, 0)
@@ -1474,7 +1691,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
			}

		case tar.TypeDir:
-			if r.Name == "" || r.Name == "." {
+			if r.Name == "/" {
				output.RootDirMode = &mode
			}
			if err := safeMkdir(dirfd, mode, r.Name, r, options); err != nil {
@@ -1509,7 +1726,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
			return output, fmt.Errorf("invalid type %q", t)
		}

-		totalChunksSize += r.Size
+		totalChunksSize += size

		if t == tar.TypeReg {
			index := i
@@ -1572,7 +1789,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
		}

		switch chunk.ChunkType {
-		case internal.ChunkTypeData:
+		case minimal.ChunkTypeData:
			root, path, offset, err := c.layersCache.findChunkInOtherLayers(chunk)
			if err != nil {
				return output, err
@@ -1585,7 +1802,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
					Offset: offset,
				}
			}
-		case internal.ChunkTypeZeros:
+		case minimal.ChunkTypeZeros:
			missingPartsSize -= size
			mp.Hole = true
			// Mark all chunks belonging to the missing part as holes
@@ -1609,6 +1826,39 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
		}
	}

+	// To ensure that consumers of the layer who decompress and read the full tar stream,
+	// and consumers who consume the data via the TOC, both see exactly the same data and metadata,
+	// compute the UncompressedDigest.
+	// c/image will then ensure that this value matches the value in the image config’s RootFS.DiffID, i.e. the image must commit
+	// to one UncompressedDigest value for each layer, and that will avoid the ambiguity (in consumers who validate layers against DiffID).
+	//
+	// c/image also uses the UncompressedDigest as a layer ID, allowing it to use the traditional layer and image IDs.
+	//
+	// This is, sadly, quite costly: Up to now we might have had to write, and digest, only the new/modified files.
+	// Here we need to read, and digest, the whole layer, even if almost all of it was already present locally previously.
+	// So, really specialized (EXTREMELY RARE) users can opt out of this check using insecureAllowUnpredictableImageContents.
+	//
+	// Layers without a tar-split (estargz layers and old zstd:chunked layers) can't produce an UncompressedDigest that
+	// matches the expected RootFS.DiffID; we always fall back to full pulls, again unless the user opts out
+	// via insecureAllowUnpredictableImageContents.
+	if output.UncompressedDigest == "" {
+		switch {
+		case c.pullOptions.insecureAllowUnpredictableImageContents:
+			// Oh well. Skip the costly digest computation.
+		case output.TarSplit != nil:
+			metadata := tsStorage.NewJSONUnpacker(bytes.NewReader(output.TarSplit))
+			fg := newStagedFileGetter(dirFile, flatPathNameMap)
+			digester := digest.Canonical.Digester()
+			if err := asm.WriteOutputTarStream(fg, metadata, digester.Hash()); err != nil {
+				return output, fmt.Errorf("digesting staged uncompressed stream: %w", err)
+			}
+			output.UncompressedDigest = digester.Digest()
+		default:
+			// We are checking for this earlier in GetDiffer, so this should not be reachable.
+			return output, fmt.Errorf(`internal error: layer's UncompressedDigest is unknown and "insecure_allow_unpredictable_image_contents" is not set`)
+		}
+	}
+
	if totalChunksSize > 0 {
		logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize))
	}
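The staged-layer digest is computed by reassembling the uncompressed tar stream from the tar-split metadata and hashing it, without materializing the tar on disk. A reduced sketch of the hashing side only, using the standard library instead of the tar-split asm package; the "sha256:<hex>" output shape matches what go-digest's canonical digester produces:

package main

import (
	"archive/tar"
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
)

// digestTarStream hashes an uncompressed tar stream as it flows through,
// the way the differ hashes the reassembled layer to obtain a DiffID-style
// "sha256:<hex>" value.
func digestTarStream(stream io.Reader) (string, error) {
	h := sha256.New()
	if _, err := io.Copy(h, stream); err != nil {
		return "", err
	}
	return fmt.Sprintf("sha256:%x", h.Sum(nil)), nil
}

func main() {
	// Build a tiny in-memory tar archive to stand in for the reassembled layer.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	_ = tw.WriteHeader(&tar.Header{Name: "etc/hostname", Mode: 0o644, Size: 5})
	_, _ = tw.Write([]byte("demo\n"))
	_ = tw.Close()

	d, err := digestTarStream(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(d)
}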
@@ -1618,7 +1868,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
	return output, nil
}

-func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool {
+func mustSkipFile(fileType compressedFileType, e minimal.FileMetadata) bool {
	// ignore the metadata files for the estargz format.
	if fileType != fileTypeEstargz {
		return false
@@ -1631,7 +1881,7 @@ func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool {
	return false
}

-func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]fileMetadata, error) {
+func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []minimal.FileMetadata) ([]fileMetadata, error) {
	countNextChunks := func(start int) int {
		count := 0
		for _, e := range entries[start:] {
@@ -1668,7 +1918,7 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
		if e.Type == TypeReg {
			nChunks := countNextChunks(i + 1)

-			e.chunks = make([]*internal.FileMetadata, nChunks+1)
+			e.chunks = make([]*minimal.FileMetadata, nChunks+1)
			for j := 0; j <= nChunks; j++ {
				// we need a copy here, otherwise we override the
				// .Size later
@@ -1703,7 +1953,7 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i

// validateChunkChecksum checks if the file at $root/$path[offset:chunk.ChunkSize] has the
// same digest as chunk.ChunkDigest
-func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool {
+func validateChunkChecksum(chunk *minimal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool {
	parentDirfd, err := unix.Open(root, unix.O_PATH|unix.O_CLOEXEC, 0)
	if err != nil {
		return false
@@ -1734,3 +1984,33 @@ func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offs

	return digester.Digest() == digest
}
+
+// newStagedFileGetter returns an object usable as storage.FileGetter for rootDir.
+// if flatPathNameMap is not nil, it must be used to map logical file names into the backing file paths.
+func newStagedFileGetter(rootDir *os.File, flatPathNameMap map[string]string) *stagedFileGetter {
+	return &stagedFileGetter{
+		rootDir:         rootDir,
+		flatPathNameMap: flatPathNameMap,
+	}
+}
+
+type stagedFileGetter struct {
+	rootDir         *os.File
+	flatPathNameMap map[string]string // nil, or a map from filepath.Clean()ed tar file names to expected on-filesystem names
+}
+
+func (fg *stagedFileGetter) Get(filename string) (io.ReadCloser, error) {
+	if fg.flatPathNameMap != nil {
+		path, ok := fg.flatPathNameMap[filepath.Clean(filename)]
+		if !ok {
+			return nil, fmt.Errorf("no path mapping exists for tar entry %q", filename)
+		}
+		filename = path
+	}
+	pathFD, err := securejoin.OpenatInRoot(fg.rootDir, filename)
+	if err != nil {
+		return nil, err
+	}
+	defer pathFD.Close()
+	return securejoin.Reopen(pathFD, unix.O_RDONLY)
+}
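The file getter lets the tar-split reassembler pull file contents back out of the staged directory by logical name. A toy version of the same interface using plain os calls, without the symlink-safe securejoin opening the vendored code relies on (all names here are illustrative):

package main

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// fileGetter mirrors the shape of tar-split's storage.FileGetter interface:
// anything with Get(name) (io.ReadCloser, error).
type fileGetter interface {
	Get(filename string) (io.ReadCloser, error)
}

// dirFileGetter serves files from rootDir, optionally translating logical tar
// names to on-disk (e.g. flat, digest-derived) names first. Unlike the
// vendored stagedFileGetter, it does not defend against symlink escapes.
type dirFileGetter struct {
	rootDir string
	nameMap map[string]string // nil, or logical name -> backing path
}

func (fg *dirFileGetter) Get(filename string) (io.ReadCloser, error) {
	if fg.nameMap != nil {
		backing, ok := fg.nameMap[filepath.Clean(filename)]
		if !ok {
			return nil, fmt.Errorf("no path mapping exists for tar entry %q", filename)
		}
		filename = backing
	}
	return os.Open(filepath.Join(fg.rootDir, filename))
}

func main() {
	dir, _ := os.MkdirTemp("", "getter")
	defer os.RemoveAll(dir)
	_ = os.MkdirAll(filepath.Join(dir, "b9"), 0o755)
	_ = os.WriteFile(filepath.Join(dir, "b9", "4d27"), []byte("payload"), 0o644)

	var fg fileGetter = &dirFileGetter{
		rootDir: dir,
		nameMap: map[string]string{"etc/hostname": "b9/4d27"},
	}
	rc, err := fg.Get("etc/hostname")
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	data, _ := io.ReadAll(rc)
	fmt.Println(string(data)) // payload
}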
2
vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
generated
vendored
@@ -13,5 +13,5 @@ import (

// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
-	return nil, errors.New("format not supported on this system")
+	return nil, newErrFallbackToOrdinaryLayerDownload(errors.New("format not supported on this system"))
}
4
vendor/github.com/containers/storage/pkg/chunked/toc/toc.go
generated
vendored
@@ -3,7 +3,7 @@ package toc
import (
	"errors"

-	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
	digest "github.com/opencontainers/go-digest"
)

@@ -19,7 +19,7 @@ const tocJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
// This is an experimental feature and may be changed/removed in the future.
func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) {
	d1, ok1 := annotations[tocJSONDigestAnnotation]
-	d2, ok2 := annotations[internal.ManifestChecksumKey]
+	d2, ok2 := annotations[minimal.ManifestChecksumKey]
	switch {
	case ok1 && ok2:
		return nil, errors.New("both zstd:chunked and eStargz TOC found")
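GetTOCDigest disambiguates between the eStargz and zstd:chunked TOC annotations and rejects blobs carrying both. A standalone sketch of the same decision table over an annotations map; the zstd:chunked key below is an assumption standing in for minimal.ManifestChecksumKey, and digest handling is simplified to plain strings:

package main

import (
	"errors"
	"fmt"
)

const (
	estargzTOCKey = "containerd.io/snapshot/stargz/toc.digest"
	// zstdChunkedTOCKey is a hypothetical stand-in for minimal.ManifestChecksumKey.
	zstdChunkedTOCKey = "io.github.containerd.zstd-chunked.manifest-checksum"
)

// tocDigest mirrors GetTOCDigest's switch: exactly one of the two
// annotations may be present; neither means "no TOC, not a chunked layer".
func tocDigest(annotations map[string]string) (string, error) {
	d1, ok1 := annotations[estargzTOCKey]
	d2, ok2 := annotations[zstdChunkedTOCKey]
	switch {
	case ok1 && ok2:
		return "", errors.New("both zstd:chunked and eStargz TOC found")
	case ok1:
		return d1, nil
	case ok2:
		return d2, nil
	}
	return "", nil // no TOC annotation at all
}

func main() {
	d, err := tocDigest(map[string]string{
		zstdChunkedTOCKey: "sha256:example-digest",
	})
	fmt.Println(d, err)
}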