go.mod: update to images@v0.117.0
This commit updates to images v0.117.0 so that the cross-distro.sh test works again: images removed fedora-39.json on main, but the test uses the previous images release, which still includes fedora-39, so there is a mismatch. (We should look into whether there is a way to get github.com/osbuild/images@latest instead of main in the cross-arch test.) It also updates all the vendored code pulled in via the new images release (which is ginormous). This update requires bumping the Go version to 1.22.8.
This commit is contained in:
parent 886ddc0bcc
commit 409b4f6048

584 changed files with 60776 additions and 50181 deletions
24 vendor/github.com/containers/image/v5/copy/single.go (generated, vendored)

@@ -109,7 +109,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
 		}
 	}
 
-	if err := checkImageDestinationForCurrentRuntime(ctx, c.options.DestinationCtx, src, c.dest); err != nil {
+	if err := prepareImageConfigForDest(ctx, c.options.DestinationCtx, src, c.dest); err != nil {
 		return copySingleImageResult{}, err
 	}
 
@@ -316,12 +316,15 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
 	return res, nil
 }
 
-// checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary.
-func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error {
+// prepareImageConfigForDest enforces dest.MustMatchRuntimeOS and handles dest.NoteOriginalOCIConfig, if necessary.
+func prepareImageConfigForDest(ctx context.Context, sys *types.SystemContext, src types.Image, dest private.ImageDestination) error {
+	ociConfig, configErr := src.OCIConfig(ctx)
+	// Do not fail on configErr here, this might be an artifact
+	// and maybe nothing needs this to be a container image and to process the config.
 	if dest.MustMatchRuntimeOS() {
-		c, err := src.OCIConfig(ctx)
-		if err != nil {
-			return fmt.Errorf("parsing image configuration: %w", err)
+		if configErr != nil {
+			return fmt.Errorf("parsing image configuration: %w", configErr)
 		}
 		wantedPlatforms := platform.WantedPlatforms(sys)
 
@@ -331,7 +334,7 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
 			// For a transitional period, this might trigger warnings because the Variant
 			// field was added to OCI config only recently. If this turns out to be too noisy,
 			// revert this check to only look for (OS, Architecture).
-			if platform.MatchesPlatform(c.Platform, wantedPlatform) {
+			if platform.MatchesPlatform(ociConfig.Platform, wantedPlatform) {
 				match = true
 				break
 			}
@@ -339,9 +342,14 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
 		}
 		if !match {
 			logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q+%q, expecting one of %q",
-				c.OS, c.Architecture, c.Variant, strings.Join(options.list, ", "))
+				ociConfig.OS, ociConfig.Architecture, ociConfig.Variant, strings.Join(options.list, ", "))
 		}
 	}
+
+	if err := dest.NoteOriginalOCIConfig(ociConfig, configErr); err != nil {
+		return err
+	}
+
 	return nil
 }
1 vendor/github.com/containers/image/v5/directory/directory_dest.go (generated, vendored)

@@ -29,6 +29,7 @@ var ErrNotContainerImageDir = errors.New("not a containers image directory, don'
 type dirImageDestination struct {
 	impl.Compat
 	impl.PropertyMethodsInitialize
+	stubs.IgnoresOriginalOCIConfig
 	stubs.NoPutBlobPartialInitialize
 	stubs.AlwaysSupportsSignatures
12 vendor/github.com/containers/image/v5/docker/daemon/client.go (generated, vendored)

@@ -3,6 +3,7 @@ package daemon
 import (
 	"net/http"
+	"path/filepath"
 	"time"
 
 	"github.com/containers/image/v5/types"
 	dockerclient "github.com/docker/docker/client"
@@ -47,6 +48,7 @@ func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) {
 	}
 	switch serverURL.Scheme {
 	case "unix": // Nothing
+	case "npipe": // Nothing
 	case "http":
 		hc := httpConfig()
 		opts = append(opts, dockerclient.WithHTTPClient(hc))
@@ -82,6 +84,11 @@ func tlsConfig(sys *types.SystemContext) (*http.Client, error) {
 		Transport: &http.Transport{
 			Proxy:           http.ProxyFromEnvironment,
 			TLSClientConfig: tlsc,
+			// In general we want to follow docker/daemon/client.defaultHTTPClient , as long as it doesn’t affect compatibility.
+			// These idle connection limits really only apply to long-running clients, which is not our case here;
+			// we include the same values purely for symmetry.
+			MaxIdleConns:    6,
+			IdleConnTimeout: 30 * time.Second,
 		},
 		CheckRedirect: dockerclient.CheckRedirect,
 	}, nil
@@ -92,6 +99,11 @@ func httpConfig() *http.Client {
 		Transport: &http.Transport{
 			Proxy:           http.ProxyFromEnvironment,
 			TLSClientConfig: nil,
+			// In general we want to follow docker/daemon/client.defaultHTTPClient , as long as it doesn’t affect compatibility.
+			// These idle connection limits really only apply to long-running clients, which is not our case here;
+			// we include the same values purely for symmetry.
+			MaxIdleConns:    6,
+			IdleConnTimeout: 30 * time.Second,
 		},
 		CheckRedirect: dockerclient.CheckRedirect,
 	}
8 vendor/github.com/containers/image/v5/docker/distribution_error.go (generated, vendored)

@@ -24,7 +24,6 @@ import (
 	"slices"
 
 	"github.com/docker/distribution/registry/api/errcode"
-	dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge"
 )
 
 // errNoErrorsInBody is returned when an HTTP response body parses to an empty
@@ -114,10 +113,11 @@ func mergeErrors(err1, err2 error) error {
 // UnexpectedHTTPStatusError returned for response code outside of expected
 // range.
 func handleErrorResponse(resp *http.Response) error {
-	if resp.StatusCode >= 400 && resp.StatusCode < 500 {
+	switch {
+	case resp.StatusCode == http.StatusUnauthorized:
 		// Check for OAuth errors within the `WWW-Authenticate` header first
 		// See https://tools.ietf.org/html/rfc6750#section-3
-		for _, c := range dockerChallenge.ResponseChallenges(resp) {
+		for _, c := range parseAuthHeader(resp.Header) {
 			if c.Scheme == "bearer" {
 				var err errcode.Error
 				// codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
@@ -138,6 +138,8 @@ func handleErrorResponse(resp *http.Response) error {
 				return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
 			}
 		}
+		fallthrough
+	case resp.StatusCode >= 400 && resp.StatusCode < 500:
 		err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
 		if uErr, ok := err.(*unexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
 			return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
13 vendor/github.com/containers/image/v5/docker/docker_client.go (generated, vendored)

@@ -1056,6 +1056,15 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty
 func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerReference, desc imgspecv1.Descriptor, maxSize int, cache types.BlobInfoCache) ([]byte, error) {
 	// Note that this copies all kinds of attachments: attestations, and whatever else is there,
 	// not just signatures. We leave the signature consumers to decide based on the MIME type.
+
+	if err := desc.Digest.Validate(); err != nil { // .Algorithm() might panic without this check
+		return nil, fmt.Errorf("invalid digest %q: %w", desc.Digest.String(), err)
+	}
+	digestAlgorithm := desc.Digest.Algorithm()
+	if !digestAlgorithm.Available() {
+		return nil, fmt.Errorf("invalid digest %q: unsupported digest algorithm %q", desc.Digest.String(), digestAlgorithm.String())
+	}
+
 	reader, _, err := c.getBlob(ctx, ref, manifest.BlobInfoFromOCI1Descriptor(desc), cache)
 	if err != nil {
 		return nil, err
@@ -1065,6 +1074,10 @@ func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerR
 	if err != nil {
 		return nil, fmt.Errorf("reading blob %s in %s: %w", desc.Digest.String(), ref.ref.Name(), err)
 	}
+	actualDigest := digestAlgorithm.FromBytes(payload)
+	if actualDigest != desc.Digest {
+		return nil, fmt.Errorf("digest mismatch, expected %q, got %q", desc.Digest.String(), actualDigest.String())
+	}
 	return payload, nil
 }
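The two hunks above add up-front digest validation and post-read verification. The same pattern can be reproduced with go-digest alone; a minimal self-contained sketch (the sample data is made up):

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// verifyPayload mirrors the checks added to getOCIDescriptorContents:
// reject unusable digests before use, then recompute over the bytes read.
func verifyPayload(expected digest.Digest, payload []byte) error {
	if err := expected.Validate(); err != nil { // .Algorithm() might panic without this check
		return fmt.Errorf("invalid digest %q: %w", expected.String(), err)
	}
	alg := expected.Algorithm()
	if !alg.Available() {
		return fmt.Errorf("unsupported digest algorithm %q", alg.String())
	}
	if actual := alg.FromBytes(payload); actual != expected {
		return fmt.Errorf("digest mismatch, expected %q, got %q", expected.String(), actual.String())
	}
	return nil
}

func main() {
	payload := []byte("example blob")
	fmt.Println(verifyPayload(digest.FromBytes(payload), payload)) // <nil>
}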
1 vendor/github.com/containers/image/v5/docker/docker_image_dest.go (generated, vendored)

@@ -41,6 +41,7 @@ import (
 type dockerImageDestination struct {
 	impl.Compat
 	impl.PropertyMethodsInitialize
+	stubs.IgnoresOriginalOCIConfig
 	stubs.NoPutBlobPartialInitialize
 
 	ref dockerReference
4 vendor/github.com/containers/image/v5/docker/docker_image_src.go (generated, vendored)

@@ -340,6 +340,10 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read
 			}
 			return
 		}
+		if parts >= len(chunks) {
+			errs <- errors.New("too many parts returned by the server")
+			break
+		}
 		s := signalCloseReader{
 			closed: make(chan struct{}),
 			stream: p,
1 vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go (generated, vendored)

@@ -24,6 +24,7 @@ import (
 type Destination struct {
 	impl.Compat
 	impl.PropertyMethodsInitialize
+	stubs.IgnoresOriginalOCIConfig
 	stubs.NoPutBlobPartialInitialize
 	stubs.NoSignaturesInitialize
6 vendor/github.com/containers/image/v5/docker/registries_d.go (generated, vendored)

@@ -3,6 +3,7 @@ package docker
 import (
 	"errors"
 	"fmt"
+	"io/fs"
 	"net/url"
 	"os"
 	"path"
@@ -129,6 +130,11 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
 		configPath := filepath.Join(dirPath, configName)
 		configBytes, err := os.ReadFile(configPath)
 		if err != nil {
+			if errors.Is(err, fs.ErrNotExist) {
+				// file must have been removed between the directory listing
+				// and the open call, ignore that as it is a expected race
+				continue
+			}
 			return nil, err
 		}
16 vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go (generated, vendored, new file)

@@ -0,0 +1,16 @@
+package stubs
+
+import (
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// IgnoresOriginalOCIConfig implements NoteOriginalOCIConfig() that does nothing.
+type IgnoresOriginalOCIConfig struct{}
+
+// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
+// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
+// The destination can use it in its TryReusingBlob/PutBlob implementations
+// (otherwise it only obtains the final config after all layers are written).
+func (stub IgnoresOriginalOCIConfig) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
+	return nil
+}
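NoteOriginalOCIConfig becomes part of the internal private.ImageDestination interface (see the private.go hunk below), so every destination must either implement it or embed this stub. A minimal sketch of the embedding pattern the hunks in this commit follow — the struct here is hypothetical, and since the stubs package is internal, only transports inside c/image can actually do this:

package sometransport

import (
	"github.com/containers/image/v5/internal/imagedestination/impl"
	"github.com/containers/image/v5/internal/imagedestination/stubs"
)

// someImageDestination is hypothetical; it mirrors the embedding used by
// dirImageDestination, dockerImageDestination, tarfile.Destination, etc. above.
type someImageDestination struct {
	impl.Compat
	impl.PropertyMethodsInitialize
	stubs.IgnoresOriginalOCIConfig // NoteOriginalOCIConfig() becomes a no-op
	stubs.NoPutBlobPartialInitialize
	// ... transport-specific fields ...
}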
1 vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go (generated, vendored)

@@ -14,6 +14,7 @@ import (
 // wrapped provides the private.ImageDestination operations
 // for a destination that only implements types.ImageDestination
 type wrapped struct {
+	stubs.IgnoresOriginalOCIConfig
 	stubs.NoPutBlobPartialInitialize
 
 	types.ImageDestination
32 vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go (generated, vendored)

@@ -74,20 +74,20 @@ func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdat
 
 // UpdateInstances updates the sizes, digests, and media types of the manifests
 // which the list catalogs.
-func (index *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
+func (list *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
 	editInstances := []ListEdit{}
 	for i, instance := range updates {
 		editInstances = append(editInstances, ListEdit{
-			UpdateOldDigest: index.Manifests[i].Digest,
+			UpdateOldDigest: list.Manifests[i].Digest,
 			UpdateDigest:    instance.Digest,
 			UpdateSize:      instance.Size,
 			UpdateMediaType: instance.MediaType,
 			ListOperation:   ListOpUpdate})
 	}
-	return index.editInstances(editInstances)
+	return list.editInstances(editInstances)
 }
 
-func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
+func (list *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
 	addedEntries := []Schema2ManifestDescriptor{}
 	for i, editInstance := range editInstances {
 		switch editInstance.ListOperation {
@@ -98,21 +98,21 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
 			if err := editInstance.UpdateDigest.Validate(); err != nil {
 				return fmt.Errorf("Schema2List.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err)
 			}
-			targetIndex := slices.IndexFunc(index.Manifests, func(m Schema2ManifestDescriptor) bool {
+			targetIndex := slices.IndexFunc(list.Manifests, func(m Schema2ManifestDescriptor) bool {
 				return m.Digest == editInstance.UpdateOldDigest
 			})
 			if targetIndex == -1 {
 				return fmt.Errorf("Schema2List.EditInstances: digest %s not found", editInstance.UpdateOldDigest)
 			}
-			index.Manifests[targetIndex].Digest = editInstance.UpdateDigest
+			list.Manifests[targetIndex].Digest = editInstance.UpdateDigest
 			if editInstance.UpdateSize < 0 {
 				return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize)
 			}
-			index.Manifests[targetIndex].Size = editInstance.UpdateSize
+			list.Manifests[targetIndex].Size = editInstance.UpdateSize
 			if editInstance.UpdateMediaType == "" {
-				return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType)
+				return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), list.Manifests[i].MediaType)
 			}
-			index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
+			list.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
 		case ListOpAdd:
 			if editInstance.AddPlatform == nil {
 				// Should we create a struct with empty fields instead?
@@ -135,13 +135,13 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
 	if len(addedEntries) != 0 {
 		// slices.Clone() here to ensure a private backing array;
 		// an external caller could have manually created Schema2ListPublic with a slice with extra capacity.
-		index.Manifests = append(slices.Clone(index.Manifests), addedEntries...)
+		list.Manifests = append(slices.Clone(list.Manifests), addedEntries...)
 	}
 	return nil
 }
 
-func (index *Schema2List) EditInstances(editInstances []ListEdit) error {
-	return index.editInstances(editInstances)
+func (list *Schema2List) EditInstances(editInstances []ListEdit) error {
+	return list.editInstances(editInstances)
 }
 
 func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
@@ -280,12 +280,12 @@ func schema2ListFromPublic(public *Schema2ListPublic) *Schema2List {
 	return &Schema2List{*public}
 }
 
-func (index *Schema2List) CloneInternal() List {
-	return schema2ListFromPublic(Schema2ListPublicClone(&index.Schema2ListPublic))
+func (list *Schema2List) CloneInternal() List {
+	return schema2ListFromPublic(Schema2ListPublicClone(&list.Schema2ListPublic))
 }
 
-func (index *Schema2List) Clone() ListPublic {
-	return index.CloneInternal()
+func (list *Schema2List) Clone() ListPublic {
+	return list.CloneInternal()
 }
 
 // Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled
7 vendor/github.com/containers/image/v5/internal/private/private.go (generated, vendored)

@@ -10,6 +10,7 @@ import (
 	compression "github.com/containers/image/v5/pkg/compression/types"
 	"github.com/containers/image/v5/types"
 	"github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
 // ImageSourceInternalOnly is the part of private.ImageSource that is not
@@ -41,6 +42,12 @@ type ImageDestinationInternalOnly interface {
 	// FIXME: Add SupportsSignaturesWithFormat or something like that, to allow early failures
 	// on unsupported formats.
 
+	// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
+	// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
+	// The destination can use it in its TryReusingBlob/PutBlob implementations
+	// (otherwise it only obtains the final config after all layers are written).
+	NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error
+
 	// PutBlobWithOptions writes contents of stream and returns data representing the result.
 	// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
 	// inputInfo.Size is the expected length of stream, if known.
22 vendor/github.com/containers/image/v5/internal/reflink/reflink_linux.go (generated, vendored, new file)

@@ -0,0 +1,22 @@
+//go:build linux
+
+package reflink
+
+import (
+	"io"
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+// LinkOrCopy attempts to reflink the source to the destination fd.
+// If reflinking fails or is unsupported, it falls back to io.Copy().
+func LinkOrCopy(src, dst *os.File) error {
+	_, _, errno := unix.Syscall(unix.SYS_IOCTL, dst.Fd(), unix.FICLONE, src.Fd())
+	if errno == 0 {
+		return nil
+	}
+
+	_, err := io.Copy(dst, src)
+	return err
+}
15 vendor/github.com/containers/image/v5/internal/reflink/reflink_unsupported.go (generated, vendored, new file)

@@ -0,0 +1,15 @@
+//go:build !linux
+
+package reflink
+
+import (
+	"io"
+	"os"
+)
+
+// LinkOrCopy attempts to reflink the source to the destination fd.
+// If reflinking fails or is unsupported, it falls back to io.Copy().
+func LinkOrCopy(src, dst *os.File) error {
+	_, err := io.Copy(dst, src)
+	return err
+}
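Both build variants expose the same API: on Linux the copy is a single FICLONE ioctl when the filesystem supports reflinks (Btrfs, XFS), and a plain io.Copy everywhere else. A minimal caller sketch with hypothetical paths (note the package is internal, so only c/image itself can import it):

func stageBlob() error {
	src, err := os.Open("/tmp/blobs/layer.tar") // hypothetical source file
	if err != nil {
		return err
	}
	defer src.Close()

	dst, err := os.CreateTemp("/tmp/blobs", "staging-")
	if err != nil {
		return err
	}
	defer dst.Close()

	// Reflink when possible, byte-for-byte copy otherwise.
	return reflink.LinkOrCopy(src, dst)
}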
9 vendor/github.com/containers/image/v5/oci/archive/oci_dest.go (generated, vendored)

@@ -14,6 +14,7 @@ import (
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/idtools"
 	digest "github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/sirupsen/logrus"
 )
 
@@ -103,6 +104,14 @@ func (d *ociArchiveImageDestination) SupportsPutBlobPartial() bool {
 	return d.unpackedDest.SupportsPutBlobPartial()
 }
 
+// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
+// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
+// The destination can use it in its TryReusingBlob/PutBlob implementations
+// (otherwise it only obtains the final config after all layers are written).
+func (d *ociArchiveImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
+	return d.unpackedDest.NoteOriginalOCIConfig(ociConfig, configErr)
+}
+
 // PutBlobWithOptions writes contents of stream and returns data representing the result.
 // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
 // inputInfo.Size is the expected length of stream, if known.
31 vendor/github.com/containers/image/v5/oci/internal/oci_util.go (generated, vendored)

@@ -6,6 +6,7 @@ import (
 	"path/filepath"
 	"regexp"
 	"runtime"
+	"strconv"
 	"strings"
 )
 
@@ -98,7 +99,7 @@ func ValidateScope(scope string) error {
 }
 
 func validateScopeWindows(scope string) error {
-	matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope))
+	matched, _ := regexp.MatchString(`^[a-zA-Z]:\\`, scope)
 	if !matched {
 		return fmt.Errorf("Invalid scope '%s'. Must be an absolute path", scope)
 	}
@@ -119,3 +120,31 @@ func validateScopeNonWindows(scope string) error {
 
 	return nil
 }
+
+// parseOCIReferenceName parses the image from the oci reference.
+func parseOCIReferenceName(image string) (img string, index int, err error) {
+	index = -1
+	if strings.HasPrefix(image, "@") {
+		idx, err := strconv.Atoi(image[1:])
+		if err != nil {
+			return "", index, fmt.Errorf("Invalid source index @%s: not an integer: %w", image[1:], err)
+		}
+		if idx < 0 {
+			return "", index, fmt.Errorf("Invalid source index @%d: must not be negative", idx)
+		}
+		index = idx
+	} else {
+		img = image
+	}
+	return img, index, nil
+}
+
+// ParseReferenceIntoElements splits the oci reference into location, image name and source index if exists
+func ParseReferenceIntoElements(reference string) (string, string, int, error) {
+	dir, image := SplitPathAndImage(reference)
+	image, index, err := parseOCIReferenceName(image)
+	if err != nil {
+		return "", "", -1, err
+	}
+	return dir, image, index, nil
+}
97 vendor/github.com/containers/image/v5/oci/layout/oci_dest.go (generated, vendored)

@@ -17,6 +17,7 @@ import (
 	"github.com/containers/image/v5/internal/manifest"
 	"github.com/containers/image/v5/internal/private"
 	"github.com/containers/image/v5/internal/putblobdigest"
+	"github.com/containers/image/v5/internal/reflink"
 	"github.com/containers/image/v5/types"
 	"github.com/containers/storage/pkg/fileutils"
 	digest "github.com/opencontainers/go-digest"
@@ -27,6 +28,7 @@ import (
 type ociImageDestination struct {
 	impl.Compat
 	impl.PropertyMethodsInitialize
+	stubs.IgnoresOriginalOCIConfig
 	stubs.NoPutBlobPartialInitialize
 	stubs.NoSignaturesInitialize
@@ -37,6 +39,9 @@ type ociImageDestination struct {
 
 // newImageDestination returns an ImageDestination for writing to an existing directory.
 func newImageDestination(sys *types.SystemContext, ref ociReference) (private.ImageDestination, error) {
+	if ref.sourceIndex != -1 {
+		return nil, fmt.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex)
+	}
 	var index *imgspecv1.Index
 	if indexExists(ref) {
 		var err error
@@ -137,9 +142,21 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
 	if inputInfo.Size != -1 && size != inputInfo.Size {
 		return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
 	}
-	if err := blobFile.Sync(); err != nil {
+
+	if err := d.blobFileSyncAndRename(blobFile, blobDigest, &explicitClosed); err != nil {
 		return private.UploadedBlob{}, err
 	}
+	succeeded = true
+	return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
+}
+
+// blobFileSyncAndRename syncs the specified blobFile on the filesystem and renames it to the
+// specific blob path determined by the blobDigest. The closed pointer indicates to the caller
+// whether blobFile has been closed or not.
+func (d *ociImageDestination) blobFileSyncAndRename(blobFile *os.File, blobDigest digest.Digest, closed *bool) error {
+	if err := blobFile.Sync(); err != nil {
+		return err
+	}
 
 	// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
 	// On Windows, the “permissions of newly created files” argument to syscall.Open is
@@ -147,26 +164,27 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
 	// always fails on Windows.
 	if runtime.GOOS != "windows" {
 		if err := blobFile.Chmod(0644); err != nil {
-			return private.UploadedBlob{}, err
+			return err
 		}
 	}
 
 	blobPath, err := d.ref.blobPath(blobDigest, d.sharedBlobDir)
 	if err != nil {
-		return private.UploadedBlob{}, err
+		return err
 	}
 	if err := ensureParentDirectoryExists(blobPath); err != nil {
-		return private.UploadedBlob{}, err
+		return err
 	}
 
-	// need to explicitly close the file, since a rename won't otherwise not work on Windows
+	// need to explicitly close the file, since a rename won't otherwise work on Windows
 	blobFile.Close()
-	explicitClosed = true
+	*closed = true
+
 	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
-		return private.UploadedBlob{}, err
+		return err
 	}
-	succeeded = true
-	return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
+
+	return nil
 }
 
 // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
@@ -299,6 +317,67 @@ func (d *ociImageDestination) CommitWithOptions(ctx context.Context, options pri
 	return os.WriteFile(d.ref.indexPath(), indexJSON, 0644)
 }
 
+// PutBlobFromLocalFileOption is unused but may receive functionality in the future.
+type PutBlobFromLocalFileOption struct{}
+
+// PutBlobFromLocalFile arranges the data from path to be used as blob with digest.
+// It computes, and returns, the digest and size of the used file.
+//
+// This function can be used instead of dest.PutBlob() where the ImageDestination requires PutBlob() to be called.
+func PutBlobFromLocalFile(ctx context.Context, dest types.ImageDestination, file string, options ...PutBlobFromLocalFileOption) (digest.Digest, int64, error) {
+	d, ok := dest.(*ociImageDestination)
+	if !ok {
+		return "", -1, errors.New("internal error: PutBlobFromLocalFile called with a non-oci: destination")
+	}
+
+	succeeded := false
+	blobFileClosed := false
+	blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
+	if err != nil {
+		return "", -1, err
+	}
+	defer func() {
+		if !blobFileClosed {
+			blobFile.Close()
+		}
+		if !succeeded {
+			os.Remove(blobFile.Name())
+		}
+	}()
+
+	srcFile, err := os.Open(file)
+	if err != nil {
+		return "", -1, err
+	}
+	defer srcFile.Close()
+
+	err = reflink.LinkOrCopy(srcFile, blobFile)
+	if err != nil {
+		return "", -1, err
+	}
+
+	_, err = blobFile.Seek(0, io.SeekStart)
+	if err != nil {
+		return "", -1, err
+	}
+	blobDigest, err := digest.FromReader(blobFile)
+	if err != nil {
+		return "", -1, err
+	}
+
+	fileInfo, err := blobFile.Stat()
+	if err != nil {
+		return "", -1, err
+	}
+
+	if err := d.blobFileSyncAndRename(blobFile, blobDigest, &blobFileClosed); err != nil {
+		return "", -1, err
+	}
+
+	succeeded = true
+	return blobDigest, fileInfo.Size(), nil
+}
+
 func ensureDirectoryExists(path string) error {
 	if err := fileutils.Exists(path); err != nil && errors.Is(err, fs.ErrNotExist) {
 		if err := os.MkdirAll(path, 0755); err != nil {
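PutBlobFromLocalFile is the new public entry point that ties together blobFileSyncAndRename and the reflink helper. A minimal usage sketch, assuming an existing oci: layout directory (paths are hypothetical):

ctx := context.Background()
ref, err := layout.ParseReference("/tmp/layout:example")
if err != nil {
	log.Fatal(err)
}
dest, err := ref.NewImageDestination(ctx, nil)
if err != nil {
	log.Fatal(err)
}
defer dest.Close()

// Stages the file as a blob (reflinking when the filesystem allows) and
// returns its digest and size; a manifest referencing the blob still has
// to be written by the caller.
dgst, size, err := layout.PutBlobFromLocalFile(ctx, dest, "/tmp/layer.tar.gz")
if err != nil {
	log.Fatal(err)
}
fmt.Println(dgst, size)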
75 vendor/github.com/containers/image/v5/oci/layout/oci_transport.go (generated, vendored)

@@ -61,22 +61,31 @@ type ociReference struct {
 	// (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
 	dir         string // As specified by the user. May be relative, contain symlinks, etc.
 	resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
-	// If image=="", it means the "only image" in the index.json is used in the case it is a source
-	// for destinations, the image name annotation "image.ref.name" is not added to the index.json
+	// If image=="" && sourceIndex==-1, it means the "only image" in the index.json is used in the case it is a source
+	// for destinations, the image name annotation "image.ref.name" is not added to the index.json.
+	//
+	// Must not be set if sourceIndex is set (the value is not -1).
 	image string
+	// If not -1, a zero-based index of an image in the manifest index. Valid only for sources.
+	// Must not be set if image is set.
+	sourceIndex int
 }
 
 // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
 func ParseReference(reference string) (types.ImageReference, error) {
-	dir, image := internal.SplitPathAndImage(reference)
-	return NewReference(dir, image)
+	dir, image, index, err := internal.ParseReferenceIntoElements(reference)
+	if err != nil {
+		return nil, err
+	}
+	return newReference(dir, image, index)
 }
 
-// NewReference returns an OCI reference for a directory and a image.
+// newReference returns an OCI reference for a directory, and an image name annotation or sourceIndex.
 //
+// If sourceIndex==-1, the index will not be valid to point out the source image, only image will be used.
 // We do not expose an API supplying the resolvedDir; we could, but recomputing it
 // is generally cheap enough that we prefer being confident about the properties of resolvedDir.
-func NewReference(dir, image string) (types.ImageReference, error) {
+func newReference(dir, image string, sourceIndex int) (types.ImageReference, error) {
 	resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir)
 	if err != nil {
 		return nil, err
@@ -90,7 +99,26 @@ func NewReference(dir, image string) (types.ImageReference, error) {
 		return nil, err
 	}
 
-	return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil
+	if sourceIndex != -1 && sourceIndex < 0 {
+		return nil, fmt.Errorf("Invalid oci: layout reference: index @%d must not be negative", sourceIndex)
+	}
+	if sourceIndex != -1 && image != "" {
+		return nil, fmt.Errorf("Invalid oci: layout reference: cannot use both an image %s and a source index @%d", image, sourceIndex)
+	}
+	return ociReference{dir: dir, resolvedDir: resolved, image: image, sourceIndex: sourceIndex}, nil
+}
+
+// NewIndexReference returns an OCI reference for a path and a zero-based source manifest index.
+func NewIndexReference(dir string, sourceIndex int) (types.ImageReference, error) {
+	return newReference(dir, "", sourceIndex)
+}
+
+// NewReference returns an OCI reference for a directory and a image.
+//
+// We do not expose an API supplying the resolvedDir; we could, but recomputing it
+// is generally cheap enough that we prefer being confident about the properties of resolvedDir.
+func NewReference(dir, image string) (types.ImageReference, error) {
+	return newReference(dir, image, -1)
 }
 
 func (ref ociReference) Transport() types.ImageTransport {
@@ -103,7 +131,10 @@ func (ref ociReference) Transport() types.ImageTransport {
 // e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
 // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
 func (ref ociReference) StringWithinTransport() string {
-	return fmt.Sprintf("%s:%s", ref.dir, ref.image)
+	if ref.sourceIndex == -1 {
+		return fmt.Sprintf("%s:%s", ref.dir, ref.image)
+	}
+	return fmt.Sprintf("%s:@%d", ref.dir, ref.sourceIndex)
 }
 
 // DockerReference returns a Docker reference associated with this reference
@@ -187,14 +218,18 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, erro
 		return imgspecv1.Descriptor{}, -1, err
 	}
 
-	if ref.image == "" {
-		// return manifest if only one image is in the oci directory
-		if len(index.Manifests) != 1 {
-			// ask user to choose image when more than one image in the oci directory
-			return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage
+	switch {
+	case ref.image != "" && ref.sourceIndex != -1: // Coverage: newReference refuses to create such references.
+		return imgspecv1.Descriptor{}, -1, fmt.Errorf("Internal error: Cannot have both ref %s and source index @%d",
+			ref.image, ref.sourceIndex)

+	case ref.sourceIndex != -1:
+		if ref.sourceIndex >= len(index.Manifests) {
+			return imgspecv1.Descriptor{}, -1, fmt.Errorf("index %d is too large, only %d entries available", ref.sourceIndex, len(index.Manifests))
 		}
-		return index.Manifests[0], 0, nil
-	} else {
+		return index.Manifests[ref.sourceIndex], ref.sourceIndex, nil
+
+	case ref.image != "":
 		// if image specified, look through all manifests for a match
 		var unsupportedMIMETypes []string
 		for i, md := range index.Manifests {
@@ -208,8 +243,16 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, erro
 		if len(unsupportedMIMETypes) != 0 {
 			return imgspecv1.Descriptor{}, -1, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes)
 		}
-		return imgspecv1.Descriptor{}, -1, ImageNotFoundError{ref}
+
+	default:
+		// return manifest if only one image is in the oci directory
+		if len(index.Manifests) != 1 {
+			// ask user to choose image when more than one image in the oci directory
+			return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage
+		}
+		return index.Manifests[0], 0, nil
 	}
+	return imgspecv1.Descriptor{}, -1, ImageNotFoundError{ref}
 }
 
 // LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name
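Together with the ParseReferenceIntoElements change in oci_util.go above, this adds a second way to name a source image in a layout: by zero-based position in index.json instead of by org.opencontainers.image.ref.name annotation. A small sketch of the two equivalent forms (path hypothetical):

// String form: the transport-level reference is "oci:/tmp/layout:@1".
ref1, err := layout.ParseReference("/tmp/layout:@1")
if err != nil {
	log.Fatal(err)
}

// Programmatic form, added by this hunk.
ref2, err := layout.NewIndexReference("/tmp/layout", 1)
if err != nil {
	log.Fatal(err)
}

fmt.Println(ref1.StringWithinTransport()) // "/tmp/layout:@1"
fmt.Println(ref2.StringWithinTransport()) // "/tmp/layout:@1"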
52 vendor/github.com/containers/image/v5/oci/layout/reader.go (generated, vendored, new file)

@@ -0,0 +1,52 @@
+package layout
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/containers/image/v5/types"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// This file is named reader.go for consistency with other transports’
+// handling of “image containers”, but we don’t actually need a stateful reader object.
+
+// ListResult wraps the image reference and the manifest for loading
+type ListResult struct {
+	Reference          types.ImageReference
+	ManifestDescriptor imgspecv1.Descriptor
+}
+
+// List returns a slice of manifests included in the archive
+func List(dir string) ([]ListResult, error) {
+	var res []ListResult
+
+	indexJSON, err := os.ReadFile(filepath.Join(dir, imgspecv1.ImageIndexFile))
+	if err != nil {
+		return nil, err
+	}
+	var index imgspecv1.Index
+	if err := json.Unmarshal(indexJSON, &index); err != nil {
+		return nil, err
+	}
+
+	for manifestIndex, md := range index.Manifests {
+		refName := md.Annotations[imgspecv1.AnnotationRefName]
+		index := -1
+		if refName == "" {
+			index = manifestIndex
+		}
+		ref, err := newReference(dir, refName, index)
+		if err != nil {
+			return nil, fmt.Errorf("error creating image reference: %w", err)
+		}
+		reference := ListResult{
+			Reference:          ref,
+			ManifestDescriptor: md,
+		}
+		res = append(res, reference)
+	}
+	return res, nil
+}
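A short sketch of enumerating a layout with the new helper (path hypothetical); each result pairs a ready-to-use reference — by name annotation when present, by @index otherwise — with its manifest descriptor:

results, err := layout.List("/tmp/layout")
if err != nil {
	log.Fatal(err)
}
for _, r := range results {
	fmt.Printf("%s %s %s\n",
		r.Reference.StringWithinTransport(),
		r.ManifestDescriptor.Digest,
		r.ManifestDescriptor.MediaType)
}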
9 vendor/github.com/containers/image/v5/openshift/openshift_dest.go (generated, vendored)

@@ -22,6 +22,7 @@ import (
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/types"
 	"github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
 type openshiftImageDestination struct {
@@ -111,6 +112,14 @@ func (d *openshiftImageDestination) SupportsPutBlobPartial() bool {
 	return d.docker.SupportsPutBlobPartial()
 }
 
+// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
+// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
+// The destination can use it in its TryReusingBlob/PutBlob implementations
+// (otherwise it only obtains the final config after all layers are written).
+func (d *openshiftImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
+	return d.docker.NoteOriginalOCIConfig(ociConfig, configErr)
+}
+
 // PutBlobWithOptions writes contents of stream and returns data representing the result.
 // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
 // inputInfo.Size is the expected length of stream, if known.
6 vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go (generated, vendored)

@@ -1,6 +1,7 @@
 package sysregistriesv2
 
 import (
 	"errors"
 	"fmt"
+	"io/fs"
 	"os"
@@ -744,6 +745,11 @@ func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedC
 		// Enforce v2 format for drop-in-configs.
 		dropIn, err := loadConfigFile(path, true)
 		if err != nil {
+			if errors.Is(err, fs.ErrNotExist) {
+				// file must have been removed between the directory listing
+				// and the open call, ignore that as it is a expected race
+				continue
+			}
 			return nil, fmt.Errorf("loading drop-in registries configuration %q: %w", path, err)
 		}
 		config.updateWithConfigurationFrom(dropIn)
10 vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go (generated, vendored)

@@ -3,6 +3,7 @@ package tlsclientconfig
 import (
 	"crypto/tls"
 	"crypto/x509"
+	"errors"
 	"fmt"
 	"net"
 	"net/http"
@@ -36,12 +37,9 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
 			logrus.Debugf(" crt: %s", fullPath)
 			data, err := os.ReadFile(fullPath)
 			if err != nil {
-				if os.IsNotExist(err) {
-					// Dangling symbolic link?
-					// Race with someone who deleted the
-					// file after we read the directory's
-					// list of contents?
-					logrus.Warnf("error reading certificate %q: %v", fullPath, err)
+				if errors.Is(err, os.ErrNotExist) {
+					// file must have been removed between the directory listing
+					// and the open call, ignore that as it is a expected race
					continue
 				}
 				return err
2 vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go (generated, vendored)

@@ -20,7 +20,7 @@ func (f *fulcioTrustRoot) validate() error {
 	return errors.New("fulcio disabled at compile-time")
 }
 
-func verifyRekorFulcio(rekorPublicKey *ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
+func verifyRekorFulcio(rekorPublicKeys []*ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
 	untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string,
 	untrustedPayloadBytes []byte) (crypto.PublicKey, error) {
 	return nil, errors.New("fulcio disabled at compile-time")
9 vendor/github.com/containers/image/v5/signature/internal/errors.go (generated, vendored)

@@ -13,3 +13,12 @@ func (err InvalidSignatureError) Error() string {
 func NewInvalidSignatureError(msg string) InvalidSignatureError {
 	return InvalidSignatureError{msg: msg}
 }
+
+// JSONFormatToInvalidSignatureError converts JSONFormatError to InvalidSignatureError.
+// All other errors are returned as is.
+func JSONFormatToInvalidSignatureError(err error) error {
+	if formatErr, ok := err.(JSONFormatError); ok {
+		err = NewInvalidSignatureError(formatErr.Error())
+	}
+	return err
+}
9 vendor/github.com/containers/image/v5/signature/internal/rekor_set.go (generated, vendored)

@@ -40,15 +40,6 @@ type UntrustedRekorPayload struct {
 // A compile-time check that UntrustedRekorSET implements json.Unmarshaler
 var _ json.Unmarshaler = (*UntrustedRekorSET)(nil)
 
-// JSONFormatToInvalidSignatureError converts JSONFormatError to InvalidSignatureError.
-// All other errors are returned as is.
-func JSONFormatToInvalidSignatureError(err error) error {
-	if formatErr, ok := err.(JSONFormatError); ok {
-		err = NewInvalidSignatureError(formatErr.Error())
-	}
-	return err
-}
-
 // UnmarshalJSON implements the json.Unmarshaler interface
 func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error {
 	return JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data))
2 vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go (generated, vendored)

@@ -10,6 +10,6 @@ import (
 
 // VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data.
 // Returns bundle upload time on success.
-func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
+func VerifyRekorSET(publicKeys []*ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
 	return time.Time{}, NewInvalidSignatureError("rekor disabled at compile-time")
 }
580
vendor/github.com/containers/image/v5/storage/storage_dest.go
generated
vendored
580
vendor/github.com/containers/image/v5/storage/storage_dest.go
generated
vendored
|
|
@ -17,11 +17,13 @@ import (
|
|||
"sync/atomic"
|
||||
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/internal/image"
|
||||
"github.com/containers/image/v5/internal/imagedestination/impl"
|
||||
"github.com/containers/image/v5/internal/imagedestination/stubs"
|
||||
srcImpl "github.com/containers/image/v5/internal/imagesource/impl"
|
||||
srcStubs "github.com/containers/image/v5/internal/imagesource/stubs"
|
||||
"github.com/containers/image/v5/internal/private"
|
||||
"github.com/containers/image/v5/internal/putblobdigest"
|
||||
"github.com/containers/image/v5/internal/set"
|
||||
"github.com/containers/image/v5/internal/signature"
|
||||
"github.com/containers/image/v5/internal/tmpdir"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
|
|
@ -31,6 +33,7 @@ import (
|
|||
graphdriver "github.com/containers/storage/drivers"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/chunked"
|
||||
"github.com/containers/storage/pkg/chunked/toc"
|
||||
"github.com/containers/storage/pkg/ioutils"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
|
@ -57,8 +60,9 @@ type storageImageDestination struct {
|
|||
imageRef storageReference
|
||||
directory string // Temporary directory where we store blobs until Commit() time
|
||||
nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs
|
||||
manifest []byte // Manifest contents, temporary
|
||||
manifestDigest digest.Digest // Valid if len(manifest) != 0
|
||||
manifest []byte // (Per-instance) manifest contents, or nil if not yet known.
|
||||
manifestMIMEType string // Valid if manifest != nil
|
||||
manifestDigest digest.Digest // Valid if manifest != nil
|
||||
untrustedDiffIDValues []digest.Digest // From config’s RootFS.DiffIDs (not even validated to be valid digest.Digest!); or nil if not read yet
|
||||
signatures []byte // Signature contents, temporary
|
||||
signatureses map[digest.Digest][]byte // Instance signature contents, temporary
|
||||
|
|
@ -108,8 +112,10 @@ type storageImageDestinationLockProtected struct {
|
|||
//
|
||||
// Ideally we wouldn’t have blobDiffIDs, and we would just keep records by index, but the public API does not require the caller
|
||||
// to provide layer indices; and configs don’t have layer indices. blobDiffIDs needs to exist for those cases.
|
||||
indexToDiffID map[int]digest.Digest // Mapping from layer index to DiffID
|
||||
indexToTOCDigest map[int]digest.Digest // Mapping from layer index to a TOC Digest
|
||||
indexToDiffID map[int]digest.Digest // Mapping from layer index to DiffID
|
||||
// Mapping from layer index to a TOC Digest.
|
||||
// If this is set, then either c/storage/pkg/chunked/toc.GetTOCDigest must have returned a value, or indexToDiffID must be set as well.
|
||||
indexToTOCDigest map[int]digest.Digest
|
||||
blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs. CAREFUL: See the WARNING above.
|
||||
|
||||
// Layer data: Before commitLayer is called, either at least one of (diffOutputs, indexToAdditionalLayer, filenames)
|
||||
|
|
@ -121,6 +127,9 @@ type storageImageDestinationLockProtected struct {
|
|||
filenames map[digest.Digest]string
|
||||
// Mapping from layer blobsums to their sizes. If set, filenames and blobDiffIDs must also be set.
|
||||
fileSizes map[digest.Digest]int64
|
||||
|
||||
// Config
|
||||
configDigest digest.Digest // "" if N/A or not known yet.
|
||||
}
|
||||
|
||||
// addedLayerInfo records data about a layer to use in this image.
|
||||
|
|
@ -201,6 +210,18 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string {
|
|||
return filepath.Join(s.directory, fmt.Sprintf("%d", s.nextTempFileID.Add(1)))
|
||||
}
|
||||
|
||||
// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
|
||||
// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
|
||||
// The destination can use it in its TryReusingBlob/PutBlob implementations
|
||||
// (otherwise it only obtains the final config after all layers are written).
|
||||
func (s *storageImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
|
||||
if configErr != nil {
|
||||
return fmt.Errorf("writing to c/storage without a valid image config: %w", configErr)
|
||||
}
|
||||
s.setUntrustedDiffIDValuesFromOCIConfig(ociConfig)
|
||||
return nil
|
||||
}
|
||||
|
||||
// PutBlobWithOptions writes contents of stream and returns data representing the result.
|
||||
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
|
||||
// inputInfo.Size is the expected length of stream, if known.
|
||||
|
|
@ -214,7 +235,17 @@ func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream
|
|||
return info, err
|
||||
}
|
||||
|
||||
if options.IsConfig || options.LayerIndex == nil {
|
||||
if options.IsConfig {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
if s.lockProtected.configDigest != "" {
|
||||
return private.UploadedBlob{}, fmt.Errorf("after config %q, refusing to record another config %q",
|
||||
s.lockProtected.configDigest.String(), info.Digest.String())
|
||||
}
|
||||
s.lockProtected.configDigest = info.Digest
|
||||
return info, nil
|
||||
}
|
||||
if options.LayerIndex == nil {
|
||||
return info, nil
|
||||
}
|
||||
|
||||
|
|
@ -315,6 +346,56 @@ func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.Read
|
|||
// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
|
||||
// The fallback _must not_ be done otherwise.
|
||||
func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (_ private.UploadedBlob, retErr error) {
|
||||
inputTOCDigest, err := toc.GetTOCDigest(srcInfo.Annotations)
|
||||
if err != nil {
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
|
||||
// The identity of partially-pulled layers is, as long as we keep compatibility with tar-like consumers,
|
||||
// unfixably ambiguous: there are two possible “views” of the same file (same compressed digest),
|
||||
// the traditional “view” that decompresses the primary stream and consumes a tar file,
|
||||
// and the partial-pull “view” that starts with the TOC.
|
||||
// The two “views” have two separate metadata sets and may refer to different parts of the blob for file contents;
|
||||
// the direct way to ensure they are consistent would be to read the full primary stream (and authenticate it against
|
||||
// the compressed digest), and ensure the metadata and layer contents exactly match the partially-pulled contents -
|
||||
// making the partial pull completely pointless.
|
||||
//
|
||||
// Instead, for partial-pull-capable layers (with inputTOCDigest set), we require the image to “commit”
|
||||
// to uncompressed layer digest values via the config's RootFS.DiffIDs array:
|
||||
// they are already naturally computed for traditionally-pulled layers, and for partially-pulled layers we
|
||||
// do the optimal partial pull, and then reconstruct the uncompressed tar stream just to (expensively) compute this digest.
|
||||
//
|
||||
// Layers which don’t support partial pulls (inputTOCDigest == "", incl. all schema1 layers) can be let through:
|
||||
// the partial pull code will either not engage, or consume the full layer; and the rules of indexToTOCDigest / layerIdentifiedByTOC
|
||||
// ensure the layer is identified by DiffID, i.e. using the traditional “view”.
|
||||
//
|
||||
// But if inputTOCDigest is set and the input image doesn't have RootFS.DiffIDs (the config is invalid for schema2/OCI),
|
||||
// don't allow a partial pull, and fall back to PutBlobWithOptions.
|
||||
//
|
||||
// (The user can opt out of the DiffID commitment checking by a c/storage option, giving up security for performance,
|
||||
// but we will still trigger the fall back here, and we will still enforce a DiffID match, so that the set of accepted images
|
||||
// is the same in both cases, and so that users are not tempted to set the c/storage option to allow accepting some invalid images.)
|
||||
var untrustedDiffID digest.Digest // "" if unknown
|
||||
udid, err := s.untrustedLayerDiffID(options.LayerIndex)
|
||||
if err != nil {
|
||||
var diffIDUnknownErr untrustedLayerDiffIDUnknownError
|
||||
switch {
|
||||
case errors.Is(err, errUntrustedLayerDiffIDNotYetAvailable):
|
||||
// PutBlobPartial is a private API, so all callers are within c/image, and should have called
|
||||
// NoteOriginalOCIConfig first.
|
||||
return private.UploadedBlob{}, fmt.Errorf("internal error: in PutBlobPartial, untrustedLayerDiffID returned errUntrustedLayerDiffIDNotYetAvailable")
|
||||
case errors.As(err, &diffIDUnknownErr):
|
||||
if inputTOCDigest != nil {
|
||||
return private.UploadedBlob{}, private.NewErrFallbackToOrdinaryLayerDownload(err)
|
||||
}
|
||||
untrustedDiffID = "" // A schema1 image or a non-TOC layer with no ambiguity, let it through
|
||||
default:
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
} else {
|
||||
untrustedDiffID = udid
|
||||
}
|
||||
|
||||
fetcher := zstdFetcher{
|
||||
chunkAccessor: chunkAccessor,
|
||||
ctx: ctx,
|
||||
|
|
@@ -351,35 +432,55 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
blobDigest := srcInfo.Digest

s.lock.Lock()
if out.UncompressedDigest != "" {
    s.lockProtected.indexToDiffID[options.LayerIndex] = out.UncompressedDigest
    if out.TOCDigest != "" {
        options.Cache.RecordTOCUncompressedPair(out.TOCDigest, out.UncompressedDigest)
    }
    // Don’t set indexToTOCDigest on this path:
    // - Using UncompressedDigest allows image reuse with non-partially-pulled layers, so we want to set indexToDiffID.
    // - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch.
    //   That TOC is quite unlikely to match any other TOC value.
if err := func() error { // A scope for defer
    defer s.lock.Unlock()

    // The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is
    // responsible for ensuring blobDigest has been validated.
    if out.CompressedDigest != blobDigest {
        return private.UploadedBlob{}, fmt.Errorf("internal error: PrepareStagedLayer returned CompressedDigest %q not matching expected %q",
            out.CompressedDigest, blobDigest)
    // For true partial pulls, c/storage decides whether to compute the uncompressed digest based on an option in storage.conf
    // (defaults to true, to avoid ambiguity).
    // c/storage can also be configured to consume a layer not prepared for partial pulls (primarily to allow composefs conversion),
    // and in that case it always consumes the full blob and always computes the uncompressed digest.
    if out.UncompressedDigest != "" {
        // This is centrally enforced later, in commitLayer, but because we have the value available,
        // we might just as well check immediately.
        if untrustedDiffID != "" && out.UncompressedDigest != untrustedDiffID {
            return fmt.Errorf("uncompressed digest of layer %q is %q, config claims %q", srcInfo.Digest.String(),
                out.UncompressedDigest.String(), untrustedDiffID.String())
        }

        s.lockProtected.indexToDiffID[options.LayerIndex] = out.UncompressedDigest
        if out.TOCDigest != "" {
            s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest
            options.Cache.RecordTOCUncompressedPair(out.TOCDigest, out.UncompressedDigest)
        }

        // If the whole layer has been consumed, chunked.GetDiffer is responsible for ensuring blobDigest has been validated.
        if out.CompressedDigest != "" {
            if out.CompressedDigest != blobDigest {
                return fmt.Errorf("internal error: PrepareStagedLayer returned CompressedDigest %q not matching expected %q",
                    out.CompressedDigest, blobDigest)
            }
            // So, record also information about blobDigest, that might benefit reuse.
            // We trust PrepareStagedLayer to validate or create both values correctly.
            s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest
            options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest)
        }
    } else {
        // Sanity-check the defined rules for indexToTOCDigest.
        if inputTOCDigest == nil {
            return fmt.Errorf("internal error: PrepareStagedLayer returned a TOC-only identity for layer %q with no TOC digest", srcInfo.Digest.String())
        }

        // Use diffID for layer identity if it is known.
        if uncompressedDigest := options.Cache.UncompressedDigestForTOC(out.TOCDigest); uncompressedDigest != "" {
            s.lockProtected.indexToDiffID[options.LayerIndex] = uncompressedDigest
        }
        s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest
    }
    // So, record also information about blobDigest, that might benefit reuse.
    // We trust PrepareStagedLayer to validate or create both values correctly.
    s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest
    options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest)
} else {
    // Use diffID for layer identity if it is known.
    if uncompressedDigest := options.Cache.UncompressedDigestForTOC(out.TOCDigest); uncompressedDigest != "" {
        s.lockProtected.indexToDiffID[options.LayerIndex] = uncompressedDigest
    }
    s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest
    s.lockProtected.diffOutputs[options.LayerIndex] = out
    return nil
}(); err != nil {
    return private.UploadedBlob{}, err
}
s.lockProtected.diffOutputs[options.LayerIndex] = out
s.lock.Unlock()

succeeded = true
return private.UploadedBlob{
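Condensed, the bookkeeping in this hunk follows one rule: prefer the uncompressed digest as the layer identity whenever it is known, and fall back to the TOC digest otherwise. A simplified sketch, with plain maps standing in for s.lockProtected and the BlobInfoCache (the names here are illustrative, not the real API):

package sketch

import "github.com/opencontainers/go-digest"

// recordLayerIdentity shows which identity maps get filled for a staged layer.
func recordLayerIdentity(index int, uncompressed, toc, compressed, blob digest.Digest,
    indexToDiffID, indexToTOCDigest map[int]digest.Digest, blobDiffIDs map[digest.Digest]digest.Digest) {
    if uncompressed != "" {
        indexToDiffID[index] = uncompressed // preferred: enables reuse with non-partially-pulled layers
        if toc != "" {
            indexToTOCDigest[index] = toc
        }
        if compressed != "" && compressed == blob {
            blobDiffIDs[blob] = uncompressed // also benefits compressed-blob reuse
        }
    } else {
        indexToTOCDigest[index] = toc // TOC-only identity: the uncompressed digest was never computed
    }
}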
@@ -417,22 +518,43 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
if err := blobDigest.Validate(); err != nil {
    return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
}
if options.TOCDigest != "" {
useTOCDigest := false // If set, (options.TOCDigest != "" && options.LayerIndex != nil) AND we can use options.TOCDigest safely.
if options.TOCDigest != "" && options.LayerIndex != nil {
    if err := options.TOCDigest.Validate(); err != nil {
        return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
    }
    // Only consider using TOCDigest if we can avoid ambiguous image “views”, see the detailed comment in PutBlobPartial.
    _, err := s.untrustedLayerDiffID(*options.LayerIndex)
    if err != nil {
        var diffIDUnknownErr untrustedLayerDiffIDUnknownError
        switch {
        case errors.Is(err, errUntrustedLayerDiffIDNotYetAvailable):
            // options.TOCDigest is a private API, so all callers are within c/image, and should have called
            // NoteOriginalOCIConfig first.
            return false, private.ReusedBlob{}, fmt.Errorf("internal error: in TryReusingBlobWithOptions, untrustedLayerDiffID returned errUntrustedLayerDiffIDNotYetAvailable")
        case errors.As(err, &diffIDUnknownErr):
            logrus.Debugf("Not using TOC %q to look for layer reuse: %v", options.TOCDigest, err)
            // But don’t abort entirely, keep useTOCDigest = false, try a blobDigest match.
        default:
            return false, private.ReusedBlob{}, err
        }
    } else {
        useTOCDigest = true
    }
}

// lock the entire method as it executes fairly quickly
s.lock.Lock()
defer s.lock.Unlock()

if options.SrcRef != nil && options.TOCDigest != "" && options.LayerIndex != nil {
if options.SrcRef != nil && useTOCDigest {
    // Check if we have the layer in the underlying additional layer store.
    aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(options.TOCDigest, options.SrcRef.String())
    if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
        return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobDigest, err)
    } else if err == nil {
        // Compare the long comment in PutBlobPartial. We assume that the Additional Layer Store will, somehow,
        // avoid layer “view” ambiguity.
        alsTOCDigest := aLayer.TOCDigest()
        if alsTOCDigest != options.TOCDigest {
            // FIXME: If alsTOCDigest is "", the Additional Layer Store FUSE server is probably just too old, and we could
@@ -505,13 +627,13 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
            return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
        }
        if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
            s.lockProtected.blobDiffIDs[blobDigest] = uncompressedDigest
            s.lockProtected.blobDiffIDs[reused.Digest] = uncompressedDigest
            return true, reused, nil
        }
    }
}

if options.TOCDigest != "" && options.LayerIndex != nil {
if useTOCDigest {
    // Check if we know which UncompressedDigest the TOC digest resolves to, and we have a match for that.
    // Prefer this over LayersByTOCDigest because we can identify the layer using UncompressedDigest, maximizing reuse.
    uncompressedDigest := options.Cache.UncompressedDigestForTOC(options.TOCDigest)
@@ -532,6 +654,11 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
        return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, options.TOCDigest, err)
    }
    if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
        if uncompressedDigest == "" && layers[0].UncompressedDigest != "" {
            // Determine an uncompressed digest if at all possible, to use a traditional image ID
            // and to maximize image reuse.
            uncompressedDigest = layers[0].UncompressedDigest
        }
        if uncompressedDigest != "" {
            s.lockProtected.indexToDiffID[*options.LayerIndex] = uncompressedDigest
        }
@@ -568,13 +695,22 @@ func reusedBlobFromLayerLookup(layers []storage.Layer, blobDigest digest.Digest,

// trustedLayerIdentityData is a _consistent_ set of information known about a single layer.
type trustedLayerIdentityData struct {
    layerIdentifiedByTOC bool // true if we decided the layer should be identified by tocDigest, false if by diffID
    // true if we decided the layer should be identified by tocDigest, false if by diffID
    // This can only be true if c/storage/pkg/chunked/toc.GetTOCDigest returns a value.
    layerIdentifiedByTOC bool

    diffID     digest.Digest // A digest of the uncompressed full contents of the layer, or "" if unknown; must be set if !layerIdentifiedByTOC
    tocDigest  digest.Digest // A digest of the TOC, or "" if unknown; must be set if layerIdentifiedByTOC
    blobDigest digest.Digest // A digest of the (possibly-compressed) layer as presented, or "" if unknown/untrusted.
}

// logString() returns a representation of trusted suitable for identifying a layer in logs and errors.
// The string is already quoted to expose malicious input and does not need to be quoted again.
// Note that it does not include _all_ of the contents.
func (trusted trustedLayerIdentityData) logString() string {
    return fmt.Sprintf("%q/%q/%q", trusted.blobDigest, trusted.tocDigest, trusted.diffID)
}
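For illustration, logString renders the three digests in blob/TOC/DiffID order; a hypothetical TOC-identified layer would log roughly as follows (a runnable sketch duplicating the definitions above, not output captured from the real code):

package main

import (
    "fmt"

    "github.com/opencontainers/go-digest"
)

type trustedLayerIdentityData struct {
    layerIdentifiedByTOC          bool
    diffID, tocDigest, blobDigest digest.Digest
}

func (trusted trustedLayerIdentityData) logString() string {
    return fmt.Sprintf("%q/%q/%q", trusted.blobDigest, trusted.tocDigest, trusted.diffID)
}

func main() {
    trusted := trustedLayerIdentityData{
        layerIdentifiedByTOC: true,
        tocDigest:            digest.FromString("example TOC"),
        blobDigest:           digest.FromString("example blob"),
    }
    fmt.Println(trusted.logString()) // "sha256:…"/"sha256:…"/"" (the unset diffID shows as "")
}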
// trustedLayerIdentityDataLocked returns a _consistent_ set of information for a layer with (layerIndex, blobDigest).
// blobDigest is the (possibly-compressed) layer digest referenced in the manifest.
// It returns (trusted, true) if the layer was found, or (_, false) if insufficient data is available.

@@ -785,23 +921,6 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo)
    return nil
}

// singleLayerIDComponent returns a single layer’s input to computing a layer (chain) ID,
// and an indication whether the input already has the shape of a layer ID.
// It returns ("", false) if the layer is not found at all (which should never happen).
func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDigest digest.Digest) (string, bool) {
    s.lock.Lock()
    defer s.lock.Unlock()

    trusted, ok := s.trustedLayerIdentityDataLocked(layerIndex, blobDigest)
    if !ok {
        return "", false
    }
    if trusted.layerIdentifiedByTOC {
        return "@TOC=" + trusted.tocDigest.Encoded(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
    }
    return trusted.diffID.Encoded(), true // This looks like chain IDs, and it uses the traditional value.
}

// commitLayer commits the specified layer with the given index to the storage.
// size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs.
//
@@ -813,16 +932,15 @@ func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDig
// must guarantee that, at any given time, at most one goroutine may execute
// `commitLayer()`.
func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) (bool, error) {
    // Already committed? Return early.
    if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted {
        return false, nil
    }

    // Start with an empty string or the previous layer ID. Note that
    // `s.indexToStorageID` can only be accessed by *one* goroutine at any
    // given time. Hence, we don't need to lock accesses.
    var parentLayer string
    var parentLayer string // "" if no parent
    if index != 0 {
        // s.indexToStorageID can only be written by this function, and our caller
        // is responsible for ensuring it can only be called by *one* goroutine at any
        // given time. Hence, we don't need to lock accesses.
        prev, ok := s.indexToStorageID[index-1]
        if !ok {
            return false, fmt.Errorf("Internal error: commitLayer called with previous layer %d not committed yet", index-1)
@@ -830,18 +948,17 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
        parentLayer = prev
    }

    // Carry over the previous ID for empty non-base layers.
    if info.emptyLayer {
        s.indexToStorageID[index] = parentLayer
        return false, nil
    }

    // Check if there's already a layer with the ID that we'd give to the result of applying
    // this layer blob to its parent, if it has one, or the blob's hex value otherwise.
    // The layerID refers either to the DiffID or the digest of the TOC.
    layerIDComponent, layerIDComponentStandalone := s.singleLayerIDComponent(index, info.digest)
    if layerIDComponent == "" {
        // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob() / TryReusingBlob() / …
    // Collect trusted parameters of the layer.
    s.lock.Lock()
    trusted, ok := s.trustedLayerIdentityDataLocked(index, info.digest)
    s.lock.Unlock()
    if !ok {
        // Check if the layer exists already and the caller just (incorrectly) forgot to pass it to us in a PutBlob() / TryReusingBlob() / …
        //
        // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache: a caller
        // that relies on using a blob digest that has never been seen by the store had better call
@@ -865,23 +982,54 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
            return false, fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String())
        }

        layerIDComponent, layerIDComponentStandalone = s.singleLayerIDComponent(index, info.digest)
        if layerIDComponent == "" {
        s.lock.Lock()
        trusted, ok = s.trustedLayerIdentityDataLocked(index, info.digest)
        s.lock.Unlock()
        if !ok {
            return false, fmt.Errorf("we have blob %q, but don't know its layer ID", info.digest.String())
        }
    }

    id := layerIDComponent
    if !layerIDComponentStandalone || parentLayer != "" {
        id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Encoded()
    // Ensure that we always see the same “view” of a layer, as identified by the layer’s uncompressed digest,
    // unless the user has explicitly opted out of this in storage.conf: see the more detailed explanation in PutBlobPartial.
    if trusted.diffID != "" {
        untrustedDiffID, err := s.untrustedLayerDiffID(index)
        if err != nil {
            var diffIDUnknownErr untrustedLayerDiffIDUnknownError
            switch {
            case errors.Is(err, errUntrustedLayerDiffIDNotYetAvailable):
                logrus.Debugf("Skipping commit for layer %q, manifest not yet available for DiffID check", index)
                return true, nil
            case errors.As(err, &diffIDUnknownErr):
                // If untrustedLayerDiffIDUnknownError, the input image is schema1, has no TOC annotations,
                // so we could not have reused a TOC-identified layer nor have done a TOC-identified partial pull,
                // i.e. there is no other “view” to worry about. Sanity-check that we really see the only expected view.
                //
                // Or, maybe, the input image is OCI, and has invalid/missing DiffID values in config. In that case
                // we _must_ fail if we used a TOC-identified layer - but PutBlobPartial should have already
                // refused to do a partial pull, so we are in an inconsistent state.
                if trusted.layerIdentifiedByTOC {
                    return false, fmt.Errorf("internal error: layer %d for blob %s was identified by TOC, but we don't have a DiffID in config",
                        index, trusted.logString())
                }
                // else a schema1 image or a non-TOC layer with no ambiguity, let it through
            default:
                return false, err
            }
        } else if trusted.diffID != untrustedDiffID {
            return false, fmt.Errorf("layer %d (blob %s) does not match config's DiffID %q", index, trusted.logString(), untrustedDiffID)
        }
    }

    id := layerID(parentLayer, trusted)

    if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
        // There's already a layer that should have the right contents, just reuse it.
        s.indexToStorageID[index] = layer.ID
        return false, nil
    }

    layer, err := s.createNewLayer(index, info.digest, parentLayer, id)
    layer, err := s.createNewLayer(index, trusted, parentLayer, id)
    if err != nil {
        return false, err
    }
@@ -892,32 +1040,62 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
    return false, nil
}

// createNewLayer creates a new layer newLayerID for (index, layerDigest) on top of parentLayer (which may be "").
// layerID computes a layer (“chain”) ID for (a possibly-empty parentID, trusted)
func layerID(parentID string, trusted trustedLayerIdentityData) string {
    var component string
    mustHash := false
    if trusted.layerIdentifiedByTOC {
        // "@" is not a valid start of a digest.Digest.Encoded(), so this is unambiguous with the !layerIdentifiedByTOC case.
        // But we _must_ hash this below to get a Digest.Encoded()-formatted value.
        component = "@TOC=" + trusted.tocDigest.Encoded()
        mustHash = true
    } else {
        component = trusted.diffID.Encoded() // This looks like chain IDs, and it uses the traditional value.
    }

    if parentID == "" && !mustHash {
        return component
    }
    return digest.Canonical.FromString(parentID + "+" + component).Encoded()
}
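A hypothetical walk-through of layerID chaining for a two-layer image, assuming the layerID and trustedLayerIdentityData definitions above are in scope (the digests are made-up inputs):

package sketch

import (
    "fmt"

    "github.com/opencontainers/go-digest"
)

func exampleChainIDs() {
    base := trustedLayerIdentityData{diffID: digest.FromString("layer 0")}
    child := trustedLayerIdentityData{diffID: digest.FromString("layer 1")}

    id0 := layerID("", base)   // == base.diffID.Encoded(): the traditional standalone value
    id1 := layerID(id0, child) // == sha256 of id0 + "+" + child.diffID.Encoded(): a chain ID
    fmt.Println(id0, id1)

    // A TOC-identified layer is always hashed, even without a parent,
    // because "@TOC=…" is not a valid digest encoding on its own.
    toc := trustedLayerIdentityData{layerIdentifiedByTOC: true, tocDigest: digest.FromString("TOC")}
    fmt.Println(layerID("", toc))
}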
// createNewLayer creates a new layer newLayerID for (index, trusted) on top of parentLayer (which may be "").
// If the layer cannot be committed yet, the function returns (nil, nil).
func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.Digest, parentLayer, newLayerID string) (*storage.Layer, error) {
func (s *storageImageDestination) createNewLayer(index int, trusted trustedLayerIdentityData, parentLayer, newLayerID string) (*storage.Layer, error) {
    s.lock.Lock()
    diffOutput, ok := s.lockProtected.diffOutputs[index]
    s.lock.Unlock()
    if ok {
        // If we know a trusted DiffID value (e.g. from a BlobInfoCache), set it in diffOutput.
        // Typically, we compute a trusted DiffID value to authenticate the layer contents, see the detailed explanation
        // in PutBlobPartial. If the user has opted out of that, but we know a trusted DiffID value
        // (e.g. from a BlobInfoCache), set it in diffOutput.
        // That way it will be persisted in storage even if the cache is deleted; also
        // we can use the value below to avoid the untrustedUncompressedDigest logic (and notably
        // the costly commit delay until a manifest is available).
        s.lock.Lock()
        if d, ok := s.lockProtected.indexToDiffID[index]; ok {
            diffOutput.UncompressedDigest = d
        // we can use the value below to avoid the untrustedUncompressedDigest logic.
        if diffOutput.UncompressedDigest == "" && trusted.diffID != "" {
            diffOutput.UncompressedDigest = trusted.diffID
        }
        s.lock.Unlock()

        var untrustedUncompressedDigest digest.Digest
        if diffOutput.UncompressedDigest == "" {
            d, err := s.untrustedLayerDiffID(index)
            if err != nil {
                return nil, err
            }
            if d == "" {
                logrus.Debugf("Skipping commit for layer %q, manifest not yet available", newLayerID)
                return nil, nil
                var diffIDUnknownErr untrustedLayerDiffIDUnknownError
                switch {
                case errors.Is(err, errUntrustedLayerDiffIDNotYetAvailable):
                    logrus.Debugf("Skipping commit for layer %q, manifest not yet available", newLayerID)
                    return nil, nil
                case errors.As(err, &diffIDUnknownErr):
                    // If untrustedLayerDiffIDUnknownError, the input image is schema1, has no TOC annotations,
                    // so we should have !trusted.layerIdentifiedByTOC, i.e. we should have set
                    // diffOutput.UncompressedDigest above in this function, at the very latest.
                    //
                    // Or, maybe, the input image is OCI, and has invalid/missing DiffID values in config. In that case
                    // commitLayer should have already refused this image when dealing with the “view” ambiguity.
                    return nil, fmt.Errorf("internal error: layer %d for blob %s was partially-pulled with unknown UncompressedDigest, but we don't have a DiffID in config",
                        index, trusted.logString())
                default:
                    return nil, err
                }
            }

            untrustedUncompressedDigest = d
@@ -965,19 +1143,17 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
    // then we need to read the desired contents from a layer.
    var filename string
    var gotFilename bool
    s.lock.Lock()
    trusted, ok := s.trustedLayerIdentityDataLocked(index, layerDigest)
    if ok && trusted.blobDigest != "" {
    if trusted.blobDigest != "" {
        s.lock.Lock()
        filename, gotFilename = s.lockProtected.filenames[trusted.blobDigest]
    }
    s.lock.Unlock()
    if !ok { // We have already determined newLayerID, so the data must have been available.
        return nil, fmt.Errorf("internal inconsistency: layer (%d, %q) not found", index, layerDigest)
        s.lock.Unlock()
    }
    var trustedOriginalDigest digest.Digest // For storage.LayerOptions
    var trustedOriginalSize *int64
    if gotFilename {
        // The code setting .filenames[trusted.blobDigest] is responsible for ensuring that the file contents match trusted.blobDigest.
        trustedOriginalDigest = trusted.blobDigest
        trustedOriginalSize = nil // It’s s.lockProtected.fileSizes[trusted.blobDigest], but we don’t hold the lock now, and the consumer can compute it at trivial cost.
    } else {
        // Try to find the layer with contents matching the data we use.
        var layer *storage.Layer // = nil
@@ -997,7 +1173,7 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
            }
        }
        if layer == nil {
            return nil, fmt.Errorf("layer for blob %q/%q/%q not found", trusted.blobDigest, trusted.tocDigest, trusted.diffID)
            return nil, fmt.Errorf("layer for blob %s not found", trusted.logString())
        }

        // Read the layer's contents.
@@ -1007,7 +1183,7 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
        }
        diff, err2 := s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
        if err2 != nil {
            return nil, fmt.Errorf("reading layer %q for blob %q/%q/%q: %w", layer.ID, trusted.blobDigest, trusted.tocDigest, trusted.diffID, err2)
            return nil, fmt.Errorf("reading layer %q for blob %s: %w", layer.ID, trusted.logString(), err2)
        }
        // Copy the layer diff to a file. Diff() takes a lock that it holds
        // until the ReadCloser that it returns is closed, and PutLayer() wants
@@ -1032,22 +1208,36 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
        if trusted.diffID == "" && layer.UncompressedDigest != "" {
            trusted.diffID = layer.UncompressedDigest // This data might have been unavailable in tryReusingBlobAsPending, and is only known now.
        }
        // The stream we have is uncompressed, and it matches trusted.diffID (if known).

        // Set the layer’s CompressedDigest/CompressedSize to relevant values if known, to allow more layer reuse.
        // But we don’t want to just use the size from the manifest if we never saw the compressed blob,
        // so that we don’t propagate mistakes / attacks.
        //
        // FIXME? trustedOriginalDigest could be set to trusted.blobDigest if known, to allow more layer reuse.
        // But for c/storage to reasonably use it (as a CompressedDigest value), we should also ensure the CompressedSize of the created
        // layer is correct, and the API does not currently make it possible (.CompressedSize is set from the input stream).
        //
        // We can legitimately set storage.LayerOptions.OriginalDigest to "",
        // but that would just result in PutLayer computing the digest of the input stream == trusted.diffID.
        // So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation.
        trustedOriginalDigest = trusted.diffID
        // s.lockProtected.fileSizes[trusted.blobDigest] is not set, otherwise we would have found gotFilename.
        // So, check if the layer we found contains that metadata. (If that layer continues to exist, there’s no benefit
        // to us propagating the metadata; but that layer could be removed, and in that case propagating the metadata to
        // this new layer copy can help.)
        if trusted.blobDigest != "" && layer.CompressedDigest == trusted.blobDigest && layer.CompressedSize > 0 {
            trustedOriginalDigest = trusted.blobDigest
            sizeCopy := layer.CompressedSize
            trustedOriginalSize = &sizeCopy
        } else {
            // The stream we have is uncompressed, and it matches trusted.diffID (if known).
            //
            // We can legitimately set storage.LayerOptions.OriginalDigest to "",
            // but that would just result in PutLayer computing the digest of the input stream == trusted.diffID.
            // So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation.
            trustedOriginalDigest = trusted.diffID
            trustedOriginalSize = nil // Probably layer.UncompressedSize, but the consumer can compute it at trivial cost.
        }

        // Allow using the already-collected layer contents without extracting the layer again.
        //
        // This only matches against the uncompressed digest.
        // We don’t have the original compressed data here to trivially set filenames[layerDigest].
        // In particular we can’t achieve the correct Layer.CompressedSize value with the current c/storage API.
        // If we have trustedOriginalDigest == trusted.blobDigest, we could arrange to reuse the
        // same uncompressed stream for future calls of createNewLayer; but for the non-layer blobs (primarily the config),
        // we assume that the file at filenames[someDigest] matches someDigest _exactly_; we would need to differentiate
        // between “original files” and “possibly uncompressed files”.
        // Within-image layer reuse is probably very rare, for now we prefer to avoid that complexity.
        if trusted.diffID != "" {
            s.lock.Lock()
@@ -1067,55 +1257,128 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
    // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
    layer, _, err := s.imageRef.transport.store.PutLayer(newLayerID, parentLayer, nil, "", false, &storage.LayerOptions{
        OriginalDigest: trustedOriginalDigest,
        OriginalSize:   trustedOriginalSize, // nil in many cases
        // This might be "" if trusted.layerIdentifiedByTOC; in that case PutLayer will compute the value from the stream.
        UncompressedDigest: trusted.diffID,
    }, file)
    if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
        return nil, fmt.Errorf("adding layer with blob %q/%q/%q: %w", trusted.blobDigest, trusted.tocDigest, trusted.diffID, err)
        return nil, fmt.Errorf("adding layer with blob %s: %w", trusted.logString(), err)
    }
    return layer, nil
}

// untrustedLayerDiffID returns a DiffID value for layerIndex from the image’s config.
// If the value is not yet available (but it can be available after s.manifest is set), it returns ("", nil).
// WARNING: We don’t validate the DiffID value against the layer contents; it must not be used for any deduplication.
func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.Digest, error) {
    // At this point, we are either inside the multi-threaded scope of HasThreadSafePutBlob, and
    // nothing is writing to s.manifest yet, or PutManifest has been called and s.manifest != nil.
    // Either way this function does not need the protection of s.lock.
    if s.manifest == nil {
        return "", nil

// uncommittedImageSource allows accessing an image’s metadata (not layers) before it has been committed,
// to allow using image.FromUnparsedImage.
type uncommittedImageSource struct {
    srcImpl.Compat
    srcImpl.PropertyMethodsInitialize
    srcImpl.NoSignatures
    srcImpl.DoesNotAffectLayerInfosForCopy
    srcStubs.NoGetBlobAtInitialize

    d *storageImageDestination
}

func newUncommittedImageSource(d *storageImageDestination) *uncommittedImageSource {
    s := &uncommittedImageSource{
        PropertyMethodsInitialize: srcImpl.PropertyMethods(srcImpl.Properties{
            HasThreadSafeGetBlob: true,
        }),
        NoGetBlobAtInitialize: srcStubs.NoGetBlobAt(d.Reference()),

        d: d,
    }
    s.Compat = srcImpl.AddCompat(s)
    return s
}

func (u *uncommittedImageSource) Reference() types.ImageReference {
    return u.d.Reference()
}

func (u *uncommittedImageSource) Close() error {
    return nil
}

func (u *uncommittedImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
    return u.d.manifest, u.d.manifestMIMEType, nil
}

func (u *uncommittedImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
    blob, err := u.d.getConfigBlob(info)
    if err != nil {
        return nil, -1, err
    }
    return io.NopCloser(bytes.NewReader(blob)), int64(len(blob)), nil
}

// errUntrustedLayerDiffIDNotYetAvailable is returned by untrustedLayerDiffID
// if the value is not yet available (but it can be available after s.manifest is set).
// This should only happen for external callers of the transport, not for c/image/copy.
//
// Callers of untrustedLayerDiffID before PutManifest must handle this error specially;
// callers after PutManifest can use the default, reporting an internal error.
var errUntrustedLayerDiffIDNotYetAvailable = errors.New("internal error: untrustedLayerDiffID has no value available and fallback was not implemented")

// untrustedLayerDiffIDUnknownError is returned by untrustedLayerDiffID
// if the image’s format does not provide DiffIDs.
type untrustedLayerDiffIDUnknownError struct {
    layerIndex int
}

func (e untrustedLayerDiffIDUnknownError) Error() string {
    return fmt.Sprintf("DiffID value for layer %d is unknown or explicitly empty", e.layerIndex)
}
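The two error kinds deliberately have different shapes: a sentinel value for “not available yet”, and a struct type carrying the layer index for “will never be available”. Callers dispatch with errors.Is for the former and errors.As for the latter, exactly as the call sites above do. A self-contained sketch of the pattern, using simplified names:

package main

import (
    "errors"
    "fmt"
)

var errNotYetAvailable = errors.New("value not yet available")

type unknownError struct{ layerIndex int }

func (e unknownError) Error() string {
    return fmt.Sprintf("DiffID value for layer %d is unknown", e.layerIndex)
}

func handle(err error) {
    var unknown unknownError
    switch {
    case errors.Is(err, errNotYetAvailable):
        fmt.Println("retry after PutManifest")
    case errors.As(err, &unknown):
        fmt.Printf("no DiffID will ever exist for layer %d\n", unknown.layerIndex)
    default:
        fmt.Println("fatal:", err)
    }
}

func main() {
    handle(errNotYetAvailable)
    handle(unknownError{layerIndex: 3})
}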
// untrustedLayerDiffID returns a DiffID value for layerIndex from the image’s config.
// It may return two special errors, errUntrustedLayerDiffIDNotYetAvailable or untrustedLayerDiffIDUnknownError.
//
// WARNING: This function does not even validate that the returned digest has a valid format.
// WARNING: We don’t _always_ validate this DiffID value against the layer contents; it must not be used for any deduplication.
func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.Digest, error) {
    // At this point, we are either inside the multi-threaded scope of HasThreadSafePutBlob,
    // nothing is writing to s.manifest yet, and s.untrustedDiffIDValues might have been set
    // by NoteOriginalOCIConfig and are not being updated any more;
    // or PutManifest has been called and s.manifest != nil.
    // Either way this function does not need the protection of s.lock.

    if s.untrustedDiffIDValues == nil {
        mt := manifest.GuessMIMEType(s.manifest)
        if mt != imgspecv1.MediaTypeImageManifest {
            // We could, in principle, build an ImageSource, support arbitrary image formats using image.FromUnparsedImage,
            // and then use types.Image.OCIConfig so that we can parse the image.
            //
            // In practice, this should, right now, only matter for pulls of OCI images (this code path implies that a layer has an annotation),
            // while converting to a non-OCI format, using a manual (skopeo copy) or something similar, not (podman pull).
            // So it is not implemented yet.
            return "", fmt.Errorf("determining DiffID for manifest type %q is not yet supported", mt)
        }
        man, err := manifest.FromBlob(s.manifest, mt)
        if err != nil {
            return "", fmt.Errorf("parsing manifest: %w", err)
        // Typically, we expect untrustedDiffIDValues to be set by the generic copy code
        // via NoteOriginalOCIConfig; this is a compatibility fallback for external callers
        // of the public types.ImageDestination.
        if s.manifest == nil {
            return "", errUntrustedLayerDiffIDNotYetAvailable
        }

        cb, err := s.getConfigBlob(man.ConfigInfo())
        ctx := context.Background() // This is all happening in memory, no need to worry about cancellation.
        unparsed := image.UnparsedInstance(newUncommittedImageSource(s), nil)
        sourced, err := image.FromUnparsedImage(ctx, nil, unparsed)
        if err != nil {
            return "", err
            return "", fmt.Errorf("parsing image to be committed: %w", err)
        }
        configOCI, err := sourced.OCIConfig(ctx)
        if err != nil {
            return "", fmt.Errorf("obtaining config of image to be committed: %w", err)
        }

        // retrieve the expected uncompressed digest from the config blob.
        configOCI := &imgspecv1.Image{}
        if err := json.Unmarshal(cb, configOCI); err != nil {
            return "", err
        }
        s.untrustedDiffIDValues = slices.Clone(configOCI.RootFS.DiffIDs)
        if s.untrustedDiffIDValues == nil { // Unlikely but possible in theory…
            s.untrustedDiffIDValues = []digest.Digest{}
        s.setUntrustedDiffIDValuesFromOCIConfig(configOCI)
    }

    // Let entirely empty / missing diffIDs through; but if the array does exist, expect it to contain an entry for every layer,
    // and fail hard on missing entries. This tries to account for completely naive image producers who just don’t fill DiffID,
    // while still detecting incorrectly-built / confused images.
    //
    // schema1 images don’t have DiffID values in the config.
    // Our schema1.OCIConfig code produces non-empty DiffID arrays of empty values, so treat arrays of all-empty
    // values as “DiffID unknown”.
    // For schema 1, it is important to exit here, before the layerIndex >= len(s.untrustedDiffIDValues)
    // check, because the format conversion from schema1 to OCI used to compute untrustedDiffIDValues
    // changes the number of layers (drops items with Schema1V1Compatibility.ThrowAway).
    if !slices.ContainsFunc(s.untrustedDiffIDValues, func(d digest.Digest) bool {
        return d != ""
    }) {
        return "", untrustedLayerDiffIDUnknownError{
            layerIndex: layerIndex,
        }
    }
    if layerIndex >= len(s.untrustedDiffIDValues) {
@@ -1124,6 +1387,15 @@ func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.D
    return s.untrustedDiffIDValues[layerIndex], nil
}

// setUntrustedDiffIDValuesFromOCIConfig updates s.untrustedDiffIDValues from config.
// The caller must ensure s.lock does not need to be held.
func (s *storageImageDestination) setUntrustedDiffIDValuesFromOCIConfig(config *imgspecv1.Image) {
    s.untrustedDiffIDValues = slices.Clone(config.RootFS.DiffIDs)
    if s.untrustedDiffIDValues == nil { // Unlikely but possible in theory…
        s.untrustedDiffIDValues = []digest.Digest{}
    }
}

// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before CommitWithOptions() is called
@@ -1131,7 +1403,7 @@ func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.D
func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
    // This function is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock.

    if len(s.manifest) == 0 {
    if s.manifest == nil {
        return errors.New("Internal error: storageImageDestination.CommitWithOptions() called without PutManifest()")
    }
    toplevelManifest, _, err := options.UnparsedToplevel.Manifest(ctx)

@@ -1159,7 +1431,7 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options
        }
    }
    // Find the list of layer blobs.
    man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
    man, err := manifest.FromBlob(s.manifest, s.manifestMIMEType)
    if err != nil {
        return fmt.Errorf("parsing manifest: %w", err)
    }
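The parse-then-walk pattern used here looks roughly like this in isolation, using the public github.com/containers/image/v5/manifest API (manifest.json is a hypothetical input file):

package main

import (
    "fmt"
    "os"

    "github.com/containers/image/v5/manifest"
)

func main() {
    blob, err := os.ReadFile("manifest.json") // hypothetical input
    if err != nil {
        panic(err)
    }
    man, err := manifest.FromBlob(blob, manifest.GuessMIMEType(blob))
    if err != nil {
        panic(err)
    }
    for i, info := range man.LayerInfos() { // layer blobs, in order
        fmt.Println(i, info.Digest, info.Size, info.EmptyLayer)
    }
}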
@@ -1193,29 +1465,21 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options
        imgOptions.CreationDate = *inspect.Created
    }

    // Set up to save the non-layer blobs as data items. Since we only share layers, they should all be in files, so
    // we just need to screen out the ones that are actually layers to get the list of non-layers.
    dataBlobs := set.New[digest.Digest]()
    for blob := range s.lockProtected.filenames {
        dataBlobs.Add(blob)
    }
    for _, layerBlob := range layerBlobs {
        dataBlobs.Delete(layerBlob.Digest)
    }
    for _, blob := range dataBlobs.Values() {
        v, err := os.ReadFile(s.lockProtected.filenames[blob])
    // Set up to save the config as a data item. Since we only share layers, the config should be in a file.
    if s.lockProtected.configDigest != "" {
        v, err := os.ReadFile(s.lockProtected.filenames[s.lockProtected.configDigest])
        if err != nil {
            return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err)
            return fmt.Errorf("copying config blob %q to image: %w", s.lockProtected.configDigest, err)
        }
        imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{
            Key:    blob.String(),
            Key:    s.lockProtected.configDigest.String(),
            Data:   v,
            Digest: digest.Canonical.FromBytes(v),
        })
    }
    // Set up to save the options.UnparsedToplevel's manifest if it differs from
    // the per-platform one, which is saved below.
    if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) {
    if !bytes.Equal(toplevelManifest, s.manifest) {
        manifestDigest, err := manifest.Digest(toplevelManifest)
        if err != nil {
            return fmt.Errorf("digesting top-level manifest: %w", err)

@@ -1370,6 +1634,10 @@ func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob
        return err
    }
    s.manifest = bytes.Clone(manifestBlob)
    if s.manifest == nil { // Make sure PutManifest can never succeed with s.manifest == nil
        s.manifest = []byte{}
    }
    s.manifestMIMEType = manifest.GuessMIMEType(s.manifest)
    s.manifestDigest = digest
    return nil
}
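The nil guard exists because bytes.Clone is documented to return nil for a nil input, while a nil s.manifest is reserved elsewhere to mean “PutManifest was never called”. A tiny demonstration:

package main

import (
    "bytes"
    "fmt"
)

func main() {
    fmt.Println(bytes.Clone(nil) == nil)      // true: Clone(nil) is documented to return nil
    fmt.Println(bytes.Clone([]byte{}) == nil) // false: an empty input yields a non-nil empty slice
    // So only a nil manifestBlob could leave s.manifest == nil, and the guard
    // normalizes that case, keeping nil reserved for "PutManifest not called yet".
}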
@@ -1392,7 +1660,7 @@ func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, s
    if instanceDigest == nil {
        s.signatures = sigblob
        s.metadata.SignatureSizes = sizes
        if len(s.manifest) > 0 {
        if s.manifest != nil {
            manifestDigest := s.manifestDigest
            instanceDigest = &manifestDigest
        }
4
vendor/github.com/containers/image/v5/storage/storage_reference.go
generated
vendored
@@ -153,7 +153,9 @@ func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Imag
    }
    if s.id == "" {
        logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport())
        return nil, fmt.Errorf("reference %q does not resolve to an image ID: %w", s.StringWithinTransport(), ErrNoSuchImage)
        // %.0w makes the error visible to errors.Unwrap() without including any text.
        // ErrNoSuchImage ultimately is “identifier is not an image”, which is not helpful for identifying the root cause.
        return nil, fmt.Errorf("reference %q does not resolve to an image ID%.0w", s.StringWithinTransport(), ErrNoSuchImage)
    }
    if loadedImage == nil {
        img, err := s.transport.store.Image(s.id)
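The %.0w trick used above is plain fmt behavior: a zero precision suppresses the operand's text, but fmt.Errorf still records it as the wrapped error. A self-contained demonstration (errNoSuchImage is a stand-in for the real sentinel):

package main

import (
    "errors"
    "fmt"
)

var errNoSuchImage = errors.New("identifier is not an image")

func main() {
    err := fmt.Errorf("reference %q does not resolve to an image ID%.0w", "example", errNoSuchImage)
    fmt.Println(err)                            // the wrapped error contributes no text
    fmt.Println(errors.Is(err, errNoSuchImage)) // true: still visible to errors.Is/Unwrap
}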
22
vendor/github.com/containers/image/v5/storage/storage_src.go
generated
vendored
@@ -35,13 +35,14 @@ type storageImageSource struct {
    impl.PropertyMethodsInitialize
    stubs.NoGetBlobAtInitialize

    imageRef              storageReference
    image                 *storage.Image
    systemContext         *types.SystemContext // SystemContext used in GetBlob() to create temporary files
    metadata              storageImageMetadata
    cachedManifest        []byte // A cached copy of the manifest, if already known, or nil
    getBlobMutex          sync.Mutex // Mutex to sync state for parallel GetBlob executions
    getBlobMutexProtected getBlobMutexProtected
    imageRef               storageReference
    image                  *storage.Image
    systemContext          *types.SystemContext // SystemContext used in GetBlob() to create temporary files
    metadata               storageImageMetadata
    cachedManifest         []byte // A cached copy of the manifest, if already known, or nil
    cachedManifestMIMEType string // Valid if cachedManifest != nil
    getBlobMutex           sync.Mutex // Mutex to sync state for parallel GetBlob executions
    getBlobMutexProtected  getBlobMutexProtected
}

// getBlobMutexProtected contains storageImageSource data protected by getBlobMutex.

@@ -247,7 +248,7 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di
        }
        return blob, manifest.GuessMIMEType(blob), err
    }
    if len(s.cachedManifest) == 0 {
    if s.cachedManifest == nil {
        // The manifest is stored as a big data item.
        // Prefer the manifest corresponding to the user-specified digest, if available.
        if s.imageRef.named != nil {

@@ -267,15 +268,16 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di
        }
        // If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest.
        // Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest().
        if len(s.cachedManifest) == 0 {
        if s.cachedManifest == nil {
            cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey)
            if err != nil {
                return nil, "", err
            }
            s.cachedManifest = cachedBlob
        }
        s.cachedManifestMIMEType = manifest.GuessMIMEType(s.cachedManifest)
    }
    return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err
    return s.cachedManifest, s.cachedManifestMIMEType, err
}

// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
4
vendor/github.com/containers/image/v5/version/version.go
generated
vendored
@@ -6,9 +6,9 @@ const (
    // VersionMajor is for API incompatible changes
    VersionMajor = 5
    // VersionMinor is for functionality in a backwards-compatible manner
    VersionMinor = 33
    VersionMinor = 34
    // VersionPatch is for backwards-compatible bug fixes
    VersionPatch = 1
    VersionPatch = 0

    // VersionDev indicates development branch. Releases will be empty string.
    VersionDev = ""
2
vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go
generated
vendored
@@ -25,7 +25,7 @@ import (
    "github.com/containers/ocicrypt/config"
    "github.com/containers/ocicrypt/keywrap"
    "github.com/containers/ocicrypt/utils"
    "go.mozilla.org/pkcs7"
    "github.com/smallstep/pkcs7"
)

type pkcs7KeyWrapper struct {
4
vendor/github.com/containers/storage/.cirrus.yml
generated
vendored
@@ -17,13 +17,13 @@ env:
    ####
    #### Cache-image names to test with (double-quotes around names are critical)
    ###
    FEDORA_NAME: "fedora-39"
    FEDORA_NAME: "fedora-41"
    DEBIAN_NAME: "debian-13"

    # GCE project where images live
    IMAGE_PROJECT: "libpod-218412"
    # VM Image built in containers/automation_images
    IMAGE_SUFFIX: "c20241010t105554z-f40f39d13"
    IMAGE_SUFFIX: "c20250107t132430z-f41f40d13"
    FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
    DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
2
vendor/github.com/containers/storage/Makefile
generated
vendored
@@ -35,7 +35,7 @@ TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /de
# N/B: This value is managed by Renovate, manual changes are
# possible, as long as they don't disturb the formatting
# (i.e. DO NOT ADD A 'v' prefix!)
GOLANGCI_LINT_VERSION := 1.61.0
GOLANGCI_LINT_VERSION := 1.63.4

default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs
2
vendor/github.com/containers/storage/VERSION
generated
vendored
@@ -1 +1 @@
1.56.1
1.57.1
8
vendor/github.com/containers/storage/check.go
generated
vendored
@@ -80,7 +80,7 @@ type CheckOptions struct {
// layer to the contents that we'd expect it to have to ignore certain
// discrepancies
type checkIgnore struct {
    ownership, timestamps, permissions bool
    ownership, timestamps, permissions, filetype bool
}

// CheckMost returns a CheckOptions with mostly just "quick" checks enabled.

@@ -139,8 +139,10 @@ func (s *store) Check(options *CheckOptions) (CheckReport, error) {
        if strings.Contains(o, "ignore_chown_errors=true") {
            ignore.ownership = true
        }
        if strings.HasPrefix(o, "force_mask=") {
        if strings.Contains(o, "force_mask=") {
            ignore.ownership = true
            ignore.permissions = true
            ignore.filetype = true
        }
    }
    for o := range s.pullOptions {

@@ -833,7 +835,7 @@ func (s *store) Repair(report CheckReport, options *RepairOptions) []error {
// compareFileInfo returns a string summarizing what's different between the two checkFileInfos
func compareFileInfo(a, b checkFileInfo, idmap *idtools.IDMappings, ignore checkIgnore) string {
    var comparison []string
    if a.typeflag != b.typeflag {
    if a.typeflag != b.typeflag && !ignore.filetype {
        comparison = append(comparison, fmt.Sprintf("filetype:%v→%v", a.typeflag, b.typeflag))
    }
    if idmap != nil && !idmap.Empty() {
5
vendor/github.com/containers/storage/drivers/aufs/aufs.go
generated
vendored
@@ -776,3 +776,8 @@ func (a *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
func (a *Driver) SupportsShifting() bool {
    return false
}

// Dedup performs deduplication of the driver's storage.
func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
    return graphdriver.DedupResult{}, nil
}
5
vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
generated
vendored
@@ -673,3 +673,8 @@ func (d *Driver) ListLayers() ([]string, error) {
func (d *Driver) AdditionalImageStores() []string {
    return nil
}

// Dedup performs deduplication of the driver's storage.
func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
    return graphdriver.DedupResult{}, nil
}
2
vendor/github.com/containers/storage/drivers/chown_darwin.go
generated
vendored
@@ -83,7 +83,7 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai
    }
    if uid != int(st.Uid) || gid != int(st.Gid) {
        capability, err := system.Lgetxattr(path, "security.capability")
        if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
        if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform {
            return fmt.Errorf("%s: %w", os.Args[0], err)
        }
2
vendor/github.com/containers/storage/drivers/chown_unix.go
generated
vendored
@@ -101,7 +101,7 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai
    }
    if uid != int(st.Uid) || gid != int(st.Gid) {
        cap, err := system.Lgetxattr(path, "security.capability")
        if err != nil && !errors.Is(err, system.EOPNOTSUPP) && !errors.Is(err, system.EOVERFLOW) && err != system.ErrNotSupportedPlatform {
        if err != nil && !errors.Is(err, system.ENOTSUP) && !errors.Is(err, system.EOVERFLOW) && err != system.ErrNotSupportedPlatform {
            return fmt.Errorf("%s: %w", os.Args[0], err)
        }
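The pattern being normalized across these files, treating “the filesystem does not support xattrs” as a soft failure and anything else as fatal, can be sketched on its own with the same pkg/system helpers that appear in the diff; readCapability is a hypothetical wrapper, not part of c/storage:

package sketch

import (
    "errors"
    "fmt"

    "github.com/containers/storage/pkg/system"
)

// readCapability returns the security.capability xattr, or nil when the
// filesystem does not support xattrs at all.
func readCapability(path string) ([]byte, error) {
    capability, err := system.Lgetxattr(path, "security.capability")
    if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform {
        return nil, fmt.Errorf("reading xattr of %q: %w", path, err)
    }
    return capability, nil // may be nil: xattr absent, or xattrs unsupported
}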
4
vendor/github.com/containers/storage/drivers/copy/copy_linux.go
generated
vendored
@@ -106,7 +106,7 @@ func legacyCopy(srcFile io.Reader, dstFile io.Writer) error {

func copyXattr(srcPath, dstPath, attr string) error {
    data, err := system.Lgetxattr(srcPath, attr)
    if err != nil && !errors.Is(err, unix.EOPNOTSUPP) {
    if err != nil && !errors.Is(err, system.ENOTSUP) {
        return err
    }
    if data != nil {

@@ -279,7 +279,7 @@ func doCopyXattrs(srcPath, dstPath string) error {
    }

    xattrs, err := system.Llistxattr(srcPath)
    if err != nil && !errors.Is(err, unix.EOPNOTSUPP) {
    if err != nil && !errors.Is(err, system.ENOTSUP) {
        return err
    }
24
vendor/github.com/containers/storage/drivers/driver.go
generated
vendored
@@ -8,6 +8,7 @@ import (
    "path/filepath"
    "strings"

    "github.com/containers/storage/internal/dedup"
    "github.com/containers/storage/pkg/archive"
    "github.com/containers/storage/pkg/directory"
    "github.com/containers/storage/pkg/fileutils"

@@ -81,6 +82,23 @@ type ApplyDiffWithDifferOpts struct {
    Flags map[string]interface{}
}

// DedupArgs contains the information to perform storage deduplication.
type DedupArgs struct {
    // Layers is the list of layers to deduplicate.
    Layers []string

    // Options that are passed directly to the pkg/dedup.DedupDirs function.
    Options dedup.DedupOptions
}

// DedupResult contains the result of the Dedup() call.
type DedupResult struct {
    // Deduped represents the total number of bytes saved by deduplication.
    // This value accounts also for all previously deduplicated data, not only the savings
    // from the last run.
    Deduped uint64
}
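A hypothetical caller, holding any graphdriver.Driver from this vendored snapshot, would invoke deduplication like this; dedupLayers and layerIDs are illustrative names, and the zero-valued Options field requests the dedup defaults:

package sketch

import (
    "fmt"

    graphdriver "github.com/containers/storage/drivers"
)

// dedupLayers asks the driver to deduplicate the given layers and reports the savings.
func dedupLayers(drv graphdriver.Driver, layerIDs []string) error {
    res, err := drv.Dedup(graphdriver.DedupArgs{Layers: layerIDs})
    if err != nil {
        return err
    }
    fmt.Printf("deduplicated: %d bytes (cumulative)\n", res.Deduped)
    return nil
}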
// InitFunc initializes the storage driver.
type InitFunc func(homedir string, options Options) (Driver, error)

@@ -139,6 +157,8 @@ type ProtoDriver interface {
    // AdditionalImageStores returns additional image stores supported by the driver
    // This API is experimental and can be changed without bumping the major version number.
    AdditionalImageStores() []string
    // Dedup performs deduplication of the driver's storage.
    Dedup(DedupArgs) (DedupResult, error)
}

// DiffDriver is the interface to use to implement graph diffs

@@ -211,8 +231,8 @@ const (
    // DifferOutputFormatDir means the output is a directory and it will
    // keep the original layout.
    DifferOutputFormatDir = iota
    // DifferOutputFormatFlat will store the files by their checksum, in the form
    // checksum[0:2]/checksum[2:]
    // DifferOutputFormatFlat will store the files by their checksum, per
    // pkg/chunked/internal/composefs.RegularFilePathForValidatedDigest.
    DifferOutputFormatFlat
)
3
vendor/github.com/containers/storage/drivers/overlay/check_116.go
generated
vendored
@@ -10,7 +10,6 @@ import (

    "github.com/containers/storage/pkg/archive"
    "github.com/containers/storage/pkg/system"
    "golang.org/x/sys/unix"
)

func scanForMountProgramIndicators(home string) (detected bool, err error) {

@@ -28,7 +27,7 @@ func scanForMountProgramIndicators(home string) (detected bool, err error) {
    }
    if d.IsDir() {
        xattrs, err := system.Llistxattr(path)
        if err != nil && !errors.Is(err, unix.EOPNOTSUPP) {
        if err != nil && !errors.Is(err, system.ENOTSUP) {
            return err
        }
        for _, xattr := range xattrs {
4
vendor/github.com/containers/storage/drivers/overlay/composefs.go
generated
vendored
@@ -1,4 +1,4 @@
//go:build linux && cgo
//go:build linux

package overlay

@@ -27,7 +27,7 @@ var (
    composeFsHelperErr error

    // skipMountViaFile is used to avoid trying to mount EROFS directly via the file if we already know the current kernel
    // does not support it. Mounting directly via a file will be supported in kernel 6.12.
    // does not support it. Mounting directly via a file is supported from Linux 6.12.
    skipMountViaFile atomic.Bool
)
|
|||
21
vendor/github.com/containers/storage/drivers/overlay/overlay.go
generated
vendored
21
vendor/github.com/containers/storage/drivers/overlay/overlay.go
generated
vendored
|
|
@ -22,6 +22,7 @@ import (
|
|||
graphdriver "github.com/containers/storage/drivers"
|
||||
"github.com/containers/storage/drivers/overlayutils"
|
||||
"github.com/containers/storage/drivers/quota"
|
||||
"github.com/containers/storage/internal/dedup"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/chrootarchive"
|
||||
"github.com/containers/storage/pkg/directory"
|
||||
|
|
@ -1096,6 +1097,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl
|
|||
}
|
||||
|
||||
if d.options.forceMask != nil {
|
||||
st.Mode |= os.ModeDir
|
||||
if err := idtools.SetContainersOverrideXattr(diff, st); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -2740,3 +2742,22 @@ func getMappedMountRoot(path string) string {
|
|||
}
|
||||
return dirName
|
||||
}
|
||||
|
||||
// Dedup performs deduplication of the driver's storage.
|
||||
func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
|
||||
var dirs []string
|
||||
for _, layer := range req.Layers {
|
||||
dir, _, inAdditionalStore := d.dir2(layer, false)
|
||||
if inAdditionalStore {
|
||||
continue
|
||||
}
|
||||
if err := fileutils.Exists(dir); err == nil {
|
||||
dirs = append(dirs, filepath.Join(dir, "diff"))
|
||||
}
|
||||
}
|
||||
r, err := dedup.DedupDirs(dirs, req.Options)
|
||||
if err != nil {
|
||||
return graphdriver.DedupResult{}, err
|
||||
}
|
||||
return graphdriver.DedupResult{Deduped: r.Deduped}, nil
|
||||
}
|
||||
|
|
|
|||
23
vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go
generated
vendored
23
vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go
generated
vendored
|
|
@ -1,23 +0,0 @@
|
|||
//go:build linux && !cgo
|
||||
|
||||
package overlay
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
func openComposefsMount(dataDir string) (int, error) {
|
||||
return 0, fmt.Errorf("composefs not supported on this build")
|
||||
}
|
||||
|
||||
func getComposeFsHelper() (string, error) {
|
||||
return "", fmt.Errorf("composefs not supported on this build")
|
||||
}
|
||||
|
||||
func mountComposefsBlob(dataDir, mountPoint string) error {
|
||||
return fmt.Errorf("composefs not supported on this build")
|
||||
}
|
||||
|
||||
func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error {
|
||||
return fmt.Errorf("composefs not supported on this build")
|
||||
}
|
||||
2
vendor/github.com/containers/storage/drivers/register/register_overlay.go
generated
vendored
2
vendor/github.com/containers/storage/drivers/register/register_overlay.go
generated
vendored
|
|
@ -1,4 +1,4 @@
|
|||
//go:build !exclude_graphdriver_overlay && linux && cgo
|
||||
//go:build !exclude_graphdriver_overlay && linux
|
||||
|
||||
package register
|
||||
|
||||
|
|
|
|||
17
vendor/github.com/containers/storage/drivers/vfs/driver.go
generated
vendored
17
vendor/github.com/containers/storage/drivers/vfs/driver.go
generated
vendored
|
|
@ -10,6 +10,7 @@ import (
|
|||
"strings"
|
||||
|
||||
graphdriver "github.com/containers/storage/drivers"
|
||||
"github.com/containers/storage/internal/dedup"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/directory"
|
||||
"github.com/containers/storage/pkg/fileutils"
|
||||
|
|
@ -348,3 +349,19 @@ func (d *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string,
|
|||
func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) {
|
||||
return d.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel)
|
||||
}
|
||||
|
||||
// Dedup performs deduplication of the driver's storage.
|
||||
func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
|
||||
var dirs []string
|
||||
for _, layer := range req.Layers {
|
||||
dir := d.dir2(layer, false)
|
||||
if err := fileutils.Exists(dir); err == nil {
|
||||
dirs = append(dirs, dir)
|
||||
}
|
||||
}
|
||||
r, err := dedup.DedupDirs(dirs, req.Options)
|
||||
if err != nil {
|
||||
return graphdriver.DedupResult{}, err
|
||||
}
|
||||
return graphdriver.DedupResult{Deduped: r.Deduped}, nil
|
||||
}
|
||||
|
|
|
|||
5
vendor/github.com/containers/storage/drivers/windows/windows.go
generated
vendored
5
vendor/github.com/containers/storage/drivers/windows/windows.go
generated
vendored
|
|
@ -975,6 +975,11 @@ func (d *Driver) AdditionalImageStores() []string {
|
|||
return nil
|
||||
}
|
||||
|
||||
// Dedup performs deduplication of the driver's storage.
|
||||
func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
|
||||
return graphdriver.DedupResult{}, nil
|
||||
}
|
||||
|
||||
// UpdateLayerIDMap changes ownerships in the layer's filesystem tree from
|
||||
// matching those in toContainer to matching those in toHost.
|
||||
func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
|
||||
|
|
|
|||
5
vendor/github.com/containers/storage/drivers/zfs/zfs.go
generated
vendored
5
vendor/github.com/containers/storage/drivers/zfs/zfs.go
generated
vendored
|
|
@ -511,3 +511,8 @@ func (d *Driver) ListLayers() ([]string, error) {
|
|||
func (d *Driver) AdditionalImageStores() []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Dedup performs deduplication of the driver's storage.
|
||||
func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
|
||||
return graphdriver.DedupResult{}, nil
|
||||
}
|
||||
|
|
|
|||
163
vendor/github.com/containers/storage/internal/dedup/dedup.go
generated
vendored
Normal file
163
vendor/github.com/containers/storage/internal/dedup/dedup.go
generated
vendored
Normal file
|
|
@ -0,0 +1,163 @@
|
|||
package dedup
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/crc64"
|
||||
"io/fs"
|
||||
"sync"
|
||||
|
||||
"github.com/opencontainers/selinux/pkg/pwalkdir"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var notSupported = errors.New("reflinks are not supported on this platform")
|
||||
|
||||
const (
|
||||
DedupHashInvalid DedupHashMethod = iota
|
||||
DedupHashCRC
|
||||
DedupHashFileSize
|
||||
DedupHashSHA256
|
||||
)
|
||||
|
||||
type DedupHashMethod int
|
||||
|
||||
type DedupOptions struct {
|
||||
// HashMethod is the hash function to use to find identical files
|
||||
HashMethod DedupHashMethod
|
||||
}
|
||||
|
||||
type DedupResult struct {
|
||||
// Deduped represents the total number of bytes saved by deduplication.
|
||||
// This value accounts also for all previously deduplicated data, not only the savings
|
||||
// from the last run.
|
||||
Deduped uint64
|
||||
}
|
||||
|
||||
func getFileChecksum(hashMethod DedupHashMethod, path string, info fs.FileInfo) (string, error) {
|
||||
switch hashMethod {
|
||||
case DedupHashInvalid:
|
||||
return "", fmt.Errorf("invalid hash method: %v", hashMethod)
|
||||
case DedupHashFileSize:
|
||||
return fmt.Sprintf("%v", info.Size()), nil
|
||||
case DedupHashSHA256:
|
||||
return readAllFile(path, info, func(buf []byte) (string, error) {
|
||||
h := sha256.New()
|
||||
if _, err := h.Write(buf); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(h.Sum(nil)), nil
|
||||
})
|
||||
case DedupHashCRC:
|
||||
return readAllFile(path, info, func(buf []byte) (string, error) {
|
||||
c := crc64.New(crc64.MakeTable(crc64.ECMA))
|
||||
if _, err := c.Write(buf); err != nil {
|
||||
return "", err
|
||||
}
|
||||
bufRet := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(bufRet, c.Sum64())
|
||||
return string(bufRet), nil
|
||||
})
|
||||
default:
|
||||
return "", fmt.Errorf("unknown hash method: %v", hashMethod)
|
||||
}
|
||||
}
|
||||
|
||||
type pathsLocked struct {
|
||||
paths []string
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
func DedupDirs(dirs []string, options DedupOptions) (DedupResult, error) {
|
||||
res := DedupResult{}
|
||||
hashToPaths := make(map[string]*pathsLocked)
|
||||
lock := sync.Mutex{} // protects `hashToPaths` and `res`
|
||||
|
||||
dedup, err := newDedupFiles()
|
||||
if err != nil {
|
||||
return res, err
|
||||
}
|
||||
|
||||
for _, dir := range dirs {
|
||||
logrus.Debugf("Deduping directory %s", dir)
|
||||
if err := pwalkdir.Walk(dir, func(path string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !d.Type().IsRegular() {
|
||||
return nil
|
||||
}
|
||||
info, err := d.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
size := uint64(info.Size())
|
||||
if size == 0 {
|
||||
// do not bother with empty files
|
||||
return nil
|
||||
}
|
||||
|
||||
// the file was already deduplicated
|
||||
if visited, err := dedup.isFirstVisitOf(info); err != nil {
|
||||
return err
|
||||
} else if visited {
|
||||
return nil
|
||||
}
|
||||
|
||||
h, err := getFileChecksum(options.HashMethod, path, info)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lock.Lock()
|
||||
item, foundItem := hashToPaths[h]
|
||||
if !foundItem {
|
||||
item = &pathsLocked{paths: []string{path}}
|
||||
hashToPaths[h] = item
|
||||
lock.Unlock()
|
||||
return nil
|
||||
}
|
||||
item.lock.Lock()
|
||||
lock.Unlock()
|
||||
|
||||
dedupBytes, err := func() (uint64, error) { // function to have a scope for the defer statement
|
||||
defer item.lock.Unlock()
|
||||
|
||||
var dedupBytes uint64
|
||||
for _, src := range item.paths {
|
||||
deduped, err := dedup.dedup(src, path, info)
|
||||
if err == nil && deduped > 0 {
|
||||
logrus.Debugf("Deduped %q -> %q (%d bytes)", src, path, deduped)
|
||||
dedupBytes += deduped
|
||||
break
|
||||
}
|
||||
logrus.Debugf("Failed to deduplicate: %v", err)
|
||||
if errors.Is(err, notSupported) {
|
||||
return dedupBytes, err
|
||||
}
|
||||
}
|
||||
if dedupBytes == 0 {
|
||||
item.paths = append(item.paths, path)
|
||||
}
|
||||
return dedupBytes, nil
|
||||
}()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lock.Lock()
|
||||
res.Deduped += dedupBytes
|
||||
lock.Unlock()
|
||||
return nil
|
||||
}); err != nil {
|
||||
// if reflinks are not supported, return immediately without errors
|
||||
if errors.Is(err, notSupported) {
|
||||
return res, nil
|
||||
}
|
||||
return res, err
|
||||
}
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
139
vendor/github.com/containers/storage/internal/dedup/dedup_linux.go
generated
vendored
Normal file
139
vendor/github.com/containers/storage/internal/dedup/dedup_linux.go
generated
vendored
Normal file
|
|
@ -0,0 +1,139 @@
|
|||
package dedup
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type deviceInodePair struct {
|
||||
dev uint64
|
||||
ino uint64
|
||||
}
|
||||
|
||||
type dedupFiles struct {
|
||||
lock sync.Mutex
|
||||
visitedInodes map[deviceInodePair]struct{}
|
||||
}
|
||||
|
||||
func newDedupFiles() (*dedupFiles, error) {
|
||||
return &dedupFiles{
|
||||
visitedInodes: make(map[deviceInodePair]struct{}),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *dedupFiles) recordInode(dev, ino uint64) (bool, error) {
|
||||
d.lock.Lock()
|
||||
defer d.lock.Unlock()
|
||||
|
||||
di := deviceInodePair{
|
||||
dev: dev,
|
||||
ino: ino,
|
||||
}
|
||||
|
||||
_, visited := d.visitedInodes[di]
|
||||
d.visitedInodes[di] = struct{}{}
|
||||
return visited, nil
|
||||
}
|
||||
|
||||
// isFirstVisitOf records that the file is being processed. Returns true if the file was already visited.
|
||||
func (d *dedupFiles) isFirstVisitOf(fi fs.FileInfo) (bool, error) {
|
||||
st, ok := fi.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return false, fmt.Errorf("unable to get raw syscall.Stat_t data")
|
||||
}
|
||||
return d.recordInode(uint64(st.Dev), st.Ino)
|
||||
}
|
||||
|
||||
// dedup deduplicates the file at src path to dst path
|
||||
func (d *dedupFiles) dedup(src, dst string, fiDst fs.FileInfo) (uint64, error) {
|
||||
srcFile, err := os.OpenFile(src, os.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to open source file: %w", err)
|
||||
}
|
||||
defer srcFile.Close()
|
||||
|
||||
dstFile, err := os.OpenFile(dst, os.O_WRONLY, 0)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to open destination file: %w", err)
|
||||
}
|
||||
defer dstFile.Close()
|
||||
|
||||
stSrc, err := srcFile.Stat()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to stat source file: %w", err)
|
||||
}
|
||||
sSrc, ok := stSrc.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("unable to get raw syscall.Stat_t data")
|
||||
}
|
||||
sDest, ok := fiDst.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("unable to get raw syscall.Stat_t data")
|
||||
}
|
||||
if sSrc.Dev == sDest.Dev && sSrc.Ino == sDest.Ino {
|
||||
// same inode, we are dealing with a hard link, no need to deduplicate
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
value := unix.FileDedupeRange{
|
||||
Src_offset: 0,
|
||||
Src_length: uint64(stSrc.Size()),
|
||||
Info: []unix.FileDedupeRangeInfo{
|
||||
{
|
||||
Dest_fd: int64(dstFile.Fd()),
|
||||
Dest_offset: 0,
|
||||
},
|
||||
},
|
||||
}
|
||||
err = unix.IoctlFileDedupeRange(int(srcFile.Fd()), &value)
|
||||
if err == nil {
|
||||
return uint64(value.Info[0].Bytes_deduped), nil
|
||||
}
|
||||
|
||||
if errors.Is(err, unix.ENOTSUP) {
|
||||
return 0, notSupported
|
||||
}
|
||||
return 0, fmt.Errorf("failed to clone file %q: %w", src, err)
|
||||
}
|
||||
|
||||
func readAllFile(path string, info fs.FileInfo, fn func([]byte) (string, error)) (string, error) {
|
||||
size := info.Size()
|
||||
if size == 0 {
|
||||
return fn(nil)
|
||||
}
|
||||
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
if size < 4096 {
|
||||
// small file, read it all
|
||||
data := make([]byte, size)
|
||||
_, err = io.ReadFull(file, data)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return fn(data)
|
||||
}
|
||||
|
||||
mmap, err := unix.Mmap(int(file.Fd()), 0, int(size), unix.PROT_READ, unix.MAP_PRIVATE)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to mmap file: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
_ = unix.Munmap(mmap)
|
||||
}()
|
||||
|
||||
_ = unix.Madvise(mmap, unix.MADV_SEQUENTIAL)
|
||||
|
||||
return fn(mmap)
|
||||
}
|
||||
27
vendor/github.com/containers/storage/internal/dedup/dedup_unsupported.go
generated
vendored
Normal file
27
vendor/github.com/containers/storage/internal/dedup/dedup_unsupported.go
generated
vendored
Normal file
|
|
@ -0,0 +1,27 @@
|
|||
//go:build !linux
|
||||
|
||||
package dedup
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
)
|
||||
|
||||
type dedupFiles struct{}
|
||||
|
||||
func newDedupFiles() (*dedupFiles, error) {
|
||||
return nil, notSupported
|
||||
}
|
||||
|
||||
// isFirstVisitOf records that the file is being processed. Returns true if the file was already visited.
|
||||
func (d *dedupFiles) isFirstVisitOf(fi fs.FileInfo) (bool, error) {
|
||||
return false, notSupported
|
||||
}
|
||||
|
||||
// dedup deduplicates the file at src path to dst path
|
||||
func (d *dedupFiles) dedup(src, dst string, fiDst fs.FileInfo) (uint64, error) {
|
||||
return 0, notSupported
|
||||
}
|
||||
|
||||
func readAllFile(path string, info fs.FileInfo, fn func([]byte) (string, error)) (string, error) {
|
||||
return "", notSupported
|
||||
}
|
||||
8
vendor/github.com/containers/storage/layers.go
generated
vendored
8
vendor/github.com/containers/storage/layers.go
generated
vendored
|
|
@ -336,6 +336,9 @@ type rwLayerStore interface {
|
|||
|
||||
// Clean up unreferenced layers
|
||||
GarbageCollect() error
|
||||
|
||||
// Dedup deduplicates layers in the store.
|
||||
dedup(drivers.DedupArgs) (drivers.DedupResult, error)
|
||||
}
|
||||
|
||||
type multipleLockFile struct {
|
||||
|
|
@ -2601,6 +2604,11 @@ func (r *layerStore) LayersByTOCDigest(d digest.Digest) ([]Layer, error) {
|
|||
return r.layersByDigestMap(r.bytocsum, d)
|
||||
}
|
||||
|
||||
// Requires startWriting.
|
||||
func (r *layerStore) dedup(req drivers.DedupArgs) (drivers.DedupResult, error) {
|
||||
return r.driver.Dedup(req)
|
||||
}
|
||||
|
||||
func closeAll(closes ...func() error) (rErr error) {
|
||||
for _, f := range closes {
|
||||
if err := f(); err != nil {
|
||||
|
|
|
|||
55
vendor/github.com/containers/storage/pkg/archive/archive.go
generated
vendored
55
vendor/github.com/containers/storage/pkg/archive/archive.go
generated
vendored
|
|
@ -78,6 +78,7 @@ const (
|
|||
windows = "windows"
|
||||
darwin = "darwin"
|
||||
freebsd = "freebsd"
|
||||
linux = "linux"
|
||||
)
|
||||
|
||||
var xattrsToIgnore = map[string]interface{}{
|
||||
|
|
@ -427,7 +428,7 @@ func readSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
|
|||
}
|
||||
for _, xattr := range []string{"security.capability", "security.ima"} {
|
||||
capability, err := system.Lgetxattr(path, xattr)
|
||||
if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
|
||||
if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform {
|
||||
return fmt.Errorf("failed to read %q attribute from %q: %w", xattr, path, err)
|
||||
}
|
||||
if capability != nil {
|
||||
|
|
@ -440,7 +441,7 @@ func readSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
|
|||
// readUserXattrToTarHeader reads user.* xattr from filesystem to a tar header
|
||||
func readUserXattrToTarHeader(path string, hdr *tar.Header) error {
|
||||
xattrs, err := system.Llistxattr(path)
|
||||
if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
|
||||
if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform {
|
||||
return err
|
||||
}
|
||||
for _, key := range xattrs {
|
||||
|
|
@ -655,12 +656,20 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
|
|||
// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
|
||||
hdrInfo := hdr.FileInfo()
|
||||
|
||||
typeFlag := hdr.Typeflag
|
||||
mask := hdrInfo.Mode()
|
||||
|
||||
// update also the implementation of ForceMask in pkg/chunked
|
||||
if forceMask != nil {
|
||||
mask = *forceMask
|
||||
// If we have a forceMask, force the real type to either be a directory,
|
||||
// a link, or a regular file.
|
||||
if typeFlag != tar.TypeDir && typeFlag != tar.TypeSymlink && typeFlag != tar.TypeLink {
|
||||
typeFlag = tar.TypeReg
|
||||
}
|
||||
}
|
||||
|
||||
switch hdr.Typeflag {
|
||||
switch typeFlag {
|
||||
case tar.TypeDir:
|
||||
// Create directory unless it exists as a directory already.
|
||||
// In that case we just want to merge the two
|
||||
|
|
@ -728,16 +737,6 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
|
|||
return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
|
||||
}
|
||||
|
||||
if forceMask != nil && (hdr.Typeflag != tar.TypeSymlink || runtime.GOOS == "darwin") {
|
||||
value := idtools.Stat{
|
||||
IDs: idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid},
|
||||
Mode: hdrInfo.Mode() & 0o7777,
|
||||
}
|
||||
if err := idtools.SetContainersOverrideXattr(path, value); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Lchown is not supported on Windows.
|
||||
if Lchown && runtime.GOOS != windows {
|
||||
if chownOpts == nil {
|
||||
|
|
@ -793,18 +792,30 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
|
|||
continue
|
||||
}
|
||||
if err := system.Lsetxattr(path, xattrKey, []byte(value), 0); err != nil {
|
||||
if errors.Is(err, syscall.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) {
|
||||
// We ignore errors here because not all graphdrivers support
|
||||
// xattrs *cough* old versions of AUFS *cough*. However only
|
||||
// ENOTSUP should be emitted in that case, otherwise we still
|
||||
// bail. We also ignore EPERM errors if we are running in a
|
||||
// user namespace.
|
||||
if errors.Is(err, system.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) {
|
||||
// Ignore specific error cases:
|
||||
// - ENOTSUP: Expected for graphdrivers lacking extended attribute support:
|
||||
// - Legacy AUFS versions
|
||||
// - FreeBSD with unsupported namespaces (trusted, security)
|
||||
// - EPERM: Expected when operating within a user namespace
|
||||
// All other errors will cause a failure.
|
||||
errs = append(errs, err.Error())
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if forceMask != nil && (typeFlag == tar.TypeReg || typeFlag == tar.TypeDir || runtime.GOOS == "darwin") {
|
||||
value := idtools.Stat{
|
||||
IDs: idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid},
|
||||
Mode: hdrInfo.Mode(),
|
||||
Major: int(hdr.Devmajor),
|
||||
Minor: int(hdr.Devminor),
|
||||
}
|
||||
if err := idtools.SetContainersOverrideXattr(path, value); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// We defer setting flags on directories until the end of
|
||||
|
|
@ -1149,11 +1160,11 @@ loop:
|
|||
}
|
||||
|
||||
if options.ForceMask != nil {
|
||||
value := idtools.Stat{Mode: 0o755}
|
||||
value := idtools.Stat{Mode: os.ModeDir | os.FileMode(0o755)}
|
||||
if rootHdr != nil {
|
||||
value.IDs.UID = rootHdr.Uid
|
||||
value.IDs.GID = rootHdr.Gid
|
||||
value.Mode = os.FileMode(rootHdr.Mode)
|
||||
value.Mode = os.ModeDir | os.FileMode(rootHdr.Mode)
|
||||
}
|
||||
if err := idtools.SetContainersOverrideXattr(dest, value); err != nil {
|
||||
return err
|
||||
|
|
@ -1379,7 +1390,7 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id
|
|||
uid, gid = hdr.Uid, hdr.Gid
|
||||
if xstat, ok := hdr.PAXRecords[PaxSchilyXattr+idtools.ContainersOverrideXattr]; ok {
|
||||
attrs := strings.Split(string(xstat), ":")
|
||||
if len(attrs) == 3 {
|
||||
if len(attrs) >= 3 {
|
||||
val, err := strconv.ParseUint(attrs[0], 10, 32)
|
||||
if err != nil {
|
||||
uid = int(val)
|
||||
|
|
|
|||
3
vendor/github.com/containers/storage/pkg/archive/changes.go
generated
vendored
3
vendor/github.com/containers/storage/pkg/archive/changes.go
generated
vendored
|
|
@ -270,6 +270,7 @@ type FileInfo struct {
|
|||
capability []byte
|
||||
added bool
|
||||
xattrs map[string]string
|
||||
target string
|
||||
}
|
||||
|
||||
// LookUp looks up the file information of a file.
|
||||
|
|
@ -336,6 +337,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
|
|||
// back mtime
|
||||
if statDifferent(oldStat, oldInfo, newStat, info) ||
|
||||
!bytes.Equal(oldChild.capability, newChild.capability) ||
|
||||
oldChild.target != newChild.target ||
|
||||
!reflect.DeepEqual(oldChild.xattrs, newChild.xattrs) {
|
||||
change := Change{
|
||||
Path: newChild.path(),
|
||||
|
|
@ -390,6 +392,7 @@ func newRootFileInfo(idMappings *idtools.IDMappings) *FileInfo {
|
|||
name: string(os.PathSeparator),
|
||||
idMappings: idMappings,
|
||||
children: make(map[string]*FileInfo),
|
||||
target: "",
|
||||
}
|
||||
return root
|
||||
}
|
||||
|
|
|
|||
11
vendor/github.com/containers/storage/pkg/archive/changes_linux.go
generated
vendored
11
vendor/github.com/containers/storage/pkg/archive/changes_linux.go
generated
vendored
|
|
@ -79,6 +79,7 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
|
|||
children: make(map[string]*FileInfo),
|
||||
parent: parent,
|
||||
idMappings: root.idMappings,
|
||||
target: "",
|
||||
}
|
||||
cpath := filepath.Join(dir, path)
|
||||
stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
|
||||
|
|
@ -87,11 +88,11 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
|
|||
}
|
||||
info.stat = stat
|
||||
info.capability, err = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
|
||||
if err != nil && !errors.Is(err, system.EOPNOTSUPP) {
|
||||
if err != nil && !errors.Is(err, system.ENOTSUP) {
|
||||
return err
|
||||
}
|
||||
xattrs, err := system.Llistxattr(cpath)
|
||||
if err != nil && !errors.Is(err, system.EOPNOTSUPP) {
|
||||
if err != nil && !errors.Is(err, system.ENOTSUP) {
|
||||
return err
|
||||
}
|
||||
for _, key := range xattrs {
|
||||
|
|
@ -110,6 +111,12 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
|
|||
info.xattrs[key] = string(value)
|
||||
}
|
||||
}
|
||||
if fi.Mode()&os.ModeSymlink != 0 {
|
||||
info.target, err = os.Readlink(cpath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
parent.children[info.name] = info
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
2
vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
generated
vendored
2
vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
generated
vendored
|
|
@ -47,7 +47,7 @@ func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error
|
|||
// This should be used to prevent a potential attacker from manipulating `dest`
|
||||
// such that it would provide access to files outside of `dest` through things
|
||||
// like symlinks. Normally `ResolveSymlinksInScope` would handle this, however
|
||||
// sanitizing symlinks in this manner is inherrently racey:
|
||||
// sanitizing symlinks in this manner is inherently racey:
|
||||
// ref: CVE-2018-15664
|
||||
func UntarWithRoot(tarArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
|
||||
return untarHandler(tarArchive, dest, options, true, root)
|
||||
|
|
|
|||
12
vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
generated
vendored
12
vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
generated
vendored
|
|
@ -16,7 +16,7 @@ import (
|
|||
|
||||
storage "github.com/containers/storage"
|
||||
graphdriver "github.com/containers/storage/drivers"
|
||||
"github.com/containers/storage/pkg/chunked/internal"
|
||||
"github.com/containers/storage/pkg/chunked/internal/minimal"
|
||||
"github.com/containers/storage/pkg/ioutils"
|
||||
"github.com/docker/go-units"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
|
|
@ -710,7 +710,7 @@ func prepareCacheFile(manifest []byte, format graphdriver.DifferOutputFormat) ([
|
|||
switch format {
|
||||
case graphdriver.DifferOutputFormatDir:
|
||||
case graphdriver.DifferOutputFormatFlat:
|
||||
entries, err = makeEntriesFlat(entries)
|
||||
entries, err = makeEntriesFlat(entries, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -848,12 +848,12 @@ func (c *layersCache) findFileInOtherLayers(file *fileMetadata, useHardLinks boo
|
|||
return "", "", nil
|
||||
}
|
||||
|
||||
func (c *layersCache) findChunkInOtherLayers(chunk *internal.FileMetadata) (string, string, int64, error) {
|
||||
func (c *layersCache) findChunkInOtherLayers(chunk *minimal.FileMetadata) (string, string, int64, error) {
|
||||
return c.findDigestInternal(chunk.ChunkDigest)
|
||||
}
|
||||
|
||||
func unmarshalToc(manifest []byte) (*internal.TOC, error) {
|
||||
var toc internal.TOC
|
||||
func unmarshalToc(manifest []byte) (*minimal.TOC, error) {
|
||||
var toc minimal.TOC
|
||||
|
||||
iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
|
||||
|
||||
|
|
@ -864,7 +864,7 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
|
|||
|
||||
case "entries":
|
||||
for iter.ReadArray() {
|
||||
var m internal.FileMetadata
|
||||
var m minimal.FileMetadata
|
||||
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
|
||||
switch strings.ToLower(field) {
|
||||
case "type":
|
||||
|
|
|
|||
18
vendor/github.com/containers/storage/pkg/chunked/compression.go
generated
vendored
18
vendor/github.com/containers/storage/pkg/chunked/compression.go
generated
vendored
|
|
@ -4,18 +4,18 @@ import (
|
|||
"io"
|
||||
|
||||
"github.com/containers/storage/pkg/chunked/compressor"
|
||||
"github.com/containers/storage/pkg/chunked/internal"
|
||||
"github.com/containers/storage/pkg/chunked/internal/minimal"
|
||||
)
|
||||
|
||||
const (
|
||||
TypeReg = internal.TypeReg
|
||||
TypeChunk = internal.TypeChunk
|
||||
TypeLink = internal.TypeLink
|
||||
TypeChar = internal.TypeChar
|
||||
TypeBlock = internal.TypeBlock
|
||||
TypeDir = internal.TypeDir
|
||||
TypeFifo = internal.TypeFifo
|
||||
TypeSymlink = internal.TypeSymlink
|
||||
TypeReg = minimal.TypeReg
|
||||
TypeChunk = minimal.TypeChunk
|
||||
TypeLink = minimal.TypeLink
|
||||
TypeChar = minimal.TypeChar
|
||||
TypeBlock = minimal.TypeBlock
|
||||
TypeDir = minimal.TypeDir
|
||||
TypeFifo = minimal.TypeFifo
|
||||
TypeSymlink = minimal.TypeSymlink
|
||||
)
|
||||
|
||||
// ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
|
||||
|
|
|
|||
189
vendor/github.com/containers/storage/pkg/chunked/compression_linux.go
generated
vendored
189
vendor/github.com/containers/storage/pkg/chunked/compression_linux.go
generated
vendored
|
|
@ -10,7 +10,7 @@ import (
|
|||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/containers/storage/pkg/chunked/internal"
|
||||
"github.com/containers/storage/pkg/chunked/internal/minimal"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/klauspost/pgzip"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
|
|
@ -20,6 +20,12 @@ import (
|
|||
expMaps "golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
const (
|
||||
// maxTocSize is the maximum size of a blob that we will attempt to process.
|
||||
// It is used to prevent DoS attacks from layers that embed a very large TOC file.
|
||||
maxTocSize = (1 << 20) * 150
|
||||
)
|
||||
|
||||
var typesToTar = map[string]byte{
|
||||
TypeReg: tar.TypeReg,
|
||||
TypeLink: tar.TypeLink,
|
||||
|
|
@ -38,33 +44,35 @@ func typeToTarType(t string) (byte, error) {
|
|||
return r, nil
|
||||
}
|
||||
|
||||
// readEstargzChunkedManifest reads the estargz manifest from the seekable stream blobStream.
|
||||
// It may return an error matching ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert.
|
||||
func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, tocDigest digest.Digest) ([]byte, int64, error) {
|
||||
// information on the format here https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md
|
||||
footerSize := int64(51)
|
||||
if blobSize <= footerSize {
|
||||
return nil, 0, errors.New("blob too small")
|
||||
}
|
||||
chunk := ImageSourceChunk{
|
||||
Offset: uint64(blobSize - footerSize),
|
||||
Length: uint64(footerSize),
|
||||
}
|
||||
parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
var reader io.ReadCloser
|
||||
select {
|
||||
case r := <-parts:
|
||||
reader = r
|
||||
case err := <-errs:
|
||||
return nil, 0, err
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
footer := make([]byte, footerSize)
|
||||
if _, err := io.ReadFull(reader, footer); err != nil {
|
||||
streamsOrErrors, err := getBlobAt(blobStream, ImageSourceChunk{Offset: uint64(blobSize - footerSize), Length: uint64(footerSize)})
|
||||
if err != nil {
|
||||
var badRequestErr ErrBadRequest
|
||||
if errors.As(err, &badRequestErr) {
|
||||
err = errFallbackCanConvert{newErrFallbackToOrdinaryLayerDownload(err)}
|
||||
}
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
for soe := range streamsOrErrors {
|
||||
if soe.stream != nil {
|
||||
_, err = io.ReadFull(soe.stream, footer)
|
||||
_ = soe.stream.Close()
|
||||
}
|
||||
if soe.err != nil && err == nil {
|
||||
err = soe.err
|
||||
}
|
||||
}
|
||||
|
||||
/* Read the ToC offset:
|
||||
- 10 bytes gzip header
|
||||
- 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ"))
|
||||
|
|
@ -81,48 +89,59 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
|
|||
|
||||
size := int64(blobSize - footerSize - tocOffset)
|
||||
// set a reasonable limit
|
||||
if size > (1<<20)*50 {
|
||||
return nil, 0, errors.New("manifest too big")
|
||||
if size > maxTocSize {
|
||||
// Not errFallbackCanConvert: we would still use too much memory.
|
||||
return nil, 0, newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("estargz manifest too big to process in memory (%d bytes)", size))
|
||||
}
|
||||
|
||||
chunk = ImageSourceChunk{
|
||||
Offset: uint64(tocOffset),
|
||||
Length: uint64(size),
|
||||
}
|
||||
parts, errs, err = blobStream.GetBlobAt([]ImageSourceChunk{chunk})
|
||||
streamsOrErrors, err = getBlobAt(blobStream, ImageSourceChunk{Offset: uint64(tocOffset), Length: uint64(size)})
|
||||
if err != nil {
|
||||
var badRequestErr ErrBadRequest
|
||||
if errors.As(err, &badRequestErr) {
|
||||
err = errFallbackCanConvert{newErrFallbackToOrdinaryLayerDownload(err)}
|
||||
}
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
var tocReader io.ReadCloser
|
||||
select {
|
||||
case r := <-parts:
|
||||
tocReader = r
|
||||
case err := <-errs:
|
||||
return nil, 0, err
|
||||
}
|
||||
defer tocReader.Close()
|
||||
var manifestUncompressed []byte
|
||||
|
||||
r, err := pgzip.NewReader(tocReader)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
defer r.Close()
|
||||
for soe := range streamsOrErrors {
|
||||
if soe.stream != nil {
|
||||
err1 := func() error {
|
||||
defer soe.stream.Close()
|
||||
|
||||
aTar := archivetar.NewReader(r)
|
||||
r, err := pgzip.NewReader(soe.stream)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
header, err := aTar.Next()
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
// set a reasonable limit
|
||||
if header.Size > (1<<20)*50 {
|
||||
return nil, 0, errors.New("manifest too big")
|
||||
}
|
||||
aTar := archivetar.NewReader(r)
|
||||
|
||||
manifestUncompressed := make([]byte, header.Size)
|
||||
if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil {
|
||||
return nil, 0, err
|
||||
header, err := aTar.Next()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// set a reasonable limit
|
||||
if header.Size > maxTocSize {
|
||||
return errors.New("manifest too big")
|
||||
}
|
||||
|
||||
manifestUncompressed = make([]byte, header.Size)
|
||||
if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}()
|
||||
if err == nil {
|
||||
err = err1
|
||||
}
|
||||
} else if err == nil {
|
||||
err = soe.err
|
||||
}
|
||||
}
|
||||
if manifestUncompressed == nil {
|
||||
return nil, 0, errors.New("manifest not found")
|
||||
}
|
||||
|
||||
manifestDigester := digest.Canonical.Digester()
|
||||
|
|
@ -140,10 +159,11 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
|
|||
|
||||
// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream.
|
||||
// Returns (manifest blob, parsed manifest, tar-split blob or nil, manifest offset).
|
||||
func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) ([]byte, *internal.TOC, []byte, int64, error) {
|
||||
offsetMetadata := annotations[internal.ManifestInfoKey]
|
||||
// It may return an error matching ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert.
|
||||
func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) (_ []byte, _ *minimal.TOC, _ []byte, _ int64, retErr error) {
|
||||
offsetMetadata := annotations[minimal.ManifestInfoKey]
|
||||
if offsetMetadata == "" {
|
||||
return nil, nil, nil, 0, fmt.Errorf("%q annotation missing", internal.ManifestInfoKey)
|
||||
return nil, nil, nil, 0, fmt.Errorf("%q annotation missing", minimal.ManifestInfoKey)
|
||||
}
|
||||
var manifestChunk ImageSourceChunk
|
||||
var manifestLengthUncompressed, manifestType uint64
|
||||
|
|
@ -153,48 +173,59 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
|
|||
// The tarSplit… values are valid if tarSplitChunk.Offset > 0
|
||||
var tarSplitChunk ImageSourceChunk
|
||||
var tarSplitLengthUncompressed uint64
|
||||
if tarSplitInfoKeyAnnotation, found := annotations[internal.TarSplitInfoKey]; found {
|
||||
if tarSplitInfoKeyAnnotation, found := annotations[minimal.TarSplitInfoKey]; found {
|
||||
if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &tarSplitChunk.Offset, &tarSplitChunk.Length, &tarSplitLengthUncompressed); err != nil {
|
||||
return nil, nil, nil, 0, err
|
||||
}
|
||||
}
|
||||
|
||||
if manifestType != internal.ManifestTypeCRFS {
|
||||
if manifestType != minimal.ManifestTypeCRFS {
|
||||
return nil, nil, nil, 0, errors.New("invalid manifest type")
|
||||
}
|
||||
|
||||
// set a reasonable limit
|
||||
if manifestChunk.Length > (1<<20)*50 {
|
||||
return nil, nil, nil, 0, errors.New("manifest too big")
|
||||
if manifestChunk.Length > maxTocSize {
|
||||
// Not errFallbackCanConvert: we would still use too much memory.
|
||||
return nil, nil, nil, 0, newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("zstd:chunked manifest too big to process in memory (%d bytes compressed)", manifestChunk.Length))
|
||||
}
|
||||
if manifestLengthUncompressed > (1<<20)*50 {
|
||||
return nil, nil, nil, 0, errors.New("manifest too big")
|
||||
if manifestLengthUncompressed > maxTocSize {
|
||||
// Not errFallbackCanConvert: we would still use too much memory.
|
||||
return nil, nil, nil, 0, newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("zstd:chunked manifest too big to process in memory (%d bytes uncompressed)", manifestLengthUncompressed))
|
||||
}
|
||||
|
||||
chunks := []ImageSourceChunk{manifestChunk}
|
||||
if tarSplitChunk.Offset > 0 {
|
||||
chunks = append(chunks, tarSplitChunk)
|
||||
}
|
||||
parts, errs, err := blobStream.GetBlobAt(chunks)
|
||||
|
||||
streamsOrErrors, err := getBlobAt(blobStream, chunks...)
|
||||
if err != nil {
|
||||
var badRequestErr ErrBadRequest
|
||||
if errors.As(err, &badRequestErr) {
|
||||
err = errFallbackCanConvert{newErrFallbackToOrdinaryLayerDownload(err)}
|
||||
}
|
||||
return nil, nil, nil, 0, err
|
||||
}
|
||||
|
||||
readBlob := func(len uint64) ([]byte, error) {
|
||||
var reader io.ReadCloser
|
||||
select {
|
||||
case r := <-parts:
|
||||
reader = r
|
||||
case err := <-errs:
|
||||
return nil, err
|
||||
defer func() {
|
||||
err := ensureAllBlobsDone(streamsOrErrors)
|
||||
if retErr == nil {
|
||||
retErr = err
|
||||
}
|
||||
}()
|
||||
|
||||
readBlob := func(len uint64) ([]byte, error) {
|
||||
soe, ok := <-streamsOrErrors
|
||||
if !ok {
|
||||
return nil, errors.New("stream closed")
|
||||
}
|
||||
if soe.err != nil {
|
||||
return nil, soe.err
|
||||
}
|
||||
defer soe.stream.Close()
|
||||
|
||||
blob := make([]byte, len)
|
||||
if _, err := io.ReadFull(reader, blob); err != nil {
|
||||
reader.Close()
|
||||
return nil, err
|
||||
}
|
||||
if err := reader.Close(); err != nil {
|
||||
if _, err := io.ReadFull(soe.stream, blob); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return blob, nil
|
||||
|
|
@ -217,7 +248,7 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
|
|||
var decodedTarSplit []byte = nil
|
||||
if toc.TarSplitDigest != "" {
|
||||
if tarSplitChunk.Offset <= 0 {
|
||||
return nil, nil, nil, 0, fmt.Errorf("TOC requires a tar-split, but the %s annotation does not describe a position", internal.TarSplitInfoKey)
|
||||
return nil, nil, nil, 0, fmt.Errorf("TOC requires a tar-split, but the %s annotation does not describe a position", minimal.TarSplitInfoKey)
|
||||
}
|
||||
tarSplit, err := readBlob(tarSplitChunk.Length)
|
||||
if err != nil {
|
||||
|
|
@ -247,11 +278,11 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
|
|||
}
|
||||
|
||||
// ensureTOCMatchesTarSplit validates that toc and tarSplit contain _exactly_ the same entries.
|
||||
func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error {
|
||||
pendingFiles := map[string]*internal.FileMetadata{} // Name -> an entry in toc.Entries
|
||||
func ensureTOCMatchesTarSplit(toc *minimal.TOC, tarSplit []byte) error {
|
||||
pendingFiles := map[string]*minimal.FileMetadata{} // Name -> an entry in toc.Entries
|
||||
for i := range toc.Entries {
|
||||
e := &toc.Entries[i]
|
||||
if e.Type != internal.TypeChunk {
|
||||
if e.Type != minimal.TypeChunk {
|
||||
if _, ok := pendingFiles[e.Name]; ok {
|
||||
return fmt.Errorf("TOC contains duplicate entries for path %q", e.Name)
|
||||
}
|
||||
|
|
@ -266,7 +297,7 @@ func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error {
|
|||
return fmt.Errorf("tar-split contains an entry for %q missing in TOC", hdr.Name)
|
||||
}
|
||||
delete(pendingFiles, hdr.Name)
|
||||
expected, err := internal.NewFileMetadata(hdr)
|
||||
expected, err := minimal.NewFileMetadata(hdr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("determining expected metadata for %q: %w", hdr.Name, err)
|
||||
}
|
||||
|
|
@ -347,8 +378,8 @@ func ensureTimePointersMatch(a, b *time.Time) error {
|
|||
|
||||
// ensureFileMetadataAttributesMatch ensures that a and b match in file attributes (it ignores entries relevant to locating data
|
||||
// in the tar stream or matching contents)
|
||||
func ensureFileMetadataAttributesMatch(a, b *internal.FileMetadata) error {
|
||||
// Keep this in sync with internal.FileMetadata!
|
||||
func ensureFileMetadataAttributesMatch(a, b *minimal.FileMetadata) error {
|
||||
// Keep this in sync with minimal.FileMetadata!
|
||||
|
||||
if a.Type != b.Type {
|
||||
return fmt.Errorf("mismatch of Type: %q != %q", a.Type, b.Type)
|
||||
|
|
|
|||
24
vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
generated
vendored
24
vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
generated
vendored
|
|
@ -9,7 +9,7 @@ import (
|
|||
"bytes"
|
||||
"io"
|
||||
|
||||
"github.com/containers/storage/pkg/chunked/internal"
|
||||
"github.com/containers/storage/pkg/chunked/internal/minimal"
|
||||
"github.com/containers/storage/pkg/ioutils"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/opencontainers/go-digest"
|
||||
|
|
@ -213,7 +213,7 @@ func newTarSplitData(level int) (*tarSplitData, error) {
|
|||
compressed := bytes.NewBuffer(nil)
|
||||
digester := digest.Canonical.Digester()
|
||||
|
||||
zstdWriter, err := internal.ZstdWriterWithLevel(io.MultiWriter(compressed, digester.Hash()), level)
|
||||
zstdWriter, err := minimal.ZstdWriterWithLevel(io.MultiWriter(compressed, digester.Hash()), level)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -254,7 +254,7 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
|
|||
|
||||
buf := make([]byte, 4096)
|
||||
|
||||
zstdWriter, err := internal.ZstdWriterWithLevel(dest, level)
|
||||
zstdWriter, err := minimal.ZstdWriterWithLevel(dest, level)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -276,7 +276,7 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
|
|||
return offset, nil
|
||||
}
|
||||
|
||||
var metadata []internal.FileMetadata
|
||||
var metadata []minimal.FileMetadata
|
||||
for {
|
||||
hdr, err := tr.Next()
|
||||
if err != nil {
|
||||
|
|
@ -341,9 +341,9 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
|
|||
|
||||
chunkSize := rcReader.WrittenOut - lastChunkOffset
|
||||
if chunkSize > 0 {
|
||||
chunkType := internal.ChunkTypeData
|
||||
chunkType := minimal.ChunkTypeData
|
||||
if rcReader.IsLastChunkZeros {
|
||||
chunkType = internal.ChunkTypeZeros
|
||||
chunkType = minimal.ChunkTypeZeros
|
||||
}
|
||||
|
||||
chunks = append(chunks, chunk{
|
||||
|
|
@ -368,17 +368,17 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
|
|||
}
|
||||
}
|
||||
|
||||
mainEntry, err := internal.NewFileMetadata(hdr)
|
||||
mainEntry, err := minimal.NewFileMetadata(hdr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mainEntry.Digest = checksum
|
||||
mainEntry.Offset = startOffset
|
||||
mainEntry.EndOffset = lastOffset
|
||||
entries := []internal.FileMetadata{mainEntry}
|
||||
entries := []minimal.FileMetadata{mainEntry}
|
||||
for i := 1; i < len(chunks); i++ {
|
||||
entries = append(entries, internal.FileMetadata{
|
||||
Type: internal.TypeChunk,
|
||||
entries = append(entries, minimal.FileMetadata{
|
||||
Type: minimal.TypeChunk,
|
||||
Name: hdr.Name,
|
||||
ChunkOffset: chunks[i].ChunkOffset,
|
||||
})
|
||||
|
|
@ -424,13 +424,13 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
|
|||
}
|
||||
tarSplitData.zstd = nil
|
||||
|
||||
ts := internal.TarSplitData{
|
||||
ts := minimal.TarSplitData{
|
||||
Data: tarSplitData.compressed.Bytes(),
|
||||
Digest: tarSplitData.digester.Digest(),
|
||||
UncompressedSize: tarSplitData.uncompressedCounter.Count,
|
||||
}
|
||||
|
||||
return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, level)
|
||||
return minimal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, level)
|
||||
}
|
||||
|
||||
type zstdChunkedWriter struct {
|
||||
|
|
|
|||
66
vendor/github.com/containers/storage/pkg/chunked/dump/dump.go
generated
vendored
66
vendor/github.com/containers/storage/pkg/chunked/dump/dump.go
generated
vendored
|
|
@ -9,10 +9,11 @@ import (
|
|||
"io"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/containers/storage/pkg/chunked/internal"
|
||||
"github.com/containers/storage/pkg/chunked/internal/minimal"
|
||||
storagePath "github.com/containers/storage/pkg/chunked/internal/path"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
|
|
@ -85,17 +86,17 @@ func escapedOptional(val []byte, escape int) string {
|
|||
|
||||
func getStMode(mode uint32, typ string) (uint32, error) {
|
||||
switch typ {
|
||||
case internal.TypeReg, internal.TypeLink:
|
||||
case minimal.TypeReg, minimal.TypeLink:
|
||||
mode |= unix.S_IFREG
|
||||
case internal.TypeChar:
|
||||
case minimal.TypeChar:
|
||||
mode |= unix.S_IFCHR
|
||||
case internal.TypeBlock:
|
||||
case minimal.TypeBlock:
|
||||
mode |= unix.S_IFBLK
|
||||
case internal.TypeDir:
|
||||
case minimal.TypeDir:
|
||||
mode |= unix.S_IFDIR
|
||||
case internal.TypeFifo:
|
||||
case minimal.TypeFifo:
|
||||
mode |= unix.S_IFIFO
|
||||
case internal.TypeSymlink:
|
||||
case minimal.TypeSymlink:
|
||||
mode |= unix.S_IFLNK
|
||||
default:
|
||||
return 0, fmt.Errorf("unknown type %s", typ)
|
||||
|
|
@ -103,24 +104,14 @@ func getStMode(mode uint32, typ string) (uint32, error) {
|
|||
return mode, nil
|
||||
}
|
||||
|
||||
func sanitizeName(name string) string {
|
||||
path := filepath.Clean(name)
|
||||
if path == "." {
|
||||
path = "/"
|
||||
} else if path[0] != '/' {
|
||||
path = "/" + path
|
||||
}
|
||||
return path
|
||||
}
|
||||
|
||||
func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error {
|
||||
path := sanitizeName(entry.Name)
|
||||
func dumpNode(out io.Writer, added map[string]*minimal.FileMetadata, links map[string]int, verityDigests map[string]string, entry *minimal.FileMetadata) error {
|
||||
path := storagePath.CleanAbsPath(entry.Name)
|
||||
|
||||
parent := filepath.Dir(path)
|
||||
if _, found := added[parent]; !found && path != "/" {
|
||||
parentEntry := &internal.FileMetadata{
|
||||
parentEntry := &minimal.FileMetadata{
|
||||
Name: parent,
|
||||
Type: internal.TypeDir,
|
||||
Type: minimal.TypeDir,
|
||||
Mode: 0o755,
|
||||
}
|
||||
if err := dumpNode(out, added, links, verityDigests, parentEntry); err != nil {
|
||||
|
|
@ -143,7 +134,7 @@ func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[
|
|||
|
||||
nlinks := links[entry.Name] + links[entry.Linkname] + 1
|
||||
link := ""
|
||||
if entry.Type == internal.TypeLink {
|
||||
if entry.Type == minimal.TypeLink {
|
||||
link = "@"
|
||||
}
|
||||
|
||||
|
|
@ -169,16 +160,21 @@ func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[
|
|||
|
||||
var payload string
|
||||
if entry.Linkname != "" {
|
||||
if entry.Type == internal.TypeSymlink {
|
||||
if entry.Type == minimal.TypeSymlink {
|
||||
payload = entry.Linkname
|
||||
} else {
|
||||
payload = sanitizeName(entry.Linkname)
|
||||
payload = storagePath.CleanAbsPath(entry.Linkname)
|
||||
}
|
||||
} else {
|
||||
if len(entry.Digest) > 10 {
|
||||
d := strings.Replace(entry.Digest, "sha256:", "", 1)
|
||||
payload = d[:2] + "/" + d[2:]
|
||||
} else if entry.Digest != "" {
|
||||
d, err := digest.Parse(entry.Digest)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid digest %q for %q: %w", entry.Digest, entry.Name, err)
|
||||
}
|
||||
path, err := storagePath.RegularFilePathForValidatedDigest(d)
|
||||
if err != nil {
|
||||
return fmt.Errorf("determining physical file path for %q: %w", entry.Name, err)
|
||||
}
|
||||
payload = path
|
||||
}
|
||||
|
||||
if _, err := fmt.Fprint(out, escapedOptional([]byte(payload), ESCAPE_LONE_DASH)); err != nil {
|
||||
|
|
@ -219,7 +215,7 @@ func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[
|
|||
|
||||
// GenerateDump generates a dump of the TOC in the same format as `composefs-info dump`
|
||||
func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, error) {
|
||||
toc, ok := tocI.(*internal.TOC)
|
||||
toc, ok := tocI.(*minimal.TOC)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid TOC type")
|
||||
}
|
||||
|
|
@ -235,21 +231,21 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader,
|
|||
}()
|
||||
|
||||
links := make(map[string]int)
|
||||
added := make(map[string]*internal.FileMetadata)
|
||||
added := make(map[string]*minimal.FileMetadata)
|
||||
for _, e := range toc.Entries {
|
||||
if e.Linkname == "" {
|
||||
continue
|
||||
}
|
||||
if e.Type == internal.TypeSymlink {
|
||||
if e.Type == minimal.TypeSymlink {
|
||||
continue
|
||||
}
|
||||
links[e.Linkname] = links[e.Linkname] + 1
|
||||
}
|
||||
|
||||
if len(toc.Entries) == 0 {
|
||||
root := &internal.FileMetadata{
|
||||
root := &minimal.FileMetadata{
|
||||
Name: "/",
|
||||
Type: internal.TypeDir,
|
||||
Type: minimal.TypeDir,
|
||||
Mode: 0o755,
|
||||
}
|
||||
|
||||
|
|
@ -261,7 +257,7 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader,
|
|||
}
|
||||
|
||||
for _, e := range toc.Entries {
|
||||
if e.Type == internal.TypeChunk {
|
||||
if e.Type == minimal.TypeChunk {
|
||||
continue
|
||||
}
|
||||
if err := dumpNode(w, added, links, verityDigests, &e); err != nil {
|
||||
|
|
|
|||
73
vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go
generated
vendored
73
vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go
generated
vendored
|
|
@ -15,7 +15,8 @@ import (
|
|||
|
||||
driversCopy "github.com/containers/storage/drivers/copy"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/chunked/internal"
|
||||
"github.com/containers/storage/pkg/chunked/internal/minimal"
|
||||
storagePath "github.com/containers/storage/pkg/chunked/internal/path"
|
||||
securejoin "github.com/cyphar/filepath-securejoin"
|
||||
"github.com/vbatts/tar-split/archive/tar"
|
||||
"golang.org/x/sys/unix"
|
||||
|
|
@ -34,14 +35,14 @@ func procPathForFd(fd int) string {
|
|||
return fmt.Sprintf("/proc/self/fd/%d", fd)
|
||||
}
|
||||
|
||||
// fileMetadata is a wrapper around internal.FileMetadata with additional private fields that
|
||||
// fileMetadata is a wrapper around minimal.FileMetadata with additional private fields that
|
||||
// are not part of the TOC document.
|
||||
// Type: TypeChunk entries are stored in Chunks, the primary [fileMetadata] entries never use TypeChunk.
|
||||
type fileMetadata struct {
|
||||
internal.FileMetadata
|
||||
minimal.FileMetadata
|
||||
|
||||
// chunks stores the TypeChunk entries relevant to this entry when FileMetadata.Type == TypeReg.
|
||||
chunks []*internal.FileMetadata
|
||||
chunks []*minimal.FileMetadata
|
||||
|
||||
// skipSetAttrs is set when the file attributes must not be
|
||||
// modified, e.g. it is a hard link from a different source,
|
||||
|
|
@ -49,10 +50,37 @@ type fileMetadata struct {
|
|||
skipSetAttrs bool
|
||||
}
|
||||
|
||||
// splitPath takes a file path as input and returns two components: dir and base.
|
||||
// Differently than filepath.Split(), this function handles some edge cases.
|
||||
// If the path refers to a file in the root directory, the returned dir is "/".
|
||||
// The returned base value is never empty, it never contains any slash and the
|
||||
// value "..".
|
||||
func splitPath(path string) (string, string, error) {
|
||||
path = storagePath.CleanAbsPath(path)
|
||||
dir, base := filepath.Split(path)
|
||||
if base == "" {
|
||||
base = "."
|
||||
}
|
||||
// Remove trailing slashes from dir, but make sure that "/" is preserved.
|
||||
dir = strings.TrimSuffix(dir, "/")
|
||||
if dir == "" {
|
||||
dir = "/"
|
||||
}
|
||||
|
||||
if strings.Contains(base, "/") {
|
||||
// This should never happen, but be safe as the base is passed to *at syscalls.
|
||||
return "", "", fmt.Errorf("internal error: splitPath(%q) contains a slash", path)
|
||||
}
|
||||
return dir, base, nil
|
||||
}
|
||||
|
||||
func doHardLink(dirfd, srcFd int, destFile string) error {
|
||||
destDir, destBase := filepath.Split(destFile)
|
||||
destDir, destBase, err := splitPath(destFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
destDirFd := dirfd
|
||||
if destDir != "" && destDir != "." {
|
||||
if destDir != "/" {
|
||||
f, err := openOrCreateDirUnderRoot(dirfd, destDir, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -72,7 +100,7 @@ func doHardLink(dirfd, srcFd int, destFile string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
err := doLink()
|
||||
err = doLink()
|
||||
|
||||
// if the destination exists, unlink it first and try again
|
||||
if err != nil && os.IsExist(err) {
|
||||
|
|
@ -281,8 +309,11 @@ func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.Fil
|
|||
// If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the
|
||||
// last component as the path to openat().
|
||||
if hasNoFollow {
|
||||
dirName, baseName := filepath.Split(name)
|
||||
if dirName != "" && dirName != "." {
|
||||
dirName, baseName, err := splitPath(name)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
if dirName != "/" {
|
||||
newRoot, err := securejoin.SecureJoin(root, dirName)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
|
|
@ -409,7 +440,8 @@ func openOrCreateDirUnderRoot(dirfd int, name string, mode os.FileMode) (*os.Fil
|
|||
|
||||
if errors.Is(err, unix.ENOENT) {
|
||||
parent := filepath.Dir(name)
|
||||
if parent != "" {
|
||||
// do not create the root directory, it should always exist
|
||||
if parent != name {
|
||||
pDir, err2 := openOrCreateDirUnderRoot(dirfd, parent, mode)
|
||||
if err2 != nil {
|
||||
return nil, err
|
||||
|
|
@ -448,9 +480,12 @@ func appendHole(fd int, name string, size int64) error {
|
|||
}
|
||||
|
||||
func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *fileMetadata, options *archive.TarOptions) error {
|
||||
parent, base := filepath.Split(name)
|
||||
parent, base, err := splitPath(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
parentFd := dirfd
|
||||
if parent != "" && parent != "." {
|
||||
if parent != "/" {
|
||||
parentFile, err := openOrCreateDirUnderRoot(dirfd, parent, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -506,9 +541,12 @@ func safeLink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *arch
|
|||
}
|
||||
|
||||
func safeSymlink(dirfd int, metadata *fileMetadata) error {
|
||||
destDir, destBase := filepath.Split(metadata.Name)
|
||||
destDir, destBase, err := splitPath(metadata.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
destDirFd := dirfd
|
||||
if destDir != "" && destDir != "." {
|
||||
if destDir != "/" {
|
||||
f, err := openOrCreateDirUnderRoot(dirfd, destDir, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -542,9 +580,12 @@ func (d whiteoutHandler) Setxattr(path, name string, value []byte) error {
|
|||
}
|
||||
|
||||
func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error {
|
||||
dir, base := filepath.Split(path)
|
||||
dir, base, err := splitPath(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dirfd := d.Dirfd
|
||||
if dir != "" && dir != "." {
|
||||
if dir != "/" {
|
||||
dir, err := openOrCreateDirUnderRoot(d.Dirfd, dir, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
package internal
|
||||
package minimal
|
||||
|
||||
// NOTE: This is used from github.com/containers/image by callers that
|
||||
// don't otherwise use containers/storage, so don't make this depend on any
|
||||
27
vendor/github.com/containers/storage/pkg/chunked/internal/path/path.go
generated
vendored
Normal file
|
|
@ -0,0 +1,27 @@
|
|||
package path
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// CleanAbsPath removes any ".." and "." from the path
|
||||
// and ensures it starts with a "/". If the path refers to the root
|
||||
// directory, it returns "/".
|
||||
func CleanAbsPath(path string) string {
|
||||
return filepath.Clean("/" + path)
|
||||
}
|
||||
|
||||
// RegularFilePath returns the path used in the composefs backing store for a
|
||||
// regular file with the provided content digest.
|
||||
//
|
||||
// The caller MUST ensure d is a valid digest (in particular, that it contains no path separators or .. entries)
|
||||
func RegularFilePathForValidatedDigest(d digest.Digest) (string, error) {
|
||||
if algo := d.Algorithm(); algo != digest.SHA256 {
|
||||
return "", fmt.Errorf("unexpected digest algorithm %q", algo)
|
||||
}
|
||||
e := d.Encoded()
|
||||
return e[0:2] + "/" + e[2:], nil
|
||||
}
|
||||
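A short usage sketch (not part of this diff) of the two helpers above; the import path is the one introduced here, and the digest is the sha256 of the empty string:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/chunked/internal/path"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	// ".." and "." are collapsed and the result is always rooted at "/".
	fmt.Println(path.CleanAbsPath("a/../b/./c")) // "/b/c"
	fmt.Println(path.CleanAbsPath(""))           // "/"

	// sha256 digests fan out into "xx/rest-of-hex" in the composefs backing store.
	d := digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	p, err := path.RegularFilePathForValidatedDigest(d)
	fmt.Println(p, err) // "e3/b0c44298...", <nil>
}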
614
vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
generated
vendored
|
|
@ -2,6 +2,7 @@ package chunked
|
|||
|
||||
import (
|
||||
archivetar "archive/tar"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
|
|
@ -22,17 +23,21 @@ import (
|
|||
graphdriver "github.com/containers/storage/drivers"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/chunked/compressor"
|
||||
"github.com/containers/storage/pkg/chunked/internal"
|
||||
"github.com/containers/storage/pkg/chunked/internal/minimal"
|
||||
path "github.com/containers/storage/pkg/chunked/internal/path"
|
||||
"github.com/containers/storage/pkg/chunked/toc"
|
||||
"github.com/containers/storage/pkg/fsverity"
|
||||
"github.com/containers/storage/pkg/idtools"
|
||||
"github.com/containers/storage/pkg/system"
|
||||
securejoin "github.com/cyphar/filepath-securejoin"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/klauspost/pgzip"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/vbatts/tar-split/archive/tar"
|
||||
"github.com/vbatts/tar-split/tar/asm"
|
||||
tsStorage "github.com/vbatts/tar-split/tar/storage"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
|
|
@ -57,46 +62,53 @@ const (
|
|||
type compressedFileType int
|
||||
|
||||
type chunkedDiffer struct {
|
||||
// Initial parameters, used throughout and never modified
|
||||
// ==========
|
||||
pullOptions pullOptions
|
||||
stream ImageSourceSeekable
|
||||
manifest []byte
|
||||
toc *internal.TOC // The parsed contents of manifest, or nil if not yet available
|
||||
tarSplit []byte
|
||||
layersCache *layersCache
|
||||
tocOffset int64
|
||||
fileType compressedFileType
|
||||
|
||||
copyBuffer []byte
|
||||
|
||||
gzipReader *pgzip.Reader
|
||||
zstdReader *zstd.Decoder
|
||||
rawReader io.Reader
|
||||
|
||||
// tocDigest is the digest of the TOC document when the layer
|
||||
// is partially pulled.
|
||||
tocDigest digest.Digest
|
||||
// blobDigest is the digest of the whole compressed layer. It is used if
|
||||
// convertToZstdChunked to validate a layer when it is converted since there
|
||||
// is no TOC referenced by the manifest.
|
||||
blobDigest digest.Digest
|
||||
blobSize int64
|
||||
|
||||
// Input format
|
||||
// ==========
|
||||
fileType compressedFileType
|
||||
// convertedToZstdChunked is set to true if the layer needs to
|
||||
// be converted to the zstd:chunked format before it can be
|
||||
// handled.
|
||||
convertToZstdChunked bool
|
||||
|
||||
// Chunked metadata
|
||||
// This is usually set in GetDiffer, but if convertToZstdChunked, it is only computed in chunkedDiffer.ApplyDiff
|
||||
// ==========
|
||||
// tocDigest is the digest of the TOC document when the layer
|
||||
// is partially pulled, or "" if not relevant to consumers.
|
||||
tocDigest digest.Digest
|
||||
tocOffset int64
|
||||
manifest []byte
|
||||
toc *minimal.TOC // The parsed contents of manifest, or nil if not yet available
|
||||
tarSplit []byte
|
||||
uncompressedTarSize int64 // -1 if unknown
|
||||
// skipValidation is set to true if the individual files in
|
||||
// the layer are trusted and should not be validated.
|
||||
skipValidation bool
|
||||
|
||||
// blobDigest is the digest of the whole compressed layer. It is used if
|
||||
// convertToZstdChunked to validate a layer when it is converted since there
|
||||
// is no TOC referenced by the manifest.
|
||||
blobDigest digest.Digest
|
||||
|
||||
blobSize int64
|
||||
uncompressedTarSize int64 // -1 if unknown
|
||||
|
||||
pullOptions map[string]string
|
||||
|
||||
useFsVerity graphdriver.DifferFsVerity
|
||||
// Long-term caches
|
||||
// This is set in GetDiffer, when the caller must not hold any storage locks, and later consumed in .ApplyDiff()
|
||||
// ==========
|
||||
layersCache *layersCache
|
||||
copyBuffer []byte
|
||||
fsVerityMutex sync.Mutex // protects fsVerityDigests
|
||||
fsVerityDigests map[string]string
|
||||
fsVerityMutex sync.Mutex
|
||||
|
||||
// Private state of .ApplyDiff
|
||||
// ==========
|
||||
gzipReader *pgzip.Reader
|
||||
zstdReader *zstd.Decoder
|
||||
rawReader io.Reader
|
||||
useFsVerity graphdriver.DifferFsVerity
|
||||
}
|
||||
|
||||
var xattrsToIgnore = map[string]interface{}{
|
||||
|
|
@ -108,6 +120,42 @@ type chunkedLayerData struct {
|
|||
Format graphdriver.DifferOutputFormat `json:"format"`
|
||||
}
|
||||
|
||||
// pullOptions contains parsed data from storage.Store.PullOptions.
|
||||
// TODO: ideally this should be parsed along with the rest of the config file into StoreOptions directly
|
||||
// (and then storage.Store.PullOptions would need to be somehow simulated).
|
||||
type pullOptions struct {
|
||||
enablePartialImages bool // enable_partial_images
|
||||
convertImages bool // convert_images
|
||||
useHardLinks bool // use_hard_links
|
||||
insecureAllowUnpredictableImageContents bool // insecure_allow_unpredictable_image_contents
|
||||
ostreeRepos []string // ostree_repos
|
||||
}
|
||||
|
||||
func parsePullOptions(store storage.Store) pullOptions {
|
||||
options := store.PullOptions()
|
||||
|
||||
res := pullOptions{}
|
||||
for _, e := range []struct {
|
||||
dest *bool
|
||||
name string
|
||||
defaultValue bool
|
||||
}{
|
||||
{&res.enablePartialImages, "enable_partial_images", false},
|
||||
{&res.convertImages, "convert_images", false},
|
||||
{&res.useHardLinks, "use_hard_links", false},
|
||||
{&res.insecureAllowUnpredictableImageContents, "insecure_allow_unpredictable_image_contents", false},
|
||||
} {
|
||||
if value, ok := options[e.name]; ok {
|
||||
*e.dest = strings.ToLower(value) == "true"
|
||||
} else {
|
||||
*e.dest = e.defaultValue
|
||||
}
|
||||
}
|
||||
res.ostreeRepos = strings.Split(options["ostree_repos"], ":")
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
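These keys mirror the pull_options table in storage.conf. A standalone sketch (not part of this diff) of the same parsing, with a plain map standing in for store.PullOptions():

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Stand-in for storage.Store.PullOptions().
	options := map[string]string{
		"enable_partial_images": "true",
		"use_hard_links":        "false",
		"ostree_repos":          "/repo1:/repo2",
	}

	enablePartialImages := strings.ToLower(options["enable_partial_images"]) == "true"
	useHardLinks := strings.ToLower(options["use_hard_links"]) == "true"
	ostreeRepos := strings.Split(options["ostree_repos"], ":")

	fmt.Println(enablePartialImages, useHardLinks, ostreeRepos)
	// true false [/repo1 /repo2]
}

Note that strings.Split("", ":") returns a one-element slice holding the empty string, so consumers of ostreeRepos have to tolerate empty entries, as the code above does.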
func (c *chunkedDiffer) convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *seekableFile, digest.Digest, map[string]string, error) {
|
||||
diff, err := archive.DecompressStream(payload)
|
||||
if err != nil {
|
||||
|
|
@ -144,127 +192,160 @@ func (c *chunkedDiffer) convertTarToZstdChunked(destDirectory string, payload *o
|
|||
}
|
||||
|
||||
// GetDiffer returns a differ than can be used with ApplyDiffWithDiffer.
|
||||
// If it returns an error that implements IsErrFallbackToOrdinaryLayerDownload, the caller can
|
||||
// If it returns an error that matches ErrFallbackToOrdinaryLayerDownload, the caller can
|
||||
// retry the operation with a different method.
|
||||
func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
|
||||
pullOptions := store.PullOptions()
|
||||
pullOptions := parsePullOptions(store)
|
||||
|
||||
if !parseBooleanPullOption(pullOptions, "enable_partial_images", false) {
|
||||
// If convertImages is set, the two options disagree whether fallback is permissible.
|
||||
if !pullOptions.enablePartialImages {
|
||||
// If pullOptions.convertImages is set, the two options disagree whether fallback is permissible.
|
||||
// Right now, we enable it, but that’s not a promise; rather, such a configuration should ideally be rejected.
|
||||
return nil, newErrFallbackToOrdinaryLayerDownload(errors.New("partial images are disabled"))
|
||||
}
|
||||
// convertImages also serves as a “must not fallback to non-partial pull” option (?!)
|
||||
convertImages := parseBooleanPullOption(pullOptions, "convert_images", false)
|
||||
// pullOptions.convertImages also serves as a “must not fallback to non-partial pull” option (?!)
|
||||
|
||||
graphDriver, err := store.GraphDriver()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, partialSupported := graphDriver.(graphdriver.DriverWithDiffer); !partialSupported {
|
||||
if convertImages {
|
||||
if pullOptions.convertImages {
|
||||
return nil, fmt.Errorf("graph driver %s does not support partial pull but convert_images requires that", graphDriver.String())
|
||||
}
|
||||
return nil, newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("graph driver %s does not support partial pull", graphDriver.String()))
|
||||
}
|
||||
|
||||
differ, canFallback, err := getProperDiffer(store, blobDigest, blobSize, annotations, iss, pullOptions)
|
||||
differ, err := getProperDiffer(store, blobDigest, blobSize, annotations, iss, pullOptions)
|
||||
if err != nil {
|
||||
if !canFallback {
|
||||
var fallbackErr ErrFallbackToOrdinaryLayerDownload
|
||||
if !errors.As(err, &fallbackErr) {
|
||||
return nil, err
|
||||
}
|
||||
// If convert_images is enabled, always attempt to convert it instead of returning an error or falling back to a different method.
|
||||
if convertImages {
|
||||
logrus.Debugf("Created differ to convert blob %q", blobDigest)
|
||||
return makeConvertFromRawDiffer(store, blobDigest, blobSize, iss, pullOptions)
|
||||
if !pullOptions.convertImages {
|
||||
return nil, err
|
||||
}
|
||||
return nil, newErrFallbackToOrdinaryLayerDownload(err)
|
||||
var canConvertErr errFallbackCanConvert
|
||||
if !errors.As(err, &canConvertErr) {
|
||||
// We are supposed to use makeConvertFromRawDiffer, but that would not work.
|
||||
// Fail, and make sure the error does _not_ match ErrFallbackToOrdinaryLayerDownload: use only the error text,
|
||||
// discard all type information.
|
||||
return nil, fmt.Errorf("neither a partial pull nor convert_images is possible: %s", err.Error())
|
||||
}
|
||||
logrus.Debugf("Created differ to convert blob %q", blobDigest)
|
||||
return makeConvertFromRawDiffer(store, blobDigest, blobSize, iss, pullOptions)
|
||||
}
|
||||
|
||||
return differ, nil
|
||||
}
|
||||
|
||||
// errFallbackCanConvert is an error type _accompanying_ ErrFallbackToOrdinaryLayerDownload
|
||||
// within getProperDiffer, to mark that using makeConvertFromRawDiffer makes sense.
|
||||
// This is used to distinguish between cases where the environment does not support partial pulls
|
||||
// (e.g. a registry does not support range requests) and convert_images is still possible,
|
||||
// from cases where the image content is unacceptable for partial pulls (e.g. exceeds memory limits)
|
||||
// and convert_images would not help.
|
||||
type errFallbackCanConvert struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (e errFallbackCanConvert) Error() string {
|
||||
return e.err.Error()
|
||||
}
|
||||
|
||||
func (e errFallbackCanConvert) Unwrap() error {
|
||||
return e.err
|
||||
}
|
||||
|
||||
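The two error types compose by wrapping: errFallbackCanConvert wraps an ErrFallbackToOrdinaryLayerDownload, so errors.As finds either one. A self-contained sketch of the pattern (the type names here are hypothetical stand-ins for the unexported originals):

package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-ins for the unexported error types above.
type fallbackToOrdinaryDownload struct{ err error }

func (e fallbackToOrdinaryDownload) Error() string { return e.err.Error() }
func (e fallbackToOrdinaryDownload) Unwrap() error { return e.err }

type fallbackCanConvert struct{ err error }

func (e fallbackCanConvert) Error() string { return e.err.Error() }
func (e fallbackCanConvert) Unwrap() error { return e.err }

func main() {
	// Mirrors getProperDiffer's "no TOC found" return value.
	var err error = fallbackCanConvert{fallbackToOrdinaryDownload{errors.New("no TOC found")}}

	var canConvert fallbackCanConvert
	var ordinary fallbackToOrdinaryDownload
	fmt.Println(errors.As(err, &canConvert)) // true: conversion is worth attempting
	fmt.Println(errors.As(err, &ordinary))   // true: a plain layer download is also permitted
}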
// getProperDiffer is an implementation detail of GetDiffer.
|
||||
// It returns a “proper” differ (not a convert_images one) if possible.
|
||||
// On error, the second parameter is true if a fallback to an alternative (either the makeConverToRaw differ, or a non-partial pull)
|
||||
// is permissible.
|
||||
func getProperDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, pullOptions map[string]string) (graphdriver.Differ, bool, error) {
|
||||
zstdChunkedTOCDigestString, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey]
|
||||
// May return an error matching ErrFallbackToOrdinaryLayerDownload if a fallback to an alternative
|
||||
// (either makeConvertFromRawDiffer, or a non-partial pull) is permissible.
|
||||
func getProperDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, pullOptions pullOptions) (graphdriver.Differ, error) {
|
||||
zstdChunkedTOCDigestString, hasZstdChunkedTOC := annotations[minimal.ManifestChecksumKey]
|
||||
estargzTOCDigestString, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation]
|
||||
|
||||
switch {
|
||||
case hasZstdChunkedTOC && hasEstargzTOC:
|
||||
return nil, false, errors.New("both zstd:chunked and eStargz TOC found")
|
||||
return nil, errors.New("both zstd:chunked and eStargz TOC found")
|
||||
|
||||
case hasZstdChunkedTOC:
|
||||
zstdChunkedTOCDigest, err := digest.Parse(zstdChunkedTOCDigestString)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
return nil, err
|
||||
}
|
||||
differ, err := makeZstdChunkedDiffer(store, blobSize, zstdChunkedTOCDigest, annotations, iss, pullOptions)
|
||||
if err != nil {
|
||||
logrus.Debugf("Could not create zstd:chunked differ for blob %q: %v", blobDigest, err)
|
||||
// If the error is a bad request to the server, then signal to the caller that it can try a different method.
|
||||
var badRequestErr ErrBadRequest
|
||||
return nil, errors.As(err, &badRequestErr), err
|
||||
return nil, err
|
||||
}
|
||||
logrus.Debugf("Created zstd:chunked differ for blob %q", blobDigest)
|
||||
return differ, false, nil
|
||||
return differ, nil
|
||||
|
||||
case hasEstargzTOC:
|
||||
estargzTOCDigest, err := digest.Parse(estargzTOCDigestString)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
return nil, err
|
||||
}
|
||||
differ, err := makeEstargzChunkedDiffer(store, blobSize, estargzTOCDigest, iss, pullOptions)
|
||||
if err != nil {
|
||||
logrus.Debugf("Could not create estargz differ for blob %q: %v", blobDigest, err)
|
||||
// If the error is a bad request to the server, then signal to the caller that it can try a different method.
|
||||
var badRequestErr ErrBadRequest
|
||||
return nil, errors.As(err, &badRequestErr), err
|
||||
return nil, err
|
||||
}
|
||||
logrus.Debugf("Created eStargz differ for blob %q", blobDigest)
|
||||
return differ, false, nil
|
||||
return differ, nil
|
||||
|
||||
default: // no TOC
|
||||
convertImages := parseBooleanPullOption(pullOptions, "convert_images", false)
|
||||
if !convertImages {
|
||||
return nil, true, errors.New("no TOC found and convert_images is not configured")
|
||||
message := "no TOC found"
|
||||
if !pullOptions.convertImages {
|
||||
message = "no TOC found and convert_images is not configured"
|
||||
}
|
||||
return nil, errFallbackCanConvert{
|
||||
newErrFallbackToOrdinaryLayerDownload(errors.New(message)),
|
||||
}
|
||||
return nil, true, errors.New("no TOC found")
|
||||
}
|
||||
}
|
||||
|
||||
func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
|
||||
func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, iss ImageSourceSeekable, pullOptions pullOptions) (*chunkedDiffer, error) {
|
||||
layersCache, err := getLayersCache(store)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &chunkedDiffer{
|
||||
fsVerityDigests: make(map[string]string),
|
||||
blobDigest: blobDigest,
|
||||
blobSize: blobSize,
|
||||
uncompressedTarSize: -1, // Will be computed later
|
||||
pullOptions: pullOptions,
|
||||
stream: iss,
|
||||
blobDigest: blobDigest,
|
||||
blobSize: blobSize,
|
||||
|
||||
convertToZstdChunked: true,
|
||||
copyBuffer: makeCopyBuffer(),
|
||||
layersCache: layersCache,
|
||||
pullOptions: pullOptions,
|
||||
stream: iss,
|
||||
|
||||
uncompressedTarSize: -1, // Will be computed later
|
||||
|
||||
layersCache: layersCache,
|
||||
copyBuffer: makeCopyBuffer(),
|
||||
fsVerityDigests: make(map[string]string),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
|
||||
// makeZstdChunkedDiffer sets up a chunkedDiffer for a zstd:chunked layer.
|
||||
// It may return an error matching ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert.
|
||||
func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, pullOptions pullOptions) (*chunkedDiffer, error) {
|
||||
manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, tocDigest, annotations)
|
||||
if err != nil {
|
||||
if err != nil { // May be ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert
|
||||
return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
|
||||
}
|
||||
|
||||
var uncompressedTarSize int64 = -1
|
||||
if tarSplit != nil {
|
||||
uncompressedTarSize, err = tarSizeFromTarSplit(tarSplit)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("computing size from tar-split: %w", err)
|
||||
}
|
||||
} else if !pullOptions.insecureAllowUnpredictableImageContents { // With no tar-split, we can't compute the traditional UncompressedDigest.
|
||||
return nil, errFallbackCanConvert{
|
||||
newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("zstd:chunked layers without tar-split data don't support partial pulls with guaranteed consistency with non-partial pulls")),
|
||||
}
|
||||
}
|
||||
|
||||
layersCache, err := getLayersCache(store)
|
||||
|
|
@ -273,25 +354,36 @@ func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest
|
|||
}
|
||||
|
||||
return &chunkedDiffer{
|
||||
fsVerityDigests: make(map[string]string),
|
||||
blobSize: blobSize,
|
||||
uncompressedTarSize: uncompressedTarSize,
|
||||
pullOptions: pullOptions,
|
||||
stream: iss,
|
||||
blobSize: blobSize,
|
||||
|
||||
fileType: fileTypeZstdChunked,
|
||||
|
||||
tocDigest: tocDigest,
|
||||
copyBuffer: makeCopyBuffer(),
|
||||
fileType: fileTypeZstdChunked,
|
||||
layersCache: layersCache,
|
||||
tocOffset: tocOffset,
|
||||
manifest: manifest,
|
||||
toc: toc,
|
||||
pullOptions: pullOptions,
|
||||
stream: iss,
|
||||
tarSplit: tarSplit,
|
||||
tocOffset: tocOffset,
|
||||
uncompressedTarSize: uncompressedTarSize,
|
||||
|
||||
layersCache: layersCache,
|
||||
copyBuffer: makeCopyBuffer(),
|
||||
fsVerityDigests: make(map[string]string),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
|
||||
// makeEstargzChunkedDiffer sets up a chunkedDiffer for an estargz layer.
|
||||
// It may return an error matching ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert.
|
||||
func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, iss ImageSourceSeekable, pullOptions pullOptions) (*chunkedDiffer, error) {
|
||||
if !pullOptions.insecureAllowUnpredictableImageContents { // With no tar-split, we can't compute the traditional UncompressedDigest.
|
||||
return nil, errFallbackCanConvert{
|
||||
newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("estargz layers don't support partial pulls with guaranteed consistency with non-partial pulls")),
|
||||
}
|
||||
}
|
||||
|
||||
manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, tocDigest)
|
||||
if err != nil {
|
||||
if err != nil { // May be ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert
|
||||
return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
|
||||
}
|
||||
layersCache, err := getLayersCache(store)
|
||||
|
|
@ -300,17 +392,20 @@ func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest dig
|
|||
}
|
||||
|
||||
return &chunkedDiffer{
|
||||
fsVerityDigests: make(map[string]string),
|
||||
blobSize: blobSize,
|
||||
uncompressedTarSize: -1, // We would have to read and decompress the whole layer
|
||||
pullOptions: pullOptions,
|
||||
stream: iss,
|
||||
blobSize: blobSize,
|
||||
|
||||
fileType: fileTypeEstargz,
|
||||
|
||||
tocDigest: tocDigest,
|
||||
copyBuffer: makeCopyBuffer(),
|
||||
fileType: fileTypeEstargz,
|
||||
layersCache: layersCache,
|
||||
manifest: manifest,
|
||||
pullOptions: pullOptions,
|
||||
stream: iss,
|
||||
tocOffset: tocOffset,
|
||||
manifest: manifest,
|
||||
uncompressedTarSize: -1, // We would have to read and decompress the whole layer
|
||||
|
||||
layersCache: layersCache,
|
||||
copyBuffer: makeCopyBuffer(),
|
||||
fsVerityDigests: make(map[string]string),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
@ -391,7 +486,7 @@ func canDedupFileWithHardLink(file *fileMetadata, fd int, s os.FileInfo) bool {
|
|||
}
|
||||
// fill only the attributes used by canDedupMetadataWithHardLink.
|
||||
otherFile := fileMetadata{
|
||||
FileMetadata: internal.FileMetadata{
|
||||
FileMetadata: minimal.FileMetadata{
|
||||
UID: int(st.Uid),
|
||||
GID: int(st.Gid),
|
||||
Mode: int64(st.Mode),
|
||||
|
|
@ -735,7 +830,12 @@ func (d *destinationFile) Close() (Err error) {
|
|||
}
|
||||
}
|
||||
|
||||
return setFileAttrs(d.dirfd, d.file, os.FileMode(d.metadata.Mode), d.metadata, d.options, false)
|
||||
mode := os.FileMode(d.metadata.Mode)
|
||||
if d.options.ForceMask != nil {
|
||||
mode = *d.options.ForceMask
|
||||
}
|
||||
|
||||
return setFileAttrs(d.dirfd, d.file, mode, d.metadata, d.options, false)
|
||||
}
|
||||
|
||||
func closeDestinationFiles(files chan *destinationFile, errors chan error) {
|
||||
|
|
@ -1038,13 +1138,6 @@ type hardLinkToCreate struct {
|
|||
metadata *fileMetadata
|
||||
}
|
||||
|
||||
func parseBooleanPullOption(pullOptions map[string]string, name string, def bool) bool {
|
||||
if value, ok := pullOptions[name]; ok {
|
||||
return strings.ToLower(value) == "true"
|
||||
}
|
||||
return def
|
||||
}
|
||||
|
||||
type findAndCopyFileOptions struct {
|
||||
useHardLinks bool
|
||||
ostreeRepos []string
|
||||
|
|
@ -1111,10 +1204,13 @@ func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *fileMetadata, copyOptions
|
|||
return false, nil
|
||||
}
|
||||
|
||||
func makeEntriesFlat(mergedEntries []fileMetadata) ([]fileMetadata, error) {
|
||||
// makeEntriesFlat collects regular-file entries from mergedEntries, and produces a new list
|
||||
// where each file content is only represented once, and uses composefs.RegularFilePathForValidatedDigest for its name.
|
||||
// If flatPathNameMap is not nil, this function writes to it a mapping from filepath.Clean(originalName) to the composefs name.
|
||||
func makeEntriesFlat(mergedEntries []fileMetadata, flatPathNameMap map[string]string) ([]fileMetadata, error) {
|
||||
var new []fileMetadata
|
||||
|
||||
hashes := make(map[string]string)
|
||||
knownFlatPaths := make(map[string]struct{})
|
||||
for i := range mergedEntries {
|
||||
if mergedEntries[i].Type != TypeReg {
|
||||
continue
|
||||
|
|
@ -1124,16 +1220,22 @@ func makeEntriesFlat(mergedEntries []fileMetadata) ([]fileMetadata, error) {
|
|||
}
|
||||
digest, err := digest.Parse(mergedEntries[i].Digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("invalid digest %q for %q: %w", mergedEntries[i].Digest, mergedEntries[i].Name, err)
|
||||
}
|
||||
path, err := path.RegularFilePathForValidatedDigest(digest)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("determining physical file path for %q: %w", mergedEntries[i].Name, err)
|
||||
}
|
||||
if flatPathNameMap != nil {
|
||||
flatPathNameMap[filepath.Clean(mergedEntries[i].Name)] = path
|
||||
}
|
||||
d := digest.Encoded()
|
||||
|
||||
if hashes[d] != "" {
|
||||
if _, known := knownFlatPaths[path]; known {
|
||||
continue
|
||||
}
|
||||
hashes[d] = d
|
||||
knownFlatPaths[path] = struct{}{}
|
||||
|
||||
mergedEntries[i].Name = fmt.Sprintf("%s/%s", d[0:2], d[2:])
|
||||
mergedEntries[i].Name = path
|
||||
mergedEntries[i].skipSetAttrs = true
|
||||
|
||||
new = append(new, mergedEntries[i])
|
||||
|
|
@ -1141,44 +1243,140 @@ func makeEntriesFlat(mergedEntries []fileMetadata) ([]fileMetadata, error) {
|
|||
return new, nil
|
||||
}
|
||||
|
||||
func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, error) {
|
||||
var payload io.ReadCloser
|
||||
var streams chan io.ReadCloser
|
||||
var errs chan error
|
||||
var err error
|
||||
type streamOrErr struct {
|
||||
stream io.ReadCloser
|
||||
err error
|
||||
}
|
||||
|
||||
chunksToRequest := []ImageSourceChunk{
|
||||
{
|
||||
Offset: 0,
|
||||
Length: uint64(c.blobSize),
|
||||
},
|
||||
// ensureAllBlobsDone ensures that all blobs are closed and returns the first error encountered.
|
||||
func ensureAllBlobsDone(streamsOrErrors chan streamOrErr) (retErr error) {
|
||||
for soe := range streamsOrErrors {
|
||||
if soe.stream != nil {
|
||||
_ = soe.stream.Close()
|
||||
} else if retErr == nil {
|
||||
retErr = soe.err
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
streams, errs, err = c.stream.GetBlobAt(chunksToRequest)
|
||||
// getBlobAtConverterGoroutine reads from the streams and errs channels, then sends
|
||||
// either a stream or an error to the stream channel. The streams channel is closed when
|
||||
// there are no more streams and errors to read.
|
||||
// It ensures that no more than maxStreams streams are returned, and that every item from the
|
||||
// streams and errs channels is consumed.
|
||||
func getBlobAtConverterGoroutine(stream chan streamOrErr, streams chan io.ReadCloser, errs chan error, maxStreams int) {
|
||||
tooManyStreams := false
|
||||
streamsSoFar := 0
|
||||
|
||||
err := errors.New("Unexpected error in getBlobAtGoroutine")
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
stream <- streamOrErr{err: err}
|
||||
}
|
||||
close(stream)
|
||||
}()
|
||||
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case p, ok := <-streams:
|
||||
if !ok {
|
||||
streams = nil
|
||||
break loop
|
||||
}
|
||||
if streamsSoFar >= maxStreams {
|
||||
tooManyStreams = true
|
||||
_ = p.Close()
|
||||
continue
|
||||
}
|
||||
streamsSoFar++
|
||||
stream <- streamOrErr{stream: p}
|
||||
case err, ok := <-errs:
|
||||
if !ok {
|
||||
errs = nil
|
||||
break loop
|
||||
}
|
||||
stream <- streamOrErr{err: err}
|
||||
}
|
||||
}
|
||||
if streams != nil {
|
||||
for p := range streams {
|
||||
if streamsSoFar >= maxStreams {
|
||||
tooManyStreams = true
|
||||
_ = p.Close()
|
||||
continue
|
||||
}
|
||||
streamsSoFar++
|
||||
stream <- streamOrErr{stream: p}
|
||||
}
|
||||
}
|
||||
if errs != nil {
|
||||
for err := range errs {
|
||||
stream <- streamOrErr{err: err}
|
||||
}
|
||||
}
|
||||
if tooManyStreams {
|
||||
stream <- streamOrErr{err: fmt.Errorf("too many streams returned, got more than %d", maxStreams)}
|
||||
}
|
||||
err = nil
|
||||
}
|
||||
|
||||
// getBlobAt provides a much more convenient way to consume data returned by ImageSourceSeekable.GetBlobAt.
|
||||
// GetBlobAt returns two channels, forcing a caller to `select` on both of them — and in Go, reading a closed channel
|
||||
// always succeeds in select.
|
||||
// Instead, getBlobAt provides a single channel with all events, which can be consumed conveniently using `range`.
|
||||
func getBlobAt(is ImageSourceSeekable, chunksToRequest ...ImageSourceChunk) (chan streamOrErr, error) {
|
||||
streams, errs, err := is.GetBlobAt(chunksToRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stream := make(chan streamOrErr)
|
||||
go getBlobAtConverterGoroutine(stream, streams, errs, len(chunksToRequest))
|
||||
return stream, nil
|
||||
}
|
||||
|
||||
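Why the single merged channel matters: a receive from a closed channel succeeds immediately with the zero value, so a select over GetBlobAt's two result channels can keep "succeeding" with nils once either channel closes. A minimal sketch (not part of this diff) of the hazard:

package main

import "fmt"

func main() {
	streams := make(chan string)
	errs := make(chan error)
	close(streams) // e.g. the source finished sending streams

	// A receive from a closed channel always succeeds with the zero value,
	// so this select no longer blocks on errs; it returns ("", false) immediately.
	select {
	case s, ok := <-streams:
		fmt.Println(s, ok) // "" false
	case err := <-errs:
		fmt.Println(err)
	}
}

Merging both sources into one streamOrErr channel lets callers consume everything with a plain range loop, as copyAllBlobToFile does below.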
func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, error) {
|
||||
streamsOrErrors, err := getBlobAt(c.stream, ImageSourceChunk{Offset: 0, Length: uint64(c.blobSize)})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
select {
|
||||
case p := <-streams:
|
||||
payload = p
|
||||
case err := <-errs:
|
||||
return "", err
|
||||
}
|
||||
if payload == nil {
|
||||
return "", errors.New("invalid stream returned")
|
||||
}
|
||||
defer payload.Close()
|
||||
|
||||
originalRawDigester := digest.Canonical.Digester()
|
||||
for soe := range streamsOrErrors {
|
||||
if soe.stream != nil {
|
||||
r := io.TeeReader(soe.stream, originalRawDigester.Hash())
|
||||
|
||||
r := io.TeeReader(payload, originalRawDigester.Hash())
|
||||
|
||||
// copy the entire tarball and compute its digest
|
||||
_, err = io.CopyBuffer(destination, r, c.copyBuffer)
|
||||
|
||||
// copy the entire tarball and compute its digest
|
||||
_, err = io.CopyBuffer(destination, r, c.copyBuffer)
|
||||
_ = soe.stream.Close()
|
||||
}
|
||||
if soe.err != nil && err == nil {
|
||||
err = soe.err
|
||||
}
|
||||
}
|
||||
return originalRawDigester.Digest(), err
|
||||
}
|
||||
|
||||
func typeToOsMode(typ string) (os.FileMode, error) {
|
||||
switch typ {
|
||||
case TypeReg, TypeLink:
|
||||
return 0, nil
|
||||
case TypeSymlink:
|
||||
return os.ModeSymlink, nil
|
||||
case TypeDir:
|
||||
return os.ModeDir, nil
|
||||
case TypeChar:
|
||||
return os.ModeDevice | os.ModeCharDevice, nil
|
||||
case TypeBlock:
|
||||
return os.ModeDevice, nil
|
||||
case TypeFifo:
|
||||
return os.ModeNamedPipe, nil
|
||||
}
|
||||
return 0, fmt.Errorf("unknown file type %q", typ)
|
||||
}
|
||||
|
||||
func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, differOpts *graphdriver.DifferOptions) (graphdriver.DriverWithDifferOutput, error) {
|
||||
defer c.layersCache.release()
|
||||
defer func() {
|
||||
|
|
@ -1298,13 +1496,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
|||
Size: c.uncompressedTarSize,
|
||||
}
|
||||
|
||||
// When the hard links deduplication is used, file attributes are ignored because setting them
|
||||
// modifies the source file as well.
|
||||
useHardLinks := parseBooleanPullOption(c.pullOptions, "use_hard_links", false)
|
||||
|
||||
// List of OSTree repositories to use for deduplication
|
||||
ostreeRepos := strings.Split(c.pullOptions["ostree_repos"], ":")
|
||||
|
||||
whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
|
||||
|
||||
var missingParts []missingPart
|
||||
|
|
@ -1325,7 +1516,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
|||
if err == nil {
|
||||
value := idtools.Stat{
|
||||
IDs: idtools.IDPair{UID: int(uid), GID: int(gid)},
|
||||
Mode: os.FileMode(mode),
|
||||
Mode: os.ModeDir | os.FileMode(mode),
|
||||
}
|
||||
if err := idtools.SetContainersOverrideXattr(dest, value); err != nil {
|
||||
return output, err
|
||||
|
|
@ -1337,16 +1528,20 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
|||
if err != nil {
|
||||
return output, &fs.PathError{Op: "open", Path: dest, Err: err}
|
||||
}
|
||||
defer unix.Close(dirfd)
|
||||
dirFile := os.NewFile(uintptr(dirfd), dest)
|
||||
defer dirFile.Close()
|
||||
|
||||
var flatPathNameMap map[string]string // = nil
|
||||
if differOpts != nil && differOpts.Format == graphdriver.DifferOutputFormatFlat {
|
||||
mergedEntries, err = makeEntriesFlat(mergedEntries)
|
||||
flatPathNameMap = map[string]string{}
|
||||
mergedEntries, err = makeEntriesFlat(mergedEntries, flatPathNameMap)
|
||||
if err != nil {
|
||||
return output, err
|
||||
}
|
||||
createdDirs := make(map[string]struct{})
|
||||
for _, e := range mergedEntries {
|
||||
d := e.Name[0:2]
|
||||
// This hard-codes an assumption that RegularFilePathForValidatedDigest creates paths with exactly one directory component.
|
||||
d := filepath.Dir(e.Name)
|
||||
if _, found := createdDirs[d]; !found {
|
||||
if err := unix.Mkdirat(dirfd, d, 0o755); err != nil {
|
||||
return output, &fs.PathError{Op: "mkdirat", Path: d, Err: err}
|
||||
|
|
@ -1363,8 +1558,10 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
|||
missingPartsSize, totalChunksSize := int64(0), int64(0)
|
||||
|
||||
copyOptions := findAndCopyFileOptions{
|
||||
useHardLinks: useHardLinks,
|
||||
ostreeRepos: ostreeRepos,
|
||||
// When the hard links deduplication is used, file attributes are ignored because setting them
|
||||
// modifies the source file as well.
|
||||
useHardLinks: c.pullOptions.useHardLinks,
|
||||
ostreeRepos: c.pullOptions.ostreeRepos, // List of OSTree repositories to use for deduplication
|
||||
options: options,
|
||||
}
|
||||
|
||||
|
|
@ -1408,13 +1605,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
|||
filesToWaitFor := 0
|
||||
for i := range mergedEntries {
|
||||
r := &mergedEntries[i]
|
||||
if options.ForceMask != nil {
|
||||
value := idtools.FormatContainersOverrideXattr(r.UID, r.GID, int(r.Mode))
|
||||
if r.Xattrs == nil {
|
||||
r.Xattrs = make(map[string]string)
|
||||
}
|
||||
r.Xattrs[idtools.ContainersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value))
|
||||
}
|
||||
|
||||
mode := os.FileMode(r.Mode)
|
||||
|
||||
|
|
@ -1423,10 +1613,37 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
|||
return output, err
|
||||
}
|
||||
|
||||
r.Name = filepath.Clean(r.Name)
|
||||
size := r.Size
|
||||
|
||||
// update also the implementation of ForceMask in pkg/archive
|
||||
if options.ForceMask != nil {
|
||||
mode = *options.ForceMask
|
||||
|
||||
// special files will be stored as regular files
|
||||
if t != tar.TypeDir && t != tar.TypeSymlink && t != tar.TypeReg && t != tar.TypeLink {
|
||||
t = tar.TypeReg
|
||||
size = 0
|
||||
}
|
||||
|
||||
// if the entry will be stored as a directory or a regular file, store in a xattr the original
|
||||
// owner and mode.
|
||||
if t == tar.TypeDir || t == tar.TypeReg {
|
||||
typeMode, err := typeToOsMode(r.Type)
|
||||
if err != nil {
|
||||
return output, err
|
||||
}
|
||||
value := idtools.FormatContainersOverrideXattrDevice(r.UID, r.GID, typeMode|fs.FileMode(r.Mode), int(r.Devmajor), int(r.Devminor))
|
||||
if r.Xattrs == nil {
|
||||
r.Xattrs = make(map[string]string)
|
||||
}
|
||||
r.Xattrs[idtools.ContainersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value))
|
||||
}
|
||||
}
|
||||
|
||||
r.Name = path.CleanAbsPath(r.Name)
|
||||
// do not modify the value of symlinks
|
||||
if r.Linkname != "" && t != tar.TypeSymlink {
|
||||
r.Linkname = filepath.Clean(r.Linkname)
|
||||
r.Linkname = path.CleanAbsPath(r.Linkname)
|
||||
}
|
||||
|
||||
if whiteoutConverter != nil {
|
||||
|
|
@ -1434,8 +1651,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
|||
Typeflag: t,
|
||||
Name: r.Name,
|
||||
Linkname: r.Linkname,
|
||||
Size: r.Size,
|
||||
Mode: r.Mode,
|
||||
Size: size,
|
||||
Mode: int64(mode),
|
||||
Uid: r.UID,
|
||||
Gid: r.GID,
|
||||
}
|
||||
|
|
@ -1454,7 +1671,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
|||
switch t {
|
||||
case tar.TypeReg:
|
||||
// Create directly empty files.
|
||||
if r.Size == 0 {
|
||||
if size == 0 {
|
||||
// Used to have a scope for cleanup.
|
||||
createEmptyFile := func() error {
|
||||
file, err := openFileUnderRoot(dirfd, r.Name, newFileFlags, 0)
|
||||
|
|
@ -1474,7 +1691,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
|||
}
|
||||
|
||||
case tar.TypeDir:
|
||||
if r.Name == "" || r.Name == "." {
|
||||
if r.Name == "/" {
|
||||
output.RootDirMode = &mode
|
||||
}
|
||||
if err := safeMkdir(dirfd, mode, r.Name, r, options); err != nil {
|
||||
|
|
@ -1509,7 +1726,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
|||
return output, fmt.Errorf("invalid type %q", t)
|
||||
}
|
||||
|
||||
totalChunksSize += r.Size
|
||||
totalChunksSize += size
|
||||
|
||||
if t == tar.TypeReg {
|
||||
index := i
|
||||
|
|
@ -1572,7 +1789,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
|||
}
|
||||
|
||||
switch chunk.ChunkType {
|
||||
case internal.ChunkTypeData:
|
||||
case minimal.ChunkTypeData:
|
||||
root, path, offset, err := c.layersCache.findChunkInOtherLayers(chunk)
|
||||
if err != nil {
|
||||
return output, err
|
||||
|
|
@ -1585,7 +1802,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
|||
Offset: offset,
|
||||
}
|
||||
}
|
||||
case internal.ChunkTypeZeros:
|
||||
case minimal.ChunkTypeZeros:
|
||||
missingPartsSize -= size
|
||||
mp.Hole = true
|
||||
// Mark all chunks belonging to the missing part as holes
|
||||
|
|
@ -1609,6 +1826,39 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
|||
}
|
||||
}
|
||||
|
||||
// To ensure that consumers of the layer who decompress and read the full tar stream,
|
||||
// and consumers who consume the data via the TOC, both see exactly the same data and metadata,
|
||||
// compute the UncompressedDigest.
|
||||
// c/image will then ensure that this value matches the value in the image config’s RootFS.DiffID, i.e. the image must commit
|
||||
// to one UncompressedDigest value for each layer, and that will avoid the ambiguity (in consumers who validate layers against DiffID).
|
||||
//
|
||||
// c/image also uses the UncompressedDigest as a layer ID, allowing it to use the traditional layer and image IDs.
|
||||
//
|
||||
// This is, sadly, quite costly: Up to now we might have had to write, and digest, only the new/modified files.
|
||||
// Here we need to read, and digest, the whole layer, even if almost all of it was already present locally previously.
|
||||
// So, really specialized (EXTREMELY RARE) users can opt out of this check using insecureAllowUnpredictableImageContents .
|
||||
//
|
||||
// Layers without a tar-split (estargz layers and old zstd:chunked layers) can't produce an UncompressedDigest that
|
||||
// matches the expected RootFS.DiffID; we always fall back to full pulls, again unless the user opts out
|
||||
// via insecureAllowUnpredictableImageContents .
|
||||
if output.UncompressedDigest == "" {
|
||||
switch {
|
||||
case c.pullOptions.insecureAllowUnpredictableImageContents:
|
||||
// Oh well. Skip the costly digest computation.
|
||||
case output.TarSplit != nil:
|
||||
metadata := tsStorage.NewJSONUnpacker(bytes.NewReader(output.TarSplit))
|
||||
fg := newStagedFileGetter(dirFile, flatPathNameMap)
|
||||
digester := digest.Canonical.Digester()
|
||||
if err := asm.WriteOutputTarStream(fg, metadata, digester.Hash()); err != nil {
|
||||
return output, fmt.Errorf("digesting staged uncompressed stream: %w", err)
|
||||
}
|
||||
output.UncompressedDigest = digester.Digest()
|
||||
default:
|
||||
// We are checking for this earlier in GetDiffer, so this should not be reachable.
|
||||
return output, fmt.Errorf(`internal error: layer's UncompressedDigest is unknown and "insecure_allow_unpredictable_image_contents" is not set`)
|
||||
}
|
||||
}
|
||||
|
||||
if totalChunksSize > 0 {
|
||||
logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize))
|
||||
}
|
||||
|
|
@ -1618,7 +1868,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
|||
return output, nil
|
||||
}
|
||||
|
||||
func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool {
|
||||
func mustSkipFile(fileType compressedFileType, e minimal.FileMetadata) bool {
|
||||
// ignore the metadata files for the estargz format.
|
||||
if fileType != fileTypeEstargz {
|
||||
return false
|
||||
|
|
@ -1631,7 +1881,7 @@ func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]fileMetadata, error) {
|
||||
func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []minimal.FileMetadata) ([]fileMetadata, error) {
|
||||
countNextChunks := func(start int) int {
|
||||
count := 0
|
||||
for _, e := range entries[start:] {
|
||||
|
|
@ -1668,7 +1918,7 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
|
|||
if e.Type == TypeReg {
|
||||
nChunks := countNextChunks(i + 1)
|
||||
|
||||
e.chunks = make([]*internal.FileMetadata, nChunks+1)
|
||||
e.chunks = make([]*minimal.FileMetadata, nChunks+1)
|
||||
for j := 0; j <= nChunks; j++ {
|
||||
// we need a copy here, otherwise we override the
|
||||
// .Size later
|
||||
|
|
@ -1703,7 +1953,7 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
|
|||
|
||||
// validateChunkChecksum checks if the file at $root/$path[offset:chunk.ChunkSize] has the
|
||||
// same digest as chunk.ChunkDigest
|
||||
func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool {
|
||||
func validateChunkChecksum(chunk *minimal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool {
|
||||
parentDirfd, err := unix.Open(root, unix.O_PATH|unix.O_CLOEXEC, 0)
|
||||
if err != nil {
|
||||
return false
|
||||
|
|
@ -1734,3 +1984,33 @@ func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offs
|
|||
|
||||
return digester.Digest() == digest
|
||||
}
|
||||
|
||||
// newStagedFileGetter returns an object usable as storage.FileGetter for rootDir.
|
||||
// if flatPathNameMap is not nil, it must be used to map logical file names into the backing file paths.
|
||||
func newStagedFileGetter(rootDir *os.File, flatPathNameMap map[string]string) *stagedFileGetter {
|
||||
return &stagedFileGetter{
|
||||
rootDir: rootDir,
|
||||
flatPathNameMap: flatPathNameMap,
|
||||
}
|
||||
}
|
||||
|
||||
type stagedFileGetter struct {
|
||||
rootDir *os.File
|
||||
flatPathNameMap map[string]string // nil, or a map from filepath.Clean()ed tar file names to expected on-filesystem names
|
||||
}
|
||||
|
||||
func (fg *stagedFileGetter) Get(filename string) (io.ReadCloser, error) {
|
||||
if fg.flatPathNameMap != nil {
|
||||
path, ok := fg.flatPathNameMap[filepath.Clean(filename)]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("no path mapping exists for tar entry %q", filename)
|
||||
}
|
||||
filename = path
|
||||
}
|
||||
pathFD, err := securejoin.OpenatInRoot(fg.rootDir, filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer pathFD.Close()
|
||||
return securejoin.Reopen(pathFD, unix.O_RDONLY)
|
||||
}
|
||||
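A hedged sketch (not part of this diff) of recomputing a layer's uncompressed digest from tar-split data plus staged files, the way ApplyDiff does above; simpleGetter is hypothetical and omits the path-traversal protection (filepath-securejoin) that stagedFileGetter provides:

package main

import (
	"bytes"
	"io"
	"os"
	"path/filepath"

	digest "github.com/opencontainers/go-digest"
	"github.com/vbatts/tar-split/tar/asm"
	tsStorage "github.com/vbatts/tar-split/tar/storage"
)

// simpleGetter resolves tar entry names under a root directory.
type simpleGetter struct{ root string }

func (g simpleGetter) Get(name string) (io.ReadCloser, error) {
	return os.Open(filepath.Join(g.root, name))
}

// diffID reassembles the tar stream from tar-split metadata and file
// contents, and digests it without materializing the stream.
func diffID(tarSplit []byte, root string) (digest.Digest, error) {
	up := tsStorage.NewJSONUnpacker(bytes.NewReader(tarSplit))
	digester := digest.Canonical.Digester()
	if err := asm.WriteOutputTarStream(simpleGetter{root: root}, up, digester.Hash()); err != nil {
		return "", err
	}
	return digester.Digest(), nil
}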
|
|
|
|||
2
vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
generated
vendored
|
|
@ -13,5 +13,5 @@ import (
|
|||
|
||||
// GetDiffer returns a differ than can be used with ApplyDiffWithDiffer.
|
||||
func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
|
||||
return nil, errors.New("format not supported on this system")
|
||||
return nil, newErrFallbackToOrdinaryLayerDownload(errors.New("format not supported on this system"))
|
||||
}
|
||||
|
|
|
|||
4
vendor/github.com/containers/storage/pkg/chunked/toc/toc.go
generated
vendored
|
|
@ -3,7 +3,7 @@ package toc
|
|||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/containers/storage/pkg/chunked/internal"
|
||||
"github.com/containers/storage/pkg/chunked/internal/minimal"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
|
|
@ -19,7 +19,7 @@ const tocJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
|
|||
// This is an experimental feature and may be changed/removed in the future.
|
||||
func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) {
|
||||
d1, ok1 := annotations[tocJSONDigestAnnotation]
|
||||
d2, ok2 := annotations[internal.ManifestChecksumKey]
|
||||
d2, ok2 := annotations[minimal.ManifestChecksumKey]
|
||||
switch {
|
||||
case ok1 && ok2:
|
||||
return nil, errors.New("both zstd:chunked and eStargz TOC found")
|
||||
|
|
|
|||
177
vendor/github.com/containers/storage/pkg/idtools/idtools.go
generated
vendored
|
|
@ -4,6 +4,7 @@ import (
|
|||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"os/user"
|
||||
"runtime"
|
||||
|
|
@ -369,27 +370,66 @@ func checkChownErr(err error, name string, uid, gid int) error {
|
|||
|
||||
// Stat contains file states that can be overridden with ContainersOverrideXattr.
|
||||
type Stat struct {
|
||||
IDs IDPair
|
||||
Mode os.FileMode
|
||||
IDs IDPair
|
||||
Mode os.FileMode
|
||||
Major int
|
||||
Minor int
|
||||
}
|
||||
|
||||
// FormatContainersOverrideXattr will format the given uid, gid, and mode into a string
|
||||
// that can be used as the value for the ContainersOverrideXattr xattr.
|
||||
func FormatContainersOverrideXattr(uid, gid, mode int) string {
|
||||
return fmt.Sprintf("%d:%d:0%o", uid, gid, mode&0o7777)
|
||||
return FormatContainersOverrideXattrDevice(uid, gid, fs.FileMode(mode), 0, 0)
|
||||
}
|
||||
|
||||
// FormatContainersOverrideXattrDevice will format the given uid, gid, and mode into a string
|
||||
// that can be used as the value for the ContainersOverrideXattr xattr. For devices, it also
|
||||
// needs the major and minor numbers.
|
||||
func FormatContainersOverrideXattrDevice(uid, gid int, mode fs.FileMode, major, minor int) string {
|
||||
typ := ""
|
||||
switch mode & os.ModeType {
|
||||
case os.ModeDir:
|
||||
typ = "dir"
|
||||
case os.ModeSymlink:
|
||||
typ = "symlink"
|
||||
case os.ModeNamedPipe:
|
||||
typ = "pipe"
|
||||
case os.ModeSocket:
|
||||
typ = "socket"
|
||||
case os.ModeDevice:
|
||||
typ = fmt.Sprintf("block-%d-%d", major, minor)
|
||||
case os.ModeDevice | os.ModeCharDevice:
|
||||
typ = fmt.Sprintf("char-%d-%d", major, minor)
|
||||
default:
|
||||
typ = "file"
|
||||
}
|
||||
unixMode := mode & os.ModePerm
|
||||
if mode&os.ModeSetuid != 0 {
|
||||
unixMode |= 0o4000
|
||||
}
|
||||
if mode&os.ModeSetgid != 0 {
|
||||
unixMode |= 0o2000
|
||||
}
|
||||
if mode&os.ModeSticky != 0 {
|
||||
unixMode |= 0o1000
|
||||
}
|
||||
return fmt.Sprintf("%d:%d:%04o:%s", uid, gid, unixMode, typ)
|
||||
}
|
||||
|
||||
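The encoded value has the shape uid:gid:mode:type, with device numbers folded into the type suffix. A small sketch (not part of this diff) of the new formatter:

package main

import (
	"fmt"
	"io/fs"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// A character device 1:3 (/dev/null), mode 0666, owned by root.
	v := idtools.FormatContainersOverrideXattrDevice(0, 0, fs.ModeDevice|fs.ModeCharDevice|0o666, 1, 3)
	fmt.Println(v) // "0:0:0666:char-1-3"

	// Setuid/setgid/sticky travel in the numeric field; the file type in the suffix.
	fmt.Println(idtools.FormatContainersOverrideXattrDevice(0, 0, fs.ModeSetuid|0o755, 0, 0))
	// "0:0:4755:file"
}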
// GetContainersOverrideXattr will get and decode ContainersOverrideXattr.
|
||||
func GetContainersOverrideXattr(path string) (Stat, error) {
|
||||
var stat Stat
|
||||
xstat, err := system.Lgetxattr(path, ContainersOverrideXattr)
|
||||
if err != nil {
|
||||
return stat, err
|
||||
return Stat{}, err
|
||||
}
|
||||
return parseOverrideXattr(xstat) // This will fail if (xstat, err) == (nil, nil), i.e. the xattr does not exist.
|
||||
}
|
||||
|
||||
func parseOverrideXattr(xstat []byte) (Stat, error) {
|
||||
var stat Stat
|
||||
attrs := strings.Split(string(xstat), ":")
|
||||
if len(attrs) != 3 {
|
||||
return stat, fmt.Errorf("The number of clons in %s does not equal to 3",
|
||||
if len(attrs) < 3 {
|
||||
return stat, fmt.Errorf("The number of parts in %s is less than 3",
|
||||
ContainersOverrideXattr)
|
||||
}
|
||||
|
||||
|
|
@ -397,47 +437,105 @@ func GetContainersOverrideXattr(path string) (Stat, error) {
|
|||
if err != nil {
|
||||
return stat, fmt.Errorf("Failed to parse UID: %w", err)
|
||||
}
|
||||
|
||||
stat.IDs.UID = int(value)
|
||||
|
||||
value, err = strconv.ParseUint(attrs[0], 10, 32)
|
||||
value, err = strconv.ParseUint(attrs[1], 10, 32)
|
||||
if err != nil {
|
||||
return stat, fmt.Errorf("Failed to parse GID: %w", err)
|
||||
}
|
||||
|
||||
stat.IDs.GID = int(value)
|
||||
|
||||
value, err = strconv.ParseUint(attrs[2], 8, 32)
|
||||
if err != nil {
|
||||
return stat, fmt.Errorf("Failed to parse mode: %w", err)
|
||||
}
|
||||
stat.Mode = os.FileMode(value) & os.ModePerm
|
||||
if value&0o1000 != 0 {
|
||||
stat.Mode |= os.ModeSticky
|
||||
}
|
||||
if value&0o2000 != 0 {
|
||||
stat.Mode |= os.ModeSetgid
|
||||
}
|
||||
if value&0o4000 != 0 {
|
||||
stat.Mode |= os.ModeSetuid
|
||||
}
|
||||
|
||||
stat.Mode = os.FileMode(value)
|
||||
|
||||
if len(attrs) > 3 {
|
||||
typ := attrs[3]
|
||||
if strings.HasPrefix(typ, "file") {
|
||||
} else if strings.HasPrefix(typ, "dir") {
|
||||
stat.Mode |= os.ModeDir
|
||||
} else if strings.HasPrefix(typ, "symlink") {
|
||||
stat.Mode |= os.ModeSymlink
|
||||
} else if strings.HasPrefix(typ, "pipe") {
|
||||
stat.Mode |= os.ModeNamedPipe
|
||||
} else if strings.HasPrefix(typ, "socket") {
|
||||
stat.Mode |= os.ModeSocket
|
||||
} else if strings.HasPrefix(typ, "block") {
|
||||
stat.Mode |= os.ModeDevice
|
||||
stat.Major, stat.Minor, err = parseDevice(typ)
|
||||
if err != nil {
|
||||
return stat, err
|
||||
}
|
||||
} else if strings.HasPrefix(typ, "char") {
|
||||
stat.Mode |= os.ModeDevice | os.ModeCharDevice
|
||||
stat.Major, stat.Minor, err = parseDevice(typ)
|
||||
if err != nil {
|
||||
return stat, err
|
||||
}
|
||||
} else {
|
||||
return stat, fmt.Errorf("Invalid file type %s", typ)
|
||||
}
|
||||
}
|
||||
return stat, nil
|
||||
}
|
||||
|
||||
func parseDevice(typ string) (int, int, error) {
|
||||
parts := strings.Split(typ, "-")
|
||||
// If there are more than 3 parts, just ignore them to be forward compatible
|
||||
if len(parts) < 3 {
|
||||
return 0, 0, fmt.Errorf("Invalid device type %s", typ)
|
||||
}
|
||||
if parts[0] != "block" && parts[0] != "char" {
|
||||
return 0, 0, fmt.Errorf("Invalid device type %s", typ)
|
||||
}
|
||||
major, err := strconv.Atoi(parts[1])
|
||||
if err != nil {
|
||||
return 0, 0, fmt.Errorf("Failed to parse major number: %w", err)
|
||||
}
|
||||
minor, err := strconv.Atoi(parts[2])
|
||||
if err != nil {
|
||||
return 0, 0, fmt.Errorf("Failed to parse minor number: %w", err)
|
||||
}
|
||||
return major, minor, nil
|
||||
}
|
||||
|
||||
// SetContainersOverrideXattr will encode and set ContainersOverrideXattr.
|
||||
func SetContainersOverrideXattr(path string, stat Stat) error {
|
||||
value := FormatContainersOverrideXattr(stat.IDs.UID, stat.IDs.GID, int(stat.Mode))
|
||||
value := FormatContainersOverrideXattrDevice(stat.IDs.UID, stat.IDs.GID, stat.Mode, stat.Major, stat.Minor)
|
||||
return system.Lsetxattr(path, ContainersOverrideXattr, []byte(value), 0)
|
||||
}
|
||||
|
||||
func SafeChown(name string, uid, gid int) error {
|
||||
if runtime.GOOS == "darwin" {
|
||||
var mode os.FileMode = 0o0700
|
||||
xstat, err := system.Lgetxattr(name, ContainersOverrideXattr)
|
||||
if err == nil {
|
||||
attrs := strings.Split(string(xstat), ":")
|
||||
if len(attrs) == 3 {
|
||||
val, err := strconv.ParseUint(attrs[2], 8, 32)
|
||||
if err == nil {
|
||||
mode = os.FileMode(val)
|
||||
}
|
||||
}
|
||||
stat := Stat{
|
||||
Mode: os.FileMode(0o0700),
|
||||
}
|
||||
value := Stat{IDPair{uid, gid}, mode}
|
||||
if err = SetContainersOverrideXattr(name, value); err != nil {
|
||||
xstat, err := system.Lgetxattr(name, ContainersOverrideXattr)
|
||||
if err == nil && xstat != nil {
|
||||
stat, err = parseOverrideXattr(xstat)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
st, err := os.Stat(name) // Ideally we would share this with system.Stat below, but then we would need to convert Mode.
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stat.Mode = st.Mode()
|
||||
}
|
||||
stat.IDs = IDPair{UID: uid, GID: gid}
|
||||
if err = SetContainersOverrideXattr(name, stat); err != nil {
|
||||
return err
|
||||
}
|
||||
uid = os.Getuid()
|
||||
|
|
@ -453,19 +551,24 @@ func SafeChown(name string, uid, gid int) error {
|
|||
|
||||
func SafeLchown(name string, uid, gid int) error {
|
||||
if runtime.GOOS == "darwin" {
|
||||
var mode os.FileMode = 0o0700
|
||||
xstat, err := system.Lgetxattr(name, ContainersOverrideXattr)
|
||||
if err == nil {
|
||||
attrs := strings.Split(string(xstat), ":")
|
||||
if len(attrs) == 3 {
|
||||
val, err := strconv.ParseUint(attrs[2], 8, 32)
|
||||
if err == nil {
|
||||
mode = os.FileMode(val)
|
||||
}
|
||||
}
|
||||
stat := Stat{
|
||||
Mode: os.FileMode(0o0700),
|
||||
}
|
||||
value := Stat{IDPair{uid, gid}, mode}
|
||||
if err = SetContainersOverrideXattr(name, value); err != nil {
|
||||
xstat, err := system.Lgetxattr(name, ContainersOverrideXattr)
|
||||
if err == nil && xstat != nil {
|
||||
stat, err = parseOverrideXattr(xstat)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
st, err := os.Lstat(name) // Ideally we would share this with system.Stat below, but then we would need to convert Mode.
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stat.Mode = st.Mode()
|
||||
}
|
||||
stat.IDs = IDPair{UID: uid, GID: gid}
|
||||
if err = SetContainersOverrideXattr(name, stat); err != nil {
|
||||
return err
|
||||
}
|
||||
uid = os.Getuid()
|
||||
|
|
|
|||
4
vendor/github.com/containers/storage/pkg/ioutils/writers.go
generated
vendored
|
|
@ -36,9 +36,9 @@ func (r *writeCloserWrapper) Close() error {
|
|||
}
|
||||
|
||||
// NewWriteCloserWrapper returns a new io.WriteCloser.
|
||||
func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
|
||||
func NewWriteCloserWrapper(w io.Writer, closer func() error) io.WriteCloser {
|
||||
return &writeCloserWrapper{
|
||||
Writer: r,
|
||||
Writer: w,
|
||||
closer: closer,
|
||||
}
|
||||
}
|
||||
|
|
|
|||
2
vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
generated
vendored
|
|
@ -1,4 +1,4 @@
|
|||
//go:build linux && cgo
|
||||
//go:build linux
|
||||
|
||||
package loopback
|
||||
|
||||
|
|
|
|||
2
vendor/github.com/containers/storage/pkg/loopback/ioctl.go
generated
vendored
|
|
@ -1,4 +1,4 @@
|
|||
//go:build linux && cgo
|
||||
//go:build linux
|
||||
|
||||
package loopback
|
||||
|
||||
|
|
|
|||
38
vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go
generated
vendored
|
|
@ -1,21 +1,7 @@
|
|||
//go:build linux && cgo
|
||||
//go:build linux
|
||||
|
||||
package loopback
|
||||
|
||||
/*
|
||||
#include <linux/loop.h> // FIXME: present only for defines, maybe we can remove it?
|
||||
|
||||
#ifndef LOOP_CTL_GET_FREE
|
||||
#define LOOP_CTL_GET_FREE 0x4C82
|
||||
#endif
|
||||
|
||||
#ifndef LO_FLAGS_PARTSCAN
|
||||
#define LO_FLAGS_PARTSCAN 8
|
||||
#endif
|
||||
|
||||
*/
|
||||
import "C"
|
||||
|
||||
type loopInfo64 struct {
|
||||
loDevice uint64 /* ioctl r/o */
|
||||
loInode uint64 /* ioctl r/o */
|
||||
|
|
@ -34,19 +20,19 @@ type loopInfo64 struct {
|
|||
|
||||
// IOCTL consts
|
||||
const (
|
||||
LoopSetFd = C.LOOP_SET_FD
|
||||
LoopCtlGetFree = C.LOOP_CTL_GET_FREE
|
||||
LoopGetStatus64 = C.LOOP_GET_STATUS64
|
||||
LoopSetStatus64 = C.LOOP_SET_STATUS64
|
||||
LoopClrFd = C.LOOP_CLR_FD
|
||||
LoopSetCapacity = C.LOOP_SET_CAPACITY
|
||||
LoopSetFd = 0x4C00
|
||||
LoopCtlGetFree = 0x4C82
|
||||
LoopGetStatus64 = 0x4C05
|
||||
LoopSetStatus64 = 0x4C04
|
||||
LoopClrFd = 0x4C01
|
||||
LoopSetCapacity = 0x4C07
|
||||
)
|
||||
|
||||
// LOOP consts.
|
||||
const (
|
||||
LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR
|
||||
LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY
|
||||
LoFlagsPartScan = C.LO_FLAGS_PARTSCAN
|
||||
LoKeySize = C.LO_KEY_SIZE
|
||||
LoNameSize = C.LO_NAME_SIZE
|
||||
LoFlagsAutoClear = 0x4C07
|
||||
LoFlagsReadOnly = 1
|
||||
LoFlagsPartScan = 8
|
||||
LoKeySize = 32
|
||||
LoNameSize = 64
|
||||
)
|
||||
|
|
|
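With the cgo defines gone, the loop ioctl request numbers are ordinary Go constants, so callers can drive the loop subsystem with golang.org/x/sys/unix alone. A hedged sketch of finding a free loop device via LOOP_CTL_GET_FREE (the /dev/loop-control round trip is standard kernel behavior, not code from this diff):

```go
//go:build linux

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

const loopCtlGetFree = 0x4C82 // LOOP_CTL_GET_FREE, as hard-coded in loop_wrapper.go

func main() {
	ctl, err := os.OpenFile("/dev/loop-control", os.O_RDWR, 0)
	if err != nil {
		panic(err)
	}
	defer ctl.Close()

	// The ioctl returns the index of the first unused loop device.
	index, err := unix.IoctlRetInt(int(ctl.Fd()), loopCtlGetFree)
	if err != nil {
		panic(err)
	}
	fmt.Printf("/dev/loop%d is free\n", index)
}
```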

2 vendor/github.com/containers/storage/pkg/loopback/loopback.go (generated, vendored)
@@ -1,4 +1,4 @@
-//go:build linux && cgo
+//go:build linux
 
 package loopback

93 vendor/github.com/containers/storage/pkg/system/extattr_freebsd.go (generated, vendored, new file)
@@ -0,0 +1,93 @@
//go:build freebsd

package system

import (
	"os"
	"unsafe"

	"golang.org/x/sys/unix"
)

const (
	EXTATTR_NAMESPACE_EMPTY  = unix.EXTATTR_NAMESPACE_EMPTY
	EXTATTR_NAMESPACE_USER   = unix.EXTATTR_NAMESPACE_USER
	EXTATTR_NAMESPACE_SYSTEM = unix.EXTATTR_NAMESPACE_SYSTEM
)

// ExtattrGetLink retrieves the value of the extended attribute identified by attrname
// in the given namespace and associated with the given path in the file system.
// If the path is a symbolic link, the extended attribute is retrieved from the link itself.
// Returns a []byte slice if the extattr is set and nil otherwise.
func ExtattrGetLink(path string, attrnamespace int, attrname string) ([]byte, error) {
	size, errno := unix.ExtattrGetLink(path, attrnamespace, attrname,
		uintptr(unsafe.Pointer(nil)), 0)
	if errno != nil {
		if errno == unix.ENOATTR {
			return nil, nil
		}
		return nil, &os.PathError{Op: "extattr_get_link", Path: path, Err: errno}
	}
	if size == 0 {
		return []byte{}, nil
	}

	dest := make([]byte, size)
	size, errno = unix.ExtattrGetLink(path, attrnamespace, attrname,
		uintptr(unsafe.Pointer(&dest[0])), size)
	if errno != nil {
		return nil, &os.PathError{Op: "extattr_get_link", Path: path, Err: errno}
	}

	return dest[:size], nil
}

// ExtattrSetLink sets the value of extended attribute identified by attrname
// in the given namespace and associated with the given path in the file system.
// If the path is a symbolic link, the extended attribute is set on the link itself.
func ExtattrSetLink(path string, attrnamespace int, attrname string, data []byte) error {
	if len(data) == 0 {
		data = []byte{} // ensure non-nil for empty data
	}
	if _, errno := unix.ExtattrSetLink(path, attrnamespace, attrname,
		uintptr(unsafe.Pointer(&data[0])), len(data)); errno != nil {
		return &os.PathError{Op: "extattr_set_link", Path: path, Err: errno}
	}

	return nil
}

// ExtattrListLink lists extended attributes associated with the given path
// in the specified namespace. If the path is a symbolic link, the attributes
// are listed from the link itself.
func ExtattrListLink(path string, attrnamespace int) ([]string, error) {
	size, errno := unix.ExtattrListLink(path, attrnamespace,
		uintptr(unsafe.Pointer(nil)), 0)
	if errno != nil {
		return nil, &os.PathError{Op: "extattr_list_link", Path: path, Err: errno}
	}
	if size == 0 {
		return []string{}, nil
	}

	dest := make([]byte, size)
	size, errno = unix.ExtattrListLink(path, attrnamespace,
		uintptr(unsafe.Pointer(&dest[0])), size)
	if errno != nil {
		return nil, &os.PathError{Op: "extattr_list_link", Path: path, Err: errno}
	}

	var attrs []string
	for i := 0; i < size; {
		// Each attribute is preceded by a single byte length
		length := int(dest[i])
		i++
		if i+length > size {
			break
		}
		attrs = append(attrs, string(dest[i:i+length]))
		i += length
	}

	return attrs, nil
}
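The three wrappers above form a complete get/set/list surface over FreeBSD extended attributes, with names in the list buffer length-prefixed rather than NUL-separated. A hedged round-trip sketch using only the functions added above (the temp file and attribute name are invented for illustration; FreeBSD only):

```go
//go:build freebsd

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/system"
)

func main() {
	f, err := os.CreateTemp("", "extattr-demo")
	if err != nil {
		panic(err)
	}
	path := f.Name()
	f.Close()
	defer os.Remove(path)

	// Store an attribute in the user namespace.
	if err := system.ExtattrSetLink(path, system.EXTATTR_NAMESPACE_USER, "demo", []byte("value")); err != nil {
		panic(err)
	}
	// List names back; ExtattrListLink decodes the length-prefixed buffer.
	names, err := system.ExtattrListLink(path, system.EXTATTR_NAMESPACE_USER)
	if err != nil {
		panic(err)
	}
	// Read the value.
	val, err := system.ExtattrGetLink(path, system.EXTATTR_NAMESPACE_USER, "demo")
	if err != nil {
		panic(err)
	}
	fmt.Println(names, string(val))
}
```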

24 vendor/github.com/containers/storage/pkg/system/extattr_unsupported.go (generated, vendored, new file)
@@ -0,0 +1,24 @@
//go:build !freebsd

package system

const (
	EXTATTR_NAMESPACE_EMPTY  = 0
	EXTATTR_NAMESPACE_USER   = 0
	EXTATTR_NAMESPACE_SYSTEM = 0
)

// ExtattrGetLink is not supported on platforms other than FreeBSD.
func ExtattrGetLink(path string, attrnamespace int, attrname string) ([]byte, error) {
	return nil, ErrNotSupportedPlatform
}

// ExtattrSetLink is not supported on platforms other than FreeBSD.
func ExtattrSetLink(path string, attrnamespace int, attrname string, data []byte) error {
	return ErrNotSupportedPlatform
}

// ExtattrListLink is not supported on platforms other than FreeBSD.
func ExtattrListLink(path string, attrnamespace int) ([]string, error) {
	return nil, ErrNotSupportedPlatform
}

13 vendor/github.com/containers/storage/pkg/system/stat_netbsd.go (generated, vendored, new file)
@@ -0,0 +1,13 @@
package system

import "syscall"

// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
	return &StatT{size: s.Size,
		mode: uint32(s.Mode),
		uid:  s.Uid,
		gid:  s.Gid,
		rdev: uint64(s.Rdev),
		mtim: s.Mtimespec}, nil
}

2 vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go (generated, vendored)
@@ -12,7 +12,7 @@ const (
 	E2BIG unix.Errno = unix.E2BIG
 
 	// Operation not supported
-	EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP
+	ENOTSUP unix.Errno = unix.ENOTSUP
 )
 
 // Lgetxattr retrieves the value of the extended attribute identified by attr

85 vendor/github.com/containers/storage/pkg/system/xattrs_freebsd.go (generated, vendored, new file)
@@ -0,0 +1,85 @@
package system

import (
	"strings"

	"golang.org/x/sys/unix"
)

const (
	// Value is larger than the maximum size allowed
	E2BIG unix.Errno = unix.E2BIG

	// Operation not supported
	ENOTSUP unix.Errno = unix.ENOTSUP

	// Value is too small or too large for maximum size allowed
	EOVERFLOW unix.Errno = unix.EOVERFLOW
)

var (
	namespaceMap = map[string]int{
		"user":   EXTATTR_NAMESPACE_USER,
		"system": EXTATTR_NAMESPACE_SYSTEM,
	}
)

func xattrToExtattr(xattr string) (namespace int, extattr string, err error) {
	namespaceName, extattr, found := strings.Cut(xattr, ".")
	if !found {
		return -1, "", ENOTSUP
	}

	namespace, ok := namespaceMap[namespaceName]
	if !ok {
		return -1, "", ENOTSUP
	}
	return namespace, extattr, nil
}

// Lgetxattr retrieves the value of the extended attribute identified by attr
// and associated with the given path in the file system.
// Returns a []byte slice if the xattr is set and nil otherwise.
func Lgetxattr(path string, attr string) ([]byte, error) {
	namespace, extattr, err := xattrToExtattr(attr)
	if err != nil {
		return nil, err
	}
	return ExtattrGetLink(path, namespace, extattr)
}

// Lsetxattr sets the value of the extended attribute identified by attr
// and associated with the given path in the file system.
func Lsetxattr(path string, attr string, value []byte, flags int) error {
	if flags != 0 {
		// FIXME: Flags are not supported on FreeBSD, but we can implement
		// them mimicking the behavior of the Linux implementation.
		// See lsetxattr(2) on Linux for more information.
		return ENOTSUP
	}

	namespace, extattr, err := xattrToExtattr(attr)
	if err != nil {
		return err
	}
	return ExtattrSetLink(path, namespace, extattr, value)
}

// Llistxattr lists extended attributes associated with the given path
// in the file system.
func Llistxattr(path string) ([]string, error) {
	attrs := []string{}

	for namespaceName, namespace := range namespaceMap {
		namespaceAttrs, err := ExtattrListLink(path, namespace)
		if err != nil {
			return nil, err
		}

		for _, attr := range namespaceAttrs {
			attrs = append(attrs, namespaceName+"."+attr)
		}
	}

	return attrs, nil
}
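The key translation step is xattrToExtattr: a Linux-style attribute name such as `user.containers.override_stat` is cut at the first dot into a FreeBSD extattr namespace plus the remaining name. A standalone sketch of that mapping (our own simplified re-implementation for illustration, not the vendored helper):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// splitXattr mirrors the vendored xattrToExtattr logic: split at the first
// dot; only the "user" and "system" namespaces are recognized.
func splitXattr(xattr string) (namespace, attr string, err error) {
	ns, rest, found := strings.Cut(xattr, ".")
	if !found || (ns != "user" && ns != "system") {
		return "", "", errors.New("unsupported xattr namespace")
	}
	return ns, rest, nil
}

func main() {
	ns, attr, _ := splitXattr("user.containers.override_stat")
	fmt.Println(ns, attr) // user containers.override_stat
}
```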

2 vendor/github.com/containers/storage/pkg/system/xattrs_linux.go (generated, vendored)
@@ -12,7 +12,7 @@ const (
 	E2BIG unix.Errno = unix.E2BIG
 
 	// Operation not supported
-	EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP
+	ENOTSUP unix.Errno = unix.ENOTSUP
 
 	// Value is too small or too large for maximum size allowed
 	EOVERFLOW unix.Errno = unix.EOVERFLOW

4 vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go (generated, vendored)
@@ -1,4 +1,4 @@
-//go:build !linux && !darwin
+//go:build !linux && !darwin && !freebsd
 
 package system
 
@@ -9,7 +9,7 @@ const (
 	E2BIG syscall.Errno = syscall.Errno(0)
 
 	// Operation not supported
-	EOPNOTSUPP syscall.Errno = syscall.Errno(0)
+	ENOTSUP syscall.Errno = syscall.Errno(0)
 
 	// Value is too small or too large for maximum size allowed
 	EOVERFLOW syscall.Errno = syscall.Errno(0)

19 vendor/github.com/containers/storage/storage.conf (generated, vendored)
@@ -80,6 +80,25 @@ additionalimagestores = [
 # This is a "string bool": "false" | "true" (cannot be native TOML boolean)
 # convert_images = "false"
 
+# This should ALMOST NEVER be set.
+# It allows partial pulls of images without guaranteeing that "partial
+# pulls" and non-partial pulls both result in consistent image contents.
+# This allows pulling estargz images and early versions of zstd:chunked images;
+# otherwise, these layers always use the traditional non-partial pull path.
+#
+# This option should be enabled EXTREMELY rarely, only if ALL images that could
+# EVER be conceivably pulled on this system are GUARANTEED (e.g. using a signature policy)
+# to come from a build system trusted to never attack image integrity.
+#
+# If this consistency enforcement were disabled, malicious images could be built
+# in a way designed to evade other audit mechanisms, so presence of most other audit
+# mechanisms is not a replacement for the above-mentioned need for all images to come
+# from a trusted build system.
+#
+# As a side effect, enabling this option will also make image IDs unpredictable
+# (usually not equal to the traditional value matching the config digest).
+# insecure_allow_unpredictable_image_contents = "false"
+
 # Root-auto-userns-user is a user name which can be used to look up one or more UID/GID
 # ranges in the /etc/subuid and /etc/subgid file. These ranges will be partitioned
 # to containers configured to create automatically a user namespace. Containers
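For clarity, this is how the new knob would look if a deployment actually opted in, following the file's own "string bool" convention (hypothetical; the shipped default stays commented out and false):

```toml
# Default (consistency between partial and non-partial pulls enforced):
# insecure_allow_unpredictable_image_contents = "false"

# Hypothetical opt-in, ONLY for a system where every pullable image is
# guaranteed (e.g. via signature policy) to come from a trusted build system.
# Note the quoted "string bool" — a native TOML boolean is not accepted:
insecure_allow_unpredictable_image_contents = "true"
```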

64 vendor/github.com/containers/storage/store.go (generated, vendored)
@@ -20,6 +20,7 @@ import (
 	_ "github.com/containers/storage/drivers/register"
 
 	drivers "github.com/containers/storage/drivers"
+	"github.com/containers/storage/internal/dedup"
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/directory"
 	"github.com/containers/storage/pkg/idtools"
@@ -166,6 +167,26 @@ type flaggableStore interface {
 
 type StoreOptions = types.StoreOptions
 
+type DedupHashMethod = dedup.DedupHashMethod
+
+const (
+	DedupHashInvalid  = dedup.DedupHashInvalid
+	DedupHashCRC      = dedup.DedupHashCRC
+	DedupHashFileSize = dedup.DedupHashFileSize
+	DedupHashSHA256   = dedup.DedupHashSHA256
+)
+
+type (
+	DedupOptions = dedup.DedupOptions
+	DedupResult  = dedup.DedupResult
+)
+
+// DedupArgs is used to pass arguments to the Dedup command.
+type DedupArgs struct {
+	// Options that are passed directly to the internal/dedup.DedupDirs function.
+	Options DedupOptions
+}
+
 // Store wraps up the various types of file-based stores that we use into a
 // singleton object that initializes and manages them all together.
 type Store interface {
@@ -589,6 +610,9 @@ type Store interface {
 	// MultiList returns consistent values as of a single point in time.
 	// WARNING: The values may already be out of date by the time they are returned to the caller.
 	MultiList(MultiListOptions) (MultiListResult, error)
+
+	// Dedup deduplicates layers in the store.
+	Dedup(DedupArgs) (drivers.DedupResult, error)
 }
 
 // AdditionalLayer represents a layer that is contained in the additional layer store
@@ -3843,3 +3867,43 @@ func (s *store) MultiList(options MultiListOptions) (MultiListResult, error) {
 	}
 	return out, nil
 }
+
+// Dedup deduplicates layers in the store.
+func (s *store) Dedup(req DedupArgs) (drivers.DedupResult, error) {
+	imgs, err := s.Images()
+	if err != nil {
+		return drivers.DedupResult{}, err
+	}
+	var topLayers []string
+	for _, i := range imgs {
+		topLayers = append(topLayers, i.TopLayer)
+		topLayers = append(topLayers, i.MappedTopLayers...)
+	}
+	return writeToLayerStore(s, func(rlstore rwLayerStore) (drivers.DedupResult, error) {
+		layers := make(map[string]struct{})
+		for _, i := range topLayers {
+			cur := i
+			for cur != "" {
+				if _, visited := layers[cur]; visited {
+					break
+				}
+				l, err := rlstore.Get(cur)
+				if err != nil {
+					if err == ErrLayerUnknown {
+						break
+					}
+					return drivers.DedupResult{}, err
+				}
+				layers[cur] = struct{}{}
+				cur = l.Parent
+			}
+		}
+		r := drivers.DedupArgs{
+			Options: req.Options,
+		}
+		for l := range layers {
+			r.Layers = append(r.Layers, l)
+		}
+		return rlstore.dedup(r)
+	})
+}
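The new method walks every image's top layer (and mapped top layers) down the parent chain, collecting the reachable layer set before handing it to the driver. A hedged sketch of driving Store.Dedup end to end; DefaultStoreOptions and GetStore are the usual entry points, while the HashMethod field on DedupOptions and the Deduped byte counter on the result are assumptions based on the aliases introduced above rather than code shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/containers/storage"
	"github.com/containers/storage/types"
)

func main() {
	opts, err := types.DefaultStoreOptions()
	if err != nil {
		panic(err)
	}
	store, err := storage.GetStore(opts)
	if err != nil {
		panic(err)
	}
	defer store.Shutdown(false)

	// HashMethod is assumed to be the knob exposed by internal/dedup's
	// DedupOptions; SHA256 trades speed for collision safety.
	res, err := store.Dedup(storage.DedupArgs{
		Options: storage.DedupOptions{HashMethod: storage.DedupHashSHA256},
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("deduplicated %d bytes\n", res.Deduped)
}
```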