go.mod: update osbuild/images to v0.156.0

tag v0.155.0
Tagger: imagebuilder-bot <imagebuilder-bots+imagebuilder-bot@redhat.com>

Changes with 0.155.0

----------------
  * Fedora 43: add shadow-utils when LockRoot is enabled, update cloud-init service name (osbuild/images#1618)
    * Author: Achilleas Koutsou, Reviewers: Gianluca Zuccarelli, Michael Vogt
  * Update osbuild dependency commit ID to latest (osbuild/images#1609)
    * Author: SchutzBot, Reviewers: Achilleas Koutsou, Simon de Vlieger, Tomáš Hozza
  * Update snapshots to 20250626 (osbuild/images#1623)
    * Author: SchutzBot, Reviewers: Achilleas Koutsou, Simon de Vlieger
  * distro/rhel9: xz compress azure-cvm image type [HMS-8587] (osbuild/images#1620)
    * Author: Achilleas Koutsou, Reviewers: Simon de Vlieger, Tomáš Hozza
  * distro/rhel: introduce new image type: Azure SAP Apps [HMS-8738] (osbuild/images#1612)
    * Author: Achilleas Koutsou, Reviewers: Simon de Vlieger, Tomáš Hozza
  * distro/rhel: move ansible-core to sap_extras_pkgset (osbuild/images#1624)
    * Author: Achilleas Koutsou, Reviewers: Brian C. Lane, Tomáš Hozza
  * github/create-tag: allow passing the version when run manually (osbuild/images#1621)
    * Author: Achilleas Koutsou, Reviewers: Lukáš Zapletal, Tomáš Hozza
  * rhel9: move image-config into pure YAML (HMS-8593) (osbuild/images#1616)
    * Author: Michael Vogt, Reviewers: Achilleas Koutsou, Simon de Vlieger
  * test: split manifest checksums into separate files (osbuild/images#1625)
    * Author: Achilleas Koutsou, Reviewers: Simon de Vlieger, Tomáš Hozza

— Somewhere on the Internet, 2025-06-30

---

tag v0.156.0
Tagger: imagebuilder-bot <imagebuilder-bots+imagebuilder-bot@redhat.com>

Changes with 0.156.0

----------------
  * Many: delete repositories for EOL distributions (HMS-7044) (osbuild/images#1607)
    * Author: Tomáš Hozza, Reviewers: Michael Vogt, Simon de Vlieger
  * RHSM/facts: add 'image-builder CLI' API type (osbuild/images#1640)
    * Author: Tomáš Hozza, Reviewers: Brian C. Lane, Simon de Vlieger
  * Update dependencies 2025-06-29 (osbuild/images#1628)
    * Author: SchutzBot, Reviewers: Simon de Vlieger, Tomáš Hozza
  * Update osbuild dependency commit ID to latest (osbuild/images#1627)
    * Author: SchutzBot, Reviewers: Simon de Vlieger, Tomáš Hozza
  * [RFC] image: drop `InstallWeakDeps` from image.DiskImage (osbuild/images#1642)
    * Author: Michael Vogt, Reviewers: Brian C. Lane, Simon de Vlieger, Tomáš Hozza
  * build(deps): bump the go-deps group across 1 directory with 3 updates (osbuild/images#1632)
    * Author: dependabot[bot], Reviewers: SchutzBot, Tomáš Hozza
  * distro/rhel10: xz compress azure-cvm image type (osbuild/images#1638)
    * Author: Achilleas Koutsou, Reviewers: Brian C. Lane, Simon de Vlieger
  * distro: cleanup/refactor distro/{defs,generic} (HMS-8744) (osbuild/images#1570)
    * Author: Michael Vogt, Reviewers: Simon de Vlieger, Tomáš Hozza
  * distro: remove some hardcoded values from generic/images.go (osbuild/images#1636)
    * Author: Michael Vogt, Reviewers: Simon de Vlieger, Tomáš Hozza
  * distro: small tweaks for the YAML based imagetypes (osbuild/images#1622)
    * Author: Michael Vogt, Reviewers: Brian C. Lane, Simon de Vlieger
  * fedora/wsl: packages and locale (osbuild/images#1635)
    * Author: Simon de Vlieger, Reviewers: Michael Vogt, Tomáš Hozza
  * image/many: make compression more generic (osbuild/images#1634)
    * Author: Simon de Vlieger, Reviewers: Brian C. Lane, Michael Vogt
  * manifest: handle content template name with spaces (osbuild/images#1641)
    * Author: Bryttanie, Reviewers: Brian C. Lane, Michael Vogt, Tomáš Hozza
  * many: implement gzip (osbuild/images#1633)
    * Author: Simon de Vlieger, Reviewers: Michael Vogt, Tomáš Hozza
  * rhel/azure: set GRUB_TERMINAL based on architecture [RHEL-91383] (osbuild/images#1626)
    * Author: Achilleas Koutsou, Reviewers: Simon de Vlieger, Tomáš Hozza

— Somewhere on the Internet, 2025-07-07

---
This commit is contained in:
Achilleas Koutsou 2025-07-10 16:14:25 +02:00
parent 60c5f10af8
commit 3fd7092db5
1486 changed files with 124742 additions and 82516 deletions

View file

@ -5,10 +5,12 @@ import (
"io"
"math"
"net"
"net/http"
"net/url"
"syscall"
"time"
"github.com/containers/image/v5/docker"
"github.com/docker/distribution/registry/api/errcode"
errcodev2 "github.com/docker/distribution/registry/api/v2"
"github.com/hashicorp/go-multierror"
@ -47,7 +49,7 @@ func IfNecessary(ctx context.Context, operation func() error, options *Options)
logrus.Warnf("Failed, retrying in %s ... (%d/%d). Error: %v", delay, attempt+1, options.MaxRetry, err)
select {
case <-time.After(delay):
break
// Do nothing.
case <-ctx.Done():
return err
}
@ -81,6 +83,13 @@ func IsErrorRetryable(err error) bool {
return false
}
return true
case docker.UnexpectedHTTPStatusError:
// Retry on 502, 503 and 504 http server errors, they appear to be quite common in the field.
// https://github.com/containers/common/issues/2299
if e.StatusCode >= http.StatusBadGateway && e.StatusCode <= http.StatusGatewayTimeout {
return true
}
return false
case *net.OpError:
return IsErrorRetryable(e.Err)
case *url.Error: // This includes errors returned by the net/http client.

View file

@ -148,6 +148,13 @@ type Options struct {
// so that storage.ResolveReference returns exactly the created image.
// WARNING: It is unspecified whether the reference also contains a reference.Named element.
ReportResolvedReference *types.ImageReference
// DestinationTimestamp, if set, will force timestamps of content created in the destination to this value.
// Most transports don't support this.
//
// In oci-archive: destinations, this will set the create/mod/access timestamps in each tar entry
// (but not a timestamp of the created archive file).
DestinationTimestamp *time.Time
}
// OptionCompressionVariant allows to supply information about
@ -354,6 +361,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
if err := c.dest.CommitWithOptions(ctx, private.CommitOptions{
UnparsedToplevel: c.unparsedToplevel,
ReportResolvedReference: options.ReportResolvedReference,
Timestamp: options.DestinationTimestamp,
}); err != nil {
return nil, fmt.Errorf("committing the finished image: %w", err)
}

View file

@ -83,7 +83,7 @@ func platformCompressionMap(list internalManifest.List, instanceDigests []digest
platformSet = set.New[string]()
res[platform] = platformSet
}
platformSet.AddSlice(instanceDetails.ReadOnly.CompressionAlgorithmNames)
platformSet.AddSeq(slices.Values(instanceDetails.ReadOnly.CompressionAlgorithmNames))
}
return res, nil
}

View file

@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"io"
"iter"
"maps"
"reflect"
"slices"
@ -328,19 +329,16 @@ func prepareImageConfigForDest(ctx context.Context, sys *types.SystemContext, sr
}
wantedPlatforms := platform.WantedPlatforms(sys)
options := newOrderedSet()
match := false
for _, wantedPlatform := range wantedPlatforms {
if !slices.ContainsFunc(wantedPlatforms, func(wantedPlatform imgspecv1.Platform) bool {
// For a transitional period, this might trigger warnings because the Variant
// field was added to OCI config only recently. If this turns out to be too noisy,
// revert this check to only look for (OS, Architecture).
if platform.MatchesPlatform(ociConfig.Platform, wantedPlatform) {
match = true
break
return platform.MatchesPlatform(ociConfig.Platform, wantedPlatform)
}) {
options := newOrderedSet()
for _, p := range wantedPlatforms {
options.append(fmt.Sprintf("%s+%s+%q", p.OS, p.Architecture, p.Variant))
}
options.append(fmt.Sprintf("%s+%s+%q", wantedPlatform.OS, wantedPlatform.Architecture, wantedPlatform.Variant))
}
if !match {
logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q+%q, expecting one of %q",
ociConfig.OS, ociConfig.Architecture, ociConfig.Variant, strings.Join(options.list, ", "))
}
@ -420,7 +418,7 @@ func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context,
}
}
algos, err := algorithmsByNames(compressionAlgos.Values())
algos, err := algorithmsByNames(compressionAlgos.All())
if err != nil {
return nil, err
}
@ -555,7 +553,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algor
if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) {
ic.manifestUpdates.LayerInfos = destInfos
}
algos, err := algorithmsByNames(compressionAlgos.Values())
algos, err := algorithmsByNames(compressionAlgos.All())
if err != nil {
return nil, err
}
@ -991,10 +989,10 @@ func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorF
return digest.Canonical.FromReader(stream)
}
// algorithmsByNames returns slice of Algorithms from slice of Algorithm Names
func algorithmsByNames(names []string) ([]compressiontypes.Algorithm, error) {
// algorithmsByNames returns slice of Algorithms from a sequence of Algorithm Names
func algorithmsByNames(names iter.Seq[string]) ([]compressiontypes.Algorithm, error) {
result := []compressiontypes.Algorithm{}
for _, name := range names {
for name := range names {
algo, err := compression.AlgorithmByName(name)
if err != nil {
return nil, err

View file

@ -101,6 +101,9 @@ func NewReference(path string, ref reference.NamedTagged) (types.ImageReference,
// NewIndexReference returns a Docker archive reference for a path and a zero-based source manifest index.
func NewIndexReference(path string, sourceIndex int) (types.ImageReference, error) {
if sourceIndex < 0 {
return nil, fmt.Errorf("invalid call to NewIndexReference with negative index %d", sourceIndex)
}
return newReference(path, nil, sourceIndex, nil, nil)
}

View file

@ -35,9 +35,9 @@ type bodyReader struct {
body io.ReadCloser // The currently open connection we use to read data, or nil if there is nothing to read from / close.
lastRetryOffset int64 // -1 if N/A
lastRetryTime time.Time // time.Time{} if N/A
lastRetryTime time.Time // IsZero() if N/A
offset int64 // Current offset within the blob
lastSuccessTime time.Time // time.Time{} if N/A
lastSuccessTime time.Time // IsZero() if N/A
}
// newBodyReader creates a bodyReader for request path in c.
@ -207,9 +207,9 @@ func (br *bodyReader) Read(p []byte) (int, error) {
}
// millisecondsSinceOptional is like currentTime.Sub(tm).Milliseconds, but it returns a floating-point value.
// If tm is time.Time{}, it returns math.NaN()
// If tm.IsZero(), it returns math.NaN()
func millisecondsSinceOptional(currentTime time.Time, tm time.Time) float64 {
if tm == (time.Time{}) {
if tm.IsZero() {
return math.NaN()
}
return float64(currentTime.Sub(tm).Nanoseconds()) / 1_000_000.0
@ -229,7 +229,7 @@ func (br *bodyReader) errorIfNotReconnecting(originalErr error, redactedURL stri
logrus.Infof("Reading blob body from %s failed (%v), reconnecting after %d bytes…", redactedURL, originalErr, progress)
return nil
}
if br.lastRetryTime == (time.Time{}) {
if br.lastRetryTime.IsZero() {
logrus.Infof("Reading blob body from %s failed (%v), reconnecting (first reconnection)…", redactedURL, originalErr)
return nil
}

View file

@ -92,7 +92,7 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe
// imageLoad accepts tar stream on reader and sends it to c
func imageLoad(ctx context.Context, c *client.Client, reader *io.PipeReader) error {
resp, err := c.ImageLoad(ctx, reader, true)
resp, err := c.ImageLoad(ctx, reader, client.ImageLoadWithQuiet(true))
if err != nil {
return fmt.Errorf("starting a load operation in docker engine: %w", err)
}

View file

@ -87,10 +87,13 @@ func ParseReference(refString string) (types.ImageReference, error) {
// NewReference returns a docker-daemon reference for either the supplied image ID (config digest) or the supplied reference (which must satisfy !reference.IsNameOnly)
func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference, error) {
if id != "" && ref != nil {
switch {
case id != "" && ref != nil:
return nil, errors.New("docker-daemon: reference must not have an image ID and a reference string specified at the same time")
}
if ref != nil {
case id == "" && ref == nil:
return nil, errors.New("docker-daemon: reference must have at least one of an image ID and a reference string")
case ref != nil:
if reference.IsNameOnly(ref) {
return nil, fmt.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
}

View file

@ -30,14 +30,25 @@ import (
// errcode.Errors slice.
var errNoErrorsInBody = errors.New("no error details found in HTTP response body")
// unexpectedHTTPStatusError is returned when an unexpected HTTP status is
// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is
// returned when making a registry api call.
type unexpectedHTTPStatusError struct {
Status string
type UnexpectedHTTPStatusError struct {
// StatusCode code as returned from the server, so callers can
// match the exact code to make certain decisions if needed.
StatusCode int
// status text as displayed in the error message, not exposed as callers should match the number.
status string
}
func (e *unexpectedHTTPStatusError) Error() string {
return fmt.Sprintf("received unexpected HTTP status: %s", e.Status)
func (e UnexpectedHTTPStatusError) Error() string {
return fmt.Sprintf("received unexpected HTTP status: %s", e.status)
}
func newUnexpectedHTTPStatusError(resp *http.Response) UnexpectedHTTPStatusError {
return UnexpectedHTTPStatusError{
StatusCode: resp.StatusCode,
status: resp.Status,
}
}
// unexpectedHTTPResponseError is returned when an expected HTTP status code
@ -117,7 +128,7 @@ func handleErrorResponse(resp *http.Response) error {
case resp.StatusCode == http.StatusUnauthorized:
// Check for OAuth errors within the `WWW-Authenticate` header first
// See https://tools.ietf.org/html/rfc6750#section-3
for _, c := range parseAuthHeader(resp.Header) {
for c := range iterateAuthHeader(resp.Header) {
if c.Scheme == "bearer" {
var err errcode.Error
// codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
@ -146,5 +157,5 @@ func handleErrorResponse(resp *http.Response) error {
}
return err
}
return &unexpectedHTTPStatusError{Status: resp.Status}
return newUnexpectedHTTPStatusError(resp)
}

View file

@ -11,6 +11,7 @@ import (
"net/url"
"os"
"path/filepath"
"slices"
"strconv"
"strings"
"sync"
@ -475,12 +476,11 @@ func (c *dockerClient) resolveRequestURL(path string) (*url.URL, error) {
}
// Checks if the auth headers in the response contain an indication of a failed
// authorizdation because of an "insufficient_scope" error. If that's the case,
// authorization because of an "insufficient_scope" error. If that's the case,
// returns the required scope to be used for fetching a new token.
func needsRetryWithUpdatedScope(res *http.Response) (bool, *authScope) {
if res.StatusCode == http.StatusUnauthorized {
challenges := parseAuthHeader(res.Header)
for _, challenge := range challenges {
for challenge := range iterateAuthHeader(res.Header) {
if challenge.Scheme == "bearer" {
if errmsg, ok := challenge.Parameters["error"]; ok && errmsg == "insufficient_scope" {
if scope, ok := challenge.Parameters["scope"]; ok && scope != "" {
@ -907,6 +907,10 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
}
tr := tlsclientconfig.NewTransport()
tr.TLSClientConfig = c.tlsClientConfig
// if set DockerProxyURL explicitly, use the DockerProxyURL instead of system proxy
if c.sys != nil && c.sys.DockerProxyURL != nil {
tr.Proxy = http.ProxyURL(c.sys.DockerProxyURL)
}
c.client = &http.Client{Transport: tr}
ping := func(scheme string) error {
@ -924,7 +928,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
return registryHTTPResponseToError(resp)
}
c.challenges = parseAuthHeader(resp.Header)
c.challenges = slices.Collect(iterateAuthHeader(resp.Header))
c.scheme = scheme
c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1"
return nil
@ -992,13 +996,18 @@ func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.R
continue
}
if resp.StatusCode != http.StatusOK {
err := fmt.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode))
err := fmt.Errorf("error fetching external blob from %q: %w", u, newUnexpectedHTTPStatusError(resp))
remoteErrors = append(remoteErrors, err)
logrus.Debug(err)
resp.Body.Close()
continue
}
return resp.Body, getBlobSize(resp), nil
size, err := getBlobSize(resp)
if err != nil {
size = -1
}
return resp.Body, size, nil
}
if remoteErrors == nil {
return nil, 0, nil // fallback to non-external blob
@ -1006,12 +1015,20 @@ func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.R
return nil, 0, fmt.Errorf("failed fetching external blob from all urls: %w", multierr.Format("", ", ", "", remoteErrors))
}
func getBlobSize(resp *http.Response) int64 {
size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
if err != nil {
size = -1
func getBlobSize(resp *http.Response) (int64, error) {
hdrs := resp.Header.Values("Content-Length")
if len(hdrs) == 0 {
return -1, errors.New(`Missing "Content-Length" header in response`)
}
return size
hdr := hdrs[0] // Equivalent to resp.Header.Get(…)
size, err := strconv.ParseInt(hdr, 10, 64)
if err != nil { // Go's response reader should already reject such values.
return -1, err
}
if size < 0 { // '-' is not a valid character in Content-Length, so negative values are invalid. Go's response reader should already reject such values.
return -1, fmt.Errorf(`Invalid negative "Content-Length" %q`, hdr)
}
return size, nil
}
// getBlob returns a stream for the specified blob in ref, and the blobs size (or -1 if unknown).
@ -1042,7 +1059,10 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty
return nil, 0, fmt.Errorf("fetching blob: %w", err)
}
cache.RecordKnownLocation(ref.Transport(), bicTransportScope(ref), info.Digest, newBICLocationReference(ref))
blobSize := getBlobSize(res)
blobSize, err := getBlobSize(res)
if err != nil {
blobSize = -1
}
reconnectingReader, err := newBodyReader(ctx, c, path, res.Body)
if err != nil {

View file

@ -243,8 +243,12 @@ func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.
defer res.Body.Close()
switch res.StatusCode {
case http.StatusOK:
size, err := getBlobSize(res)
if err != nil {
return false, -1, fmt.Errorf("determining size of blob %s in %s: %w", digest, repo.Name(), err)
}
logrus.Debugf("... already exists")
return true, getBlobSize(res), nil
return true, size, nil
case http.StatusUnauthorized:
logrus.Debugf("... not authorized")
return false, -1, fmt.Errorf("checking whether a blob %s exists in %s: %w", digest, repo.Name(), registryHTTPResponseToError(res))

View file

@ -569,7 +569,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, sigURL *url.URL
logrus.Debugf("... got status 404, as expected = end of signatures")
return nil, true, nil
} else if res.StatusCode != http.StatusOK {
return nil, false, fmt.Errorf("reading signature from %s: status %d (%s)", sigURL.Redacted(), res.StatusCode, http.StatusText(res.StatusCode))
return nil, false, fmt.Errorf("reading signature from %s: %w", sigURL.Redacted(), newUnexpectedHTTPStatusError(res))
}
contentType := res.Header.Get("Content-Type")

View file

@ -40,10 +40,10 @@ func httpResponseToError(res *http.Response, context string) error {
err := registryHTTPResponseToError(res)
return ErrUnauthorizedForCredentials{Err: err}
default:
if context != "" {
context += ": "
if context == "" {
return newUnexpectedHTTPStatusError(res)
}
return fmt.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode))
return fmt.Errorf("%s: %w", context, newUnexpectedHTTPStatusError(res))
}
}

View file

@ -242,9 +242,7 @@ func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Des
}
knownRepoTags := set.New[string]()
for _, repoTag := range item.RepoTags {
knownRepoTags.Add(repoTag)
}
knownRepoTags.AddSeq(slices.Values(item.RepoTags))
for _, tag := range repoTags {
// For github.com/docker/docker consumers, this works just as well as
// refString := ref.String()

View file

@ -1,5 +1,4 @@
//go:build !freebsd
// +build !freebsd
package docker

View file

@ -1,5 +1,4 @@
//go:build freebsd
// +build freebsd
package docker

View file

@ -4,6 +4,7 @@ package docker
import (
"fmt"
"iter"
"net/http"
"strings"
)
@ -60,15 +61,17 @@ func init() {
}
}
func parseAuthHeader(header http.Header) []challenge {
challenges := []challenge{}
for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
v, p := parseValueAndParams(h)
if v != "" {
challenges = append(challenges, challenge{Scheme: v, Parameters: p})
func iterateAuthHeader(header http.Header) iter.Seq[challenge] {
return func(yield func(challenge) bool) {
for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
v, p := parseValueAndParams(h)
if v != "" {
if !yield(challenge{Scheme: v, Parameters: p}) {
return
}
}
}
}
return challenges
}
// parseAuthScope parses an authentication scope string of the form `$resource:$remote:$actions`

View file

@ -30,6 +30,9 @@ type UnparsedImage struct {
// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest).
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list).
//
// This implementation of [types.UnparsedImage] ensures that [types.UnparsedImage.Manifest] validates the image
// against instanceDigest if set, or, if not, a digest implied by src.Reference, if any.
//
// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
//
// This is publicly visible as c/image/image.UnparsedInstance.
@ -48,6 +51,9 @@ func (i *UnparsedImage) Reference() types.ImageReference {
}
// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
//
// Users of UnparsedImage are promised that this validates the image
// against either i.instanceDigest if set, or against a digest included in i.src.Reference.
func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
if i.cachedManifest == nil {
m, mt, err := i.src.GetManifest(ctx, i.instanceDigest)

View file

@ -213,12 +213,12 @@ type instanceCandidate struct {
digest digest.Digest // Instance digest
}
func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip bool) bool {
func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip types.OptionalBool) bool {
switch {
case ic.platformIndex != other.platformIndex:
return ic.platformIndex < other.platformIndex
case ic.isZstd != other.isZstd:
if !preferGzip {
if preferGzip != types.OptionalBoolTrue {
return ic.isZstd
} else {
return !ic.isZstd
@ -232,10 +232,6 @@ func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip
// chooseInstance is a private equivalent to ChooseInstanceByCompression,
// shared by ChooseInstance and ChooseInstanceByCompression.
func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
didPreferGzip := false
if preferGzip == types.OptionalBoolTrue {
didPreferGzip = true
}
wantedPlatforms := platform.WantedPlatforms(ctx)
var bestMatch *instanceCandidate
bestMatch = nil
@ -251,7 +247,7 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi
}
candidate.platformIndex = platformIndex
}
if bestMatch == nil || candidate.isPreferredOver(bestMatch, didPreferGzip) {
if bestMatch == nil || candidate.isPreferredOver(bestMatch, preferGzip) {
bestMatch = &candidate
}
}

View file

@ -3,6 +3,7 @@ package private
import (
"context"
"io"
"time"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/blobinfocache"
@ -170,6 +171,12 @@ type CommitOptions struct {
// What “resolved” means is transport-specific.
// Transports which don't support reporting resolved references can ignore the field; the generic copy code writes "nil" into the value.
ReportResolvedReference *types.ImageReference
// Timestamp, if set, will force timestamps of content created in the destination to this value.
// Most transports don't support this.
//
// In oci-archive: destinations, this will set the create/mod/access timestamps in each tar entry
// (but not a timestamp of the created archive file).
Timestamp *time.Time
}
// ImageSourceChunk is a portion of a blob.

View file

@ -1,22 +0,0 @@
//go:build linux
package reflink
import (
"io"
"os"
"golang.org/x/sys/unix"
)
// LinkOrCopy attempts to reflink the source to the destination fd.
// If reflinking fails or is unsupported, it falls back to io.Copy().
func LinkOrCopy(src, dst *os.File) error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, dst.Fd(), unix.FICLONE, src.Fd())
if errno == 0 {
return nil
}
_, err := io.Copy(dst, src)
return err
}

View file

@ -1,6 +1,9 @@
package set
import "golang.org/x/exp/maps"
import (
"iter"
"maps"
)
// FIXME:
// - Docstrings
@ -28,8 +31,8 @@ func (s *Set[E]) Add(v E) {
s.m[v] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
}
func (s *Set[E]) AddSlice(slice []E) {
for _, v := range slice {
func (s *Set[E]) AddSeq(seq iter.Seq[E]) {
for v := range seq {
s.Add(v)
}
}
@ -47,6 +50,6 @@ func (s *Set[E]) Empty() bool {
return len(s.m) == 0
}
func (s *Set[E]) Values() []E {
func (s *Set[E]) All() iter.Seq[E] {
return maps.Keys(s.m)
}

View file

@ -133,12 +133,12 @@ func (m *Schema1) ConfigInfo() types.BlobInfo {
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *Schema1) LayerInfos() []LayerInfo {
layers := make([]LayerInfo, len(m.FSLayers))
for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
layers[(len(m.FSLayers)-1)-i] = LayerInfo{
layers := make([]LayerInfo, 0, len(m.FSLayers))
for i, layer := range slices.Backward(m.FSLayers) { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
layers = append(layers, LayerInfo{
BlobInfo: types.BlobInfo{Digest: layer.BlobSum, Size: -1},
EmptyLayer: m.ExtractedV1Compatibility[i].ThrowAway,
}
})
}
return layers
}
@ -284,7 +284,7 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
}
// Build the history.
convertedHistory := []Schema2History{}
for _, compat := range m.ExtractedV1Compatibility {
for _, compat := range slices.Backward(m.ExtractedV1Compatibility) {
hitem := Schema2History{
Created: compat.Created,
CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "),
@ -292,7 +292,7 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
Comment: compat.Comment,
EmptyLayer: compat.ThrowAway,
}
convertedHistory = append([]Schema2History{hitem}, convertedHistory...)
convertedHistory = append(convertedHistory, hitem)
}
// Build the rootfs information. We need the decompressed sums that we've been
// calculating to fill in the DiffIDs. It's expected (but not enforced by us)

View file

@ -166,10 +166,11 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return
// an error if the mediatype does not support encryption
func getEncryptedMediaType(mediatype string) (string, error) {
if slices.Contains(strings.Split(mediatype, "+")[1:], "encrypted") {
parts := strings.Split(mediatype, "+")
if slices.Contains(parts[1:], "encrypted") {
return "", fmt.Errorf("unsupported mediaType: %q already encrypted", mediatype)
}
unsuffixedMediatype := strings.Split(mediatype, "+")[0]
unsuffixedMediatype := parts[0]
switch unsuffixedMediatype {
case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer,
imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.

View file

@ -5,6 +5,7 @@ import (
"fmt"
"io"
"os"
"time"
"github.com/containers/image/v5/internal/imagedestination"
"github.com/containers/image/v5/internal/imagedestination/impl"
@ -172,16 +173,19 @@ func (d *ociArchiveImageDestination) CommitWithOptions(ctx context.Context, opti
src := d.tempDirRef.tempDirectory
// path to save tarred up file
dst := d.ref.resolvedFile
return tarDirectory(src, dst)
return tarDirectory(src, dst, options.Timestamp)
}
// tar converts the directory at src and saves it to dst
func tarDirectory(src, dst string) error {
// if contentModTimes is non-nil, tar header entries times are set to this
func tarDirectory(src, dst string, contentModTimes *time.Time) (retErr error) {
// input is a stream of bytes from the archive of the directory at path
input, err := archive.TarWithOptions(src, &archive.TarOptions{
Compression: archive.Uncompressed,
// Don't include the data about the user account this code is running under.
ChownOpts: &idtools.IDPair{UID: 0, GID: 0},
// override tar header timestamps
Timestamp: contentModTimes,
})
if err != nil {
return fmt.Errorf("retrieving stream of bytes from %q: %w", src, err)
@ -193,7 +197,14 @@ func tarDirectory(src, dst string) error {
if err != nil {
return fmt.Errorf("creating tar file %q: %w", dst, err)
}
defer outFile.Close()
// since we are writing to this file, make sure we handle errors
defer func() {
closeErr := outFile.Close()
if retErr == nil {
retErr = closeErr
}
}()
// copies the contents of the directory to the tar file
// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.

View file

@ -52,13 +52,13 @@ func (t ociArchiveTransport) ValidatePolicyConfigurationScope(scope string) erro
return internal.ValidateScope(scope)
}
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI archive ImageReference.
func ParseReference(reference string) (types.ImageReference, error) {
file, image := internal.SplitPathAndImage(reference)
return NewReference(file, image)
}
// NewReference returns an OCI reference for a file and a image.
// NewReference returns an OCI archive reference for a file and an optional image name annotation (if not "").
func NewReference(file, image string) (types.ImageReference, error) {
resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file)
if err != nil {

View file

@ -123,7 +123,7 @@ func (ref ociReference) getBlobsToDelete(blobsUsedByDescriptorToDelete map[diges
//
// So, NOTE: the blobPath() call below hard-codes "" even in calls where OCISharedBlobDirPath is set
func (ref ociReference) deleteBlobs(blobsToDelete *set.Set[digest.Digest]) error {
for _, digest := range blobsToDelete.Values() {
for digest := range blobsToDelete.All() {
blobPath, err := ref.blobPath(digest, "") //Only delete in the local directory, see comment above
if err != nil {
return err
@ -159,7 +159,7 @@ func (ref ociReference) deleteReferenceFromIndex(referenceIndex int) error {
return saveJSON(ref.indexPath(), index)
}
func saveJSON(path string, content any) error {
func saveJSON(path string, content any) (retErr error) {
// If the file already exists, get its mode to preserve it
var mode fs.FileMode
existingfi, err := os.Stat(path)
@ -177,7 +177,13 @@ func saveJSON(path string, content any) error {
if err != nil {
return err
}
defer file.Close()
// since we are writing to this file, make sure we handle errors
defer func() {
closeErr := file.Close()
if retErr == nil {
retErr = closeErr
}
}()
return json.NewEncoder(file).Encode(content)
}

View file

@ -17,7 +17,6 @@ import (
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/reflink"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/fileutils"
digest "github.com/opencontainers/go-digest"
@ -116,7 +115,7 @@ func (d *ociImageDestination) Close() error {
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (_ private.UploadedBlob, retErr error) {
blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
if err != nil {
return private.UploadedBlob{}, err
@ -125,7 +124,10 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
explicitClosed := false
defer func() {
if !explicitClosed {
blobFile.Close()
closeErr := blobFile.Close()
if retErr == nil {
retErr = closeErr
}
}
if !succeeded {
os.Remove(blobFile.Name())
@ -177,7 +179,10 @@ func (d *ociImageDestination) blobFileSyncAndRename(blobFile *os.File, blobDiges
}
// need to explicitly close the file, since a rename won't otherwise work on Windows
blobFile.Close()
err = blobFile.Close()
if err != nil {
return err
}
*closed = true
if err := os.Rename(blobFile.Name(), blobPath); err != nil {
@ -324,10 +329,10 @@ type PutBlobFromLocalFileOption struct{}
// It computes, and returns, the digest and size of the used file.
//
// This function can be used instead of dest.PutBlob() where the ImageDestination requires PutBlob() to be called.
func PutBlobFromLocalFile(ctx context.Context, dest types.ImageDestination, file string, options ...PutBlobFromLocalFileOption) (digest.Digest, int64, error) {
func PutBlobFromLocalFile(ctx context.Context, dest types.ImageDestination, file string, options ...PutBlobFromLocalFileOption) (_ digest.Digest, _ int64, retErr error) {
d, ok := dest.(*ociImageDestination)
if !ok {
return "", -1, errors.New("internal error: PutBlobFromLocalFile called with a non-oci: destination")
return "", -1, errors.New("caller error: PutBlobFromLocalFile called with a non-oci: destination")
}
succeeded := false
@ -338,7 +343,10 @@ func PutBlobFromLocalFile(ctx context.Context, dest types.ImageDestination, file
}
defer func() {
if !blobFileClosed {
blobFile.Close()
closeErr := blobFile.Close()
if retErr == nil {
retErr = closeErr
}
}
if !succeeded {
os.Remove(blobFile.Name())
@ -351,7 +359,7 @@ func PutBlobFromLocalFile(ctx context.Context, dest types.ImageDestination, file
}
defer srcFile.Close()
err = reflink.LinkOrCopy(srcFile, blobFile)
err = fileutils.ReflinkOrCopy(srcFile, blobFile)
if err != nil {
return "", -1, err
}

View file

@ -16,6 +16,7 @@ import (
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/pkg/tlsclientconfig"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/fileutils"
"github.com/docker/go-connections/tlsconfig"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@ -214,3 +215,26 @@ func getBlobSize(resp *http.Response) int64 {
}
return size
}
// GetLocalBlobPath resolves digest to the path of its blob file within the
// OCI layout of src and verifies that the file exists; a digest without a
// backing blob file results in an error.
//
// Important: callers must treat the returned path as read-only — writing to
// the file would break the digest match and corrupt the OCI layout.
func GetLocalBlobPath(ctx context.Context, src types.ImageSource, digest digest.Digest) (string, error) {
	ociSrc, ok := src.(*ociImageSource)
	if !ok {
		return "", errors.New("caller error: GetLocalBlobPath called with a non-oci: source")
	}
	blobPath, err := ociSrc.ref.blobPath(digest, ociSrc.sharedBlobDir)
	if err != nil {
		return "", err
	}
	// Reject digests that have no corresponding blob file on disk.
	if err := fileutils.Exists(blobPath); err != nil {
		return "", err
	}
	return blobPath, nil
}

View file

@ -12,6 +12,7 @@ import (
"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/oci/internal"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
@ -110,13 +111,13 @@ func newReference(dir, image string, sourceIndex int) (types.ImageReference, err
// NewIndexReference returns an OCI reference for a path and a zero-based source manifest index.
func NewIndexReference(dir string, sourceIndex int) (types.ImageReference, error) {
	if sourceIndex >= 0 {
		// A valid index selects a manifest by position; no image name annotation is used.
		return newReference(dir, "", sourceIndex)
	}
	return nil, fmt.Errorf("invalid call to NewIndexReference with negative index %d", sourceIndex)
}
// NewReference returns an OCI reference for a directory and a image.
//
// We do not expose an API supplying the resolvedDir; we could, but recomputing it
// is generally cheap enough that we prefer being confident about the properties of resolvedDir.
// NewReference returns an OCI reference for a directory and an optional image name annotation (if not "").
func NewReference(dir, image string) (types.ImageReference, error) {
	// A plain (non-index) reference never selects a manifest by position.
	const noSourceIndex = -1
	return newReference(dir, image, noSourceIndex)
}
@ -234,7 +235,7 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, erro
var unsupportedMIMETypes []string
for i, md := range index.Manifests {
if refName, ok := md.Annotations[imgspecv1.AnnotationRefName]; ok && refName == ref.image {
if md.MediaType == imgspecv1.MediaTypeImageManifest || md.MediaType == imgspecv1.MediaTypeImageIndex {
if md.MediaType == imgspecv1.MediaTypeImageManifest || md.MediaType == imgspecv1.MediaTypeImageIndex || md.MediaType == manifest.DockerV2Schema2MediaType || md.MediaType == manifest.DockerV2ListMediaType {
return md, i, nil
}
unsupportedMIMETypes = append(unsupportedMIMETypes, md.MediaType)

View file

@ -571,8 +571,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
// merge all of the struct values in the reverse order so that priority is given correctly
// errors are not added to the list the second time
nonMapConfig := clientcmdNewConfig()
for i := len(kubeconfigs) - 1; i >= 0; i-- {
kubeconfig := kubeconfigs[i]
for _, kubeconfig := range slices.Backward(kubeconfigs) {
if err := mergo.MergeWithOverwrite(nonMapConfig, kubeconfig); err != nil {
return nil, err
}
@ -921,7 +920,7 @@ func tlsCacheGet(config *restConfig) (http.RoundTripper, error) {
// TLSConfigFor returns a tls.Config that will provide the transport level security defined
// by the provided Config. Will return nil if no transport level security is requested.
func tlsConfigFor(c *restConfig) (*tls.Config, error) {
if !(c.HasCA() || c.HasCertAuth() || c.Insecure) {
if !c.HasCA() && !c.HasCertAuth() && !c.Insecure {
return nil, nil
}
if c.HasCA() && c.Insecure {

View file

@ -143,16 +143,24 @@ func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream
return private.UploadedBlob{}, err
}
digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
blobPath := filepath.Join(tmpDir, "content")
blobFile, err := os.Create(blobPath)
if err != nil {
return private.UploadedBlob{}, err
}
defer blobFile.Close()
digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
size, err := io.Copy(blobFile, stream)
size, err := func() (_ int64, retErr error) { // A scope for defer
// since we are writing to this file, make sure we handle errors
defer func() {
closeErr := blobFile.Close()
if retErr == nil {
retErr = closeErr
}
}()
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
return io.Copy(blobFile, stream)
}()
if err != nil {
return private.UploadedBlob{}, err
}
@ -247,9 +255,15 @@ func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch strin
return err
}
func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest, int64, error) {
func generateTarSplitMetadata(output *bytes.Buffer, file string) (_ digest.Digest, _ int64, retErr error) {
mfz := pgzip.NewWriter(output)
defer mfz.Close()
// since we are writing to this, make sure we handle errors
defer func() {
closeErr := mfz.Close()
if retErr == nil {
retErr = closeErr
}
}()
metaPacker := storage.NewJSONPacker(mfz)
stream, err := os.OpenFile(file, os.O_RDONLY, 0)

View file

@ -250,9 +250,7 @@ func newOSTreePathFileGetter(repo *C.struct_OstreeRepo, commit string) (*ostreeP
func (o ostreePathFileGetter) Get(filename string) (io.ReadCloser, error) {
var file *C.GFile
if strings.HasPrefix(filename, "./") {
filename = filename[2:]
}
filename, _ = strings.CutPrefix(filename, "./")
cfilename := C.CString(filename)
defer C.free(unsafe.Pointer(cfilename))

View file

@ -240,7 +240,7 @@ func (mem *cache) candidateLocations(transport types.ImageTransport, scope types
if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
if otherDigests != nil {
for _, d := range otherDigests.Values() {
for d := range otherDigests.All() {
if d != primaryDigest && d != uncompressedDigest {
res = mem.appendReplacementCandidates(res, transport, scope, d, v2Options)
}

View file

@ -87,14 +87,20 @@ func new2(path string) (*cache, error) {
if err != nil {
return nil, fmt.Errorf("initializing blob info cache at %q: %w", path, err)
}
defer db.Close()
// We dont check the schema before every operation, because that would be costly
// and because we assume schema changes will be handled by using a different path.
if err := ensureDBHasCurrentSchema(db); err != nil {
err = func() (retErr error) { // A scope for defer
defer func() {
closeErr := db.Close()
if retErr == nil {
retErr = closeErr
}
}()
// We dont check the schema before every operation, because that would be costly
// and because we assume schema changes will be handled by using a different path.
return ensureDBHasCurrentSchema(db)
}()
if err != nil {
return nil, err
}
return &cache{
path: path,
refCount: 0,
@ -147,25 +153,30 @@ func (sqc *cache) Close() {
type void struct{} // So that we dont have to write struct{}{} all over the place
// transaction calls fn within a read-write transaction in sqc.
func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (T, error) {
db, closeDB, err := func() (*sql.DB, func(), error) { // A scope for defer
func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (_ T, retErr error) {
db, closeDB, err := func() (*sql.DB, func() error, error) { // A scope for defer
sqc.lock.Lock()
defer sqc.lock.Unlock()
if sqc.db != nil {
return sqc.db, func() {}, nil
return sqc.db, func() error { return nil }, nil
}
db, err := rawOpen(sqc.path)
if err != nil {
return nil, nil, fmt.Errorf("opening blob info cache at %q: %w", sqc.path, err)
}
return db, func() { db.Close() }, nil
return db, db.Close, nil
}()
if err != nil {
var zeroRes T // A zero value of T
return zeroRes, err
}
defer closeDB()
defer func() {
closeErr := closeDB()
if retErr == nil {
retErr = closeErr
}
}()
return dbTransaction(db, fn)
}

View file

@ -6,6 +6,8 @@ import (
"errors"
"fmt"
"io/fs"
"iter"
"maps"
"os"
"os/exec"
"path/filepath"
@ -93,9 +95,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
// Credential helpers in the auth file have a
// direct mapping to a registry, so we can just
// walk the map.
for registry := range fileContents.CredHelpers {
allKeys.Add(registry)
}
allKeys.AddSeq(maps.Keys(fileContents.CredHelpers))
for key := range fileContents.AuthConfigs {
key := normalizeAuthFileKey(key, path.legacyFormat)
if key == normalizedDockerIORegistry {
@ -115,16 +115,14 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
return nil, err
}
}
for registry := range creds {
allKeys.Add(registry)
}
allKeys.AddSeq(maps.Keys(creds))
}
}
// Now use `GetCredentials` to the specific auth configs for each
// previously listed registry.
allCreds := make(map[string]types.DockerAuthConfig)
for _, key := range allKeys.Values() {
for key := range allKeys.All() {
creds, err := GetCredentials(sys, key)
if err != nil {
// Note: we rely on the logging in `GetCredentials`.
@ -818,16 +816,10 @@ func findCredentialsInFile(key, registry string, path authPath) (types.DockerAut
// Support sub-registry namespaces in auth.
// (This is not a feature of ~/.docker/config.json; we support it even for
// those files as an extension.)
var keys []string
if !path.legacyFormat {
keys = authKeysForKey(key)
} else {
keys = []string{registry}
}
//
// Repo or namespace keys are only supported as exact matches. For registry
// keys we prefer exact matches as well.
for _, key := range keys {
for key := range authKeyLookupOrder(key, registry, path.legacyFormat) {
if val, exists := fileContents.AuthConfigs[key]; exists {
return decodeDockerAuth(path.path, key, val)
}
@ -854,25 +846,33 @@ func findCredentialsInFile(key, registry string, path authPath) (types.DockerAut
return types.DockerAuthConfig{}, nil
}
// authKeysForKey returns the keys matching a provided auth file key, in order
// from the best match to worst. For example,
// authKeyLookupOrder returns a sequence for lookup keys matching (key or registry)
// in file with legacyFormat, in order from the best match to worst.
// For example, in a non-legacy file,
// when given a repository key "quay.io/repo/ns/image", it returns
// - quay.io/repo/ns/image
// - quay.io/repo/ns
// - quay.io/repo
// - quay.io
func authKeysForKey(key string) (res []string) {
for {
res = append(res, key)
lastSlash := strings.LastIndex(key, "/")
if lastSlash == -1 {
break
func authKeyLookupOrder(key, registry string, legacyFormat bool) iter.Seq[string] {
return func(yield func(string) bool) {
if legacyFormat {
_ = yield(registry) // We stop in any case
return
}
key = key[:lastSlash]
}
return res
for {
if !yield(key) {
return
}
lastSlash := strings.LastIndex(key, "/")
if lastSlash == -1 {
break
}
key = key[:lastSlash]
}
}
}
// decodeDockerAuth decodes the username and password from conf,

View file

@ -1,5 +1,4 @@
//go:build !freebsd
// +build !freebsd
package sysregistriesv2

View file

@ -1,5 +1,4 @@
//go:build freebsd
// +build freebsd
package sysregistriesv2

View file

@ -134,7 +134,7 @@ func ResolveShortNameAlias(ctx *types.SystemContext, name string) (reference.Nam
// editShortNameAlias loads the aliases.conf file and changes it. If value is
// set, it adds the name-value pair as a new alias. Otherwise, it will remove
// name from the config.
func editShortNameAlias(ctx *types.SystemContext, name string, value *string) error {
func editShortNameAlias(ctx *types.SystemContext, name string, value *string) (retErr error) {
if err := validateShortName(name); err != nil {
return err
}
@ -178,7 +178,13 @@ func editShortNameAlias(ctx *types.SystemContext, name string, value *string) er
if err != nil {
return err
}
defer f.Close()
// since we are writing to this file, make sure we handle err on Close()
defer func() {
closeErr := f.Close()
if retErr == nil {
retErr = closeErr
}
}()
encoder := toml.NewEncoder(f)
return encoder.Encode(conf)
@ -229,7 +235,7 @@ func parseShortNameValue(alias string) (reference.Named, error) {
}
registry := reference.Domain(named)
if !(strings.ContainsAny(registry, ".:") || registry == "localhost") {
if !strings.ContainsAny(registry, ".:") && registry != "localhost" {
return nil, fmt.Errorf("invalid alias %q: must contain registry and repository", alias)
}

View file

@ -4,9 +4,11 @@ import (
"errors"
"fmt"
"io/fs"
"maps"
"os"
"path/filepath"
"reflect"
"slices"
"sort"
"strings"
"sync"
@ -18,7 +20,6 @@ import (
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/regexp"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
)
// systemRegistriesConfPath is the path to the system-wide registry
@ -430,7 +431,8 @@ func (config *V2RegistriesConf) postProcessRegistries() error {
return fmt.Errorf("pull-from-mirror must not be set for a non-mirror registry %q", reg.Prefix)
}
// make sure mirrors are valid
for _, mir := range reg.Mirrors {
for j := range reg.Mirrors {
mir := &reg.Mirrors[j]
mir.Location, err = parseLocation(mir.Location)
if err != nil {
return err
@ -1040,12 +1042,10 @@ func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) {
}
// Go maps have a non-deterministic order when iterating the keys, so
// we dump them in a slice and sort it to enforce some order in
// Registries slice. Some consumers of c/image (e.g., CRI-O) log the
// configuration where a non-deterministic order could easily cause
// confusion.
prefixes := maps.Keys(registryMap)
sort.Strings(prefixes)
// we sort the keys to enforce some order in Registries slice.
// Some consumers of c/image (e.g., CRI-O) log the configuration
// and a non-deterministic order could easily cause confusion.
prefixes := slices.Sorted(maps.Keys(registryMap))
c.partialV2.Registries = []Registry{}
for _, prefix := range prefixes {

View file

@ -186,12 +186,18 @@ func convertSIFToElements(ctx context.Context, sifImage *sif.FileImage, tempDir
// has an -o option that allows extracting a squashfs from the SIF file directly,
// but that version is not currently available in RHEL 8.
logrus.Debugf("Creating a temporary squashfs image %s ...", squashFSPath)
if err := func() error { // A scope for defer
if err := func() (retErr error) { // A scope for defer
f, err := os.Create(squashFSPath)
if err != nil {
return err
}
defer f.Close()
// since we are writing to this file, make sure we handle err on Close()
defer func() {
closeErr := f.Close()
if retErr == nil {
retErr = closeErr
}
}()
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
if _, err := io.CopyN(f, rootFS.GetReader(), rootFS.Size()); err != nil {
return err

View file

@ -1,5 +1,4 @@
//go:build !containers_image_fulcio_stub
// +build !containers_image_fulcio_stub
package signature

View file

@ -1,5 +1,4 @@
//go:build containers_image_fulcio_stub
// +build containers_image_fulcio_stub
package signature

View file

@ -1,5 +1,4 @@
//go:build !containers_image_rekor_stub
// +build !containers_image_rekor_stub
package internal

View file

@ -1,5 +1,4 @@
//go:build containers_image_rekor_stub
// +build containers_image_rekor_stub
package internal

View file

@ -1,5 +1,4 @@
//go:build !containers_image_openpgp
// +build !containers_image_openpgp
package signature

View file

@ -1,5 +1,4 @@
//go:build containers_image_openpgp
// +build containers_image_openpgp
package signature

View file

@ -1,5 +1,4 @@
//go:build !freebsd
// +build !freebsd
package signature

View file

@ -1,5 +1,4 @@
//go:build freebsd
// +build freebsd
package signature

View file

@ -180,18 +180,18 @@ type PRSigstoreSignedPKI interface {
// prSigstoreSignedPKI contains non-fulcio certificate PKI configuration options for prSigstoreSigned
type prSigstoreSignedPKI struct {
// CARootsPath a path to a file containing accepted CA root certificates, in PEM format. Exactly one of CARootsPath and CARootsData must be specified.
CARootsPath string `json:"caRootsPath"`
CARootsPath string `json:"caRootsPath,omitempty"`
// CARootsData contains accepted CA root certificates in PEM format, all of that base64-encoded. Exactly one of CARootsPath and CARootsData must be specified.
CARootsData []byte `json:"caRootsData"`
CARootsData []byte `json:"caRootsData,omitempty"`
// CAIntermediatesPath a path to a file containing accepted CA intermediate certificates, in PEM format. Only one of CAIntermediatesPath or CAIntermediatesData can be specified, not both.
CAIntermediatesPath string `json:"caIntermediatesPath"`
CAIntermediatesPath string `json:"caIntermediatesPath,omitempty"`
// CAIntermediatesData contains accepted CA intermediate certificates in PEM format, all of that base64-encoded. Only one of CAIntermediatesPath or CAIntermediatesData can be specified, not both.
CAIntermediatesData []byte `json:"caIntermediatesData"`
CAIntermediatesData []byte `json:"caIntermediatesData,omitempty"`
// SubjectEmail specifies the expected email address imposed on the subject to which the certificate was issued. At least one of SubjectEmail and SubjectHostname must be specified.
SubjectEmail string `json:"subjectEmail"`
SubjectEmail string `json:"subjectEmail,omitempty"`
// SubjectHostname specifies the expected hostname imposed on the subject to which the certificate was issued. At least one of SubjectEmail and SubjectHostname must be specified.
SubjectHostname string `json:"subjectHostname"`
SubjectHostname string `json:"subjectHostname,omitempty"`
}
// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.

View file

@ -1,5 +1,4 @@
//go:build !containers_image_storage_stub
// +build !containers_image_storage_stub
package storage
@ -272,43 +271,56 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
if err != nil {
return private.UploadedBlob{}, fmt.Errorf("creating temporary file %q: %w", filename, err)
}
defer file.Close()
counter := ioutils.NewWriteCounter(file)
stream = io.TeeReader(stream, counter)
digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo)
decompressed, err := archive.DecompressStream(stream)
if err != nil {
return private.UploadedBlob{}, fmt.Errorf("setting up to decompress blob: %w", err)
}
blobDigest, diffID, count, err := func() (_, _ digest.Digest, _ int64, retErr error) { // A scope for defer
// since we are writing to this file, make sure we handle err on Close()
defer func() {
closeErr := file.Close()
if retErr == nil {
retErr = closeErr
}
}()
counter := ioutils.NewWriteCounter(file)
stream = io.TeeReader(stream, counter)
digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo)
decompressed, err := archive.DecompressStream(stream)
if err != nil {
return "", "", 0, fmt.Errorf("setting up to decompress blob: %w", err)
diffID := digest.Canonical.Digester()
// Copy the data to the file.
// TODO: This can take quite some time, and should ideally be cancellable using context.Context.
_, err = io.Copy(diffID.Hash(), decompressed)
decompressed.Close()
}
defer decompressed.Close()
diffID := digest.Canonical.Digester()
// Copy the data to the file.
// TODO: This can take quite some time, and should ideally be cancellable using context.Context.
_, err = io.Copy(diffID.Hash(), decompressed)
if err != nil {
return "", "", 0, fmt.Errorf("storing blob to file %q: %w", filename, err)
}
return digester.Digest(), diffID.Digest(), counter.Count, nil
}()
if err != nil {
return private.UploadedBlob{}, fmt.Errorf("storing blob to file %q: %w", filename, err)
return private.UploadedBlob{}, err
}
// Determine blob properties, and fail if information that we were given about the blob
// is known to be incorrect.
blobDigest := digester.Digest()
blobSize := blobinfo.Size
if blobSize < 0 {
blobSize = counter.Count
} else if blobinfo.Size != counter.Count {
blobSize = count
} else if blobinfo.Size != count {
return private.UploadedBlob{}, ErrBlobSizeMismatch
}
// Record information about the blob.
s.lock.Lock()
s.lockProtected.blobDiffIDs[blobDigest] = diffID.Digest()
s.lockProtected.fileSizes[blobDigest] = counter.Count
s.lockProtected.blobDiffIDs[blobDigest] = diffID
s.lockProtected.fileSizes[blobDigest] = count
s.lockProtected.filenames[blobDigest] = filename
s.lock.Unlock()
// This is safe because we have just computed diffID, and blobDigest was either computed
// by us, or validated by the caller (usually copy.digestingReader).
options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
options.Cache.RecordDigestUncompressedPair(blobDigest, diffID)
return private.UploadedBlob{
Digest: blobDigest,
Size: blobSize,

View file

@ -1,5 +1,4 @@
//go:build !containers_image_storage_stub
// +build !containers_image_storage_stub
package storage

View file

@ -1,5 +1,4 @@
//go:build !containers_image_storage_stub
// +build !containers_image_storage_stub
package storage

View file

@ -1,5 +1,4 @@
//go:build !containers_image_storage_stub
// +build !containers_image_storage_stub
package storage

View file

@ -1,5 +1,4 @@
//go:build !containers_image_storage_stub
// +build !containers_image_storage_stub
package storage
@ -362,15 +361,14 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
}
storeSpec := scope[1:closeIndex]
scope = scope[closeIndex+1:]
storeInfo := strings.SplitN(storeSpec, "@", 2)
if len(storeInfo) == 1 && storeInfo[0] != "" {
// One component: the graph root.
if !filepath.IsAbs(storeInfo[0]) {
if a, b, ok := strings.Cut(storeSpec, "@"); ok && a != "" && b != "" {
// Two components: the driver type and the graph root.
if !filepath.IsAbs(b) {
return ErrPathNotAbsolute
}
} else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" {
// Two components: the driver type and the graph root.
if !filepath.IsAbs(storeInfo[1]) {
} else if !ok && a != "" {
// One component: the graph root.
if !filepath.IsAbs(storeSpec) {
return ErrPathNotAbsolute
}
} else {

View file

@ -1,5 +1,4 @@
//go:build !containers_image_docker_daemon_stub
// +build !containers_image_docker_daemon_stub
package alltransports

View file

@ -1,5 +1,4 @@
//go:build containers_image_docker_daemon_stub
// +build containers_image_docker_daemon_stub
package alltransports

View file

@ -1,5 +1,4 @@
//go:build containers_image_ostree && linux
// +build containers_image_ostree,linux
package alltransports

View file

@ -1,5 +1,4 @@
//go:build !containers_image_ostree || !linux
// +build !containers_image_ostree !linux
package alltransports

View file

@ -1,5 +1,4 @@
//go:build !containers_image_storage_stub
// +build !containers_image_storage_stub
package alltransports

View file

@ -1,5 +1,4 @@
//go:build containers_image_storage_stub
// +build containers_image_storage_stub
package alltransports

View file

@ -3,6 +3,7 @@ package types
import (
"context"
"io"
"net/url"
"time"
"github.com/containers/image/v5/docker/reference"
@ -241,6 +242,7 @@ type BlobInfoCache interface {
//
// WARNING: Various methods which return an object identified by digest generally do not
// validate that the returned data actually matches that digest; this is the callers responsibility.
// See the individual methods documentation for potentially more details.
type ImageSource interface {
// Reference returns the reference used to set up this source, _as specified by the user_
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
@ -251,10 +253,17 @@ type ImageSource interface {
// It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
//
// WARNING: This is a raw access to the data as provided by the source; if the reference contains a digest, or instanceDigest is set,
// callers must enforce the digest match themselves, typically by using image.UnparsedInstance to access the manifest instead
// of calling this directly. (Compare the generic warning applicable to all of the [ImageSource] interface.)
GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error)
// GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
//
// WARNING: This is a raw access to the data as provided by the source; callers must validate the contents
// against the blobs digest themselves. (Compare the generic warning applicable to all of the [ImageSource] interface.)
GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error)
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
HasThreadSafeGetBlob() bool
@ -655,6 +664,8 @@ type SystemContext struct {
// Note that this requires writing blobs to temporary files, and takes more time than the default behavior,
// when the digest for a blob is unknown.
DockerRegistryPushPrecomputeDigests bool
// DockerProxyURL specifies proxy configuration schema (like socks5://username:password@ip:port)
DockerProxyURL *url.URL
// === docker/daemon.Transport overrides ===
// A directory containing a CA certificate (ending with ".crt"),

View file

@ -6,9 +6,9 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 34
VersionMinor = 35
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 3
VersionPatch = 0
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""

View file

@ -23,7 +23,7 @@ env:
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
# VM Image built in containers/automation_images
IMAGE_SUFFIX: "c20250107t132430z-f41f40d13"
IMAGE_SUFFIX: "c20250324t111922z-f41f40d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
@ -126,7 +126,7 @@ lint_task:
folder: $GOPATH/pkg/mod
build_script: |
apt-get update
apt-get install -y libbtrfs-dev
apt-get install -y libbtrfs-dev libsubid-dev
test_script: |
make TAGS=regex_precompile local-validate
make lint
@ -170,13 +170,13 @@ vendor_task:
cross_task:
alias: cross
container:
image: golang:1.22
image: golang:1.23
build_script: make cross
gofix_task:
alias: gofix
container:
image: golang:1.22
image: golang:1.23
build_script: go fix ./...
test_script: git diff --exit-code

View file

@ -1,7 +1,20 @@
---
run:
concurrency: 6
timeout: 5m
linters:
version: "2"
formatters:
enable:
- gofumpt
linters:
enable:
- nolintlint
- unconvert
exclusions:
presets:
- comments
- std-error-handling
settings:
staticcheck:
checks:
- all
- -ST1003 # https://staticcheck.dev/docs/checks/#ST1003 Poorly chosen identifier.
- -QF1008 # https://staticcheck.dev/docs/checks/#QF1008 Omit embedded fields from selector expression.

View file

@ -1,123 +1,14 @@
# Contributing to Containers/Storage
# Contributing to Containers/Storage
We'd love to have you join the community! Below summarizes the processes
that we follow.
We'd love to have you join the community! [Learn how to contribute](https://github.com/containers/common/blob/main/CONTRIBUTING.md) to the Containers Group Projects.
## Topics
Please note that the following information is specific to this project:
* [Reporting Issues](#reporting-issues)
* [Submitting Pull Requests](#submitting-pull-requests)
* [Communications](#communications)
<!--
* [Becoming a Maintainer](#becoming-a-maintainer)
-->
## Reporting Issues
Before reporting an issue, check our backlog of
[open issues](https://github.com/containers/storage/issues)
to see if someone else has already reported it. If so, feel free to add
your scenario, or additional information, to the discussion. Or simply
"subscribe" to it to be notified when it is updated.
If you find a new issue with the project we'd love to hear about it! The most
important aspect of a bug report is that it includes enough information for
us to reproduce it. So, please include as much detail as possible and try
to remove the extra stuff that doesn't really relate to the issue itself.
The easier it is for us to reproduce it, the faster it'll be fixed!
Please don't include any private/sensitive information in your issue!
## Submitting Pull Requests
No Pull Request (PR) is too small! Typos, additional comments in the code,
new testcases, bug fixes, new features, more documentation, ... it's all
welcome!
While bug fixes can first be identified via an "issue", that is not required.
It's ok to just open up a PR with the fix, but make sure you include the same
information you would have included in an issue - like how to reproduce it.
PRs for new features should include some background on what use cases the
new code is trying to address. When possible and when it makes sense, try to break-up
larger PRs into smaller ones - it's easier to review smaller
code changes. But only if those smaller ones make sense as stand-alone PRs.
Regardless of the type of PR, all PRs should include:
* well documented code changes
* additional testcases. Ideally, they should fail w/o your code change applied
* documentation changes
Squash your commits into logical pieces of work that might want to be reviewed
separate from the rest of the PRs. But, squashing down to just one commit is ok
too since in the end the entire PR will be reviewed anyway. When in doubt,
squash.
PRs that fix issues should include a reference like `Closes #XXXX` in the
commit message so that github will automatically close the referenced issue
when the PR is merged.
<!--
All PRs require at least two LGTMs (Looks Good To Me) from maintainers.
-->
### Sign your PRs
The sign-off is a line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
Then you just add a line to every git commit message:
Signed-off-by: Joe Smith <joe.smith@email.com>
Use your real name (sorry, no pseudonyms or anonymous contributions.)
If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.
* We dont typically require 2 LGTMs for this repository.
## Communications
For general questions, or discussions, please use the
For general questions, or discussions, please use the
IRC group on `irc.freenode.net` called `container-projects`
that has been setup.
@ -139,6 +30,6 @@ approval, or if the person requests to be removed then it is automatic.
Normally, a maintainer will only be removed if they are considered to be
inactive for a long period of time or are viewed as disruptive to the community.
The current list of maintainers can be found in the
The current list of maintainers can be found in the
[MAINTAINERS](MAINTAINERS) file.
-->

View file

@ -35,7 +35,7 @@ TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /de
# N/B: This value is managed by Renovate, manual changes are
# possible, as long as they don't disturb the formatting
# (i.e. DO NOT ADD A 'v' prefix!)
GOLANGCI_LINT_VERSION := 1.64.5
GOLANGCI_LINT_VERSION := 2.0.2
default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs

View file

@ -1 +1 @@
1.57.2
1.58.0

View file

@ -82,7 +82,7 @@ type Container struct {
UIDMap []idtools.IDMap `json:"uidmap,omitempty"`
GIDMap []idtools.IDMap `json:"gidmap,omitempty"`
Flags map[string]interface{} `json:"flags,omitempty"`
Flags map[string]any `json:"flags,omitempty"`
// volatileStore is true if the container is from the volatile json file
volatileStore bool `json:"-"`
@ -196,7 +196,7 @@ func (c *Container) MountOpts() []string {
switch value := c.Flags[mountOptsFlag].(type) {
case []string:
return value
case []interface{}:
case []any:
var mountOpts []string
for _, v := range value {
if flag, ok := v.(string); ok {
@ -458,7 +458,7 @@ func (r *containerStore) load(lockedForWriting bool) (bool, error) {
ids := make(map[string]*Container)
for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ {
for locationIndex := range numContainerLocationIndex {
location := containerLocationFromIndex(locationIndex)
rpath := r.jsonPath[locationIndex]
@ -531,7 +531,7 @@ func (r *containerStore) save(saveLocations containerLocations) error {
return err
}
r.lastWrite = lw
for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ {
for locationIndex := range numContainerLocationIndex {
location := containerLocationFromIndex(locationIndex)
if location&saveLocations == 0 {
continue
@ -641,13 +641,13 @@ func (r *containerStore) ClearFlag(id string, flag string) error {
}
// Requires startWriting.
func (r *containerStore) SetFlag(id string, flag string, value interface{}) error {
func (r *containerStore) SetFlag(id string, flag string, value any) error {
container, ok := r.lookup(id)
if !ok {
return ErrContainerUnknown
}
if container.Flags == nil {
container.Flags = make(map[string]interface{})
container.Flags = make(map[string]any)
}
container.Flags[flag] = value
return r.saveFor(container)

View file

@ -111,7 +111,7 @@ type LayerBigDataStore interface {
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type FlaggableStore interface {
ClearFlag(id string, flag string) error
SetFlag(id string, flag string, value interface{}) error
SetFlag(id string, flag string, value any) error
}
// ContainerStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
@ -195,8 +195,8 @@ type LayerStore interface {
FlaggableStore
RWLayerBigDataStore
Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (*Layer, error)
CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error)
Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error)
CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]any) (layer *Layer, err error)
Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]any, diff io.Reader) (*Layer, int64, error)
SetNames(id string, names []string) error
AddNames(id string, names []string) error
RemoveNames(id string, names []string) error

View file

@ -517,7 +517,7 @@ func (a *Driver) isParent(id, parent string) bool {
if parent == "" && len(parents) > 0 {
return false
}
return !(len(parents) > 0 && parent != parents[0])
return len(parents) == 0 || parent == parents[0]
}
// Diff produces an archive of the changes between the specified
@ -778,6 +778,6 @@ func (a *Driver) SupportsShifting() bool {
}
// Dedup performs deduplication of the driver's storage.
func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
func (a *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
return graphdriver.DedupResult{}, nil
}

View file

@ -1,4 +1,4 @@
//go:build linux && !btrfs_noversion && cgo
//go:build linux && cgo
package btrfs

View file

@ -1,14 +0,0 @@
//go:build linux && btrfs_noversion && cgo
package btrfs
// TODO(vbatts) remove this work-around once supported linux distros are on
// btrfs utilities of >= 3.16.1
func btrfsBuildVersion() string {
return "-"
}
func btrfsLibVersion() int {
return -1
}

View file

@ -36,8 +36,8 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai
}
i := inode{
Dev: uint64(st.Dev),
Ino: uint64(st.Ino),
Dev: uint64(st.Dev), //nolint:unconvert
Ino: st.Ino,
}
c.mutex.Lock()

View file

@ -40,7 +40,7 @@ const (
)
// CopyRegularToFile copies the content of a file to another
func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { // nolint: revive,golint
func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint: revive
srcFile, err := os.Open(srcPath)
if err != nil {
return err
@ -72,7 +72,7 @@ func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, c
}
// CopyRegular copies the content of a file to another
func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { // nolint: revive,golint
func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint: revive
// If the destination file already exists, we shouldn't blow it away
dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, fileinfo.Mode())
if err != nil {
@ -160,7 +160,10 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
switch mode := f.Mode(); {
case mode.IsRegular():
id := fileID{dev: uint64(stat.Dev), ino: stat.Ino}
id := fileID{
dev: uint64(stat.Dev), //nolint:unconvert
ino: stat.Ino,
}
if copyMode == Hardlink {
isHardlink = true
if err2 := os.Link(srcPath, dstPath); err2 != nil {
@ -242,12 +245,11 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
}
// system.Chtimes doesn't support a NOFOLLOW flag atm
// nolint: unconvert
if f.IsDir() {
dirsToSetMtimes.PushFront(&dirMtimeInfo{dstPath: &dstPath, stat: stat})
} else if !isSymlink {
aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec))
aTime := time.Unix(stat.Atim.Unix())
mTime := time.Unix(stat.Mtim.Unix())
if err := system.Chtimes(dstPath, aTime, mTime); err != nil {
return err
}

View file

@ -24,7 +24,7 @@ func DirCopy(srcDir, dstDir string, _ Mode, _ bool) error {
}
// CopyRegularToFile copies the content of a file to another
func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint: revive,golint // "func name will be used as copy.CopyRegularToFile by other packages, and that stutters"
func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint: revive // "func name will be used as copy.CopyRegularToFile by other packages, and that stutters"
f, err := os.Open(srcPath)
if err != nil {
return err
@ -35,6 +35,6 @@ func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, c
}
// CopyRegular copies the content of a file to another
func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint:revive,golint // "func name will be used as copy.CopyRegular by other packages, and that stutters"
func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint:revive // "func name will be used as copy.CopyRegular by other packages, and that stutters"
return chrootarchive.NewArchiver(nil).CopyWithTar(srcPath, dstPath)
}

View file

@ -54,8 +54,8 @@ type MountOpts struct {
// Mount label is the MAC Labels to assign to mount point (SELINUX)
MountLabel string
// UidMaps & GidMaps are the User Namespace mappings to be assigned to content in the mount point
UidMaps []idtools.IDMap //nolint: revive,golint
GidMaps []idtools.IDMap //nolint: revive,golint
UidMaps []idtools.IDMap //nolint: revive
GidMaps []idtools.IDMap //nolint: revive
Options []string
// Volatile specifies whether the container storage can be optimized
@ -79,7 +79,7 @@ type ApplyDiffOpts struct {
type ApplyDiffWithDifferOpts struct {
ApplyDiffOpts
Flags map[string]interface{}
Flags map[string]any
}
// DedupArgs contains the information to perform storage deduplication.
@ -222,7 +222,7 @@ type DriverWithDifferOutput struct {
RootDirMode *os.FileMode
// Artifacts is a collection of additional artifacts
// generated by the differ that the storage driver can use.
Artifacts map[string]interface{}
Artifacts map[string]any
}
type DifferOutputFormat int

View file

@ -259,7 +259,7 @@ func supportsIdmappedLowerLayers(home string) (bool, error) {
}
defer cleanupFunc()
if err := idmap.CreateIDMappedMount(lowerDir, lowerMappedDir, int(pid)); err != nil {
if err := idmap.CreateIDMappedMount(lowerDir, lowerMappedDir, pid); err != nil {
return false, fmt.Errorf("create mapped mount: %w", err)
}
defer func() {

View file

@ -42,7 +42,7 @@ func getComposefsBlob(dataDir string) string {
return filepath.Join(dataDir, "composefs.blob")
}
func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error {
func generateComposeFsBlob(verityDigests map[string]string, toc any, composefsDir string) error {
if err := os.MkdirAll(composefsDir, 0o700); err != nil {
return err
}
@ -53,7 +53,7 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, com
}
destFile := getComposefsBlob(composefsDir)
writerJson, err := getComposeFsHelper()
writerJSON, err := getComposeFsHelper()
if err != nil {
return fmt.Errorf("failed to find mkcomposefs: %w", err)
}
@ -74,7 +74,7 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, com
defer outFile.Close()
errBuf := &bytes.Buffer{}
cmd := exec.Command(writerJson, "--from-file", "-", "-")
cmd := exec.Command(writerJSON, "--from-file", "-", "-")
cmd.Stderr = errBuf
cmd.Stdin = dumpReader
cmd.Stdout = outFile

View file

@ -36,7 +36,6 @@ import (
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/unshare"
units "github.com/docker/go-units"
"github.com/hashicorp/go-multierror"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
@ -131,6 +130,9 @@ type Driver struct {
usingMetacopy bool
usingComposefs bool
stagingDirsLocksMutex sync.Mutex
// stagingDirsLocks access is not thread safe, it is required that callers take
// stagingDirsLocksMutex on each access to guard against concurrent map writes.
stagingDirsLocks map[string]*lockfile.LockFile
supportsIDMappedMounts *bool
@ -419,7 +421,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
if !opts.skipMountHome {
if err := mount.MakePrivate(home); err != nil {
return nil, err
return nil, fmt.Errorf("overlay: failed to make mount private: %w", err)
}
}
@ -429,17 +431,18 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
}
d := &Driver{
name: "overlay",
home: home,
imageStore: options.ImageStore,
runhome: runhome,
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(fileSystemType)),
supportsDType: supportsDType,
usingMetacopy: usingMetacopy,
supportsVolatile: supportsVolatile,
usingComposefs: opts.useComposefs,
options: *opts,
stagingDirsLocks: make(map[string]*lockfile.LockFile),
name: "overlay",
home: home,
imageStore: options.ImageStore,
runhome: runhome,
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(fileSystemType)),
supportsDType: supportsDType,
usingMetacopy: usingMetacopy,
supportsVolatile: supportsVolatile,
usingComposefs: opts.useComposefs,
options: *opts,
stagingDirsLocksMutex: sync.Mutex{},
stagingDirsLocks: make(map[string]*lockfile.LockFile),
}
d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d))
@ -489,7 +492,7 @@ func parseOptions(options []string) (*overlayOptions, error) {
if err != nil {
return nil, err
}
o.quota.Inodes = uint64(inodes)
o.quota.Inodes = inodes
case "imagestore", "additionalimagestore":
logrus.Debugf("overlay: imagestore=%s", val)
// Additional read only image stores to use for lower paths
@ -640,6 +643,8 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) {
case "true":
logrus.Debugf("overlay: storage already configured with a mount-program")
return false, nil
case "false":
// Do nothing.
default:
needsMountProgram, err := scanForMountProgramIndicators(home)
if err != nil && !os.IsNotExist(err) {
@ -653,7 +658,6 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) {
}
// fall through to check if we find ourselves needing to use a
// mount program now
case "false":
}
for _, dir := range []string{home, runhome} {
@ -868,10 +872,12 @@ func (d *Driver) Cleanup() error {
// pruneStagingDirectories cleans up any staging directory that was leaked.
// It returns whether any staging directory is still present.
func (d *Driver) pruneStagingDirectories() bool {
d.stagingDirsLocksMutex.Lock()
for _, lock := range d.stagingDirsLocks {
lock.Unlock()
}
d.stagingDirsLocks = make(map[string]*lockfile.LockFile)
clear(d.stagingDirsLocks)
d.stagingDirsLocksMutex.Unlock()
anyPresent := false
@ -1157,7 +1163,7 @@ func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) e
if err != nil {
return err
}
driver.options.quota.Inodes = uint64(inodes)
driver.options.quota.Inodes = inodes
default:
return fmt.Errorf("unknown option %s", key)
}
@ -1343,7 +1349,7 @@ func (d *Driver) recreateSymlinks() error {
return err
}
// Keep looping as long as we take some corrective action in each iteration
var errs *multierror.Error
var errs error
madeProgress := true
iterations := 0
for madeProgress {
@ -1359,7 +1365,7 @@ func (d *Driver) recreateSymlinks() error {
// Read the "link" file under each layer to get the name of the symlink
data, err := os.ReadFile(path.Join(d.dir(dir.Name()), "link"))
if err != nil {
errs = multierror.Append(errs, fmt.Errorf("reading name of symlink for %q: %w", dir.Name(), err))
errs = errors.Join(errs, fmt.Errorf("reading name of symlink for %q: %w", dir.Name(), err))
continue
}
linkPath := path.Join(d.home, linkDir, strings.Trim(string(data), "\n"))
@ -1368,12 +1374,12 @@ func (d *Driver) recreateSymlinks() error {
err = fileutils.Lexists(linkPath)
if err != nil && os.IsNotExist(err) {
if err := os.Symlink(path.Join("..", dir.Name(), "diff"), linkPath); err != nil {
errs = multierror.Append(errs, err)
errs = errors.Join(errs, err)
continue
}
madeProgress = true
} else if err != nil {
errs = multierror.Append(errs, err)
errs = errors.Join(errs, err)
continue
}
}
@ -1384,7 +1390,7 @@ func (d *Driver) recreateSymlinks() error {
// that each symlink we have corresponds to one.
links, err := os.ReadDir(linkDirFullPath)
if err != nil {
errs = multierror.Append(errs, err)
errs = errors.Join(errs, err)
continue
}
// Go through all of the symlinks in the "l" directory
@ -1392,16 +1398,16 @@ func (d *Driver) recreateSymlinks() error {
// Read the symlink's target, which should be "../$layer/diff"
target, err := os.Readlink(filepath.Join(linkDirFullPath, link.Name()))
if err != nil {
errs = multierror.Append(errs, err)
errs = errors.Join(errs, err)
continue
}
targetComponents := strings.Split(target, string(os.PathSeparator))
if len(targetComponents) != 3 || targetComponents[0] != ".." || targetComponents[2] != "diff" {
errs = multierror.Append(errs, fmt.Errorf("link target of %q looks weird: %q", link, target))
errs = errors.Join(errs, fmt.Errorf("link target of %q looks weird: %q", link, target))
// force the link to be recreated on the next pass
if err := os.Remove(filepath.Join(linkDirFullPath, link.Name())); err != nil {
if !os.IsNotExist(err) {
errs = multierror.Append(errs, fmt.Errorf("removing link %q: %w", link, err))
errs = errors.Join(errs, fmt.Errorf("removing link %q: %w", link, err))
} // else dont report any error, but also dont set madeProgress.
continue
}
@ -1417,7 +1423,7 @@ func (d *Driver) recreateSymlinks() error {
// NOTE: If two or more links point to the same target, we will update linkFile
// with every value of link.Name(), and set madeProgress = true every time.
if err := os.WriteFile(linkFile, []byte(link.Name()), 0o644); err != nil {
errs = multierror.Append(errs, fmt.Errorf("correcting link for layer %s: %w", targetID, err))
errs = errors.Join(errs, fmt.Errorf("correcting link for layer %s: %w", targetID, err))
continue
}
madeProgress = true
@ -1425,14 +1431,11 @@ func (d *Driver) recreateSymlinks() error {
}
iterations++
if iterations >= maxIterations {
errs = multierror.Append(errs, fmt.Errorf("reached %d iterations in overlay graph drivers recreateSymlink, giving up", iterations))
errs = errors.Join(errs, fmt.Errorf("reached %d iterations in overlay graph drivers recreateSymlink, giving up", iterations))
break
}
}
if errs != nil {
return errs.ErrorOrNil()
}
return nil
return errs
}
// Get creates and mounts the required file system for the given id and returns the mount path.
@ -1548,7 +1551,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
permsKnown := false
st, err := os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN)))
if err == nil {
perms = os.FileMode(st.Mode())
perms = st.Mode()
permsKnown = true
}
for err == nil {
@ -1563,7 +1566,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
if err != nil {
return "", err
}
idmappedMountProcessPid = int(pid)
idmappedMountProcessPid = pid
defer cleanupFunc()
}
@ -1635,7 +1638,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
lower = path.Join(p, d.name, l)
if st2, err2 := os.Stat(lower); err2 == nil {
if !permsKnown {
perms = os.FileMode(st2.Mode())
perms = st2.Mode()
permsKnown = true
}
break
@ -1656,7 +1659,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
} else {
if !permsKnown {
perms = os.FileMode(st.Mode())
perms = st.Mode()
permsKnown = true
}
lower = newpath
@ -2103,17 +2106,16 @@ func (g *overlayFileGetter) Get(path string) (io.ReadCloser, error) {
return nil, fmt.Errorf("%s: %w", path, os.ErrNotExist)
}
func (g *overlayFileGetter) Close() error {
var errs *multierror.Error
func (g *overlayFileGetter) Close() (errs error) {
for _, f := range g.composefsMounts {
if err := f.Close(); err != nil {
errs = multierror.Append(errs, err)
errs = errors.Join(errs, err)
}
if err := unix.Rmdir(f.Name()); err != nil {
errs = multierror.Append(errs, err)
errs = errors.Join(errs, err)
}
}
return errs.ErrorOrNil()
return errs
}
// newStagingDir creates a new staging directory and returns the path to it.
@ -2173,10 +2175,12 @@ func (d *Driver) DiffGetter(id string) (_ graphdriver.FileGetCloser, Err error)
func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error {
parentStagingDir := filepath.Dir(stagingDirectory)
d.stagingDirsLocksMutex.Lock()
if lock, ok := d.stagingDirsLocks[parentStagingDir]; ok {
delete(d.stagingDirsLocks, parentStagingDir)
lock.Unlock()
}
d.stagingDirsLocksMutex.Unlock()
return os.RemoveAll(parentStagingDir)
}
@ -2235,11 +2239,15 @@ func (d *Driver) ApplyDiffWithDiffer(options *graphdriver.ApplyDiffWithDifferOpt
}
defer func() {
if errRet != nil {
d.stagingDirsLocksMutex.Lock()
delete(d.stagingDirsLocks, layerDir)
d.stagingDirsLocksMutex.Unlock()
lock.Unlock()
}
}()
d.stagingDirsLocksMutex.Lock()
d.stagingDirsLocks[layerDir] = lock
d.stagingDirsLocksMutex.Unlock()
lock.Lock()
logrus.Debugf("Applying differ in %s", applyDir)
@ -2271,10 +2279,12 @@ func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *gr
parentStagingDir := filepath.Dir(stagingDirectory)
defer func() {
d.stagingDirsLocksMutex.Lock()
if lock, ok := d.stagingDirsLocks[parentStagingDir]; ok {
delete(d.stagingDirsLocks, parentStagingDir)
lock.Unlock()
}
d.stagingDirsLocksMutex.Unlock()
}()
diffPath, err := d.getDiffPath(id)
@ -2495,7 +2505,7 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
perms = *d.options.forceMask
} else {
if err == nil {
perms = os.FileMode(st.Mode())
perms = st.Mode()
}
}
for err == nil {

View file

@ -190,7 +190,8 @@ func NewControl(basePath string) (*Control, error) {
}
// SetQuota - assign a unique project id to directory and set the quota limits
// for that project id
// for that project id.
// targetPath must exist, must be a directory, and must be empty.
func (q *Control) SetQuota(targetPath string, quota Quota) error {
var projectID uint32
value, ok := q.quotas.Load(targetPath)
@ -200,10 +201,20 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error {
if !ok {
projectID = q.nextProjectID
// The directory we are setting an ID on must be empty, as
// the ID will not be propagated to pre-existing subdirectories.
dents, err := os.ReadDir(targetPath)
if err != nil {
return fmt.Errorf("reading directory %s: %w", targetPath, err)
}
if len(dents) > 0 {
return fmt.Errorf("can only set project ID on empty directories, %s is not empty", targetPath)
}
//
// assign project id to new container directory
//
err := setProjectID(targetPath, projectID)
err = setProjectID(targetPath, projectID)
if err != nil {
return err
}

View file

@ -93,7 +93,7 @@ type Image struct {
// ReadOnly is true if this image resides in a read-only layer store.
ReadOnly bool `json:"-"`
Flags map[string]interface{} `json:"flags,omitempty"`
Flags map[string]any `json:"flags,omitempty"`
}
// roImageStore provides bookkeeping for information about Images.
@ -675,7 +675,7 @@ func (r *imageStore) ClearFlag(id string, flag string) error {
}
// Requires startWriting.
func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
func (r *imageStore) SetFlag(id string, flag string, value any) error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to set flags on images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
@ -684,7 +684,7 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
if image.Flags == nil {
image.Flags = make(map[string]interface{})
image.Flags = make(map[string]any)
}
image.Flags[flag] = value
return r.Save()

View file

@ -13,7 +13,7 @@ import (
"github.com/sirupsen/logrus"
)
var notSupported = errors.New("reflinks are not supported on this platform")
var errNotSupported = errors.New("reflinks are not supported on this platform")
const (
DedupHashInvalid DedupHashMethod = iota
@ -134,7 +134,7 @@ func DedupDirs(dirs []string, options DedupOptions) (DedupResult, error) {
break
}
logrus.Debugf("Failed to deduplicate: %v", err)
if errors.Is(err, notSupported) {
if errors.Is(err, errNotSupported) {
return dedupBytes, err
}
}
@ -153,7 +153,7 @@ func DedupDirs(dirs []string, options DedupOptions) (DedupResult, error) {
return nil
}); err != nil {
// if reflinks are not supported, return immediately without errors
if errors.Is(err, notSupported) {
if errors.Is(err, errNotSupported) {
return res, nil
}
return res, err

View file

@ -48,7 +48,7 @@ func (d *dedupFiles) isFirstVisitOf(fi fs.FileInfo) (bool, error) {
if !ok {
return false, fmt.Errorf("unable to get raw syscall.Stat_t data")
}
return d.recordInode(uint64(st.Dev), st.Ino)
return d.recordInode(uint64(st.Dev), st.Ino) //nolint:unconvert
}
// dedup deduplicates the file at src path to dst path
@ -94,11 +94,11 @@ func (d *dedupFiles) dedup(src, dst string, fiDst fs.FileInfo) (uint64, error) {
}
err = unix.IoctlFileDedupeRange(int(srcFile.Fd()), &value)
if err == nil {
return uint64(value.Info[0].Bytes_deduped), nil
return value.Info[0].Bytes_deduped, nil
}
if errors.Is(err, unix.ENOTSUP) {
return 0, notSupported
return 0, errNotSupported
}
return 0, fmt.Errorf("failed to clone file %q: %w", src, err)
}

View file

@ -9,19 +9,19 @@ import (
type dedupFiles struct{}
func newDedupFiles() (*dedupFiles, error) {
return nil, notSupported
return nil, errNotSupported
}
// isFirstVisitOf records that the file is being processed. Returns true if the file was already visited.
func (d *dedupFiles) isFirstVisitOf(fi fs.FileInfo) (bool, error) {
return false, notSupported
return false, errNotSupported
}
// dedup deduplicates the file at src path to dst path
func (d *dedupFiles) dedup(src, dst string, fiDst fs.FileInfo) (uint64, error) {
return 0, notSupported
return 0, errNotSupported
}
func readAllFile(path string, info fs.FileInfo, fn func([]byte) (string, error)) (string, error) {
return "", notSupported
return "", errNotSupported
}

View file

@ -6,6 +6,7 @@ import (
"fmt"
"io"
"maps"
"math/bits"
"os"
"path"
"path/filepath"
@ -26,7 +27,6 @@ import (
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/tarlog"
"github.com/containers/storage/pkg/truncindex"
multierror "github.com/hashicorp/go-multierror"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/selinux/go-selinux"
@ -47,11 +47,13 @@ const (
type layerLocations uint8
// The backing store is split in two json files, one (the volatile)
// that is written without fsync() meaning it isn't as robust to
// unclean shutdown
// The backing store is split in three json files.
// The volatile store is written without fsync() meaning it isn't as robust to unclean shutdown.
// Optionally, an image store can be configured to store RO layers.
// The stable store is used for the remaining layers that don't go into the other stores.
const (
stableLayerLocation layerLocations = 1 << iota
imageStoreLayerLocation
volatileLayerLocation
numLayerLocationIndex = iota
@ -61,6 +63,10 @@ func layerLocationFromIndex(index int) layerLocations {
return 1 << index
}
func indexFromLayerLocation(location layerLocations) int {
return bits.TrailingZeros(uint(location))
}
// A Layer is a record of a copy-on-write layer that's stored by the lower
// level graph driver.
type Layer struct {
@ -155,7 +161,7 @@ type Layer struct {
GIDs []uint32 `json:"gidset,omitempty"`
// Flags is arbitrary data about the layer.
Flags map[string]interface{} `json:"flags,omitempty"`
Flags map[string]any `json:"flags,omitempty"`
// UIDMap and GIDMap are used for setting up a layer's contents
// for use inside of a user namespace where UID mapping is being used.
@ -165,8 +171,8 @@ type Layer struct {
// ReadOnly is true if this layer resides in a read-only layer store.
ReadOnly bool `json:"-"`
// volatileStore is true if the container is from the volatile json file
volatileStore bool `json:"-"`
// location is the location of the store where the layer is present.
location layerLocations `json:"-"`
// BigDataNames is a list of names of data items that we keep for the
// convenience of the caller. They can be large, and are only in
@ -431,14 +437,6 @@ type layerStore struct {
driver drivers.Driver
}
// The caller must hold r.inProcessLock for reading.
func layerLocation(l *Layer) layerLocations {
if l.volatileStore {
return volatileLayerLocation
}
return stableLayerLocation
}
func copyLayer(l *Layer) *Layer {
return &Layer{
ID: l.ID,
@ -456,7 +454,7 @@ func copyLayer(l *Layer) *Layer {
TOCDigest: l.TOCDigest,
CompressionType: l.CompressionType,
ReadOnly: l.ReadOnly,
volatileStore: l.volatileStore,
location: l.location,
BigDataNames: copySlicePreferringNil(l.BigDataNames),
Flags: copyMapPreferringNil(l.Flags),
UIDMap: copySlicePreferringNil(l.UIDMap),
@ -658,8 +656,12 @@ func (r *layerStore) layersModified() (lockfile.LastWrite, bool, error) {
// If the layers.json file or container-layers.json has been
// modified manually, then we have to reload the storage in
// any case.
for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
info, err := os.Stat(r.jsonPath[locationIndex])
for locationIndex := range numLayerLocationIndex {
rpath := r.jsonPath[locationIndex]
if rpath == "" {
continue
}
info, err := os.Stat(rpath)
if err != nil && !os.IsNotExist(err) {
return lockfile.LastWrite{}, false, fmt.Errorf("stat layers file: %w", err)
}
@ -792,9 +794,12 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
layers := []*Layer{}
ids := make(map[string]*Layer)
for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
for locationIndex := range numLayerLocationIndex {
location := layerLocationFromIndex(locationIndex)
rpath := r.jsonPath[locationIndex]
if rpath == "" {
continue
}
info, err := os.Stat(rpath)
if err != nil {
if !os.IsNotExist(err) {
@ -821,9 +826,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
continue // skip invalid duplicated layer
}
// Remember where the layer came from
if location == volatileLayerLocation {
layer.volatileStore = true
}
layer.location = location
layers = append(layers, layer)
ids[layer.ID] = layer
}
@ -844,7 +847,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
if conflict, ok := names[name]; ok {
r.removeName(conflict, name)
errorToResolveBySaving = ErrDuplicateLayerNames
modifiedLocations |= layerLocation(conflict)
modifiedLocations |= conflict.location
}
names[name] = layers[n]
}
@ -919,7 +922,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
var layersToDelete []*Layer
for _, layer := range r.layers {
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
layer.Flags = make(map[string]any)
}
if layerHasIncompleteFlag(layer) {
// Important: Do not call r.deleteInternal() here. It modifies r.layers
@ -937,10 +940,10 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
// Don't return the error immediately, because deleteInternal does not saveLayers();
// Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully
// deleted incomplete layers have their metadata correctly removed.
incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors,
incompleteDeletionErrors = errors.Join(incompleteDeletionErrors,
fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
}
modifiedLocations |= layerLocation(layer)
modifiedLocations |= layer.location
}
if err := r.saveLayers(modifiedLocations); err != nil {
return false, err
@ -1009,7 +1012,7 @@ func (r *layerStore) save(saveLocations layerLocations) error {
// The caller must hold r.lockfile locked for writing.
// The caller must hold r.inProcessLock for WRITING.
func (r *layerStore) saveFor(modifiedLayer *Layer) error {
return r.save(layerLocation(modifiedLayer))
return r.save(modifiedLayer.location)
}
// The caller must hold r.lockfile locked for writing.
@ -1028,18 +1031,21 @@ func (r *layerStore) saveLayers(saveLocations layerLocations) error {
}
r.lastWrite = lw
for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
for locationIndex := range numLayerLocationIndex {
location := layerLocationFromIndex(locationIndex)
if location&saveLocations == 0 {
continue
}
rpath := r.jsonPath[locationIndex]
if rpath == "" {
return fmt.Errorf("internal error: no path for location %v", location)
}
if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil {
return err
}
subsetLayers := make([]*Layer, 0, len(r.layers))
for _, layer := range r.layers {
if layerLocation(layer) == location {
if layer.location == location {
subsetLayers = append(subsetLayers, layer)
}
}
@ -1139,12 +1145,17 @@ func (s *store) newLayerStore(rundir, layerdir, imagedir string, driver drivers.
if transient {
volatileDir = rundir
}
layersImageDir := ""
if imagedir != "" {
layersImageDir = filepath.Join(imagedir, "layers.json")
}
rlstore := layerStore{
lockfile: newMultipleLockFile(lockFiles...),
mountsLockfile: mountsLockfile,
rundir: rundir,
jsonPath: [numLayerLocationIndex]string{
filepath.Join(layerdir, "layers.json"),
layersImageDir,
filepath.Join(volatileDir, "volatile-layers.json"),
},
layerdir: layerdir,
@ -1182,6 +1193,7 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roL
rundir: rundir,
jsonPath: [numLayerLocationIndex]string{
filepath.Join(layerdir, "layers.json"),
"",
filepath.Join(layerdir, "volatile-layers.json"),
},
layerdir: layerdir,
@ -1249,7 +1261,7 @@ func (r *layerStore) ClearFlag(id string, flag string) error {
}
// Requires startWriting.
func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
func (r *layerStore) SetFlag(id string, flag string, value any) error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to set flags on layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
@ -1258,7 +1270,7 @@ func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
return ErrLayerUnknown
}
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
layer.Flags = make(map[string]any)
}
layer.Flags[flag] = value
return r.saveFor(layer)
@ -1330,6 +1342,17 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
return copyLayer(layer), nil
}
func (r *layerStore) pickStoreLocation(volatile, writeable bool) layerLocations {
switch {
case volatile:
return volatileLayerLocation
case !writeable && r.jsonPath[indexFromLayerLocation(imageStoreLayerLocation)] != "":
return imageStoreLayerLocation
default:
return stableLayerLocation
}
}
// Requires startWriting.
func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader, slo *stagedLayerOptions) (layer *Layer, size int64, err error) {
if moreOptions == nil {
@ -1422,7 +1445,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
UIDMap: copySlicePreferringNil(moreOptions.UIDMap),
GIDMap: copySlicePreferringNil(moreOptions.GIDMap),
BigDataNames: []string{},
volatileStore: moreOptions.Volatile,
location: r.pickStoreLocation(moreOptions.Volatile, writeable),
}
layer.Flags[incompleteFlag] = true
@ -1908,7 +1931,7 @@ func (r *layerStore) deleteInternal(id string) error {
// Ensure that if we are interrupted, the layer will be cleaned up.
if !layerHasIncompleteFlag(layer) {
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
layer.Flags = make(map[string]any)
}
layer.Flags[incompleteFlag] = true
if err := r.saveFor(layer); err != nil {
@ -2256,33 +2279,33 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
// but they modify in-memory state.
fgetter, err := r.newFileGetter(to)
if err != nil {
errs := multierror.Append(nil, fmt.Errorf("creating file-getter: %w", err))
errs := fmt.Errorf("creating file-getter: %w", err)
if err := decompressor.Close(); err != nil {
errs = multierror.Append(errs, fmt.Errorf("closing decompressor: %w", err))
errs = errors.Join(errs, fmt.Errorf("closing decompressor: %w", err))
}
if err := tsfile.Close(); err != nil {
errs = multierror.Append(errs, fmt.Errorf("closing tarstream headers: %w", err))
errs = errors.Join(errs, fmt.Errorf("closing tarstream headers: %w", err))
}
return nil, errs.ErrorOrNil()
return nil, errs
}
tarstream := asm.NewOutputTarStream(fgetter, metadata)
rc := ioutils.NewReadCloserWrapper(tarstream, func() error {
var errs *multierror.Error
var errs error
if err := decompressor.Close(); err != nil {
errs = multierror.Append(errs, fmt.Errorf("closing decompressor: %w", err))
errs = errors.Join(errs, fmt.Errorf("closing decompressor: %w", err))
}
if err := tsfile.Close(); err != nil {
errs = multierror.Append(errs, fmt.Errorf("closing tarstream headers: %w", err))
errs = errors.Join(errs, fmt.Errorf("closing tarstream headers: %w", err))
}
if err := tarstream.Close(); err != nil {
errs = multierror.Append(errs, fmt.Errorf("closing reconstructed tarstream: %w", err))
errs = errors.Join(errs, fmt.Errorf("closing reconstructed tarstream: %w", err))
}
if err := fgetter.Close(); err != nil {
errs = multierror.Append(errs, fmt.Errorf("closing file-getter: %w", err))
errs = errors.Join(errs, fmt.Errorf("closing file-getter: %w", err))
}
if errs != nil {
return errs.ErrorOrNil()
return errs
}
return nil
})
@ -2452,16 +2475,12 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
for uid := range uidLog {
layer.UIDs = append(layer.UIDs, uid)
}
sort.Slice(layer.UIDs, func(i, j int) bool {
return layer.UIDs[i] < layer.UIDs[j]
})
slices.Sort(layer.UIDs)
layer.GIDs = make([]uint32, 0, len(gidLog))
for gid := range gidLog {
layer.GIDs = append(layer.GIDs, gid)
}
sort.Slice(layer.GIDs, func(i, j int) bool {
return layer.GIDs[i] < layer.GIDs[j]
})
slices.Sort(layer.GIDs)
err = r.saveFor(layer)
@ -2517,7 +2536,7 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver
layer.Metadata = diffOutput.Metadata
if options != nil && options.Flags != nil {
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
layer.Flags = make(map[string]any)
}
maps.Copy(layer.Flags, options.Flags)
}

View file

@ -16,6 +16,7 @@ import (
"strings"
"sync"
"syscall"
"time"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
@ -52,7 +53,7 @@ type (
// This is additional data to be used by the converter. It will
// not survive a round trip through JSON, so it's primarily
// intended for generating archives (i.e., converting writes).
WhiteoutData interface{}
WhiteoutData any
// When unpacking, specifies whether overwriting a directory with a
// non-directory is allowed and vice versa.
NoOverwriteDirNonDir bool
@ -67,6 +68,8 @@ type (
CopyPass bool
// ForceMask, if set, indicates the permission mask used for created files.
ForceMask *os.FileMode
// Timestamp, if set, will be set in each header as create/mod/access time
Timestamp *time.Time
}
)
@ -78,10 +81,9 @@ const (
windows = "windows"
darwin = "darwin"
freebsd = "freebsd"
linux = "linux"
)
var xattrsToIgnore = map[string]interface{}{
var xattrsToIgnore = map[string]any{
"security.selinux": true,
}
@ -179,6 +181,7 @@ func DecompressStream(archive io.Reader) (_ io.ReadCloser, Err error) {
defer func() {
if Err != nil {
// In the normal case, the buffer is embedded in the ReadCloser return.
p.Put(buf)
}
}()
@ -375,7 +378,7 @@ type nosysFileInfo struct {
os.FileInfo
}
func (fi nosysFileInfo) Sys() interface{} {
func (fi nosysFileInfo) Sys() any {
// A Sys value of type *tar.Header is safe as it is system-independent.
// The tar.FileInfoHeader function copies the fields into the returned
// header without performing any OS lookups.
@ -475,7 +478,7 @@ type TarWhiteoutConverter interface {
ConvertReadWithHandler(*tar.Header, string, TarWhiteoutHandler) (bool, error)
}
type tarAppender struct {
type tarWriter struct {
TarWriter *tar.Writer
Buffer *bufio.Writer
@ -494,15 +497,19 @@ type tarAppender struct {
// from the traditional behavior/format to get features like subsecond
// precision in timestamps.
CopyPass bool
// Timestamp, if set, will be set in each header as create/mod/access time
Timestamp *time.Time
}
func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender {
return &tarAppender{
func newTarWriter(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair, timestamp *time.Time) *tarWriter {
return &tarWriter{
SeenFiles: make(map[uint64]string),
TarWriter: tar.NewWriter(writer),
Buffer: pools.BufioWriter32KPool.Get(nil),
IDMappings: idMapping,
ChownOpts: chownOpts,
Timestamp: timestamp,
}
}
@ -521,8 +528,8 @@ func canonicalTarName(name string, isDir bool) (string, error) {
return name, nil
}
// addTarFile adds to the tar archive a file from `path` as `name`
func (ta *tarAppender) addTarFile(path, name string) error {
// addFile adds a file from `path` as `name` to the tar archive.
func (ta *tarWriter) addFile(path, name string) error {
fi, err := os.Lstat(path)
if err != nil {
return err
@ -600,6 +607,13 @@ func (ta *tarAppender) addTarFile(path, name string) error {
hdr.Gname = ""
}
// if override timestamp set, replace all times with this
if ta.Timestamp != nil {
hdr.ModTime = *ta.Timestamp
hdr.AccessTime = *ta.Timestamp
hdr.ChangeTime = *ta.Timestamp
}
maybeTruncateHeaderModTime(hdr)
if ta.WhiteoutConverter != nil {
@ -650,7 +664,7 @@ func (ta *tarAppender) addTarFile(path, name string) error {
return nil
}
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool, forceMask *os.FileMode, buffer []byte) error {
func extractTarFileEntry(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool, forceMask *os.FileMode, buffer []byte) error {
// hdr.Mode is in linux format, which we can use for sycalls,
// but for os.Foo() calls we need the mode converted to os.FileMode,
// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
@ -673,7 +687,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
case tar.TypeDir:
// Create directory unless it exists as a directory already.
// In that case we just want to merge the two
if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
if fi, err := os.Lstat(path); err != nil || !fi.IsDir() {
if err := os.Mkdir(path, mask); err != nil {
return err
}
@ -691,7 +705,9 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
file.Close()
return err
}
file.Close()
if err := file.Close(); err != nil {
return err
}
case tar.TypeBlock, tar.TypeChar:
if inUserns { // cannot create devices in a userns
@ -845,41 +861,39 @@ func Tar(path string, compression Compression) (io.ReadCloser, error) {
// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
// Fix the source path to work with long path names. This is a no-op
// on platforms other than Windows.
srcPath = fixVolumePathPrefix(srcPath)
tarWithOptionsTo := func(dest io.WriteCloser, srcPath string, options *TarOptions) (result error) {
// Fix the source path to work with long path names. This is a no-op
// on platforms other than Windows.
srcPath = fixVolumePathPrefix(srcPath)
defer func() {
if err := dest.Close(); err != nil && result == nil {
result = err
}
}()
pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
if err != nil {
return nil, err
}
pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
if err != nil {
return err
}
pipeReader, pipeWriter := io.Pipe()
compressWriter, err := CompressStream(dest, options.Compression)
if err != nil {
return err
}
compressWriter, err := CompressStream(pipeWriter, options.Compression)
if err != nil {
return nil, err
}
go func() {
ta := newTarAppender(
ta := newTarWriter(
idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
compressWriter,
options.ChownOpts,
options.Timestamp,
)
ta.WhiteoutConverter = GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
ta.CopyPass = options.CopyPass
includeFiles := options.IncludeFiles
defer func() {
// Make sure to check the error on Close.
if err := ta.TarWriter.Close(); err != nil {
logrus.Errorf("Can't close tar writer: %s", err)
}
if err := compressWriter.Close(); err != nil {
logrus.Errorf("Can't close compress writer: %s", err)
}
if err := pipeWriter.Close(); err != nil {
logrus.Errorf("Can't close pipe writer: %s", err)
if err := compressWriter.Close(); err != nil && result == nil {
result = err
}
}()
@ -893,7 +907,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
stat, err := os.Lstat(srcPath)
if err != nil {
return
return err
}
if !stat.IsDir() {
@ -901,22 +915,22 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
// 'walk' will error if "file/." is stat-ed and "file" is not a
// directory. So, we must split the source path and use the
// basename as the include.
if len(options.IncludeFiles) > 0 {
if len(includeFiles) > 0 {
logrus.Warn("Tar: Can't archive a file with includes")
}
dir, base := SplitPathDirEntry(srcPath)
srcPath = dir
options.IncludeFiles = []string{base}
includeFiles = []string{base}
}
if len(options.IncludeFiles) == 0 {
options.IncludeFiles = []string{"."}
if len(includeFiles) == 0 {
includeFiles = []string{"."}
}
seen := make(map[string]bool)
for _, include := range options.IncludeFiles {
for _, include := range includeFiles {
rebaseName := options.RebaseNames[include]
walkRoot := getWalkRoot(srcPath, include)
@ -1002,7 +1016,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
relFilePath = strings.Replace(relFilePath, include, replacement, 1)
}
if err := ta.addTarFile(filePath, relFilePath); err != nil {
if err := ta.addFile(filePath, relFilePath); err != nil {
logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
// if pipe is broken, stop writing tar stream to it
if err == io.ErrClosedPipe {
@ -1011,10 +1025,18 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
}
return nil
}); err != nil {
logrus.Errorf("%s", err)
return
return err
}
}
return ta.TarWriter.Close()
}
pipeReader, pipeWriter := io.Pipe()
go func() {
err := tarWithOptionsTo(pipeWriter, srcPath, options)
if pipeErr := pipeWriter.CloseWithError(err); pipeErr != nil {
logrus.Errorf("Can't close pipe writer: %s", pipeErr)
}
}()
return pipeReader, nil
@ -1110,7 +1132,7 @@ loop:
continue
}
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if !fi.IsDir() || hdr.Typeflag != tar.TypeDir {
if err := os.RemoveAll(path); err != nil {
return err
}
@ -1137,7 +1159,7 @@ loop:
chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
}
if err = createTarFile(path, dest, hdr, trBuf, doChown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
if err = extractTarFileEntry(path, dest, hdr, trBuf, doChown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
return err
}
@ -1201,9 +1223,6 @@ func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decomp
if options == nil {
options = &TarOptions{}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
r := tarArchive
if decompress {
@ -1389,7 +1408,7 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id
} else if runtime.GOOS == darwin {
uid, gid = hdr.Uid, hdr.Gid
if xstat, ok := hdr.PAXRecords[PaxSchilyXattr+idtools.ContainersOverrideXattr]; ok {
attrs := strings.Split(string(xstat), ":")
attrs := strings.Split(xstat, ":")
if len(attrs) >= 3 {
val, err := strconv.ParseUint(attrs[0], 10, 32)
if err != nil {

View file

@ -16,7 +16,7 @@ func getOverlayOpaqueXattrName() string {
return GetOverlayXattrName("opaque")
}
func GetWhiteoutConverter(format WhiteoutFormat, data interface{}) TarWhiteoutConverter {
func GetWhiteoutConverter(format WhiteoutFormat, data any) TarWhiteoutConverter {
if format == OverlayWhiteoutFormat {
if rolayers, ok := data.([]string); ok && len(rolayers) > 0 {
return overlayWhiteoutConverter{rolayers: rolayers}
@ -173,7 +173,7 @@ func (o overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (boo
func isWhiteOut(stat os.FileInfo) bool {
s := stat.Sys().(*syscall.Stat_t)
return major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0
return major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 //nolint:unconvert
}
func GetFileOwner(path string) (uint32, uint32, uint32, error) {

View file

@ -67,7 +67,7 @@ func chmodTarEntry(perm os.FileMode) os.FileMode {
return perm // noop for unix as golang APIs provide perm bits correctly
}
func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat any) (err error) {
s, ok := stat.(*syscall.Stat_t)
if ok {
@ -82,7 +82,7 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (
return
}
func getInodeFromStat(stat interface{}) (inode uint64, err error) {
func getInodeFromStat(stat any) (inode uint64, err error) {
s, ok := stat.(*syscall.Stat_t)
if ok {
@ -92,7 +92,7 @@ func getInodeFromStat(stat interface{}) (inode uint64, err error) {
return
}
func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
func getFileUIDGID(stat any) (idtools.IDPair, error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {

View file

@ -70,7 +70,7 @@ func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
// files, we handle this by comparing for exact times, *or* same
// second count and either a or b having exactly 0 nanoseconds
func sameFsTime(a, b time.Time) bool {
return a == b ||
return a.Equal(b) ||
(a.Unix() == b.Unix() &&
(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}
@ -452,7 +452,7 @@ func ChangesSize(newDir string, changes []Change) int64 {
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
reader, writer := io.Pipe()
go func() {
ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil)
ta := newTarWriter(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil, nil)
// this buffer is needed for the duration of this piped stream
defer pools.BufioWriter32KPool.Put(ta.Buffer)
@ -481,7 +481,7 @@ func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMa
}
} else {
path := filepath.Join(dir, change.Path)
if err := ta.addTarFile(path, change.Path[1:]); err != nil {
if err := ta.addFile(path, change.Path[1:]); err != nil {
logrus.Debugf("Can't add file %s to tar: %s", path, err)
}
}

View file

@ -174,14 +174,7 @@ func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
ix1 := 0
ix2 := 0
for {
if ix1 >= len(names1) {
break
}
if ix2 >= len(names2) {
break
}
for ix1 < len(names1) && ix2 < len(names2) {
ni1 := names1[ix1]
ni2 := names2[ix2]
@ -304,7 +297,7 @@ func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno)
continue
}
builder := make([]byte, 0, dirent.Reclen)
for i := 0; i < len(dirent.Name); i++ {
for i := range len(dirent.Name) {
if dirent.Name[i] == 0 {
break
}

View file

@ -31,9 +31,6 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
if options == nil {
options = &TarOptions{}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
aufsTempdir := ""
@ -107,7 +104,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
}
defer os.RemoveAll(aufsTempdir)
}
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
if err := extractTarFileEntry(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
return 0, err
}
}
@ -176,12 +173,12 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
// We always reset the immutable flag (if present) to allow metadata
// changes and to allow directory modification. The flag will be
// re-applied based on the contents of hdr either at the end for
// directories or in createTarFile otherwise.
// directories or in extractTarFileEntry otherwise.
if fi, err := os.Lstat(path); err == nil {
if err := resetImmutable(path, &fi); err != nil {
return 0, err
}
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if !fi.IsDir() || hdr.Typeflag != tar.TypeDir {
if err := os.RemoveAll(path); err != nil {
return 0, err
}
@ -212,7 +209,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
return 0, err
}
if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
if err := extractTarFileEntry(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
return 0, err
}

View file

@ -69,9 +69,6 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
options = &archive.TarOptions{}
options.InUserNS = unshare.IsRootless()
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
rootIDs := idMappings.RootPair()

View file

@ -98,9 +98,6 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions
options.InUserNS = true
}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
data, err := json.Marshal(options)
if err != nil {

View file

@ -1,4 +0,0 @@
package chrootarchive
func init() {
}

View file

@ -1,4 +0,0 @@
package chrootarchive
func init() {
}

View file

@ -65,7 +65,7 @@ func (bf *bloomFilter) writeTo(writer io.Writer) error {
if err := binary.Write(writer, binary.LittleEndian, uint64(len(bf.bitArray))); err != nil {
return err
}
if err := binary.Write(writer, binary.LittleEndian, uint32(bf.k)); err != nil {
if err := binary.Write(writer, binary.LittleEndian, bf.k); err != nil {
return err
}
if err := binary.Write(writer, binary.LittleEndian, bf.bitArray); err != nil {

View file

@ -7,6 +7,7 @@ import (
"fmt"
"io"
"maps"
"slices"
"strconv"
"time"
@ -17,7 +18,6 @@ import (
"github.com/vbatts/tar-split/archive/tar"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
expMaps "golang.org/x/exp/maps"
)
const (
@ -87,7 +87,7 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
return nil, 0, fmt.Errorf("parse ToC offset: %w", err)
}
size := int64(blobSize - footerSize - tocOffset)
size := blobSize - footerSize - tocOffset
// set a reasonable limit
if size > maxTocSize {
// Not errFallbackCanConvert: we would still use too much memory.
@ -310,7 +310,7 @@ func ensureTOCMatchesTarSplit(toc *minimal.TOC, tarSplit []byte) error {
return err
}
if len(pendingFiles) != 0 {
remaining := expMaps.Keys(pendingFiles)
remaining := slices.Collect(maps.Keys(pendingFiles))
if len(remaining) > 5 {
remaining = remaining[:5] // Just to limit the size of the output.
}

View file

@ -142,10 +142,7 @@ func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) {
rc.IsLastChunkZeros = false
if rc.pendingHole > 0 {
toCopy := int64(len(b))
if rc.pendingHole < toCopy {
toCopy = rc.pendingHole
}
toCopy := min(rc.pendingHole, int64(len(b)))
rc.pendingHole -= toCopy
for i := int64(0); i < toCopy; i++ {
b[i] = 0
@ -163,7 +160,7 @@ func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) {
return false, 0, io.EOF
}
for i := 0; i < len(b); i++ {
for i := range b {
holeLen, n, err := rc.reader.readByte()
if err != nil {
if err == io.EOF {

View file

@ -43,7 +43,7 @@ func escaped(val []byte, escape int) string {
}
var result string
for _, c := range []byte(val) {
for _, c := range val {
hexEscape := false
var special string
@ -214,7 +214,7 @@ func dumpNode(out io.Writer, added map[string]*minimal.FileMetadata, links map[s
}
// GenerateDump generates a dump of the TOC in the same format as `composefs-info dump`
func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, error) {
func GenerateDump(tocI any, verityDigests map[string]string) (io.Reader, error) {
toc, ok := tocI.(*minimal.TOC)
if !ok {
return nil, fmt.Errorf("invalid TOC type")

Some files were not shown because too many files have changed in this diff Show more