Port osbuild/images v0.33.0 with dot-notation to composer

Update the osbuild/images to the version which introduces "dot notation"
for distro release versions.
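
For illustration, a minimal sketch of what the dot-notation lookup enables;
it assumes the distrofactory.NewDefault()/GetDistro() API from osbuild/images
and illustrative distro names, and is not itself part of this commit:

package main

import (
	"fmt"

	"github.com/osbuild/images/pkg/distrofactory"
)

func main() {
	fac := distrofactory.NewDefault()
	// With dot notation, a minor release such as "rhel-8.10" resolves to a
	// concrete distro implementation; unknown names yield nil.
	for _, name := range []string{"fedora-39", "rhel-8.10"} {
		if d := fac.GetDistro(name); d != nil {
			fmt.Println(name, "->", d.Name(), d.Releasever())
		} else {
			fmt.Println(name, "-> not found")
		}
	}
}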

 - Replace all uses of distroregistry with distrofactory.
 - Delete local version of reporegistry and use the one from the
   osbuild/images.
 - Weldr: unify `createWeldrAPI()` and `createWeldrAPI2()` into a single
   `createTestWeldrAPI()` function.
 - store/fixture: rework fixtures to allow overriding the host distro
   name and host architecture name. A cleanup function to restore the
   host distro and arch names is always part of the fixture struct.
 - Delete `distro_mock` package, since it is no longer used.
 - Bump the required version of osbuild to 98, because the OSCAP
   customization is using the 'compress_results' stage option, which is
   not available in older versions of osbuild.

Signed-off-by: Tomáš Hozza <thozza@redhat.com>
Tomáš Hozza authored on 2024-01-08 17:58:49 +01:00; committed by Achilleas Koutsou
parent f6ff8c40dd
commit 625b1578fa
1166 changed files with 154457 additions and 5508 deletions

@@ -0,0 +1,284 @@
package directory
import (
"context"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"github.com/containers/image/v5/internal/imagedestination/impl"
"github.com/containers/image/v5/internal/imagedestination/stubs"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
const version = "Directory Transport Version: 1.1\n"
// ErrNotContainerImageDir indicates that the directory doesn't match the expected contents of a directory created
// using the 'dir' transport
var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data")
type dirImageDestination struct {
impl.Compat
impl.PropertyMethodsInitialize
stubs.NoPutBlobPartialInitialize
stubs.AlwaysSupportsSignatures
ref dirReference
}
// newImageDestination returns an ImageDestination for writing to a directory.
func newImageDestination(sys *types.SystemContext, ref dirReference) (private.ImageDestination, error) {
desiredLayerCompression := types.PreserveOriginal
if sys != nil {
if sys.DirForceCompress {
desiredLayerCompression = types.Compress
if sys.DirForceDecompress {
return nil, fmt.Errorf("Cannot compress and decompress at the same time")
}
}
if sys.DirForceDecompress {
desiredLayerCompression = types.Decompress
}
}
// If directory exists check if it is empty
// if not empty, check whether the contents match that of a container image directory and overwrite the contents
// if the contents don't match throw an error
dirExists, err := pathExists(ref.resolvedPath)
if err != nil {
return nil, fmt.Errorf("checking for path %q: %w", ref.resolvedPath, err)
}
if dirExists {
isEmpty, err := isDirEmpty(ref.resolvedPath)
if err != nil {
return nil, err
}
if !isEmpty {
versionExists, err := pathExists(ref.versionPath())
if err != nil {
return nil, fmt.Errorf("checking if path exists %q: %w", ref.versionPath(), err)
}
if versionExists {
contents, err := os.ReadFile(ref.versionPath())
if err != nil {
return nil, err
}
// check if contents of version file is what we expect it to be
if string(contents) != version {
return nil, ErrNotContainerImageDir
}
} else {
return nil, ErrNotContainerImageDir
}
// delete directory contents so that only one image is in the directory at a time
if err = removeDirContents(ref.resolvedPath); err != nil {
return nil, fmt.Errorf("erasing contents in %q: %w", ref.resolvedPath, err)
}
logrus.Debugf("overwriting existing container image directory %q", ref.resolvedPath)
}
} else {
// create directory if it doesn't exist
if err := os.MkdirAll(ref.resolvedPath, 0755); err != nil {
return nil, fmt.Errorf("unable to create directory %q: %w", ref.resolvedPath, err)
}
}
// create version file
err = os.WriteFile(ref.versionPath(), []byte(version), 0644)
if err != nil {
return nil, fmt.Errorf("creating version file %q: %w", ref.versionPath(), err)
}
d := &dirImageDestination{
PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
SupportedManifestMIMETypes: nil,
DesiredLayerCompression: desiredLayerCompression,
AcceptsForeignLayerURLs: false,
MustMatchRuntimeOS: false,
IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil.
HasThreadSafePutBlob: true,
}),
NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
ref: ref,
}
d.Compat = impl.AddCompat(d)
return d, nil
}
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
func (d *dirImageDestination) Reference() types.ImageReference {
return d.ref
}
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *dirImageDestination) Close() error {
return nil
}
// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
blobFile, err := os.CreateTemp(d.ref.path, "dir-put-blob")
if err != nil {
return private.UploadedBlob{}, err
}
succeeded := false
explicitClosed := false
defer func() {
if !explicitClosed {
blobFile.Close()
}
if !succeeded {
os.Remove(blobFile.Name())
}
}()
digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
size, err := io.Copy(blobFile, stream)
if err != nil {
return private.UploadedBlob{}, err
}
blobDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size {
return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
}
if err := blobFile.Sync(); err != nil {
return private.UploadedBlob{}, err
}
// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
// On Windows, the “permissions of newly created files” argument to syscall.Open is
// ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod,
// always fails on Windows.
if runtime.GOOS != "windows" {
if err := blobFile.Chmod(0644); err != nil {
return private.UploadedBlob{}, err
}
}
blobPath := d.ref.layerPath(blobDigest)
// need to explicitly close the file, since a rename won't otherwise work on Windows
blobFile.Close()
explicitClosed = true
if err := os.Rename(blobFile.Name(), blobPath); err != nil {
return private.UploadedBlob{}, err
}
succeeded = true
return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if !impl.OriginalBlobMatchesRequiredCompression(options) {
return false, private.ReusedBlob{}, nil
}
if info.Digest == "" {
return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with unknown digest")
}
blobPath := d.ref.layerPath(info.Digest)
finfo, err := os.Stat(blobPath)
if err != nil && os.IsNotExist(err) {
return false, private.ReusedBlob{}, nil
}
if err != nil {
return false, private.ReusedBlob{}, err
}
return true, private.ReusedBlob{Digest: info.Digest, Size: finfo.Size()}, nil
}
// PutManifest writes manifest to the destination.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write the manifest for (when
// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
// by `manifest.Digest()`.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error {
return os.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644)
}
// PutSignaturesWithFormat writes a set of signatures to the destination.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
// MUST be called after PutManifest (signatures may reference manifest contents).
func (d *dirImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
for i, sig := range signatures {
blob, err := signature.Blob(sig)
if err != nil {
return err
}
if err := os.WriteFile(d.ref.signaturePath(i, instanceDigest), blob, 0644); err != nil {
return err
}
}
return nil
}
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
// original manifest list digest, if desired.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *dirImageDestination) Commit(context.Context, types.UnparsedImage) error {
return nil
}
// returns true if path exists
func pathExists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
// returns true if directory is empty
func isDirEmpty(path string) (bool, error) {
files, err := os.ReadDir(path)
if err != nil {
return false, err
}
return len(files) == 0, nil
}
// deletes the contents of a directory
func removeDirContents(path string) error {
files, err := os.ReadDir(path)
if err != nil {
return err
}
for _, file := range files {
if err := os.RemoveAll(filepath.Join(path, file.Name())); err != nil {
return err
}
}
return nil
}
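
To see this destination exercised end to end, here is a hedged usage sketch
going through the public copy API; the image name and target path are
illustrative only:

package main

import (
	"context"
	"log"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/directory"
	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/signature"
)

func main() {
	srcRef, err := docker.ParseReference("//quay.io/example/app:latest") // illustrative source
	if err != nil {
		log.Fatal(err)
	}
	// Writing here runs the version-file and overwrite checks from newImageDestination above.
	destRef, err := directory.NewReference("/tmp/app-image")
	if err != nil {
		log.Fatal(err)
	}
	policy := &signature.Policy{Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()}}
	pc, err := signature.NewPolicyContext(policy)
	if err != nil {
		log.Fatal(err)
	}
	defer pc.Destroy()
	if _, err := copy.Image(context.Background(), pc, destRef, srcRef, &copy.Options{}); err != nil {
		log.Fatal(err)
	}
}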

@@ -0,0 +1,102 @@
package directory
import (
"context"
"fmt"
"io"
"os"
"github.com/containers/image/v5/internal/imagesource/impl"
"github.com/containers/image/v5/internal/imagesource/stubs"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
)
type dirImageSource struct {
impl.Compat
impl.PropertyMethodsInitialize
impl.DoesNotAffectLayerInfosForCopy
stubs.NoGetBlobAtInitialize
ref dirReference
}
// newImageSource returns an ImageSource reading from an existing directory.
// The caller must call .Close() on the returned ImageSource.
func newImageSource(ref dirReference) private.ImageSource {
s := &dirImageSource{
PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
HasThreadSafeGetBlob: false,
}),
NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
ref: ref,
}
s.Compat = impl.AddCompat(s)
return s
}
// Reference returns the reference used to set up this source, _as specified by the user_
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
func (s *dirImageSource) Reference() types.ImageReference {
return s.ref
}
// Close removes resources associated with an initialized ImageSource, if any.
func (s *dirImageSource) Close() error {
return nil
}
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
m, err := os.ReadFile(s.ref.manifestPath(instanceDigest))
if err != nil {
return nil, "", err
}
return m, manifest.GuessMIMEType(m), err
}
// GetBlob returns a stream for the specified blob, and the blob's size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
r, err := os.Open(s.ref.layerPath(info.Digest))
if err != nil {
return nil, -1, err
}
fi, err := r.Stat()
if err != nil {
return nil, -1, err
}
return r, fi.Size(), nil
}
// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
// (e.g. if the source never returns manifest lists).
func (s *dirImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
signatures := []signature.Signature{}
for i := 0; ; i++ {
path := s.ref.signaturePath(i, instanceDigest)
sigBlob, err := os.ReadFile(path)
if err != nil {
if os.IsNotExist(err) {
break
}
return nil, err
}
signature, err := signature.FromBlob(sigBlob)
if err != nil {
return nil, fmt.Errorf("parsing signature %q: %w", path, err)
}
signatures = append(signatures, signature)
}
return signatures, nil
}

@@ -0,0 +1,188 @@
package directory
import (
"context"
"errors"
"fmt"
"path/filepath"
"strings"
"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
)
func init() {
transports.Register(Transport)
}
// Transport is an ImageTransport for directory paths.
var Transport = dirTransport{}
type dirTransport struct{}
func (t dirTransport) Name() string {
return "dir"
}
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
func (t dirTransport) ParseReference(reference string) (types.ImageReference, error) {
return NewReference(reference)
}
// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
// scope passed to this function will not be "", that value is always allowed.
func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error {
if !strings.HasPrefix(scope, "/") {
return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope)
}
// Refuse also "/", otherwise "/" and "" would have the same semantics,
// and "" could be unexpectedly shadowed by the "/" entry.
if scope == "/" {
return errors.New(`Invalid scope "/": Use the generic default scope ""`)
}
cleaned := filepath.Clean(scope)
if cleaned != scope {
return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned)
}
return nil
}
// dirReference is an ImageReference for directory paths.
type dirReference struct {
// Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time!
// Either of the paths may point to a different, or no, inode over time. resolvedPath may contain symbolic links, and so on.
// Generally we follow the intent of the user, and use the "path" member for filesystem operations (e.g. the user can use a relative path to avoid
// being exposed to symlinks and renames in the parent directories to the working directory).
// (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
path string // As specified by the user. May be relative, contain symlinks, etc.
resolvedPath string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
}
// There is no directory.ParseReference because it is rather pointless.
// Callers who need a transport-independent interface will go through
// dirTransport.ParseReference; callers who intentionally deal with directories
// can use directory.NewReference.
// NewReference returns a directory reference for a specified path.
//
// We do not expose an API supplying the resolvedPath; we could, but recomputing it
// is generally cheap enough that we prefer being confident about the properties of resolvedPath.
func NewReference(path string) (types.ImageReference, error) {
resolved, err := explicitfilepath.ResolvePathToFullyExplicit(path)
if err != nil {
return nil, err
}
return dirReference{path: path, resolvedPath: resolved}, nil
}
func (ref dirReference) Transport() types.ImageTransport {
return Transport
}
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref dirReference) StringWithinTransport() string {
return ref.path
}
// DockerReference returns a Docker reference associated with this reference
// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
func (ref dirReference) DockerReference() reference.Named {
return nil
}
// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
// (i.e. various references with exactly the same semantics should return the same configuration identity)
// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
// not required/guaranteed that it will be a valid input to Transport().ParseReference().
// Returns "" if configuration identities for these references are not supported.
func (ref dirReference) PolicyConfigurationIdentity() string {
return ref.resolvedPath
}
// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
// in order, terminating on first match, and an implicit "" is always checked at the end.
// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
// and each following element to be a prefix of the element preceding it.
func (ref dirReference) PolicyConfigurationNamespaces() []string {
res := []string{}
path := ref.resolvedPath
for {
lastSlash := strings.LastIndex(path, "/")
if lastSlash == -1 || lastSlash == 0 {
break
}
path = path[:lastSlash]
res = append(res, path)
}
// Note that we do not include "/"; it is redundant with the default "" global default,
// and rejected by dirTransport.ValidatePolicyConfigurationScope above.
return res
}
// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
// The caller must call .Close() on the returned ImageCloser.
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref dirReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
return image.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource.
func (ref dirReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
return newImageSource(ref), nil
}
// NewImageDestination returns a types.ImageDestination for this reference.
// The caller must call .Close() on the returned ImageDestination.
func (ref dirReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
return newImageDestination(sys, ref)
}
// DeleteImage deletes the named image from the registry, if supported.
func (ref dirReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
return errors.New("Deleting images not implemented for dir: images")
}
// manifestPath returns a path for the manifest within a directory using our conventions.
func (ref dirReference) manifestPath(instanceDigest *digest.Digest) string {
if instanceDigest != nil {
return filepath.Join(ref.path, instanceDigest.Encoded()+".manifest.json")
}
return filepath.Join(ref.path, "manifest.json")
}
// layerPath returns a path for a layer tarball within a directory using our conventions.
func (ref dirReference) layerPath(digest digest.Digest) string {
// FIXME: Should we keep the digest identification?
return filepath.Join(ref.path, digest.Encoded())
}
// signaturePath returns a path for a signature within a directory using our conventions.
func (ref dirReference) signaturePath(index int, instanceDigest *digest.Digest) string {
if instanceDigest != nil {
return filepath.Join(ref.path, fmt.Sprintf(instanceDigest.Encoded()+".signature-%d", index+1))
}
return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1))
}
// versionPath returns a path for the version file within a directory using our conventions.
func (ref dirReference) versionPath() string {
return filepath.Join(ref.path, "version")
}
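
To make the ancestor walk in PolicyConfigurationNamespaces concrete, a small
sketch; the paths are hypothetical and NewReference needs the parent directory
to exist for path resolution:

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	"github.com/containers/image/v5/directory"
)

func main() {
	base, err := os.MkdirTemp("", "img")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(base)

	ref, err := directory.NewReference(filepath.Join(base, "app"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("identity:", ref.PolicyConfigurationIdentity())
	// Each ancestor of the resolved path, longest first; "/" and "" are
	// intentionally omitted, matching ValidatePolicyConfigurationScope.
	fmt.Println("namespaces:", ref.PolicyConfigurationNamespaces())
}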

@@ -0,0 +1,101 @@
package daemon
import (
"net/http"
"path/filepath"
"github.com/containers/image/v5/types"
dockerclient "github.com/docker/docker/client"
"github.com/docker/go-connections/tlsconfig"
)
const (
// The default API version to be used in case none is explicitly specified
defaultAPIVersion = "1.22"
)
// NewDockerClient initializes a new API client based on the passed SystemContext.
func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) {
host := dockerclient.DefaultDockerHost
if sys != nil && sys.DockerDaemonHost != "" {
host = sys.DockerDaemonHost
}
opts := []dockerclient.Opt{
dockerclient.WithHost(host),
dockerclient.WithVersion(defaultAPIVersion),
}
// We conditionalize building the TLS configuration only to TLS sockets:
//
// The dockerclient.Client implementation differentiates between
// - Client.proto, which is ~how the connection is established (IP / AF_UNIX/Windows)
// - Client.scheme, which is what is sent over the connection (HTTP with/without TLS).
//
// Only Client.proto is set from the URL in dockerclient.WithHost(),
// Client.scheme is detected based on a http.Client.TLSClientConfig presence;
// dockerclient.WithHTTPClient with a client that has TLSClientConfig set
// will, by default, trigger an attempt to use TLS.
//
// So, don't use WithHTTPClient for unix:// sockets at all.
//
// Similarly, if we want to communicate over plain HTTP on a TCP socket (http://),
// we also should not set TLSClientConfig. We continue to use WithHTTPClient
// with our slightly non-default settings to avoid a behavior change on updates of c/image.
//
// Alternatively we could use dockerclient.WithScheme to drive the TLS/non-TLS logic
// explicitly, but we would still want to set WithHTTPClient (differently) for https:// and http:// ;
// so that would not be any simpler.
serverURL, err := dockerclient.ParseHostURL(host)
if err != nil {
return nil, err
}
switch serverURL.Scheme {
case "unix": // Nothing
case "http":
hc := httpConfig()
opts = append(opts, dockerclient.WithHTTPClient(hc))
default:
hc, err := tlsConfig(sys)
if err != nil {
return nil, err
}
opts = append(opts, dockerclient.WithHTTPClient(hc))
}
return dockerclient.NewClientWithOpts(opts...)
}
func tlsConfig(sys *types.SystemContext) (*http.Client, error) {
options := tlsconfig.Options{}
if sys != nil && sys.DockerDaemonInsecureSkipTLSVerify {
options.InsecureSkipVerify = true
}
if sys != nil && sys.DockerDaemonCertPath != "" {
options.CAFile = filepath.Join(sys.DockerDaemonCertPath, "ca.pem")
options.CertFile = filepath.Join(sys.DockerDaemonCertPath, "cert.pem")
options.KeyFile = filepath.Join(sys.DockerDaemonCertPath, "key.pem")
}
tlsc, err := tlsconfig.Client(options)
if err != nil {
return nil, err
}
return &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsc,
},
CheckRedirect: dockerclient.CheckRedirect,
}, nil
}
func httpConfig() *http.Client {
return &http.Client{
Transport: &http.Transport{
TLSClientConfig: nil,
},
CheckRedirect: dockerclient.CheckRedirect,
}
}
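
The scheme selection above is driven entirely by the configured daemon host.
A hedged sketch of the SystemContext fields involved; the host values and
cert directory are illustrative:

package daemonexample // hypothetical

import "github.com/containers/image/v5/types"

// Unix socket (the default host): no custom HTTP client is installed.
var local = &types.SystemContext{}

// Plain HTTP over TCP: takes the httpConfig() path, TLSClientConfig deliberately nil.
var plain = &types.SystemContext{DockerDaemonHost: "http://127.0.0.1:2375"}

// TLS over TCP: takes the tlsConfig() path, expecting ca.pem/cert.pem/key.pem.
var secure = &types.SystemContext{
	DockerDaemonHost:     "tcp://10.0.0.5:2376",
	DockerDaemonCertPath: "/etc/docker/certs",
}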

@@ -0,0 +1,186 @@
package daemon
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
"github.com/docker/docker/client"
"github.com/sirupsen/logrus"
)
type daemonImageDestination struct {
ref daemonReference
mustMatchRuntimeOS bool
*tarfile.Destination // Implements most of types.ImageDestination
archive *tarfile.Writer
// For talking to imageLoadGoroutine
goroutineCancel context.CancelFunc
statusChannel <-chan error
writer *io.PipeWriter
// Other state
committed bool // writer has been closed
}
// newImageDestination returns a types.ImageDestination for the specified image reference.
func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daemonReference) (private.ImageDestination, error) {
if ref.ref == nil {
return nil, fmt.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
}
namedTaggedRef, ok := ref.ref.(reference.NamedTagged)
if !ok {
return nil, fmt.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
}
var mustMatchRuntimeOS = true
if sys != nil && sys.DockerDaemonHost != client.DefaultDockerHost {
mustMatchRuntimeOS = false
}
c, err := newDockerClient(sys)
if err != nil {
return nil, fmt.Errorf("initializing docker engine client: %w", err)
}
reader, writer := io.Pipe()
archive := tarfile.NewWriter(writer)
// Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it.
statusChannel := make(chan error, 1)
goroutineContext, goroutineCancel := context.WithCancel(ctx)
go imageLoadGoroutine(goroutineContext, c, reader, statusChannel)
return &daemonImageDestination{
ref: ref,
mustMatchRuntimeOS: mustMatchRuntimeOS,
Destination: tarfile.NewDestination(sys, archive, ref.Transport().Name(), namedTaggedRef),
archive: archive,
goroutineCancel: goroutineCancel,
statusChannel: statusChannel,
writer: writer,
committed: false,
}, nil
}
// imageLoadGoroutine accepts tar stream on reader, sends it to c, and reports error or success by writing to statusChannel
func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeReader, statusChannel chan<- error) {
defer c.Close()
err := errors.New("Internal error: unexpected panic in imageLoadGoroutine")
defer func() {
logrus.Debugf("docker-daemon: sending done, status %v", err)
statusChannel <- err
}()
defer func() {
if err == nil {
reader.Close()
} else {
if err := reader.CloseWithError(err); err != nil {
logrus.Debugf("imageLoadGoroutine: Error during reader.CloseWithError: %v", err)
}
}
}()
err = imageLoad(ctx, c, reader)
}
// imageLoad accepts tar stream on reader and sends it to c
func imageLoad(ctx context.Context, c *client.Client, reader *io.PipeReader) error {
resp, err := c.ImageLoad(ctx, reader, true)
if err != nil {
return fmt.Errorf("starting a load operation in docker engine: %w", err)
}
defer resp.Body.Close()
// jsonError and jsonMessage are small subsets of docker/docker/pkg/jsonmessage.JSONError and JSONMessage,
// copied here to minimize dependencies.
type jsonError struct {
Message string `json:"message,omitempty"`
}
type jsonMessage struct {
Error *jsonError `json:"errorDetail,omitempty"`
}
dec := json.NewDecoder(resp.Body)
for {
var msg jsonMessage
if err := dec.Decode(&msg); err != nil {
if err == io.EOF {
break
}
return fmt.Errorf("parsing docker load progress: %w", err)
}
if msg.Error != nil {
return fmt.Errorf("docker engine reported: %s", msg.Error.Message)
}
}
return nil // No error reported = success
}
// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved
func (d *daemonImageDestination) DesiredLayerCompression() types.LayerCompression {
return types.PreserveOriginal
}
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *daemonImageDestination) MustMatchRuntimeOS() bool {
return d.mustMatchRuntimeOS
}
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *daemonImageDestination) Close() error {
if !d.committed {
logrus.Debugf("docker-daemon: Closing tar stream to abort loading")
// In principle, goroutineCancel() should abort the HTTP request and stop the process from continuing.
// In practice, though, various HTTP implementations used by client.Client.ImageLoad() (including
// https://github.com/golang/net/blob/master/context/ctxhttp/ctxhttp_pre17.go and the
// net/http version with native Context support in Go 1.7) do not always actually immediately cancel
// the operation: they may process the HTTP request, or a part of it, to completion in a goroutine, and
// return early if the context is canceled without terminating the goroutine at all.
// So we need this CloseWithError to terminate sending the HTTP request Body
// immediately, and hopefully, through terminating the sending which uses "Transfer-Encoding: chunked" without sending
// the terminating zero-length chunk, prevent the docker daemon from processing the tar stream at all.
// Whether that works or not, closing the PipeWriter seems desirable in any case.
if err := d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .Commit()")); err != nil {
return err
}
}
d.goroutineCancel()
return nil
}
func (d *daemonImageDestination) Reference() types.ImageReference {
return d.ref
}
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
// original manifest list digest, if desired.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *daemonImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
logrus.Debugf("docker-daemon: Closing tar stream")
if err := d.archive.Close(); err != nil {
return err
}
if err := d.writer.Close(); err != nil {
return err
}
d.committed = true // We may still fail, but we are done sending to imageLoadGoroutine.
logrus.Debugf("docker-daemon: Waiting for status")
select {
case <-ctx.Done():
return ctx.Err()
case err := <-d.statusChannel:
return err
}
}
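
For completeness, a hedged sketch of constructing a reference that this
destination accepts; the name is illustrative. Commit() is what closes the
tar stream and unblocks the status read above:

package main

import (
	"fmt"
	"log"

	"github.com/containers/image/v5/docker/daemon"
	"github.com/containers/image/v5/docker/reference"
)

func main() {
	named, err := reference.ParseNormalizedNamed("example.com/app:v1") // must be name:tag
	if err != nil {
		log.Fatal(err)
	}
	ref, err := daemon.NewReference("", named)
	if err != nil {
		log.Fatal(err)
	}
	// NewImageDestination on this ref starts imageLoadGoroutine; a later
	// Commit() waits on statusChannel for the daemon's verdict.
	fmt.Println(ref.StringWithinTransport())
}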

@@ -0,0 +1,56 @@
package daemon
import (
"context"
"fmt"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
)
type daemonImageSource struct {
ref daemonReference
*tarfile.Source // Implements most of types.ImageSource
}
// newImageSource returns a types.ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
//
// It would be great if we were able to stream the input tar as it is being
// sent; but Docker sends the top-level manifest, which determines which paths
// to look for, at the end, so we will need to seek back and re-read, several times.
// (We could, perhaps, expect an exact sequence, assume that the first plaintext file
// is the config, and that the following len(RootFS) files are the layers, but that feels
// way too brittle.)
func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonReference) (private.ImageSource, error) {
c, err := newDockerClient(sys)
if err != nil {
return nil, fmt.Errorf("initializing docker engine client: %w", err)
}
defer c.Close()
// Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference.
// Either way ImageSave should create a tarball with exactly one image.
inputStream, err := c.ImageSave(ctx, []string{ref.StringWithinTransport()})
if err != nil {
return nil, fmt.Errorf("loading image from docker engine: %w", err)
}
defer inputStream.Close()
archive, err := tarfile.NewReaderFromStream(sys, inputStream)
if err != nil {
return nil, err
}
src := tarfile.NewSource(archive, true, ref.Transport().Name(), nil, -1)
return &daemonImageSource{
ref: ref,
Source: src,
}, nil
}
// Reference returns the reference used to set up this source, _as specified by the user_
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
func (s *daemonImageSource) Reference() types.ImageReference {
return s.ref
}

@@ -0,0 +1,219 @@
package daemon
import (
"context"
"errors"
"fmt"
"github.com/containers/image/v5/docker/policyconfiguration"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
)
func init() {
transports.Register(Transport)
}
// Transport is an ImageTransport for images managed by a local Docker daemon.
var Transport = daemonTransport{}
type daemonTransport struct{}
// Name returns the name of the transport, which must be unique among other transports.
func (t daemonTransport) Name() string {
return "docker-daemon"
}
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
func (t daemonTransport) ParseReference(reference string) (types.ImageReference, error) {
return ParseReference(reference)
}
// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
// scope passed to this function will not be "", that value is always allowed.
func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error {
// ID values cannot be effectively namespaced, and are clearly invalid host:port values.
if _, err := digest.Parse(scope); err == nil {
return fmt.Errorf(`docker-daemon: can not use algo:digest value %s as a namespace`, scope)
}
// FIXME? We could be verifying the various character set and length restrictions
// from docker/distribution/reference.regexp.go, but other than that there
// are few semantically invalid strings.
return nil
}
// daemonReference is an ImageReference for images managed by a local Docker daemon
// Exactly one of id and ref can be set.
// For daemonImageSource, both id and ref are acceptable, ref must not be a NameOnly (interpreted as all tags in that repository by the daemon)
// For daemonImageDestination, it must be a ref, which is NamedTagged.
// (We could, in principle, also allow storing images without tagging them, and the user would have to refer to them using the docker image ID = config digest.
// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don't bother.)
type daemonReference struct {
id digest.Digest
ref reference.Named // !reference.IsNameOnly
}
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
func ParseReference(refString string) (types.ImageReference, error) {
// This is intended to be compatible with reference.ParseAnyReference, but more strict about refusing some of the ambiguous cases.
// In particular, this rejects unprefixed digest values (64 hex chars), and sha256 digest prefixes (sha256:fewer-than-64-hex-chars).
// digest:hexstring is structurally the same as a reponame:tag (meaning docker.io/library/reponame:tag).
// reference.ParseAnyReference interprets such strings as digests.
if dgst, err := digest.Parse(refString); err == nil {
// The daemon explicitly refuses to tag images with a reponame equal to digest.Canonical - but _only_ this digest name.
// Other digest references are ambiguous, so refuse them.
if dgst.Algorithm() != digest.Canonical {
return nil, fmt.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical)
}
return NewReference(dgst, nil)
}
ref, err := reference.ParseNormalizedNamed(refString) // This also rejects unprefixed digest values
if err != nil {
return nil, err
}
if reference.FamiliarName(ref) == digest.Canonical.String() {
return nil, fmt.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical)
}
return NewReference("", ref)
}
// NewReference returns a docker-daemon reference for either the supplied image ID (config digest) or the supplied reference (which must satisfy !reference.IsNameOnly)
func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference, error) {
if id != "" && ref != nil {
return nil, errors.New("docker-daemon: reference must not have an image ID and a reference string specified at the same time")
}
if ref != nil {
if reference.IsNameOnly(ref) {
return nil, fmt.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
}
// A github.com/distribution/reference value can have a tag and a digest at the same time!
// Most versions of docker/reference do not handle that (ignoring the tag), so reject such input.
// This MAY be accepted in the future.
// (Even if it were supported, the semantics of policy namespaces are unclear - should we drop
// the tag or the digest first?)
_, isTagged := ref.(reference.NamedTagged)
_, isDigested := ref.(reference.Canonical)
if isTagged && isDigested {
return nil, fmt.Errorf("docker-daemon: references with both a tag and digest are currently not supported")
}
}
return daemonReference{
id: id,
ref: ref,
}, nil
}
func (ref daemonReference) Transport() types.ImageTransport {
return Transport
}
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
// instead, see transports.ImageName().
func (ref daemonReference) StringWithinTransport() string {
switch {
case ref.id != "":
return ref.id.String()
case ref.ref != nil:
return reference.FamiliarString(ref.ref)
default: // Coverage: Should never happen, NewReference above should refuse such values.
panic("Internal inconsistency: daemonReference has empty id and nil ref")
}
}
// DockerReference returns a Docker reference associated with this reference
// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
func (ref daemonReference) DockerReference() reference.Named {
return ref.ref // May be nil
}
// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
// (i.e. various references with exactly the same semantics should return the same configuration identity)
// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
// not required/guaranteed that it will be a valid input to Transport().ParseReference().
// Returns "" if configuration identities for these references are not supported.
func (ref daemonReference) PolicyConfigurationIdentity() string {
// We must allow referring to images in the daemon by image ID, otherwise untagged images would not be accessible.
// But the existence of image IDs means that we can't truly well namespace the input:
// a single image can be namespaced either using the name or the ID depending on how it is named.
//
// That's fairly unexpected, but we have to cope somehow.
//
// So, use the ordinary docker/policyconfiguration namespacing for named images.
// image IDs all fall into the root namespace.
// Users can set up the root namespace to be either untrusted or rejected,
// and to set up specific trust for named namespaces. This allows verifying image
// identity when a name is known, and unnamed images would be untrusted or rejected.
switch {
case ref.id != "":
return "" // This still allows using the default "" scope to define a global policy for ID-identified images.
case ref.ref != nil:
res, err := policyconfiguration.DockerReferenceIdentity(ref.ref)
if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure.
panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err))
}
return res
default: // Coverage: Should never happen, NewReference above should refuse such values.
panic("Internal inconsistency: daemonReference has empty id and nil ref")
}
}
// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
// in order, terminating on first match, and an implicit "" is always checked at the end.
// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
// and each following element to be a prefix of the element preceding it.
func (ref daemonReference) PolicyConfigurationNamespaces() []string {
// See the explanation in daemonReference.PolicyConfigurationIdentity.
switch {
case ref.id != "":
return []string{}
case ref.ref != nil:
return policyconfiguration.DockerReferenceNamespaces(ref.ref)
default: // Coverage: Should never happen, NewReference above should refuse such values.
panic("Internal inconsistency: daemonReference has empty id and nil ref")
}
}
// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
// The caller must call .Close() on the returned ImageCloser.
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref daemonReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
return image.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource.
func (ref daemonReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
return newImageSource(ctx, sys, ref)
}
// NewImageDestination returns a types.ImageDestination for this reference.
// The caller must call .Close() on the returned ImageDestination.
func (ref daemonReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
return newImageDestination(ctx, sys, ref)
}
// DeleteImage deletes the named image from the registry, if supported.
func (ref daemonReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
// Should this just untag the image? Should this stop running containers?
// The semantics is not quite as clear as for remote repositories.
// The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant.
return errors.New("Deleting images not implemented for docker-daemon: images")
}
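
A short sketch of which reference forms the ParseReference above accepts;
the names are illustrative:

package main

import (
	"fmt"
	"strings"

	"github.com/containers/image/v5/docker/daemon"
)

func main() {
	for _, s := range []string{
		"busybox:latest",                    // name:tag: accepted
		"sha256:" + strings.Repeat("0", 64), // canonical-algorithm image ID: accepted
		"busybox",                           // name only: rejected (neither tag nor digest)
	} {
		if _, err := daemon.ParseReference(s); err != nil {
			fmt.Printf("%s: rejected: %v\n", s, err)
		} else {
			fmt.Printf("%s: accepted\n", s)
		}
	}
}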

@@ -123,6 +123,9 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef
if !ok {
return "", errors.New("ref must be a dockerReference")
}
if dr.isUnknownDigest {
return "", fmt.Errorf("docker: reference %q is for unknown digest case; cannot get digest", dr.StringWithinTransport())
}
tagOrDigest, err := dr.tagOrDigest()
if err != nil {

@@ -452,7 +452,15 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
var refTail string
-	if instanceDigest != nil {
+	// If d.ref.isUnknownDigest=true, then we push without a tag, so get the
+	// digest that will be used
+	if d.ref.isUnknownDigest {
+		digest, err := manifest.Digest(m)
+		if err != nil {
+			return err
+		}
+		refTail = digest.String()
+	} else if instanceDigest != nil {
// If the instanceDigest is provided, then use it as the refTail, because the reference,
// whether it includes a tag or a digest, refers to the list as a whole, and not this
// particular instance.

@@ -38,8 +38,8 @@ type dockerImageSource struct {
impl.DoesNotAffectLayerInfosForCopy
stubs.ImplementsGetBlobAt
-	logicalRef  dockerReference // The reference the user requested.
-	physicalRef dockerReference // The actual reference we are accessing (possibly a mirror)
+	logicalRef  dockerReference // The reference the user requested. This must satisfy !isUnknownDigest
+	physicalRef dockerReference // The actual reference we are accessing (possibly a mirror). This must satisfy !isUnknownDigest
c *dockerClient
// State
cachedManifest []byte // nil if not loaded yet
@@ -48,7 +48,12 @@
// newImageSource creates a new ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
// The caller must ensure !ref.isUnknownDigest.
func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
if ref.isUnknownDigest {
return nil, fmt.Errorf("reading images from docker: reference %q without a tag or digest is not supported", ref.StringWithinTransport())
}
registryConfig, err := loadRegistryConfiguration(sys)
if err != nil {
return nil, err
@@ -121,7 +126,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
// The caller must call .Close() on the returned ImageSource.
func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logicalRef dockerReference, pullSource sysregistriesv2.PullSource,
registryConfig *registryConfiguration) (*dockerImageSource, error) {
-	physicalRef, err := newReference(pullSource.Reference)
+	physicalRef, err := newReference(pullSource.Reference, false)
if err != nil {
return nil, err
}
@@ -591,6 +596,10 @@ func (s *dockerImageSource) getSignaturesFromSigstoreAttachments(ctx context.Con
// deleteImage deletes the named image from the registry, if supported.
func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) error {
if ref.isUnknownDigest {
return fmt.Errorf("Docker reference without a tag or digest cannot be deleted")
}
registryConfig, err := loadRegistryConfiguration(sys)
if err != nil {
return err

@@ -12,6 +12,11 @@ import (
"github.com/containers/image/v5/types"
)
// UnknownDigestSuffix can be appended to a reference when the caller
// wants to push an image without a tag or digest.
// NewReferenceUnknownDigest() is called when this const is detected.
const UnknownDigestSuffix = "@@unknown-digest@@"
func init() {
transports.Register(Transport)
}
@@ -43,7 +48,8 @@ func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error {
// dockerReference is an ImageReference for Docker images.
type dockerReference struct {
-	ref reference.Named // By construction we know that !reference.IsNameOnly(ref)
+	ref             reference.Named // By construction we know that !reference.IsNameOnly(ref) unless isUnknownDigest=true
+	isUnknownDigest bool
}
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
@@ -51,23 +57,46 @@ func ParseReference(refString string) (types.ImageReference, error) {
if !strings.HasPrefix(refString, "//") {
return nil, fmt.Errorf("docker: image reference %s does not start with //", refString)
}
// Check if ref has UnknownDigestSuffix suffixed to it
unknownDigest := false
if strings.HasSuffix(refString, UnknownDigestSuffix) {
unknownDigest = true
refString = strings.TrimSuffix(refString, UnknownDigestSuffix)
}
ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//"))
if err != nil {
return nil, err
}
if unknownDigest {
if !reference.IsNameOnly(ref) {
return nil, fmt.Errorf("docker: image reference %q has unknown digest set but it contains either a tag or digest", ref.String()+UnknownDigestSuffix)
}
return NewReferenceUnknownDigest(ref)
}
ref = reference.TagNameOnly(ref)
return NewReference(ref)
}
// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly().
func NewReference(ref reference.Named) (types.ImageReference, error) {
-	return newReference(ref)
+	return newReference(ref, false)
}
// NewReferenceUnknownDigest returns a Docker reference for a named reference, which can be used to write images without setting
// a tag on the registry. The reference must satisfy reference.IsNameOnly()
func NewReferenceUnknownDigest(ref reference.Named) (types.ImageReference, error) {
return newReference(ref, true)
}
// newReference returns a dockerReference for a named reference.
-func newReference(ref reference.Named) (dockerReference, error) {
-	if reference.IsNameOnly(ref) {
-		return dockerReference{}, fmt.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
+func newReference(ref reference.Named, unknownDigest bool) (dockerReference, error) {
+	if reference.IsNameOnly(ref) && !unknownDigest {
+		return dockerReference{}, fmt.Errorf("Docker reference %s is not for an unknown digest case; tag or digest is needed", reference.FamiliarString(ref))
+	}
+	if !reference.IsNameOnly(ref) && unknownDigest {
+		return dockerReference{}, fmt.Errorf("Docker reference %s is for an unknown digest case but reference has a tag or digest", reference.FamiliarString(ref))
+	}
// A github.com/distribution/reference value can have a tag and a digest at the same time!
// The docker/distribution API does not really support that (we can't ask for an image with a specific
@@ -81,7 +110,8 @@ func newReference(ref reference.Named) (dockerReference, error) {
}
return dockerReference{
-		ref: ref,
+		ref:             ref,
+		isUnknownDigest: unknownDigest,
}, nil
}
@@ -95,7 +125,11 @@ func (ref dockerReference) Transport() types.ImageTransport {
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref dockerReference) StringWithinTransport() string {
return "//" + reference.FamiliarString(ref.ref)
famString := "//" + reference.FamiliarString(ref.ref)
if ref.isUnknownDigest {
return famString + UnknownDigestSuffix
}
return famString
}
// DockerReference returns a Docker reference associated with this reference
@@ -113,6 +147,9 @@ func (ref dockerReference) DockerReference() reference.Named {
// not required/guaranteed that it will be a valid input to Transport().ParseReference().
// Returns "" if configuration identities for these references are not supported.
func (ref dockerReference) PolicyConfigurationIdentity() string {
if ref.isUnknownDigest {
return ref.ref.Name()
}
res, err := policyconfiguration.DockerReferenceIdentity(ref.ref)
if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure.
panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err))
@@ -126,7 +163,13 @@ func (ref dockerReference) PolicyConfigurationIdentity() string {
// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
// and each following element to be a prefix of the element preceding it.
func (ref dockerReference) PolicyConfigurationNamespaces() []string {
-	return policyconfiguration.DockerReferenceNamespaces(ref.ref)
+	namespaces := policyconfiguration.DockerReferenceNamespaces(ref.ref)
+	if ref.isUnknownDigest {
+		if len(namespaces) != 0 && namespaces[0] == ref.ref.Name() {
+			namespaces = namespaces[1:]
+		}
+	}
+	return namespaces
}
// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
@@ -163,6 +206,10 @@ func (ref dockerReference) tagOrDigest() (string, error) {
if ref, ok := ref.ref.(reference.NamedTagged); ok {
return ref.Tag(), nil
}
if ref.isUnknownDigest {
return "", fmt.Errorf("Docker reference %q is for an unknown digest case, has neither a digest nor a tag", reference.FamiliarString(ref.ref))
}
// This should not happen, NewReference above refuses reference.IsNameOnly values.
return "", fmt.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref))
}
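// Illustrative usage sketch (not part of this commit): constructing a
// reference for a digest that is not yet known, e.g. to push an image before
// its manifest digest exists. Only NewReferenceUnknownDigest and
// StringWithinTransport above are real; the image name is made up.
//
//	named, err := reference.ParseNormalizedNamed("example.com/ns/app")
//	if err != nil {
//		return err
//	}
//	imgRef, err := NewReferenceUnknownDigest(named)
//	if err != nil {
//		return err
//	}
//	s := imgRef.StringWithinTransport() // "//example.com/ns/app" + UnknownDigestSuffix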

File diff suppressed because it is too large


@ -0,0 +1,226 @@
package openshift
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/version"
"github.com/sirupsen/logrus"
)
// openshiftClient is configuration for dealing with a single image stream, for reading or writing.
type openshiftClient struct {
ref openshiftReference
baseURL *url.URL
// Values from Kubernetes configuration
httpClient *http.Client
bearerToken string // "" if not used
username string // "" if not used
password string // if username != ""
}
// newOpenshiftClient creates a new openshiftClient for the specified reference.
func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) {
// We have already done this parsing in ParseReference, but thrown away
// httpClient. So, parse again.
// (We could also rework/split restClientFor to "get base URL" to be done
// in ParseReference, and "get httpClient" to be done here. But until/unless
// we support non-default clusters, this is good enough.)
// Overall, this is modelled on openshift/origin/pkg/cmd/util/clientcmd.New().ClientConfig() and openshift/origin/pkg/client.
cmdConfig := defaultClientConfig()
logrus.Debugf("cmdConfig: %#v", cmdConfig)
restConfig, err := cmdConfig.ClientConfig()
if err != nil {
return nil, err
}
// REMOVED: SetOpenShiftDefaults (values are not overridable in config files, so hard-coded these defaults.)
logrus.Debugf("restConfig: %#v", restConfig)
baseURL, httpClient, err := restClientFor(restConfig)
if err != nil {
return nil, err
}
logrus.Debugf("URL: %#v", *baseURL)
if httpClient == nil {
httpClient = http.DefaultClient
}
return &openshiftClient{
ref: ref,
baseURL: baseURL,
httpClient: httpClient,
bearerToken: restConfig.BearerToken,
username: restConfig.Username,
password: restConfig.Password,
}, nil
}
func (c *openshiftClient) close() {
c.httpClient.CloseIdleConnections()
}
// doRequest performs a correctly authenticated request to a specified path, and returns response body or an error object.
func (c *openshiftClient) doRequest(ctx context.Context, method, path string, requestBody []byte) ([]byte, error) {
requestURL := *c.baseURL
requestURL.Path = path
var requestBodyReader io.Reader
if requestBody != nil {
logrus.Debugf("Will send body: %s", requestBody)
requestBodyReader = bytes.NewReader(requestBody)
}
req, err := http.NewRequestWithContext(ctx, method, requestURL.String(), requestBodyReader)
if err != nil {
return nil, err
}
if len(c.bearerToken) != 0 {
req.Header.Set("Authorization", "Bearer "+c.bearerToken)
} else if len(c.username) != 0 {
req.SetBasicAuth(c.username, c.password)
}
req.Header.Set("Accept", "application/json, */*")
req.Header.Set("User-Agent", fmt.Sprintf("skopeo/%s", version.Version))
if requestBody != nil {
req.Header.Set("Content-Type", "application/json")
}
logrus.Debugf("%s %s", method, requestURL.Redacted())
res, err := c.httpClient.Do(req)
if err != nil {
return nil, err
}
defer res.Body.Close()
body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxOpenShiftStatusBody)
if err != nil {
return nil, err
}
logrus.Debugf("Got body: %s", body)
// FIXME: Just throwing this useful information away only to try to guess later...
logrus.Debugf("Got content-type: %s", res.Header.Get("Content-Type"))
var status status
statusValid := false
if err := json.Unmarshal(body, &status); err == nil && len(status.Status) > 0 {
statusValid = true
}
switch {
case res.StatusCode == http.StatusSwitchingProtocols: // FIXME?! No idea why this weird case exists in k8s.io/kubernetes/pkg/client/restclient.
if statusValid && status.Status != "Success" {
return nil, errors.New(status.Message)
}
case res.StatusCode >= http.StatusOK && res.StatusCode <= http.StatusPartialContent:
// OK.
default:
if statusValid {
return nil, errors.New(status.Message)
}
return nil, fmt.Errorf("HTTP error: status code: %d (%s), body: %s", res.StatusCode, http.StatusText(res.StatusCode), string(body))
}
return body, nil
}
// getImage loads the specified image object.
func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName string) (*image, error) {
// FIXME: validate components per validation.IsValidPathSegmentName?
path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreamimages/%s@%s", c.ref.namespace, c.ref.stream, imageStreamImageName)
body, err := c.doRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, err
}
// Note: This does absolutely no kind/version checking or conversions.
var isi imageStreamImage
if err := json.Unmarshal(body, &isi); err != nil {
return nil, err
}
return &isi.Image, nil
}
// convertDockerImageReference takes an image API DockerImageReference value and returns a reference we can actually use;
// currently OpenShift stores the cluster-internal service IPs here, which are unusable from the outside.
func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) {
_, repo, gotRepo := strings.Cut(ref, "/")
if !gotRepo {
return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", ref)
}
return reference.Domain(c.ref.dockerReference) + "/" + repo, nil
}
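// Worked example (hypothetical values): with c.ref.dockerReference pointing at
// "registry.example.com/ns/app:latest", an internal reference such as
// "172.30.0.1:5000/ns/app@sha256:1234..." is rewritten to
// "registry.example.com/ns/app@sha256:1234...": the cluster-internal host part
// before the first '/' is replaced by the externally visible registry domain.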
// These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies.
type imageStream struct {
Status imageStreamStatus `json:"status,omitempty"`
}
type imageStreamStatus struct {
DockerImageRepository string `json:"dockerImageRepository"`
Tags []namedTagEventList `json:"tags,omitempty"`
}
type namedTagEventList struct {
Tag string `json:"tag"`
Items []tagEvent `json:"items"`
}
type tagEvent struct {
DockerImageReference string `json:"dockerImageReference"`
Image string `json:"image"`
}
type imageStreamImage struct {
Image image `json:"image"`
}
type image struct {
objectMeta `json:"metadata,omitempty"`
DockerImageReference string `json:"dockerImageReference,omitempty"`
// DockerImageMetadata runtime.RawExtension `json:"dockerImageMetadata,omitempty"`
DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty"`
DockerImageManifest string `json:"dockerImageManifest,omitempty"`
// DockerImageLayers []ImageLayer `json:"dockerImageLayers"`
Signatures []imageSignature `json:"signatures,omitempty"`
}
const imageSignatureTypeAtomic string = "atomic"
type imageSignature struct {
typeMeta `json:",inline"`
objectMeta `json:"metadata,omitempty"`
Type string `json:"type"`
Content []byte `json:"content"`
// Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
// ImageIdentity string `json:"imageIdentity,omitempty"`
// SignedClaims map[string]string `json:"signedClaims,omitempty"`
// Created *unversioned.Time `json:"created,omitempty"`
// IssuedBy SignatureIssuer `json:"issuedBy,omitempty"`
// IssuedTo SignatureSubject `json:"issuedTo,omitempty"`
}
type typeMeta struct {
Kind string `json:"kind,omitempty"`
APIVersion string `json:"apiVersion,omitempty"`
}
type objectMeta struct {
Name string `json:"name,omitempty"`
GenerateName string `json:"generateName,omitempty"`
Namespace string `json:"namespace,omitempty"`
SelfLink string `json:"selfLink,omitempty"`
ResourceVersion string `json:"resourceVersion,omitempty"`
Generation int64 `json:"generation,omitempty"`
DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
}
// A subset of k8s.io/kubernetes/pkg/api/unversioned/Status
type status struct {
Status string `json:"status,omitempty"`
Message string `json:"message,omitempty"`
// Reason StatusReason `json:"reason,omitempty"`
// Details *StatusDetails `json:"details,omitempty"`
Code int32 `json:"code,omitempty"`
}
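// Illustrative sample (assumed shape, abbreviated): a failure body that
// doRequest above would surface as an error via status.Message:
//
//	{
//	  "kind": "Status",
//	  "status": "Failure",
//	  "message": "imagestreams \"app\" not found",
//	  "code": 404
//	}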


@ -0,0 +1,248 @@
package openshift
import (
"bytes"
"context"
"crypto/rand"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/internal/imagedestination"
"github.com/containers/image/v5/internal/imagedestination/impl"
"github.com/containers/image/v5/internal/imagedestination/stubs"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"golang.org/x/exp/slices"
)
type openshiftImageDestination struct {
impl.Compat
stubs.AlwaysSupportsSignatures
client *openshiftClient
docker private.ImageDestination // The docker/distribution API endpoint
// State
imageStreamImageName string // "" if not yet known
}
// newImageDestination creates a new ImageDestination for the specified reference.
func newImageDestination(ctx context.Context, sys *types.SystemContext, ref openshiftReference) (private.ImageDestination, error) {
client, err := newOpenshiftClient(ref)
if err != nil {
return nil, err
}
// FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match,
// i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know
// the manifest digest at this point.
dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", reference.Domain(client.ref.dockerReference), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag())
dockerRef, err := docker.ParseReference(dockerRefString)
if err != nil {
return nil, err
}
docker, err := dockerRef.NewImageDestination(ctx, sys)
if err != nil {
return nil, err
}
d := &openshiftImageDestination{
client: client,
docker: imagedestination.FromPublic(docker),
}
d.Compat = impl.AddCompat(d)
return d, nil
}
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
func (d *openshiftImageDestination) Reference() types.ImageReference {
return d.client.ref
}
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *openshiftImageDestination) Close() error {
err := d.docker.Close()
d.client.close()
return err
}
func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string {
return d.docker.SupportedManifestMIMETypes()
}
func (d *openshiftImageDestination) DesiredLayerCompression() types.LayerCompression {
return types.Compress
}
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
// uploaded to the image destination, true otherwise.
func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool {
return true
}
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *openshiftImageDestination) MustMatchRuntimeOS() bool {
return false
}
// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool {
return d.docker.IgnoresEmbeddedDockerReference()
}
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *openshiftImageDestination) HasThreadSafePutBlob() bool {
return false
}
// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
func (d *openshiftImageDestination) SupportsPutBlobPartial() bool {
return d.docker.SupportsPutBlobPartial()
}
// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
return d.docker.PutBlobWithOptions(ctx, stream, inputInfo, options)
}
// PutBlobPartial attempts to create a blob using the data that is already present
// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *openshiftImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
return d.docker.TryReusingBlobWithOptions(ctx, info, options)
}
// PutManifest writes manifest to the destination.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
func (d *openshiftImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
if instanceDigest == nil {
manifestDigest, err := manifest.Digest(m)
if err != nil {
return err
}
d.imageStreamImageName = manifestDigest.String()
}
return d.docker.PutManifest(ctx, m, instanceDigest)
}
// PutSignaturesWithFormat writes a set of signatures to the destination.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
// MUST be called after PutManifest (signatures may reference manifest contents).
func (d *openshiftImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
var imageStreamImageName string
if instanceDigest == nil {
if d.imageStreamImageName == "" {
return errors.New("Internal error: Unknown manifest digest, can't add signatures")
}
imageStreamImageName = d.imageStreamImageName
} else {
imageStreamImageName = instanceDigest.String()
}
// Because image signatures are a shared resource in Atomic Registry, the default upload
// always adds signatures. Eventually we should also allow removing signatures.
if len(signatures) == 0 {
return nil // No need to even read the old state.
}
image, err := d.client.getImage(ctx, imageStreamImageName)
if err != nil {
return err
}
existingSigNames := set.New[string]()
for _, sig := range image.Signatures {
existingSigNames.Add(sig.objectMeta.Name)
}
for _, newSigWithFormat := range signatures {
newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning)
if !ok {
return signature.UnsupportedFormatError(newSigWithFormat)
}
newSig := newSigSimple.UntrustedSignature()
if slices.ContainsFunc(image.Signatures, func(existingSig imageSignature) bool {
return existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig)
}) {
continue
}
// The API expects us to invent a new unique name. This is racy, but hopefully good enough.
var signatureName string
for {
randBytes := make([]byte, 16)
n, err := rand.Read(randBytes)
if err != nil || n != 16 {
return fmt.Errorf("generating random signature len %d: %w", n, err)
}
signatureName = fmt.Sprintf("%s@%032x", imageStreamImageName, randBytes)
if !existingSigNames.Contains(signatureName) {
break
}
}
// Note: This does absolutely no kind/version checking or conversions.
sig := imageSignature{
typeMeta: typeMeta{
Kind: "ImageSignature",
APIVersion: "v1",
},
objectMeta: objectMeta{Name: signatureName},
Type: imageSignatureTypeAtomic,
Content: newSig,
}
body, err := json.Marshal(sig)
if err != nil {
return err
}
_, err = d.client.doRequest(ctx, http.MethodPost, "/oapi/v1/imagesignatures", body)
if err != nil {
return err
}
}
return nil
}
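// Illustrative sketch (fabricated values): the JSON body POSTed to
// /oapi/v1/imagesignatures for one signature roughly looks like this; note
// that the []byte Content field marshals to base64 via encoding/json.
//
//	{
//	  "kind": "ImageSignature",
//	  "apiVersion": "v1",
//	  "metadata": {"name": "sha256:abc...@0123456789abcdef0123456789abcdef"},
//	  "type": "atomic",
//	  "content": "bWFkZS11cCBzaWduYXR1cmUgYnl0ZXM="
//	}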
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
// original manifest list digest, if desired.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *openshiftImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
return d.docker.Commit(ctx, unparsedToplevel)
}


@ -0,0 +1,174 @@
package openshift
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/internal/imagesource/impl"
"github.com/containers/image/v5/internal/imagesource/stubs"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
type openshiftImageSource struct {
impl.Compat
impl.DoesNotAffectLayerInfosForCopy
// This is slightly suboptimal. We could forward GetBlobAt(), but we need to call ensureImageIsResolved in SupportsGetBlobAt(),
// and that method doesn't provide a context for timing out. That could actually be fixed (SupportsGetBlobAt is private and we
// can change it), but this is a deprecated transport anyway, so for now we just punt.
stubs.NoGetBlobAtInitialize
client *openshiftClient
// Values specific to this image
sys *types.SystemContext
// State
docker types.ImageSource // The docker/distribution API endpoint, or nil if not resolved yet
imageStreamImageName string // Resolved image identifier, or "" if not known yet
}
// newImageSource creates a new ImageSource for the specified reference.
// The caller must call .Close() on the returned ImageSource.
func newImageSource(sys *types.SystemContext, ref openshiftReference) (private.ImageSource, error) {
client, err := newOpenshiftClient(ref)
if err != nil {
return nil, err
}
s := &openshiftImageSource{
NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
client: client,
sys: sys,
}
s.Compat = impl.AddCompat(s)
return s, nil
}
// Reference returns the reference used to set up this source, _as specified by the user_
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
func (s *openshiftImageSource) Reference() types.ImageReference {
return s.client.ref
}
// Close removes resources associated with an initialized ImageSource, if any.
func (s *openshiftImageSource) Close() error {
var err error
if s.docker != nil {
err = s.docker.Close()
s.docker = nil
}
s.client.close()
return err
}
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
if err := s.ensureImageIsResolved(ctx); err != nil {
return nil, "", err
}
return s.docker.GetManifest(ctx, instanceDigest)
}
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *openshiftImageSource) HasThreadSafeGetBlob() bool {
return false
}
// GetBlob returns a stream for the specified blob, and the blob's size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if err := s.ensureImageIsResolved(ctx); err != nil {
return nil, 0, err
}
return s.docker.GetBlob(ctx, info, cache)
}
// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
// (e.g. if the source never returns manifest lists).
func (s *openshiftImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
var imageStreamImageName string
if instanceDigest == nil {
if err := s.ensureImageIsResolved(ctx); err != nil {
return nil, err
}
imageStreamImageName = s.imageStreamImageName
} else {
imageStreamImageName = instanceDigest.String()
}
image, err := s.client.getImage(ctx, imageStreamImageName)
if err != nil {
return nil, err
}
var sigs []signature.Signature
for _, sig := range image.Signatures {
if sig.Type == imageSignatureTypeAtomic {
sigs = append(sigs, signature.SimpleSigningFromBlob(sig.Content))
}
}
return sigs, nil
}
// ensureImageIsResolved sets up s.docker and s.imageStreamImageName
func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error {
if s.docker != nil {
return nil
}
// FIXME: validate components per validation.IsValidPathSegmentName?
path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream)
body, err := s.client.doRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return err
}
// Note: This does absolutely no kind/version checking or conversions.
var is imageStream
if err := json.Unmarshal(body, &is); err != nil {
return err
}
var te *tagEvent
for _, tag := range is.Status.Tags {
if tag.Tag != s.client.ref.dockerReference.Tag() {
continue
}
if len(tag.Items) > 0 {
te = &tag.Items[0]
break
}
}
if te == nil {
return errors.New("No matching tag found")
}
logrus.Debugf("tag event %#v", te)
dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference)
if err != nil {
return err
}
logrus.Debugf("Resolved reference %#v", dockerRefString)
dockerRef, err := docker.ParseReference("//" + dockerRefString)
if err != nil {
return err
}
d, err := dockerRef.NewImageSource(ctx, s.sys)
if err != nil {
return err
}
s.docker = d
s.imageStreamImageName = te.Image
return nil
}
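// Illustrative sample (assumed, abbreviated): the image stream JSON walked
// above. For a reference tag "latest" the first item of the matching tag is
// taken, so te.Image becomes the resolved imageStreamImageName and
// te.DockerImageReference is rewritten by convertDockerImageReference:
//
//	{"status": {"tags": [{"tag": "latest", "items": [
//	    {"dockerImageReference": "172.30.0.1:5000/ns/app@sha256:1234...", "image": "sha256:1234..."}
//	]}]}}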


@ -0,0 +1,153 @@
package openshift
import (
"context"
"errors"
"fmt"
"strings"
"github.com/containers/image/v5/docker/policyconfiguration"
"github.com/containers/image/v5/docker/reference"
genericImage "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/regexp"
)
func init() {
transports.Register(Transport)
}
// Transport is an ImageTransport for OpenShift registry-hosted images.
var Transport = openshiftTransport{}
type openshiftTransport struct{}
func (t openshiftTransport) Name() string {
return "atomic"
}
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
func (t openshiftTransport) ParseReference(reference string) (types.ImageReference, error) {
return ParseReference(reference)
}
// Note that imageNameRegexp is namespace/stream:tag, while scopeRegexp
// is HOSTNAME/namespace/stream:tag or parent prefixes.
// Keep this in sync with imageNameRegexp!
var scopeRegexp = regexp.Delayed("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$")
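// Illustrative scopes accepted by scopeRegexp (hypothetical hostnames):
//
//	"registry.example.com"
//	"registry.example.com/ns"
//	"registry.example.com/ns/stream"
//	"registry.example.com/ns/stream:tag"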
// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
// scope passed to this function will not be "", that value is always allowed.
func (t openshiftTransport) ValidatePolicyConfigurationScope(scope string) error {
if scopeRegexp.FindStringIndex(scope) == nil {
return fmt.Errorf("Invalid scope name %s", scope)
}
return nil
}
// openshiftReference is an ImageReference for OpenShift images.
type openshiftReference struct {
dockerReference reference.NamedTagged
namespace string // Computed from dockerReference in advance.
stream string // Computed from dockerReference in advance.
}
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OpenShift ImageReference.
func ParseReference(ref string) (types.ImageReference, error) {
r, err := reference.ParseNormalizedNamed(ref)
if err != nil {
return nil, fmt.Errorf("failed to parse image reference %q: %w", ref, err)
}
tagged, ok := r.(reference.NamedTagged)
if !ok {
return nil, fmt.Errorf("invalid image reference %s, expected format: 'hostname/namespace/stream:tag'", ref)
}
return NewReference(tagged)
}
// NewReference returns an OpenShift reference for a reference.NamedTagged
func NewReference(dockerRef reference.NamedTagged) (types.ImageReference, error) {
r := strings.SplitN(reference.Path(dockerRef), "/", 3)
if len(r) != 2 {
return nil, fmt.Errorf("invalid image reference: %s, expected format: 'hostname/namespace/stream:tag'",
reference.FamiliarString(dockerRef))
}
return openshiftReference{
namespace: r[0],
stream: r[1],
dockerReference: dockerRef,
}, nil
}
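// Worked example (hypothetical input): for
// "registry.example.com/myns/mystream:latest", reference.Path() is
// "myns/mystream", so namespace becomes "myns" and stream "mystream";
// paths with one component, or with three or more, are rejected above.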
func (ref openshiftReference) Transport() types.ImageTransport {
return Transport
}
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref openshiftReference) StringWithinTransport() string {
return reference.FamiliarString(ref.dockerReference)
}
// DockerReference returns a Docker reference associated with this reference
// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
func (ref openshiftReference) DockerReference() reference.Named {
return ref.dockerReference
}
// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
// (i.e. various references with exactly the same semantics should return the same configuration identity)
// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
// not required/guaranteed that it will be a valid input to Transport().ParseReference().
// Returns "" if configuration identities for these references are not supported.
func (ref openshiftReference) PolicyConfigurationIdentity() string {
res, err := policyconfiguration.DockerReferenceIdentity(ref.dockerReference)
if res == "" || err != nil { // Coverage: Should never happen, NewReference constructs a valid tagged reference.
panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err))
}
return res
}
// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
// in order, terminating on first match, and an implicit "" is always checked at the end.
// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
// and each following element to be a prefix of the element preceding it.
func (ref openshiftReference) PolicyConfigurationNamespaces() []string {
return policyconfiguration.DockerReferenceNamespaces(ref.dockerReference)
}
// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
// The caller must call .Close() on the returned ImageCloser.
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref openshiftReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
return genericImage.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource.
func (ref openshiftReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
return newImageSource(sys, ref)
}
// NewImageDestination returns a types.ImageDestination for this reference.
// The caller must call .Close() on the returned ImageDestination.
func (ref openshiftReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
return newImageDestination(ctx, sys, ref)
}
// DeleteImage deletes the named image from the registry, if supported.
func (ref openshiftReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
return errors.New("Deleting images not implemented for atomic: images")
}


@ -0,0 +1,517 @@
//go:build containers_image_ostree
// +build containers_image_ostree
package ostree
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"time"
"unsafe"
"github.com/containers/image/v5/internal/imagedestination/impl"
"github.com/containers/image/v5/internal/imagedestination/stubs"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/archive"
"github.com/klauspost/pgzip"
"github.com/opencontainers/go-digest"
selinux "github.com/opencontainers/selinux/go-selinux"
"github.com/ostreedev/ostree-go/pkg/otbuiltin"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
)
// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 libselinux
// #include <glib.h>
// #include <glib-object.h>
// #include <gio/gio.h>
// #include <stdlib.h>
// #include <ostree.h>
// #include <gio/ginputstream.h>
// #include <selinux/selinux.h>
// #include <selinux/label.h>
import "C"
type blobToImport struct {
Size int64
Digest digest.Digest
BlobPath string
}
type descriptor struct {
Size int64 `json:"size"`
Digest digest.Digest `json:"digest"`
}
type fsLayersSchema1 struct {
BlobSum digest.Digest `json:"blobSum"`
}
type manifestSchema struct {
LayersDescriptors []descriptor `json:"layers"`
FSLayers []fsLayersSchema1 `json:"fsLayers"`
}
type ostreeImageDestination struct {
impl.Compat
impl.PropertyMethodsInitialize
stubs.NoPutBlobPartialInitialize
stubs.AlwaysSupportsSignatures
ref ostreeReference
manifest string
schema manifestSchema
tmpDirPath string
blobs map[string]*blobToImport
digest digest.Digest
signaturesLen int
repo *C.struct_OstreeRepo
}
// newImageDestination returns an ImageDestination for writing to an existing ostree.
func newImageDestination(ref ostreeReference, tmpDirPath string) (private.ImageDestination, error) {
tmpDirPath = filepath.Join(tmpDirPath, ref.branchName)
if err := ensureDirectoryExists(tmpDirPath); err != nil {
return nil, err
}
d := &ostreeImageDestination{
PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
SupportedManifestMIMETypes: []string{manifest.DockerV2Schema2MediaType},
DesiredLayerCompression: types.PreserveOriginal,
AcceptsForeignLayerURLs: false,
MustMatchRuntimeOS: true,
IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil.
HasThreadSafePutBlob: false,
}),
NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
ref: ref,
manifest: "",
schema: manifestSchema{},
tmpDirPath: tmpDirPath,
blobs: map[string]*blobToImport{},
digest: "",
signaturesLen: 0,
repo: nil,
}
d.Compat = impl.AddCompat(d)
return d, nil
}
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
func (d *ostreeImageDestination) Reference() types.ImageReference {
return d.ref
}
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *ostreeImageDestination) Close() error {
if d.repo != nil {
C.g_object_unref(C.gpointer(d.repo))
}
return os.RemoveAll(d.tmpDirPath)
}
// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
tmpDir, err := os.MkdirTemp(d.tmpDirPath, "blob")
if err != nil {
return private.UploadedBlob{}, err
}
blobPath := filepath.Join(tmpDir, "content")
blobFile, err := os.Create(blobPath)
if err != nil {
return private.UploadedBlob{}, err
}
defer blobFile.Close()
digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
size, err := io.Copy(blobFile, stream)
if err != nil {
return private.UploadedBlob{}, err
}
blobDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size {
return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
}
if err := blobFile.Sync(); err != nil {
return private.UploadedBlob{}, err
}
hash := blobDigest.Hex()
d.blobs[hash] = &blobToImport{Size: size, Digest: blobDigest, BlobPath: blobPath}
return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
}
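// Flow sketch for PutBlobWithOptions above: the stream is teed through a
// digester only when a canonical digest is not already supplied, copied to
// <tmpDirPath>/blob*/content, size-checked against inputInfo.Size, synced,
// and recorded in d.blobs keyed by digest hex so Commit can import it later.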
func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error {
entries, err := os.ReadDir(dir)
if err != nil {
return err
}
for _, entry := range entries {
fullpath := filepath.Join(dir, entry.Name())
if entry.Type()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
if err := os.Remove(fullpath); err != nil {
return err
}
continue
}
info, err := entry.Info()
if err != nil {
return err
}
if selinuxHnd != nil {
relPath, err := filepath.Rel(root, fullpath)
if err != nil {
return err
}
// Handle /exports/hostfs as a special case. Files under this directory are copied to the host,
// thus we benefit from maintaining the same SELinux label they would have on the host as we could
// use hard links instead of copying the files.
relPath = fmt.Sprintf("/%s", strings.TrimPrefix(relPath, "exports/hostfs/"))
relPathC := C.CString(relPath)
defer C.free(unsafe.Pointer(relPathC))
var context *C.char
res, err := C.selabel_lookup_raw(selinuxHnd, &context, relPathC, C.int(info.Mode()&os.ModePerm))
if int(res) < 0 && err != syscall.ENOENT {
return fmt.Errorf("cannot selabel_lookup_raw %s: %w", relPath, err)
}
if int(res) == 0 {
defer C.freecon(context)
fullpathC := C.CString(fullpath)
defer C.free(unsafe.Pointer(fullpathC))
res, err = C.lsetfilecon_raw(fullpathC, context)
if int(res) < 0 {
return fmt.Errorf("cannot setfilecon_raw %s to %s: %w", fullpath, C.GoString(context), err)
}
}
}
if entry.IsDir() {
if usermode {
if err := os.Chmod(fullpath, info.Mode()|0700); err != nil {
return err
}
}
err = fixFiles(selinuxHnd, root, fullpath, usermode)
if err != nil {
return err
}
} else if usermode && (entry.Type().IsRegular()) {
if err := os.Chmod(fullpath, info.Mode()|0600); err != nil {
return err
}
}
}
return nil
}
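// Design note on fixFiles above: special files (FIFOs, sockets, devices) are
// removed because they cannot be committed to OSTree; with SELinux enabled,
// entries are relabelled via selabel_lookup_raw/lsetfilecon_raw (treating
// exports/hostfs contents as host paths); in usermode, directories gain 0700
// and regular files 0600 so an unprivileged user can traverse and delete them.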
func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch string, root string, metadata []string) error {
opts := otbuiltin.NewCommitOptions()
opts.AddMetadataString = metadata
opts.Timestamp = time.Now()
// OCI layers have no parent OSTree commit
opts.Parent = "0000000000000000000000000000000000000000000000000000000000000000"
_, err := repo.Commit(root, branch, opts)
return err
}
func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest, int64, error) {
mfz := pgzip.NewWriter(output)
defer mfz.Close()
metaPacker := storage.NewJSONPacker(mfz)
stream, err := os.OpenFile(file, os.O_RDONLY, 0)
if err != nil {
return "", -1, err
}
defer stream.Close()
gzReader, err := archive.DecompressStream(stream)
if err != nil {
return "", -1, err
}
defer gzReader.Close()
its, err := asm.NewInputTarStream(gzReader, metaPacker, nil)
if err != nil {
return "", -1, err
}
digester := digest.Canonical.Digester()
written, err := io.Copy(digester.Hash(), its)
if err != nil {
return "", -1, err
}
return digester.Digest(), written, nil
}
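// Usage sketch (hypothetical path), mirroring importBlob below: given a
// compressed layer tarball on disk, this writes gzip-compressed tar-split
// records into the buffer and returns the digest and size of the
// uncompressed stream.
//
//	var tarSplitOutput bytes.Buffer
//	uncompressedDigest, uncompressedSize, err := generateTarSplitMetadata(&tarSplitOutput, "/tmp/blob/content")
//	if err != nil {
//		return err
//	}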
func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, repo *otbuiltin.Repo, blob *blobToImport) error {
// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root")
if err := ensureDirectoryExists(destinationPath); err != nil {
return err
}
defer func() {
os.Remove(blob.BlobPath)
os.RemoveAll(destinationPath)
}()
var tarSplitOutput bytes.Buffer
uncompressedDigest, uncompressedSize, err := generateTarSplitMetadata(&tarSplitOutput, blob.BlobPath)
if err != nil {
return err
}
if os.Getuid() == 0 {
if err := archive.UntarPath(blob.BlobPath, destinationPath); err != nil {
return err
}
if err := fixFiles(selinuxHnd, destinationPath, destinationPath, false); err != nil {
return err
}
} else {
os.MkdirAll(destinationPath, 0755)
if err := exec.Command("tar", "-C", destinationPath, "--no-same-owner", "--no-same-permissions", "--delay-directory-restore", "-xf", blob.BlobPath).Run(); err != nil {
return err
}
if err := fixFiles(selinuxHnd, destinationPath, destinationPath, true); err != nil {
return err
}
}
return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size),
fmt.Sprintf("docker.uncompressed_size=%d", uncompressedSize),
fmt.Sprintf("docker.uncompressed_digest=%s", uncompressedDigest.String()),
fmt.Sprintf("tarsplit.output=%s", base64.StdEncoding.EncodeToString(tarSplitOutput.Bytes()))})
}
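// Resulting layout sketch (digest hex abbreviated): each layer ends up on an
// OSTree branch "ociimage/<layer-digest-hex>" whose commit metadata carries
// docker.size, docker.uncompressed_size, docker.uncompressed_digest and the
// base64-encoded tarsplit.output that GetBlob later uses to rebuild the tar
// stream.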
func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error {
ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
destinationPath := filepath.Dir(blob.BlobPath)
return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if !impl.OriginalBlobMatchesRequiredCompression(options) {
return false, private.ReusedBlob{}, nil
}
if d.repo == nil {
repo, err := openRepo(d.ref.repo)
if err != nil {
return false, private.ReusedBlob{}, err
}
d.repo = repo
}
branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
if err != nil || !found {
return found, private.ReusedBlob{}, err
}
found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size")
if err != nil || !found {
return found, private.ReusedBlob{}, err
}
found, data, err = readMetadata(d.repo, branch, "docker.size")
if err != nil || !found {
return found, private.ReusedBlob{}, err
}
size, err := strconv.ParseInt(data, 10, 64)
if err != nil {
return false, private.ReusedBlob{}, err
}
return true, private.ReusedBlob{Digest: info.Digest, Size: size}, nil
}
// PutManifest writes manifest to the destination.
// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
// there can be no secondary manifests.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error {
if instanceDigest != nil {
return errors.New(`Manifest lists are not supported by "ostree:"`)
}
d.manifest = string(manifestBlob)
if err := json.Unmarshal(manifestBlob, &d.schema); err != nil {
return err
}
manifestPath := filepath.Join(d.tmpDirPath, d.ref.manifestPath())
if err := ensureParentDirectoryExists(manifestPath); err != nil {
return err
}
digest, err := manifest.Digest(manifestBlob)
if err != nil {
return err
}
d.digest = digest
return os.WriteFile(manifestPath, manifestBlob, 0644)
}
// PutSignaturesWithFormat writes a set of signatures to the destination.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
// MUST be called after PutManifest (signatures may reference manifest contents).
func (d *ostreeImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
if instanceDigest != nil {
return errors.New(`Manifest lists are not supported by "ostree:"`)
}
path := filepath.Join(d.tmpDirPath, d.ref.signaturePath(0))
if err := ensureParentDirectoryExists(path); err != nil {
return err
}
for i, sig := range signatures {
signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i))
blob, err := signature.Blob(sig)
if err != nil {
return err
}
if err := os.WriteFile(signaturePath, blob, 0644); err != nil {
return err
}
}
d.signaturesLen = len(signatures)
return nil
}
func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) error {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
repo, err := otbuiltin.OpenRepo(d.ref.repo)
if err != nil {
return err
}
_, err = repo.PrepareTransaction()
if err != nil {
return err
}
var selinuxHnd *C.struct_selabel_handle
if os.Getuid() == 0 && selinux.GetEnabled() {
selinuxHnd, err = C.selabel_open(C.SELABEL_CTX_FILE, nil, 0)
if selinuxHnd == nil {
return fmt.Errorf("cannot open the SELinux DB: %w", err)
}
defer C.selabel_close(selinuxHnd)
}
checkLayer := func(hash string) error {
blob := d.blobs[hash]
// if the blob is not present in d.blobs then it is already stored in OSTree,
// and we don't need to import it.
if blob == nil {
return nil
}
err := d.importBlob(selinuxHnd, repo, blob)
if err != nil {
return err
}
delete(d.blobs, hash)
return nil
}
for _, layer := range d.schema.LayersDescriptors {
hash := layer.Digest.Hex()
if err = checkLayer(hash); err != nil {
return err
}
}
for _, layer := range d.schema.FSLayers {
hash := layer.BlobSum.Hex()
if err = checkLayer(hash); err != nil {
return err
}
}
// Import the other blobs that are not layers
for _, blob := range d.blobs {
err := d.importConfig(repo, blob)
if err != nil {
return err
}
}
manifestPath := filepath.Join(d.tmpDirPath, "manifest")
metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)),
fmt.Sprintf("signatures=%d", d.signaturesLen),
fmt.Sprintf("docker.digest=%s", string(d.digest))}
if err := d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata); err != nil {
return err
}
_, err = repo.CommitTransaction()
return err
}
func ensureDirectoryExists(path string) error {
if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
if err := os.MkdirAll(path, 0755); err != nil {
return err
}
}
return nil
}
func ensureParentDirectoryExists(path string) error {
return ensureDirectoryExists(filepath.Dir(path))
}


@ -0,0 +1,450 @@
//go:build containers_image_ostree
// +build containers_image_ostree
package ostree
import (
"bytes"
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"strconv"
"strings"
"unsafe"
"github.com/containers/image/v5/internal/imagesource/impl"
"github.com/containers/image/v5/internal/imagesource/stubs"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/ioutils"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
glib "github.com/ostreedev/ostree-go/pkg/glibobject"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
)
// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1
// #include <glib.h>
// #include <glib-object.h>
// #include <gio/gio.h>
// #include <stdlib.h>
// #include <ostree.h>
// #include <gio/ginputstream.h>
import "C"
type ostreeImageSource struct {
impl.Compat
impl.PropertyMethodsInitialize
stubs.NoGetBlobAtInitialize
ref ostreeReference
tmpDir string
repo *C.struct_OstreeRepo
// get the compressed layer by its uncompressed checksum
compressed map[digest.Digest]digest.Digest
}
// newImageSource returns an ImageSource for reading from an existing directory.
func newImageSource(tmpDir string, ref ostreeReference) (private.ImageSource, error) {
s := &ostreeImageSource{
PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
HasThreadSafeGetBlob: false,
}),
NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
ref: ref,
tmpDir: tmpDir,
compressed: nil,
}
s.Compat = impl.AddCompat(s)
return s, nil
}
// Reference returns the reference used to set up this source.
func (s *ostreeImageSource) Reference() types.ImageReference {
return s.ref
}
// Close removes resources associated with an initialized ImageSource, if any.
func (s *ostreeImageSource) Close() error {
if s.repo != nil {
C.g_object_unref(C.gpointer(s.repo))
}
return nil
}
func (s *ostreeImageSource) getBlobUncompressedSize(blob string, isCompressed bool) (int64, error) {
var metadataKey string
if isCompressed {
metadataKey = "docker.uncompressed_size"
} else {
metadataKey = "docker.size"
}
b := fmt.Sprintf("ociimage/%s", blob)
found, data, err := readMetadata(s.repo, b, metadataKey)
if err != nil || !found {
return 0, err
}
return strconv.ParseInt(data, 10, 64)
}
func (s *ostreeImageSource) getLenSignatures() (int64, error) {
b := fmt.Sprintf("ociimage/%s", s.ref.branchName)
found, data, err := readMetadata(s.repo, b, "signatures")
if err != nil {
return -1, err
}
if !found {
// if 'signatures' is not present, just return 0 signatures.
return 0, nil
}
return strconv.ParseInt(data, 10, 64)
}
func (s *ostreeImageSource) getTarSplitData(blob string) ([]byte, error) {
b := fmt.Sprintf("ociimage/%s", blob)
found, out, err := readMetadata(s.repo, b, "tarsplit.output")
if err != nil || !found {
return nil, err
}
return base64.StdEncoding.DecodeString(out)
}
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
// as the primary manifest can not be a list, so there can be no secondary manifests.
func (s *ostreeImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
if instanceDigest != nil {
return nil, "", errors.New(`Manifest lists are not supported by "ostree:"`)
}
if s.repo == nil {
repo, err := openRepo(s.ref.repo)
if err != nil {
return nil, "", err
}
s.repo = repo
}
b := fmt.Sprintf("ociimage/%s", s.ref.branchName)
found, out, err := readMetadata(s.repo, b, "docker.manifest")
if err != nil {
return nil, "", err
}
if !found {
return nil, "", errors.New("manifest not found")
}
m := []byte(out)
return m, manifest.GuessMIMEType(m), nil
}
func (s *ostreeImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
return nil, "", errors.New("manifest lists are not supported by this transport")
}
func openRepo(path string) (*C.struct_OstreeRepo, error) {
var cerr *C.GError
cpath := C.CString(path)
defer C.free(unsafe.Pointer(cpath))
pathc := C.g_file_new_for_path(cpath)
defer C.g_object_unref(C.gpointer(pathc))
repo := C.ostree_repo_new(pathc)
r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr)))
if !r {
C.g_object_unref(C.gpointer(repo))
return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
}
return repo, nil
}
type ostreePathFileGetter struct {
repo *C.struct_OstreeRepo
parentRoot *C.GFile
}
type ostreeReader struct {
stream *C.GFileInputStream
}
func (o ostreeReader) Close() error {
C.g_object_unref(C.gpointer(o.stream))
return nil
}
func (o ostreeReader) Read(p []byte) (int, error) {
var cerr *C.GError
instanceCast := C.g_type_check_instance_cast((*C.GTypeInstance)(unsafe.Pointer(o.stream)), C.g_input_stream_get_type())
stream := (*C.GInputStream)(unsafe.Pointer(instanceCast))
b := C.g_input_stream_read_bytes(stream, (C.gsize)(cap(p)), nil, &cerr)
if b == nil {
return 0, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
}
defer C.g_bytes_unref(b)
count := int(C.g_bytes_get_size(b))
if count == 0 {
return 0, io.EOF
}
data := (*[1 << 30]byte)(unsafe.Pointer(C.g_bytes_get_data(b, nil)))[:count:count]
copy(p, data)
return count, nil
}
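// Design note: ostreeReader adapts a GLib GFileInputStream to Go's io.Reader.
// Each Read issues one g_input_stream_read_bytes call sized by cap(p), copies
// the returned GBytes payload into p, and maps a zero-length read to io.EOF.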
func readMetadata(repo *C.struct_OstreeRepo, commit, key string) (bool, string, error) {
var cerr *C.GError
var ref *C.char
defer C.free(unsafe.Pointer(ref))
cCommit := C.CString(commit)
defer C.free(unsafe.Pointer(cCommit))
if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo, cCommit, C.gboolean(1), &ref, &cerr))) {
return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
}
if ref == nil {
return false, "", nil
}
var variant *C.GVariant
if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo, C.OSTREE_OBJECT_TYPE_COMMIT, ref, &variant, &cerr))) {
return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
}
defer C.g_variant_unref(variant)
if variant != nil {
cKey := C.CString(key)
defer C.free(unsafe.Pointer(cKey))
metadata := C.g_variant_get_child_value(variant, 0)
defer C.g_variant_unref(metadata)
data := C.g_variant_lookup_value(metadata, (*C.gchar)(cKey), nil)
if data != nil {
defer C.g_variant_unref(data)
ptr := (*C.char)(C.g_variant_get_string(data, nil))
val := C.GoString(ptr)
return true, val, nil
}
}
return false, "", nil
}
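// Usage sketch (hypothetical branch name): look up one metadata key on a
// commit; found is false when the branch or the key does not exist.
//
//	found, val, err := readMetadata(repo, "ociimage/0123abcd...", "docker.size")
//	if err != nil || !found {
//		return err
//	}
//	size, err := strconv.ParseInt(val, 10, 64)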
func newOSTreePathFileGetter(repo *C.struct_OstreeRepo, commit string) (*ostreePathFileGetter, error) {
var cerr *C.GError
var parentRoot *C.GFile
cCommit := C.CString(commit)
defer C.free(unsafe.Pointer(cCommit))
if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo, cCommit, &parentRoot, nil, nil, &cerr))) {
return &ostreePathFileGetter{}, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
}
C.g_object_ref(C.gpointer(repo))
return &ostreePathFileGetter{repo: repo, parentRoot: parentRoot}, nil
}
func (o ostreePathFileGetter) Get(filename string) (io.ReadCloser, error) {
var file *C.GFile
if strings.HasPrefix(filename, "./") {
filename = filename[2:]
}
cfilename := C.CString(filename)
defer C.free(unsafe.Pointer(cfilename))
file = (*C.GFile)(C.g_file_resolve_relative_path(o.parentRoot, cfilename))
var cerr *C.GError
stream := C.g_file_read(file, nil, &cerr)
if stream == nil {
return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
}
return &ostreeReader{stream: stream}, nil
}
func (o ostreePathFileGetter) Close() {
C.g_object_unref(C.gpointer(o.repo))
C.g_object_unref(C.gpointer(o.parentRoot))
}
func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser, error) {
getter, err := newOSTreePathFileGetter(s.repo, commit)
if err != nil {
return nil, err
}
defer getter.Close()
return getter.Get(path)
}
// GetBlob returns a stream for the specified blob, and the blob's size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
blob := info.Digest.Hex()
// Ensure s.compressed is initialized. It is built by LayerInfosForCopy.
if s.compressed == nil {
_, err := s.LayerInfosForCopy(ctx, nil)
if err != nil {
return nil, -1, err
}
}
compressedBlob, isCompressed := s.compressed[info.Digest]
if isCompressed {
blob = compressedBlob.Hex()
}
branch := fmt.Sprintf("ociimage/%s", blob)
if s.repo == nil {
repo, err := openRepo(s.ref.repo)
if err != nil {
return nil, 0, err
}
s.repo = repo
}
layerSize, err := s.getBlobUncompressedSize(blob, isCompressed)
if err != nil {
return nil, 0, err
}
tarsplit, err := s.getTarSplitData(blob)
if err != nil {
return nil, 0, err
}
// If tarsplit is nil, we are looking at the manifest; return the file in /content directly.
if tarsplit == nil {
file, err := s.readSingleFile(branch, "/content")
if err != nil {
return nil, 0, err
}
return file, layerSize, nil
}
mf := bytes.NewReader(tarsplit)
mfz, err := pgzip.NewReader(mf)
if err != nil {
return nil, 0, err
}
metaUnpacker := storage.NewJSONUnpacker(mfz)
getter, err := newOSTreePathFileGetter(s.repo, branch)
if err != nil {
mfz.Close()
return nil, 0, err
}
ots := asm.NewOutputTarStream(getter, metaUnpacker)
rc := ioutils.NewReadCloserWrapper(ots, func() error {
getter.Close()
mfz.Close()
return ots.Close()
})
return rc, layerSize, nil
}
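// The reassembly above follows the tar-split pattern: a pgzip-compressed JSON
// stream of tar metadata, plus a FileGetter for the file payloads, reproduces
// the original layer tarball byte-for-byte. A minimal sketch of the same
// pattern, assuming metadataReader (an io.Reader over the tar-split JSON) and
// getter (a storage.FileGetter) are available:
//
//	unpacker := storage.NewJSONUnpacker(metadataReader)
//	tarStream := asm.NewOutputTarStream(getter, unpacker)
//	defer tarStream.Close()
//	// tarStream now yields the exact original tar bytes.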
// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
// (e.g. if the source never returns manifest lists).
func (s *ostreeImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
if instanceDigest != nil {
return nil, errors.New(`Manifest lists are not supported by "ostree:"`)
}
lenSignatures, err := s.getLenSignatures()
if err != nil {
return nil, err
}
branch := fmt.Sprintf("ociimage/%s", s.ref.branchName)
if s.repo == nil {
repo, err := openRepo(s.ref.repo)
if err != nil {
return nil, err
}
s.repo = repo
}
signatures := []signature.Signature{}
for i := int64(1); i <= lenSignatures; i++ {
path := fmt.Sprintf("/signature-%d", i)
sigReader, err := s.readSingleFile(branch, path)
if err != nil {
return nil, err
}
defer sigReader.Close()
sigBlob, err := io.ReadAll(sigReader)
if err != nil {
return nil, err
}
sig, err := signature.FromBlob(sigBlob)
if err != nil {
return nil, fmt.Errorf("parsing signature %q: %w", path, err)
}
signatures = append(signatures, sig)
}
return signatures, nil
}
// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
// to read the image's layers.
// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
// as the primary manifest can not be a list, so there can be no secondary manifests.
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
if instanceDigest != nil {
return nil, errors.New(`Manifest lists are not supported by "ostree:"`)
}
updatedBlobInfos := []types.BlobInfo{}
manifestBlob, manifestType, err := s.GetManifest(ctx, nil)
if err != nil {
return nil, err
}
man, err := manifest.FromBlob(manifestBlob, manifestType)
if err != nil {
return nil, err
}
s.compressed = make(map[digest.Digest]digest.Digest)
layerBlobs := man.LayerInfos()
for _, layerBlob := range layerBlobs {
branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Hex())
found, uncompressedDigestStr, err := readMetadata(s.repo, branch, "docker.uncompressed_digest")
if err != nil || !found {
return nil, err
}
found, uncompressedSizeStr, err := readMetadata(s.repo, branch, "docker.uncompressed_size")
if err != nil || !found {
return nil, err
}
uncompressedSize, err := strconv.ParseInt(uncompressedSizeStr, 10, 64)
if err != nil {
return nil, err
}
uncompressedDigest := digest.Digest(uncompressedDigestStr)
blobInfo := types.BlobInfo{
Digest: uncompressedDigest,
Size: uncompressedSize,
MediaType: layerBlob.MediaType,
}
s.compressed[uncompressedDigest] = layerBlob.Digest
updatedBlobInfos = append(updatedBlobInfos, blobInfo)
}
return updatedBlobInfos, nil
}
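// Illustrative caller flow (not part of the upstream file): copies from this
// source are expected to call LayerInfosForCopy first and feed the substituted
// infos back into GetBlob; a minimal sketch, assuming src is an initialized
// *ostreeImageSource and cache is a types.BlobInfoCache:
//
//	infos, err := src.LayerInfosForCopy(ctx, nil)
//	if err != nil {
//		return err
//	}
//	for _, info := range infos {
//		rc, size, err := src.GetBlob(ctx, info, cache)
//		if err != nil {
//			return err
//		}
//		// ... consume up to size bytes from rc ...
//		rc.Close()
//	}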

242
vendor/github.com/containers/image/v5/ostree/ostree_transport.go generated vendored Normal file
View file

@ -0,0 +1,242 @@
//go:build containers_image_ostree
// +build containers_image_ostree
package ostree
import (
"bytes"
"context"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/regexp"
)
const defaultOSTreeRepo = "/ostree/repo"
// Transport is an ImageTransport for ostree paths.
var Transport = ostreeTransport{}
type ostreeTransport struct{}
func (t ostreeTransport) Name() string {
return "ostree"
}
func init() {
transports.Register(Transport)
}
// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
// The scope passed to this function will never be ""; that value is always allowed.
func (t ostreeTransport) ValidatePolicyConfigurationScope(scope string) error {
sep := strings.Index(scope, ":")
if sep < 0 {
return fmt.Errorf("Invalid ostree: scope %s: Must include a repo", scope)
}
repo := scope[:sep]
if !strings.HasPrefix(repo, "/") {
return fmt.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope)
}
cleaned := filepath.Clean(repo)
if cleaned != repo {
return fmt.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
}
// FIXME? In the namespaces within a repo,
// we could be verifying the various character set and length restrictions
// from docker/distribution/reference.regexp.go, but other than that there
// are few semantically invalid strings.
return nil
}
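// Illustrative scopes (not part of the upstream file): per the checks above,
// "/ostree/repo:docker.io/library/busybox" is accepted, while "busybox"
// (no ":"-separated repo), "repo:busybox" (relative repo path) and
// "/ostree//repo:busybox" (non-canonical, cleans to "/ostree/repo") are all
// rejected.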
// ostreeReference is an ImageReference for ostree paths.
type ostreeReference struct {
image string
branchName string
repo string
}
type ostreeImageCloser struct {
types.ImageCloser
size int64
}
func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) {
var repo = ""
image, repoPart, gotRepoPart := strings.Cut(ref, "@/")
if !gotRepoPart {
repo = defaultOSTreeRepo
} else {
repo = "/" + repoPart
}
return NewReference(image, repo)
}
// NewReference returns an OSTree reference for a specified repo and image.
func NewReference(image string, repo string) (types.ImageReference, error) {
// image is not _really_ in a containers/image/docker/reference format;
// as far as the libOSTree ociimage/* namespace is concerned, it is more or
// less an arbitrary string with an implied tag.
// Parse the image using reference.ParseNormalizedNamed so that we can
// check whether the image has a tag specified, and add ":latest" if needed.
ostreeImage, err := reference.ParseNormalizedNamed(image)
if err != nil {
return nil, err
}
if reference.IsNameOnly(ostreeImage) {
image = image + ":latest"
}
resolved, err := explicitfilepath.ResolvePathToFullyExplicit(repo)
if err != nil {
// With os.IsNotExist(err), the parent directory of repo does not exist either;
// that should ordinarily not happen, but it would be a bit weird to reject
// references which do not specify a repo just because the implicit defaultOSTreeRepo
// does not exist.
if os.IsNotExist(err) && repo == defaultOSTreeRepo {
resolved = repo
} else {
return nil, err
}
}
// This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
// from being ambiguous with values of PolicyConfigurationIdentity.
if strings.Contains(resolved, ":") {
return nil, fmt.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved)
}
return ostreeReference{
image: image,
branchName: encodeOStreeRef(image),
repo: resolved,
}, nil
}
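// Illustrative result (not part of the upstream file): NewReference("busybox",
// "/ostree/repo") stores image "busybox:latest" (":latest" is appended because
// the name is tag-less), branchName "busybox_3Alatest" (see encodeOStreeRef
// below) and repo "/ostree/repo"; ParseReference("busybox@/ostree/repo")
// produces the same reference, and with no "@/" part the repo defaults to
// defaultOSTreeRepo.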
func (ref ostreeReference) Transport() types.ImageTransport {
return Transport
}
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref ostreeReference) StringWithinTransport() string {
return fmt.Sprintf("%s@%s", ref.image, ref.repo)
}
// DockerReference returns a Docker reference associated with this reference
// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
func (ref ostreeReference) DockerReference() reference.Named {
return nil
}
func (ref ostreeReference) PolicyConfigurationIdentity() string {
return fmt.Sprintf("%s:%s", ref.repo, ref.image)
}
// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
// in order, terminating on first match, and an implicit "" is always checked at the end.
// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
// and each following element to be a prefix of the element preceding it.
func (ref ostreeReference) PolicyConfigurationNamespaces() []string {
repo, _, gotTag := strings.Cut(ref.image, ":")
if !gotTag { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag.
panic(fmt.Sprintf("Internal inconsistency: ref.image value %q does not have a :tag", ref.image))
}
name := repo
res := []string{}
for {
res = append(res, fmt.Sprintf("%s:%s", ref.repo, name))
lastSlash := strings.LastIndex(name, "/")
if lastSlash == -1 {
break
}
name = name[:lastSlash]
}
return res
}
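// Illustrative output (not part of the upstream file): for ref.image
// "example.com/ns/app:latest" and ref.repo "/ostree/repo", the returned
// namespaces are, in order:
//
//	/ostree/repo:example.com/ns/app
//	/ostree/repo:example.com/ns
//	/ostree/repo:example.com
//
// each entry a prefix of the one before it, as the contract above recommends.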
func (s *ostreeImageCloser) Size() (int64, error) {
return s.size, nil
}
// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
// The caller must call .Close() on the returned ImageCloser.
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
func (ref ostreeReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
return image.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource.
func (ref ostreeReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
var tmpDir string
if sys == nil || sys.OSTreeTmpDirPath == "" {
tmpDir = os.TempDir()
} else {
tmpDir = sys.OSTreeTmpDirPath
}
return newImageSource(tmpDir, ref)
}
// NewImageDestination returns a types.ImageDestination for this reference.
// The caller must call .Close() on the returned ImageDestination.
func (ref ostreeReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
var tmpDir string
if sys == nil || sys.OSTreeTmpDirPath == "" {
tmpDir = os.TempDir()
} else {
tmpDir = sys.OSTreeTmpDirPath
}
return newImageDestination(ref, tmpDir)
}
// DeleteImage deletes the named image from the registry, if supported.
func (ref ostreeReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
return errors.New("Deleting images not implemented for ostree: images")
}
var ostreeRefRegexp = regexp.Delayed(`^[A-Za-z0-9.-]$`)
func encodeOStreeRef(in string) string {
var buffer bytes.Buffer
for i := range in {
sub := in[i : i+1]
if ostreeRefRegexp.MatchString(sub) {
buffer.WriteString(sub)
} else {
buffer.WriteString(fmt.Sprintf("_%02X", sub[0]))
}
}
return buffer.String()
}
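// Illustrative encoding (not part of the upstream file): only bytes matching
// [A-Za-z0-9.-] pass through unchanged; every other byte is escaped as "_XX"
// (two uppercase hex digits), keeping the result a valid ostree branch name:
//
//	encodeOStreeRef("busybox:latest") // "busybox_3Alatest"
//	encodeOStreeRef("ns/app")         // "ns_2Fapp"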
// manifestPath returns a path for the manifest within an ostree repository using our conventions.
func (ref ostreeReference) manifestPath() string {
return filepath.Join("manifest", "manifest.json")
}
// signaturePath returns a path for a signature within an ostree repository using our conventions.
func (ref ostreeReference) signaturePath(index int) string {
return filepath.Join("manifest", fmt.Sprintf("signature-%d", index+1))
}

210
vendor/github.com/containers/image/v5/sif/load.go generated vendored Normal file
View file

@ -0,0 +1,210 @@
package sif
import (
"bufio"
"context"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/sirupsen/logrus"
"github.com/sylabs/sif/v2/pkg/sif"
)
// injectedScriptTargetPath is the path injectedScript should be written to in the created image.
const injectedScriptTargetPath = "/podman/runscript"
// parseDefFile parses a SIF definition file from reader,
// and returns non-trivial contents of the %environment and %runscript sections.
func parseDefFile(reader io.Reader) ([]string, []string, error) {
type parserState int
const (
parsingOther parserState = iota
parsingEnvironment
parsingRunscript
)
environment := []string{}
runscript := []string{}
state := parsingOther
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
s := strings.TrimSpace(scanner.Text())
switch {
case s == `%environment`:
state = parsingEnvironment
case s == `%runscript`:
state = parsingRunscript
case strings.HasPrefix(s, "%"):
state = parsingOther
case state == parsingEnvironment:
if s != "" && !strings.HasPrefix(s, "#") {
environment = append(environment, s)
}
case state == parsingRunscript:
runscript = append(runscript, s)
default: // parsingOther: ignore the line
}
}
if err := scanner.Err(); err != nil {
return nil, nil, fmt.Errorf("reading lines from SIF definition file object: %w", err)
}
return environment, runscript, nil
}
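// Illustrative input/output (not part of the upstream file): for a definition
// file containing
//
//	%environment
//	    export FOO=bar
//	%runscript
//	    exec /bin/app "$@"
//
// parseDefFile returns environment = []string{"export FOO=bar"} and
// runscript = []string{`exec /bin/app "$@"`}; comments and blank lines are
// dropped from %environment but kept (whitespace-trimmed) in %runscript.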
// generateInjectedScript generates a shell script based on
// SIF definition file %environment and %runscript data, and returns it.
func generateInjectedScript(environment []string, runscript []string) []byte {
script := fmt.Sprintf("#!/bin/bash\n"+
"%s\n"+
"%s\n", strings.Join(environment, "\n"), strings.Join(runscript, "\n"))
return []byte(script)
}
// processDefFile finds sif.DataDeffile in sifImage, if any,
// and returns:
// - the command to run
// - contents of a script to inject as injectedScriptTargetPath, or nil
func processDefFile(sifImage *sif.FileImage) (string, []byte, error) {
var environment, runscript []string
desc, err := sifImage.GetDescriptor(sif.WithDataType(sif.DataDeffile))
if err == nil {
environment, runscript, err = parseDefFile(desc.GetReader())
if err != nil {
return "", nil, err
}
}
var command string
var injectedScript []byte
if len(environment) == 0 && len(runscript) == 0 {
command = "bash"
injectedScript = nil
} else {
injectedScript = generateInjectedScript(environment, runscript)
command = injectedScriptTargetPath
}
return command, injectedScript, nil
}
func writeInjectedScript(extractedRootPath string, injectedScript []byte) error {
if injectedScript == nil {
return nil
}
filePath := filepath.Join(extractedRootPath, injectedScriptTargetPath)
parentDirPath := filepath.Dir(filePath)
if err := os.MkdirAll(parentDirPath, 0755); err != nil {
return fmt.Errorf("creating %s: %w", parentDirPath, err)
}
if err := os.WriteFile(filePath, injectedScript, 0755); err != nil {
return fmt.Errorf("writing %s to %s: %w", injectedScriptTargetPath, filePath, err)
}
return nil
}
// createTarFromSIFInputs creates a tar file at tarPath, using a squashfs image at squashFSPath.
// It can also use extractedRootPath and scriptPath, which are allocated for its exclusive use,
// if necessary.
func createTarFromSIFInputs(ctx context.Context, tarPath, squashFSPath string, injectedScript []byte, extractedRootPath, scriptPath string) error {
// It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive
// for our use.
defer os.RemoveAll(extractedRootPath)
// Almost everything in extractedRootPath comes from squashFSPath.
conversionCommand := fmt.Sprintf("unsquashfs -d %s -f %s && tar --acls --xattrs -C %s -cpf %s ./",
extractedRootPath, squashFSPath, extractedRootPath, tarPath)
script := "#!/bin/sh\n" + conversionCommand + "\n"
if err := os.WriteFile(scriptPath, []byte(script), 0755); err != nil {
return err
}
defer os.Remove(scriptPath)
// On top of squashFSPath, we only add injectedScript, if necessary.
if err := writeInjectedScript(extractedRootPath, injectedScript); err != nil {
return err
}
logrus.Debugf("Converting squashfs to tar, command: %s ...", conversionCommand)
cmd := exec.CommandContext(ctx, "fakeroot", "--", scriptPath)
output, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("converting image: %w, output: %s", err, string(output))
}
logrus.Debugf("... finished converting squashfs to tar")
return nil
}
// convertSIFToElements processes sifImage and creates/returns
// the relevant elements for constructing an OCI-like image:
// - A path to a tar file containing a root filesystem,
// - A command to run.
// The returned tar file path is inside tempDir, which can be assumed to be empty
// at start, and is exclusively used by the current process (i.e. it is safe
// to use hard-coded relative paths within it).
func convertSIFToElements(ctx context.Context, sifImage *sif.FileImage, tempDir string) (string, []string, error) {
// We could allocate unique names for all of these using os.{CreateTemp,MkdirTemp}, but tempDir is exclusive,
// so we can just hard-code a set of unique values here.
// We create and/or manage cleanup of these two paths.
squashFSPath := filepath.Join(tempDir, "rootfs.squashfs")
tarPath := filepath.Join(tempDir, "rootfs.tar")
// We only allocate these paths, the user is responsible for cleaning them up.
extractedRootPath := filepath.Join(tempDir, "rootfs")
scriptPath := filepath.Join(tempDir, "script")
succeeded := false
// It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive
// for our use.
// Ideally we would remove squashFSPath immediately after creating extractedRootPath, but we need
// to run both creation and consumption of extractedRootPath in the same fakeroot context.
// So, overall, this process requires at least 2 compressed copies (SIF and squashFSPath) and 2
// uncompressed copies (extractedRootPath and tarPath) of the data, all using up space at the same time.
// That's rather unsatisfactory, ideally we would be streaming the data directly from a squashfs parser
// reading from the SIF file to a tarball, for 1 compressed and 1 uncompressed copy.
defer os.Remove(squashFSPath)
defer func() {
if !succeeded {
os.Remove(tarPath)
}
}()
command, injectedScript, err := processDefFile(sifImage)
if err != nil {
return "", nil, err
}
rootFS, err := sifImage.GetDescriptor(sif.WithPartitionType(sif.PartPrimSys))
if err != nil {
return "", nil, fmt.Errorf("looking up rootfs from SIF file: %w", err)
}
// TODO: We'd prefer not to make a full copy of the file here; unsquashfs ≥ 4.4
// has an -o option that allows extracting a squashfs from the SIF file directly,
// but that version is not currently available in RHEL 8.
logrus.Debugf("Creating a temporary squashfs image %s ...", squashFSPath)
if err := func() error { // A scope for defer
f, err := os.Create(squashFSPath)
if err != nil {
return err
}
defer f.Close()
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
if _, err := io.CopyN(f, rootFS.GetReader(), rootFS.Size()); err != nil {
return err
}
return nil
}(); err != nil {
return "", nil, err
}
logrus.Debugf("... finished creating a temporary squashfs image")
if err := createTarFromSIFInputs(ctx, tarPath, squashFSPath, injectedScript, extractedRootPath, scriptPath); err != nil {
return "", nil, err
}
succeeded = true
return tarPath, []string{command}, nil
}

206
vendor/github.com/containers/image/v5/sif/src.go generated vendored Normal file
View file

@ -0,0 +1,206 @@
package sif
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"github.com/containers/image/v5/internal/imagesource/impl"
"github.com/containers/image/v5/internal/imagesource/stubs"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecs "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"github.com/sylabs/sif/v2/pkg/sif"
)
type sifImageSource struct {
impl.Compat
impl.PropertyMethodsInitialize
impl.NoSignatures
impl.DoesNotAffectLayerInfosForCopy
stubs.NoGetBlobAtInitialize
ref sifReference
workDir string
layerDigest digest.Digest
layerSize int64
layerFile string
config []byte
configDigest digest.Digest
manifest []byte
}
// getBlobInfo returns the digest and size of the provided file.
func getBlobInfo(path string) (digest.Digest, int64, error) {
f, err := os.Open(path)
if err != nil {
return "", -1, fmt.Errorf("opening %q for reading: %w", path, err)
}
defer f.Close()
// TODO: Instead of writing the tar file to disk, and reading
// it here again, stream the tar file to a pipe and
// compute the digest while writing it to disk.
logrus.Debugf("Computing a digest of the SIF conversion output...")
digester := digest.Canonical.Digester()
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
size, err := io.Copy(digester.Hash(), f)
if err != nil {
return "", -1, fmt.Errorf("reading %q: %w", path, err)
}
digest := digester.Digest()
logrus.Debugf("... finished computing the digest of the SIF conversion output")
return digest, size, nil
}
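// The above is the standard go-digest streaming computation; a minimal
// standalone sketch of the same pattern, assuming r is any io.Reader:
//
//	digester := digest.Canonical.Digester()
//	n, err := io.Copy(digester.Hash(), r)
//	if err != nil {
//		return err
//	}
//	d := digester.Digest() // canonical form, e.g. "sha256:<hex>"; n is the byte count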
// newImageSource returns an ImageSource for reading from an existing directory.
// newImageSource extracts SIF objects and saves them in a temp directory.
func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifReference) (private.ImageSource, error) {
sifImg, err := sif.LoadContainerFromPath(ref.file, sif.OptLoadWithFlag(os.O_RDONLY))
if err != nil {
return nil, fmt.Errorf("loading SIF file: %w", err)
}
defer func() {
_ = sifImg.UnloadContainer()
}()
workDir, err := tmpdir.MkDirBigFileTemp(sys, "sif")
if err != nil {
return nil, fmt.Errorf("creating temp directory: %w", err)
}
succeeded := false
defer func() {
if !succeeded {
os.RemoveAll(workDir)
}
}()
layerPath, commandLine, err := convertSIFToElements(ctx, sifImg, workDir)
if err != nil {
return nil, fmt.Errorf("converting rootfs from SquashFS to Tarball: %w", err)
}
layerDigest, layerSize, err := getBlobInfo(layerPath)
if err != nil {
return nil, fmt.Errorf("gathering blob information: %w", err)
}
created := sifImg.ModifiedAt()
config := imgspecv1.Image{
Created: &created,
Platform: imgspecv1.Platform{
Architecture: sifImg.PrimaryArch(),
OS: "linux",
},
Config: imgspecv1.ImageConfig{
Cmd: commandLine,
},
RootFS: imgspecv1.RootFS{
Type: "layers",
DiffIDs: []digest.Digest{layerDigest},
},
History: []imgspecv1.History{
{
Created: &created,
CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Hex(), os.PathSeparator),
Comment: "imported from SIF, uuid: " + sifImg.ID(),
},
{
Created: &created,
CreatedBy: "/bin/sh -c #(nop) CMD [\"bash\"]",
EmptyLayer: true,
},
},
}
configBytes, err := json.Marshal(&config)
if err != nil {
return nil, fmt.Errorf("generating configuration blob for %q: %w", ref.resolvedFile, err)
}
configDigest := digest.Canonical.FromBytes(configBytes)
manifest := imgspecv1.Manifest{
Versioned: imgspecs.Versioned{SchemaVersion: 2},
MediaType: imgspecv1.MediaTypeImageManifest,
Config: imgspecv1.Descriptor{
Digest: configDigest,
Size: int64(len(configBytes)),
MediaType: imgspecv1.MediaTypeImageConfig,
},
Layers: []imgspecv1.Descriptor{{
Digest: layerDigest,
Size: layerSize,
MediaType: imgspecv1.MediaTypeImageLayer,
}},
}
manifestBytes, err := json.Marshal(&manifest)
if err != nil {
return nil, fmt.Errorf("generating manifest for %q: %w", ref.resolvedFile, err)
}
succeeded = true
s := &sifImageSource{
PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
HasThreadSafeGetBlob: true,
}),
NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
ref: ref,
workDir: workDir,
layerDigest: layerDigest,
layerSize: layerSize,
layerFile: layerPath,
config: configBytes,
configDigest: configDigest,
manifest: manifestBytes,
}
s.Compat = impl.AddCompat(s)
return s, nil
}
// Reference returns the reference used to set up this source.
func (s *sifImageSource) Reference() types.ImageReference {
return s.ref
}
// Close removes resources associated with an initialized ImageSource, if any.
func (s *sifImageSource) Close() error {
return os.RemoveAll(s.workDir)
}
// GetBlob returns a stream for the specified blob, and the blob's size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *sifImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
switch info.Digest {
case s.configDigest:
return io.NopCloser(bytes.NewReader(s.config)), int64(len(s.config)), nil
case s.layerDigest:
reader, err := os.Open(s.layerFile)
if err != nil {
return nil, -1, fmt.Errorf("opening %q: %w", s.layerFile, err)
}
return reader, s.layerSize, nil
default:
return nil, -1, fmt.Errorf("no blob with digest %q found", info.Digest.String())
}
}
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
func (s *sifImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
if instanceDigest != nil {
return nil, "", errors.New("manifest lists are not supported by the sif transport")
}
return s.manifest, imgspecv1.MediaTypeImageManifest, nil
}

160
vendor/github.com/containers/image/v5/sif/transport.go generated vendored Normal file
View file

@ -0,0 +1,160 @@
package sif
import (
"context"
"errors"
"fmt"
"path/filepath"
"strings"
"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
)
func init() {
transports.Register(Transport)
}
// Transport is an ImageTransport for SIF images.
var Transport = sifTransport{}
type sifTransport struct{}
func (t sifTransport) Name() string {
return "sif"
}
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
func (t sifTransport) ParseReference(reference string) (types.ImageReference, error) {
return NewReference(reference)
}
// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
// The scope passed to this function will never be ""; that value is always allowed.
func (t sifTransport) ValidatePolicyConfigurationScope(scope string) error {
if !strings.HasPrefix(scope, "/") {
return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope)
}
// Refuse also "/", otherwise "/" and "" would have the same semantics,
// and "" could be unexpectedly shadowed by the "/" entry.
if scope == "/" {
return errors.New(`Invalid scope "/": Use the generic default scope ""`)
}
cleaned := filepath.Clean(scope)
if cleaned != scope {
return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned)
}
return nil
}
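// Illustrative scopes (not part of the upstream file): per the checks above,
// "/var/lib/images/app.sif" is accepted, while "images/app.sif" (relative),
// "/" (would shadow the global default "") and "/var//images" (non-canonical,
// cleans to "/var/images") are all rejected.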
// sifReference is an ImageReference for SIF images.
type sifReference struct {
// Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time!
// Either of the paths may point to a different, or no, inode over time. resolvedFile may contain symbolic links, and so on.
// Generally we follow the intent of the user, and use the "file" member for filesystem operations (e.g. the user can use a relative path to avoid
// being exposed to symlinks and renames in the parent directories to the working directory).
// (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
file string // As specified by the user. May be relative, contain symlinks, etc.
resolvedFile string // Absolute file path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
}
// There is no sif.ParseReference because it is rather pointless.
// Callers who need a transport-independent interface will go through
// sifTransport.ParseReference; callers who intentionally deal with SIF files
// can use sif.NewReference.
// NewReference returns an image file reference for a specified path.
func NewReference(file string) (types.ImageReference, error) {
// We do not expose an API supplying the resolvedFile; we could, but recomputing it
// is generally cheap enough that we prefer being confident about the properties of resolvedFile.
resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file)
if err != nil {
return nil, err
}
return sifReference{file: file, resolvedFile: resolved}, nil
}
func (ref sifReference) Transport() types.ImageTransport {
return Transport
}
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
// instead, see transports.ImageName().
func (ref sifReference) StringWithinTransport() string {
return ref.file
}
// DockerReference returns a Docker reference associated with this reference
// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
func (ref sifReference) DockerReference() reference.Named {
return nil
}
// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
// (i.e. various references with exactly the same semantics should return the same configuration identity)
// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
// not required/guaranteed that it will be a valid input to Transport().ParseReference().
// Returns "" if configuration identities for these references are not supported.
func (ref sifReference) PolicyConfigurationIdentity() string {
return ref.resolvedFile
}
// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
// in order, terminating on first match, and an implicit "" is always checked at the end.
// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
// and each following element to be a prefix of the element preceding it.
func (ref sifReference) PolicyConfigurationNamespaces() []string {
res := []string{}
path := ref.resolvedFile
for {
lastSlash := strings.LastIndex(path, "/")
if lastSlash == -1 || lastSlash == 0 {
break
}
path = path[:lastSlash]
res = append(res, path)
}
// Note that we do not include "/"; it is redundant with the default "" global default,
// and rejected by sifTransport.ValidatePolicyConfigurationScope above.
return res
}
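// Illustrative output (not part of the upstream file): for resolvedFile
// "/var/lib/images/app.sif" the returned namespaces are, in order,
// "/var/lib/images", "/var/lib" and "/var"; "/" and "" are deliberately
// omitted, matching ValidatePolicyConfigurationScope above.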
// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
// The caller must call .Close() on the returned ImageCloser.
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref sifReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
return image.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource.
func (ref sifReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
return newImageSource(ctx, sys, ref)
}
// NewImageDestination returns a types.ImageDestination for this reference.
// The caller must call .Close() on the returned ImageDestination.
func (ref sifReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
return nil, errors.New(`"sif:" locations can only be read from, not written to`)
}
// DeleteImage deletes the named image from the registry, if supported.
func (ref sifReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
return errors.New("Deleting images not implemented for sif: images")
}

958
vendor/github.com/containers/image/v5/storage/storage_dest.go generated vendored Normal file
View file

@ -0,0 +1,958 @@
//go:build !containers_image_storage_stub
// +build !containers_image_storage_stub
package storage
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"sync"
"sync/atomic"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/internal/imagedestination/impl"
"github.com/containers/image/v5/internal/imagedestination/stubs"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache/none"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chunked"
"github.com/containers/storage/pkg/ioutils"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
var (
// ErrBlobDigestMismatch could potentially be returned when PutBlob() is given a blob
// with a digest-based name that doesn't match its contents.
// Deprecated: PutBlob() doesn't do this any more (it just accepts the callers value),
// and there is no known user of this error.
ErrBlobDigestMismatch = errors.New("blob digest mismatch")
// ErrBlobSizeMismatch is returned when PutBlob() is given a blob
// with an expected size that doesn't match the reader.
ErrBlobSizeMismatch = errors.New("blob size mismatch")
)
type storageImageDestination struct {
impl.Compat
impl.PropertyMethodsInitialize
stubs.ImplementsPutBlobPartial
stubs.AlwaysSupportsSignatures
imageRef storageReference
directory string // Temporary directory where we store blobs until Commit() time
nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs
manifest []byte // Manifest contents, temporary
manifestDigest digest.Digest // Valid if len(manifest) != 0
signatures []byte // Signature contents, temporary
signatureses map[digest.Digest][]byte // Instance signature contents, temporary
SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice
// A storage destination may be used concurrently. Accesses are
// serialized via a mutex. Please refer to the individual comments
// below for details.
lock sync.Mutex
// Mapping from layer (by index) to the associated ID in the storage.
// It's protected *implicitly* since `commitLayer()`, at any given
// time, can only be executed by *one* goroutine. Please refer to
// `queueOrCommit()` for further details on how the single-caller
// guarantee is implemented.
indexToStorageID map[int]*string
// All accesses to below data are protected by `lock` which is made
// *explicit* in the code.
blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image
blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output
}
// addedLayerInfo records data about a layer to use in this image.
type addedLayerInfo struct {
digest digest.Digest
emptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept.
}
// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
// it's time to Commit() the image
func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) {
directory, err := tmpdir.MkDirBigFileTemp(sys, "storage")
if err != nil {
return nil, fmt.Errorf("creating a temporary directory: %w", err)
}
dest := &storageImageDestination{
PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
SupportedManifestMIMETypes: []string{
imgspecv1.MediaTypeImageManifest,
manifest.DockerV2Schema2MediaType,
manifest.DockerV2Schema1SignedMediaType,
manifest.DockerV2Schema1MediaType,
},
// We ultimately have to decompress layers to populate trees on disk
// and need to explicitly ask for it here, so that the layers' MIME
// types can be set accordingly.
DesiredLayerCompression: types.PreserveOriginal,
AcceptsForeignLayerURLs: false,
MustMatchRuntimeOS: true,
IgnoresEmbeddedDockerReference: true, // Yes, we want the unmodified manifest
HasThreadSafePutBlob: true,
}),
imageRef: imageRef,
directory: directory,
signatureses: make(map[digest.Digest][]byte),
blobDiffIDs: make(map[digest.Digest]digest.Digest),
blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
fileSizes: make(map[digest.Digest]int64),
filenames: make(map[digest.Digest]string),
SignatureSizes: []int{},
SignaturesSizes: make(map[digest.Digest][]int),
indexToStorageID: make(map[int]*string),
indexToAddedLayerInfo: make(map[int]addedLayerInfo),
diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput),
}
dest.Compat = impl.AddCompat(dest)
return dest, nil
}
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
func (s *storageImageDestination) Reference() types.ImageReference {
return s.imageRef
}
// Close cleans up the temporary directory and additional layer store handlers.
func (s *storageImageDestination) Close() error {
for _, al := range s.blobAdditionalLayer {
al.Release()
}
for _, v := range s.diffOutputs {
if v.Target != "" {
_ = s.imageRef.transport.store.CleanupStagingDirectory(v.Target)
}
}
return os.RemoveAll(s.directory)
}
func (s *storageImageDestination) computeNextBlobCacheFile() string {
return filepath.Join(s.directory, fmt.Sprintf("%d", s.nextTempFileID.Add(1)))
}
// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
info, err := s.putBlobToPendingFile(stream, blobinfo, &options)
if err != nil {
return info, err
}
if options.IsConfig || options.LayerIndex == nil {
return info, nil
}
return info, s.queueOrCommit(*options.LayerIndex, addedLayerInfo{
digest: info.Digest,
emptyLayer: options.EmptyLayer,
})
}
// putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file.
// The caller must arrange the blob to be eventually committed using s.commitLayer().
func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (private.UploadedBlob, error) {
// Stores a layer or data blob in our temporary directory, checking that any information
// in the blobinfo matches the incoming data.
if blobinfo.Digest != "" {
if err := blobinfo.Digest.Validate(); err != nil {
return private.UploadedBlob{}, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err)
}
}
// Set up to digest the blob if necessary, and count its size while saving it to a file.
filename := s.computeNextBlobCacheFile()
file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
if err != nil {
return private.UploadedBlob{}, fmt.Errorf("creating temporary file %q: %w", filename, err)
}
defer file.Close()
counter := ioutils.NewWriteCounter(file)
stream = io.TeeReader(stream, counter)
digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo)
decompressed, err := archive.DecompressStream(stream)
if err != nil {
return private.UploadedBlob{}, fmt.Errorf("setting up to decompress blob: %w", err)
}
diffID := digest.Canonical.Digester()
// Copy the data to the file.
// TODO: This can take quite some time, and should ideally be cancellable using context.Context.
_, err = io.Copy(diffID.Hash(), decompressed)
decompressed.Close()
if err != nil {
return private.UploadedBlob{}, fmt.Errorf("storing blob to file %q: %w", filename, err)
}
// Determine blob properties, and fail if information that we were given about the blob
// is known to be incorrect.
blobDigest := digester.Digest()
blobSize := blobinfo.Size
if blobSize < 0 {
blobSize = counter.Count
} else if blobinfo.Size != counter.Count {
return private.UploadedBlob{}, ErrBlobSizeMismatch
}
// Record information about the blob.
s.lock.Lock()
s.blobDiffIDs[blobDigest] = diffID.Digest()
s.fileSizes[blobDigest] = counter.Count
s.filenames[blobDigest] = filename
s.lock.Unlock()
// This is safe because we have just computed diffID, and blobDigest was either computed
// by us, or validated by the caller (usually copy.digestingReader).
options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
return private.UploadedBlob{
Digest: blobDigest,
Size: blobSize,
}, nil
}
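// The pipeline above counts, digests, and decompresses the stream in a single
// pass; a minimal standalone sketch of the same pattern, assuming stream is an
// io.Reader, file an io.Writer, and blobinfo a types.BlobInfo:
//
//	counter := ioutils.NewWriteCounter(file)      // counts compressed bytes written
//	tee := io.TeeReader(stream, counter)          // copy to file while reading
//	digester, tee := putblobdigest.DigestIfUnknown(tee, blobinfo)
//	decompressed, err := archive.DecompressStream(tee)
//	if err != nil {
//		return err
//	}
//	diffID := digest.Canonical.Digester()
//	_, err = io.Copy(diffID.Hash(), decompressed) // drives the whole pipeline
//	decompressed.Close()
//	// digester.Digest() is the (compressed) blob digest, diffID.Digest() the DiffID.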
type zstdFetcher struct {
chunkAccessor private.BlobChunkAccessor
ctx context.Context
blobInfo types.BlobInfo
}
// GetBlobAt converts from chunked.GetBlobAt to BlobChunkAccessor.GetBlobAt.
func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
newChunks := make([]private.ImageSourceChunk, 0, len(chunks))
for _, v := range chunks {
i := private.ImageSourceChunk{
Offset: v.Offset,
Length: v.Length,
}
newChunks = append(newChunks, i)
}
rc, errs, err := f.chunkAccessor.GetBlobAt(f.ctx, f.blobInfo, newChunks)
if _, ok := err.(private.BadPartialRequestError); ok {
err = chunked.ErrBadRequest{}
}
return rc, errs, err
}
// PutBlobPartial attempts to create a blob using the data that is already present
// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
fetcher := zstdFetcher{
chunkAccessor: chunkAccessor,
ctx: ctx,
blobInfo: srcInfo,
}
differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher)
if err != nil {
return private.UploadedBlob{}, err
}
out, err := s.imageRef.transport.store.ApplyDiffWithDiffer("", nil, differ)
if err != nil {
return private.UploadedBlob{}, err
}
blobDigest := srcInfo.Digest
s.lock.Lock()
s.blobDiffIDs[blobDigest] = blobDigest
s.fileSizes[blobDigest] = 0
s.filenames[blobDigest] = ""
s.diffOutputs[blobDigest] = out
s.lock.Unlock()
return private.UploadedBlob{
Digest: blobDigest,
Size: srcInfo.Size,
}, nil
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if !impl.OriginalBlobMatchesRequiredCompression(options) {
return false, private.ReusedBlob{}, nil
}
reused, info, err := s.tryReusingBlobAsPending(blobinfo.Digest, blobinfo.Size, &options)
if err != nil || !reused || options.LayerIndex == nil {
return reused, info, err
}
return reused, info, s.queueOrCommit(*options.LayerIndex, addedLayerInfo{
digest: info.Digest,
emptyLayer: options.EmptyLayer,
})
}
// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (digest, size or -1), filling s.blobDiffIDs and other metadata.
// The caller must arrange the blob to be eventually committed using s.commitLayer().
func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
// lock the entire method as it executes fairly quickly
s.lock.Lock()
defer s.lock.Unlock()
if options.SrcRef != nil {
// Check if we have the layer in the underlying additional layer store.
aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(digest, options.SrcRef.String())
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, digest, err)
} else if err == nil {
// Record the uncompressed value so that we can use it to calculate layer IDs.
s.blobDiffIDs[digest] = aLayer.UncompressedDigest()
s.blobAdditionalLayer[digest] = aLayer
return true, private.ReusedBlob{
Digest: digest,
Size: aLayer.CompressedSize(),
}, nil
}
}
if digest == "" {
return false, private.ReusedBlob{}, errors.New(`Can not check for a blob with unknown digest`)
}
if err := digest.Validate(); err != nil {
return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
}
// Check if we've already cached it in a file.
if size, ok := s.fileSizes[digest]; ok {
return true, private.ReusedBlob{
Digest: digest,
Size: size,
}, nil
}
// Check if we have an uncompressed layer in storage that's based on that blob.
layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(digest)
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, digest, err)
}
if len(layers) > 0 {
// Save this for completeness.
s.blobDiffIDs[digest] = layers[0].UncompressedDigest
return true, private.ReusedBlob{
Digest: digest,
Size: layers[0].UncompressedSize,
}, nil
}
// Check if we have a compressed layer in storage that's based on that blob.
layers, err = s.imageRef.transport.store.LayersByCompressedDigest(digest)
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, digest, err)
}
if len(layers) > 0 {
// Record the uncompressed value so that we can use it to calculate layer IDs.
s.blobDiffIDs[digest] = layers[0].UncompressedDigest
return true, private.ReusedBlob{
Digest: digest,
Size: layers[0].CompressedSize,
}, nil
}
// Does the blob correspond to a known DiffID which we already have available?
// Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
// uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size.
if options.CanSubstitute || size != -1 {
if uncompressedDigest := options.Cache.UncompressedDigest(digest); uncompressedDigest != "" && uncompressedDigest != digest {
layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
}
if len(layers) > 0 {
if size != -1 {
s.blobDiffIDs[digest] = layers[0].UncompressedDigest
return true, private.ReusedBlob{
Digest: digest,
Size: size,
}, nil
}
if !options.CanSubstitute {
return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", digest)
}
s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest
return true, private.ReusedBlob{
Digest: uncompressedDigest,
Size: layers[0].UncompressedSize,
}, nil
}
}
}
// Nope, we don't have it.
return false, private.ReusedBlob{}, nil
}
// computeID computes a recommended image ID based on information we have so far. If
// the manifest is not of a type that we recognize, we return an empty value, indicating
// that since we don't have a recommendation, a random ID should be used if one needs
// to be allocated.
func (s *storageImageDestination) computeID(m manifest.Manifest) string {
// Build the diffID list. We need the decompressed sums that we've been calculating to
// fill in the DiffIDs. It's expected (but not enforced by us) that the number of
// diffIDs corresponds to the number of non-EmptyLayer entries in the history.
var diffIDs []digest.Digest
switch m := m.(type) {
case *manifest.Schema1:
// Build a list of the diffIDs we've generated for the non-throwaway FS layers,
// in reverse of the order in which they were originally listed.
for i, compat := range m.ExtractedV1Compatibility {
if compat.ThrowAway {
continue
}
blobSum := m.FSLayers[i].BlobSum
diffID, ok := s.blobDiffIDs[blobSum]
if !ok {
logrus.Infof("error looking up diffID for layer %q", blobSum.String())
return ""
}
diffIDs = append([]digest.Digest{diffID}, diffIDs...)
}
case *manifest.Schema2, *manifest.OCI1:
// We know the ID calculation for these formats doesn't actually use the diffIDs,
// so we don't need to populate the diffID list.
default:
return ""
}
id, err := m.ImageID(diffIDs)
if err != nil {
return ""
}
return id
}
// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig
// information out of it for Inspect().
func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) {
if info.Digest == "" {
return nil, errors.New(`no digest supplied when reading blob`)
}
if err := info.Digest.Validate(); err != nil {
return nil, fmt.Errorf("invalid digest supplied when reading blob: %w", err)
}
// Assume it's a file, since we're only calling this from a place that expects to read files.
if filename, ok := s.filenames[info.Digest]; ok {
contents, err2 := os.ReadFile(filename)
if err2 != nil {
return nil, fmt.Errorf(`reading blob from file %q: %w`, filename, err2)
}
return contents, nil
}
// If it's not a file, it's a bug, because we're not expecting to be asked for a layer.
return nil, errors.New("blob not found")
}
// queueOrCommit queues the specified layer to be committed to the storage.
// If no other goroutine is already committing layers, the layer and all
// subsequent layers (if already queued) will be committed to the storage.
func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo) error {
// NOTE: whenever the code below is touched, make sure that all code
// paths unlock the lock and to unlock it exactly once.
//
// Conceptually, the code is divided in two stages:
//
// 1) Queue in work by marking the layer as ready to be committed.
// If at least one previous/parent layer with a lower index has
// not yet been committed, return early.
//
// 2) Process the queued-in work by committing the "ready" layers
// in sequence. Make sure that more items can be queued-in
// during the comparatively I/O expensive task of committing a
// layer.
//
// The conceptual benefit of this design is that caller can continue
// pulling layers after an early return. At any given time, only one
// caller is the "worker" routine committing layers. All other routines
// can continue pulling and queuing in layers.
s.lock.Lock()
s.indexToAddedLayerInfo[index] = info
// We're still waiting for at least one previous/parent layer to be
// committed, so there's nothing to do.
if index != s.currentIndex {
s.lock.Unlock()
return nil
}
for {
info, ok := s.indexToAddedLayerInfo[index]
if !ok {
break
}
s.lock.Unlock()
// Note: commitLayer locks on-demand.
if err := s.commitLayer(index, info, -1); err != nil {
return err
}
s.lock.Lock()
index++
}
// Set the index at the very end to make sure that only one routine
// enters stage 2).
s.currentIndex = index
s.lock.Unlock()
return nil
}
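// Illustrative schedule (not part of the upstream file): with goroutines
// finishing layers out of order as indices 2, 0, 1, the call for 2 only
// records its info (2 != currentIndex) and returns; the call for 0 commits
// layer 0, finds no entry for index 1 yet, and sets currentIndex = 1; the
// call for 1 then commits layers 1 and 2 in sequence and sets
// currentIndex = 3. At any moment at most one goroutine is inside
// commitLayer().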
// commitLayer commits the specified layer with the given index to the storage.
// size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs.
//
// Note that the previous layer is expected to already be committed.
//
// Caution: this function must be called without holding `s.lock`. Callers
// must guarantee that, at any given time, at most one goroutine may execute
// `commitLayer()`.
func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) error {
// Already committed? Return early.
if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted {
return nil
}
// Start with an empty string or the previous layer ID. Note that
// `s.indexToStorageID` can only be accessed by *one* goroutine at any
// given time. Hence, we don't need to lock accesses.
var lastLayer string
if prev := s.indexToStorageID[index-1]; prev != nil {
lastLayer = *prev
}
// Carry over the previous ID for empty non-base layers.
if info.emptyLayer {
s.indexToStorageID[index] = &lastLayer
return nil
}
// Check if there's already a layer with the ID that we'd give to the result of applying
// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
s.lock.Lock()
diffID, haveDiffID := s.blobDiffIDs[info.digest]
s.lock.Unlock()
if !haveDiffID {
// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
// or to even check if we had it.
// Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
// that relies on using a blob digest that has never been seen by the store had better call
// TryReusingBlob; not calling PutBlob already violates the documented API, so there's only
// so far we are going to accommodate that (if we should be doing that at all).
logrus.Debugf("looking for diffID for blob %+v", info.digest)
// Use tryReusingBlobAsPending, not the top-level TryReusingBlobWithOptions, to prevent recursion via queueOrCommit.
has, _, err := s.tryReusingBlobAsPending(info.digest, size, &private.TryReusingBlobOptions{
Cache: none.NoCache,
CanSubstitute: false,
})
if err != nil {
return fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err)
}
if !has {
return fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String())
}
diffID, haveDiffID = s.blobDiffIDs[info.digest]
if !haveDiffID {
return fmt.Errorf("we have blob %q, but don't know its uncompressed digest", info.digest.String())
}
}
id := diffID.Hex()
if lastLayer != "" {
id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
}
if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
// There's already a layer that should have the right contents, just reuse it.
lastLayer = layer.ID
s.indexToStorageID[index] = &lastLayer
return nil
}
s.lock.Lock()
diffOutput, ok := s.diffOutputs[info.digest]
s.lock.Unlock()
if ok {
layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil)
if err != nil {
return err
}
// FIXME: what to do with the uncompressed digest?
diffOutput.UncompressedDigest = info.digest
if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, nil); err != nil {
_ = s.imageRef.transport.store.Delete(layer.ID)
return err
}
s.indexToStorageID[index] = &layer.ID
return nil
}
s.lock.Lock()
al, ok := s.blobAdditionalLayer[info.digest]
s.lock.Unlock()
if ok {
layer, err := al.PutAs(id, lastLayer, nil)
if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
return fmt.Errorf("failed to put layer from digest and labels: %w", err)
}
lastLayer = layer.ID
s.indexToStorageID[index] = &lastLayer
return nil
}
// Check if we previously cached a file with that blob's contents. If we didn't,
// then we need to read the desired contents from a layer.
s.lock.Lock()
filename, ok := s.filenames[info.digest]
s.lock.Unlock()
if !ok {
// Try to find the layer with contents matching that blobsum.
layer := ""
layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID)
if err2 == nil && len(layers) > 0 {
layer = layers[0].ID
} else {
layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(info.digest)
if err2 == nil && len(layers) > 0 {
layer = layers[0].ID
}
}
if layer == "" {
return fmt.Errorf("locating layer for blob %q: %w", info.digest, err2)
}
// Read the layer's contents.
noCompression := archive.Uncompressed
diffOptions := &storage.DiffOptions{
Compression: &noCompression,
}
diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
if err2 != nil {
return fmt.Errorf("reading layer %q for blob %q: %w", layer, info.digest, err2)
}
// Copy the layer diff to a file. Diff() takes a lock that it holds
// until the ReadCloser that it returns is closed, and PutLayer() wants
// the same lock, so the diff can't just be directly streamed from one
// to the other.
filename = s.computeNextBlobCacheFile()
file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
if err != nil {
diff.Close()
return fmt.Errorf("creating temporary file %q: %w", filename, err)
}
// Copy the data to the file.
// TODO: This can take quite some time, and should ideally be cancellable using
// ctx.Done().
_, err = io.Copy(file, diff)
diff.Close()
file.Close()
if err != nil {
return fmt.Errorf("storing blob to file %q: %w", filename, err)
}
// Make sure that we can find this file later, should we need the layer's
// contents again.
s.lock.Lock()
s.filenames[info.digest] = filename
s.lock.Unlock()
}
// Read the cached blob and use it as a diff.
file, err := os.Open(filename)
if err != nil {
return fmt.Errorf("opening file %q: %w", filename, err)
}
defer file.Close()
// Build the new layer using the diff, regardless of where it came from.
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{
OriginalDigest: info.digest,
UncompressedDigest: diffID,
}, file)
if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
return fmt.Errorf("adding layer with blob %q: %w", info.digest, err)
}
s.indexToStorageID[index] = &layer.ID
return nil
}
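// The ID chaining used above, shown in isolation (a sketch, not package
// code): each layer's storage ID is the canonical SHA-256 digest of
// "<parent ID>+<diffID hex>", so identical stacks of diffs map to
// identical layer IDs no matter which process commits them:
//
//	func chainedLayerIDs(diffIDs []digest.Digest) []string {
//		ids := make([]string, 0, len(diffIDs))
//		lastLayer := ""
//		for _, diffID := range diffIDs {
//			id := diffID.Hex()
//			if lastLayer != "" {
//				id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
//			}
//			ids = append(ids, id)
//			lastLayer = id
//		}
//		return ids
//	}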
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
// original manifest list digest, if desired.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
if len(s.manifest) == 0 {
return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
}
toplevelManifest, _, err := unparsedToplevel.Manifest(ctx)
if err != nil {
return fmt.Errorf("retrieving top-level manifest: %w", err)
}
// If the name we're saving to includes a digest, then check that the
// manifests that we're about to save all either match the one from the
// unparsedToplevel, or match the digest in the name that we're using.
if s.imageRef.named != nil {
if digested, ok := s.imageRef.named.(reference.Digested); ok {
matches, err := manifest.MatchesDigest(s.manifest, digested.Digest())
if err != nil {
return err
}
if !matches {
matches, err = manifest.MatchesDigest(toplevelManifest, digested.Digest())
if err != nil {
return err
}
}
if !matches {
return fmt.Errorf("Manifest to be saved does not match expected digest %s", digested.Digest())
}
}
}
// Find the list of layer blobs.
man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
if err != nil {
return fmt.Errorf("parsing manifest: %w", err)
}
layerBlobs := man.LayerInfos()
// Extract, commit, or find the layers.
for i, blob := range layerBlobs {
if err := s.commitLayer(i, addedLayerInfo{
digest: blob.Digest,
emptyLayer: blob.EmptyLayer,
}, blob.Size); err != nil {
return err
}
}
var lastLayer string
if len(layerBlobs) > 0 { // Can happen when using caches
prev := s.indexToStorageID[len(layerBlobs)-1]
if prev == nil {
return fmt.Errorf("Internal error: storageImageDestination.Commit(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1)
}
lastLayer = *prev
}
// If one of those blobs was a configuration blob, then we can try to dig out the date when the image
// was originally created, in case we're just copying it. If not, no harm done.
options := &storage.ImageOptions{}
if inspect, err := man.Inspect(s.getConfigBlob); err == nil && inspect.Created != nil {
logrus.Debugf("setting image creation date to %s", inspect.Created)
options.CreationDate = *inspect.Created
}
// Set up to save the non-layer blobs as data items. Since we only share layers, they should all be in files, so
// we just need to screen out the ones that are actually layers to get the list of non-layers.
dataBlobs := set.New[digest.Digest]()
for blob := range s.filenames {
dataBlobs.Add(blob)
}
for _, layerBlob := range layerBlobs {
dataBlobs.Delete(layerBlob.Digest)
}
for _, blob := range dataBlobs.Values() {
v, err := os.ReadFile(s.filenames[blob])
if err != nil {
return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err)
}
options.BigData = append(options.BigData, storage.ImageBigDataOption{
Key: blob.String(),
Data: v,
Digest: digest.Canonical.FromBytes(v),
})
}
// Set up to save the unparsedToplevel's manifest if it differs from
// the per-platform one, which is saved below.
if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) {
manifestDigest, err := manifest.Digest(toplevelManifest)
if err != nil {
return fmt.Errorf("digesting top-level manifest: %w", err)
}
options.BigData = append(options.BigData, storage.ImageBigDataOption{
Key: manifestBigDataKey(manifestDigest),
Data: toplevelManifest,
Digest: manifestDigest,
})
}
// Set up to save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store.
// Record the manifest twice: using a digest-specific key to allow references to that specific digest instance,
// and using storage.ImageDigestBigDataKey for future users that don't specify any digest and for compatibility with older readers.
options.BigData = append(options.BigData, storage.ImageBigDataOption{
Key: manifestBigDataKey(s.manifestDigest),
Data: s.manifest,
Digest: s.manifestDigest,
})
options.BigData = append(options.BigData, storage.ImageBigDataOption{
Key: storage.ImageDigestBigDataKey,
Data: s.manifest,
Digest: s.manifestDigest,
})
// Set up to save the signatures, if we have any.
if len(s.signatures) > 0 {
options.BigData = append(options.BigData, storage.ImageBigDataOption{
Key: "signatures",
Data: s.signatures,
Digest: digest.Canonical.FromBytes(s.signatures),
})
}
for instanceDigest, signatures := range s.signatureses {
options.BigData = append(options.BigData, storage.ImageBigDataOption{
Key: signatureBigDataKey(instanceDigest),
Data: signatures,
Digest: digest.Canonical.FromBytes(signatures),
})
}
// Set up to save our metadata.
metadata, err := json.Marshal(s)
if err != nil {
return fmt.Errorf("encoding metadata for image: %w", err)
}
if len(metadata) != 0 {
options.Metadata = string(metadata)
}
// Create the image record, pointing to the most-recently added layer.
intendedID := s.imageRef.id
if intendedID == "" {
intendedID = s.computeID(man)
}
oldNames := []string{}
img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options)
if err != nil {
if !errors.Is(err, storage.ErrDuplicateID) {
logrus.Debugf("error creating image: %q", err)
return fmt.Errorf("creating image %q: %w", intendedID, err)
}
img, err = s.imageRef.transport.store.Image(intendedID)
if err != nil {
return fmt.Errorf("reading image %q: %w", intendedID, err)
}
if img.TopLayer != lastLayer {
logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID)
return fmt.Errorf("image with ID %q already exists, but uses a different top layer: %w", intendedID, storage.ErrDuplicateID)
}
logrus.Debugf("reusing image ID %q", img.ID)
oldNames = append(oldNames, img.Names...)
// set the data items and metadata on the already-present image
// FIXME: this _replaces_ any "signatures" blobs and their
// sizes (tracked in the metadata) which might have already
// been present with new values, when ideally we'd find a way
// to merge them since they all apply to the same image
for _, data := range options.BigData {
if err := s.imageRef.transport.store.SetImageBigData(img.ID, data.Key, data.Data, manifest.Digest); err != nil {
logrus.Debugf("error saving big data %q for image %q: %v", data.Key, img.ID, err)
return fmt.Errorf("saving big data %q for image %q: %w", data.Key, img.ID, err)
}
}
if options.Metadata != "" {
if err := s.imageRef.transport.store.SetMetadata(img.ID, options.Metadata); err != nil {
logrus.Debugf("error saving metadata for image %q: %v", img.ID, err)
return fmt.Errorf("saving metadata for image %q: %w", img.ID, err)
}
logrus.Debugf("saved image metadata %q", options.Metadata)
}
} else {
logrus.Debugf("created new image ID %q with metadata %q", img.ID, options.Metadata)
}
// Clean up the unfinished image on any error.
// (Is this the right thing to do if the image has existed before?)
commitSucceeded := false
defer func() {
if !commitSucceeded {
logrus.Errorf("Updating image %q (old names %v) failed, deleting it", img.ID, oldNames)
if _, err := s.imageRef.transport.store.DeleteImage(img.ID, true); err != nil {
logrus.Errorf("Error deleting incomplete image %q: %v", img.ID, err)
}
}
}()
// Add the reference's name on the image. We don't need to worry about avoiding duplicate
// values because AddNames() will deduplicate the list that we pass to it.
if name := s.imageRef.DockerReference(); name != nil {
if err := s.imageRef.transport.store.AddNames(img.ID, []string{name.String()}); err != nil {
return fmt.Errorf("adding names %v to image %q: %w", name, img.ID, err)
}
logrus.Debugf("added name %q to image %q", name, img.ID)
}
commitSucceeded = true
return nil
}
// PutManifest writes the manifest to the destination.
func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error {
digest, err := manifest.Digest(manifestBlob)
if err != nil {
return err
}
s.manifest = slices.Clone(manifestBlob)
s.manifestDigest = digest
return nil
}
// PutSignaturesWithFormat writes a set of signatures to the destination.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
// MUST be called after PutManifest (signatures may reference manifest contents).
func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
sizes := []int{}
sigblob := []byte{}
for _, sigWithFormat := range signatures {
sig, err := signature.Blob(sigWithFormat)
if err != nil {
return err
}
sizes = append(sizes, len(sig))
sigblob = append(sigblob, sig...)
}
if instanceDigest == nil {
s.signatures = sigblob
s.SignatureSizes = sizes
if len(s.manifest) > 0 {
manifestDigest := s.manifestDigest
instanceDigest = &manifestDigest
}
}
if instanceDigest != nil {
s.signatureses[*instanceDigest] = sigblob
s.SignaturesSizes[*instanceDigest] = sizes
}
return nil
}
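// Sketch of the inverse operation: GetSignaturesWithFormat() splits the
// concatenated blob stored above back apart using the recorded sizes. The
// helper name below is hypothetical, shown for illustration only:
//
//	func splitSignatureBlob(blob []byte, sizes []int) ([][]byte, error) {
//		sigs := make([][]byte, 0, len(sizes))
//		offset := 0
//		for _, size := range sizes {
//			if offset+size > len(blob) {
//				return nil, fmt.Errorf("truncated signature data: need %d bytes, have %d", offset+size, len(blob))
//			}
//			sigs = append(sigs, blob[offset:offset+size])
//			offset += size
//		}
//		return sigs, nil
//	}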


@ -0,0 +1,59 @@
//go:build !containers_image_storage_stub
// +build !containers_image_storage_stub
package storage
import (
"context"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
digest "github.com/opencontainers/go-digest"
)
var (
// ErrNoSuchImage is returned when we attempt to access an image which
// doesn't exist in the storage area.
ErrNoSuchImage = storage.ErrNotAnImage
)
type storageImageCloser struct {
types.ImageCloser
size int64
}
// manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions.
// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
// for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey
func manifestBigDataKey(digest digest.Digest) string {
return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String()
}
// signatureBigDataKey returns a key suitable for recording the signatures associated with the manifest with the specified digest using storage.Store.ImageBigData and related functions.
// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably.
func signatureBigDataKey(digest digest.Digest) string {
return "signature-" + digest.Encoded()
}
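// For a concrete sense of the two key layouts (digest value illustrative,
// assuming the "manifest" big-data name prefix):
//
//	d := digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
//	manifestBigDataKey(d)  // "manifest-sha256:e3b0c442…52b855"
//	signatureBigDataKey(d) // "signature-e3b0c442…52b855"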
// Size() returns the previously-computed size of the image, with no error.
func (s *storageImageCloser) Size() (int64, error) {
return s.size, nil
}
// newImage creates an image that also knows its size
func newImage(ctx context.Context, sys *types.SystemContext, s storageReference) (types.ImageCloser, error) {
src, err := newImageSource(sys, s)
if err != nil {
return nil, err
}
img, err := image.FromSource(ctx, sys, src)
if err != nil {
return nil, err
}
size, err := src.getSize()
if err != nil {
return nil, err
}
return &storageImageCloser{ImageCloser: img, size: size}, nil
}


@ -0,0 +1,316 @@
//go:build !containers_image_storage_stub
// +build !containers_image_storage_stub
package storage
import (
"context"
"fmt"
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte
// value hex-encoded into a 64-character string, and a reference to a Store
// where an image is, or would be, kept.
// Either "named" or "id" must be set.
type storageReference struct {
transport storageTransport
named reference.Named // may include a tag and/or a digest
id string
}
func newReference(transport storageTransport, named reference.Named, id string) (*storageReference, error) {
if named == nil && id == "" {
return nil, ErrInvalidReference
}
if named != nil && reference.IsNameOnly(named) {
return nil, fmt.Errorf("reference %s has neither a tag nor a digest: %w", named.String(), ErrInvalidReference)
}
if id != "" {
if err := validateImageID(id); err != nil {
return nil, fmt.Errorf("invalid ID value %q: %v: %w", id, err, ErrInvalidReference)
}
}
// We take a copy of the transport, which contains a pointer to the
// store that it used for resolving this reference, so that the
// transport that we'll return from Transport() won't be affected by
// further calls to the original transport's SetStore() method.
return &storageReference{
transport: transport,
named: named,
id: id,
}, nil
}
// imageMatchesRepo returns true iff image.Names contains an element with the same repo as ref
func imageMatchesRepo(image *storage.Image, ref reference.Named) bool {
repo := ref.Name()
return slices.ContainsFunc(image.Names, func(name string) bool {
if named, err := reference.ParseNormalizedNamed(name); err == nil && named.Name() == repo {
return true
}
return false
})
}
// multiArchImageMatchesSystemContext returns true if the passed-in image both contains a
// multi-arch manifest that matches the passed-in digest, and the image is the per-platform
// image instance that matches sys.
//
// See the comment in storageReference.ResolveImage explaining why
// this check is necessary.
func multiArchImageMatchesSystemContext(store storage.Store, img *storage.Image, manifestDigest digest.Digest, sys *types.SystemContext) bool {
// Load the manifest that matches the specified digest.
// We don't need to care about storage.ImageDigestBigDataKey because
// manifests lists are only stored into storage by c/image versions
// that know about manifestBigDataKey, and only using that key.
key := manifestBigDataKey(manifestDigest)
manifestBytes, err := store.ImageBigData(img.ID, key)
if err != nil {
return false
}
// The manifest is either a list, or not a list. If it's a list, find
// the digest of the instance that matches the current system, and try
// to load that manifest from the image record, and use it.
manifestType := manifest.GuessMIMEType(manifestBytes)
if !manifest.MIMETypeIsMultiImage(manifestType) {
// manifestDigest directly specifies a per-platform image, so we aren't
// choosing among different variants.
return false
}
list, err := manifest.ListFromBlob(manifestBytes, manifestType)
if err != nil {
return false
}
chosenInstance, err := list.ChooseInstance(sys)
if err != nil {
return false
}
key = manifestBigDataKey(chosenInstance)
_, err = store.ImageBigData(img.ID, key)
return err == nil // true if img.ID is based on chosenInstance.
}
// Resolve the reference's name to an image ID in the store, if there's already
// one present with the same name or ID, and return the image.
//
// Returns an error matching ErrNoSuchImage if an image matching ref was not found.
func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Image, error) {
var loadedImage *storage.Image
if s.id == "" && s.named != nil {
// Look for an image that has the expanded reference name as an explicit Name value.
image, err := s.transport.store.Image(s.named.String())
if image != nil && err == nil {
loadedImage = image
s.id = image.ID
}
}
if s.id == "" && s.named != nil {
if digested, ok := s.named.(reference.Digested); ok {
// Look for an image with the specified digest that has the same name,
// though possibly with a different tag or digest, as a Name value, so
// that the canonical reference can be implicitly resolved to the image.
//
// Typically there should be at most one such image, because the same
// manifest digest implies the same config, and we choose the storage ID
// based on the config (deduplicating images), except:
// - the user can explicitly specify an ID when creating the image.
// In this case we don't have a preference among the alternatives.
// - when pulling an image from a multi-platform manifest list, we also
// store the manifest list in the image; this allows referencing a
// per-platform image using the manifest list digest, but that also
// means that we can have multiple genuinely different images in the
// storage matching the same manifest list digest (if pulled using different
// SystemContext.{OS,Architecture,Variant}Choice to the same storage).
// In this case we prefer the image matching the current SystemContext.
images, err := s.transport.store.ImagesByDigest(digested.Digest())
if err == nil && len(images) > 0 {
for _, image := range images {
if imageMatchesRepo(image, s.named) {
if loadedImage == nil || multiArchImageMatchesSystemContext(s.transport.store, image, digested.Digest(), sys) {
loadedImage = image
s.id = image.ID
}
}
}
}
}
}
if s.id == "" {
logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport())
return nil, fmt.Errorf("reference %q does not resolve to an image ID: %w", s.StringWithinTransport(), ErrNoSuchImage)
}
if loadedImage == nil {
img, err := s.transport.store.Image(s.id)
if err != nil {
return nil, fmt.Errorf("reading image %q: %w", s.id, err)
}
loadedImage = img
}
if s.named != nil {
if !imageMatchesRepo(loadedImage, s.named) {
logrus.Errorf("no image matching reference %q found", s.StringWithinTransport())
return nil, ErrNoSuchImage
}
}
// Default to having the image digest that we hand back match the most recently
// added manifest...
if digest, ok := loadedImage.BigDataDigests[storage.ImageDigestBigDataKey]; ok {
loadedImage.Digest = digest
}
// ... unless the named reference says otherwise, and it matches one of the digests
// in the image. For those cases, set the Digest field to that value, for the
// sake of older consumers that don't know there's a whole list in there now.
if s.named != nil {
if digested, ok := s.named.(reference.Digested); ok {
digest := digested.Digest()
if slices.Contains(loadedImage.Digests, digest) {
loadedImage.Digest = digest
}
}
}
return loadedImage, nil
}
// Return a Transport object that defaults to using the same store that we used
// to build this reference object.
func (s storageReference) Transport() types.ImageTransport {
return &storageTransport{
store: s.transport.store,
defaultUIDMap: s.transport.defaultUIDMap,
defaultGIDMap: s.transport.defaultGIDMap,
}
}
// Return a name with a tag or digest, if we have either, else return it bare.
func (s storageReference) DockerReference() reference.Named {
return s.named
}
// Return a name with a tag, prefixed with the graph root and driver name, to
// disambiguate between images which may be present in multiple stores and
// share only their names.
func (s storageReference) StringWithinTransport() string {
optionsList := ""
options := s.transport.store.GraphOptions()
if len(options) > 0 {
optionsList = ":" + strings.Join(options, ",")
}
res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]"
if s.named != nil {
res += s.named.String()
}
if s.id != "" {
res += "@" + s.id
}
return res
}
func (s storageReference) PolicyConfigurationIdentity() string {
res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]"
if s.named != nil {
res += s.named.String()
}
if s.id != "" {
res += "@" + s.id
}
return res
}
// Also accept policy that's tied to the combination of the graph root and
// driver name, to apply to all images stored in the Store, and to just the
// graph root, in case we're using multiple drivers in the same directory for
// some reason.
func (s storageReference) PolicyConfigurationNamespaces() []string {
storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]"
driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]"
namespaces := []string{}
if s.named != nil {
if s.id != "" {
// The reference without the ID is also a valid namespace.
namespaces = append(namespaces, storeSpec+s.named.String())
}
tagged, isTagged := s.named.(reference.Tagged)
_, isDigested := s.named.(reference.Digested)
if isTagged && isDigested { // s.named is "name:tag@digest"; add a "name:tag" parent namespace.
namespaces = append(namespaces, storeSpec+s.named.Name()+":"+tagged.Tag())
}
components := strings.Split(s.named.Name(), "/")
for len(components) > 0 {
namespaces = append(namespaces, storeSpec+strings.Join(components, "/"))
components = components[:len(components)-1]
}
}
namespaces = append(namespaces, storeSpec)
namespaces = append(namespaces, driverlessStoreSpec)
return namespaces
}
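// Worked example (illustrative): for a reference parsed from
// "[overlay@/var/lib/containers/storage]docker.io/library/busybox:latest"
// with no image ID, the loop above yields, from most to least specific:
//
//	[overlay@/var/lib/containers/storage]docker.io/library/busybox
//	[overlay@/var/lib/containers/storage]docker.io/library
//	[overlay@/var/lib/containers/storage]docker.io
//	[overlay@/var/lib/containers/storage]
//	[/var/lib/containers/storage]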
// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
// The caller must call .Close() on the returned ImageCloser.
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (s storageReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
return newImage(ctx, sys, s)
}
func (s storageReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
img, err := s.resolveImage(sys)
if err != nil {
return err
}
layers, err := s.transport.store.DeleteImage(img.ID, true)
if err == nil {
logrus.Debugf("deleted image %q", img.ID)
for _, layer := range layers {
logrus.Debugf("deleted layer %q", layer)
}
}
return err
}
func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
return newImageSource(sys, s)
}
func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
return newImageDestination(sys, s)
}
// ResolveReference finds the underlying storage image for a storage.Transport reference.
// It returns that image, and an updated reference which can be used to refer back to the _same_
// image again.
//
// This matters if the input reference contains a tagged name; the destination of the tag can
// move in local storage. The updated reference returned by this function contains the resolved
// image ID, so later uses of that updated reference will either continue to refer to the same
// image, or fail.
//
// Note that it _is_ possible for the later uses to fail, either because the image was removed
// completely, or because the name used in the reference was untagged (even if the underlying image
// ID still exists in local storage).
//
// Returns an error matching ErrNoSuchImage if an image matching ref was not found.
func ResolveReference(ref types.ImageReference) (types.ImageReference, *storage.Image, error) {
sref, ok := ref.(*storageReference)
if !ok {
return nil, nil, fmt.Errorf("trying to resolve a non-%s: reference %q", Transport.Name(),
transports.ImageName(ref))
}
clone := *sref // A shallow copy we can update
img, err := clone.resolveImage(nil)
if err != nil {
return nil, nil, err
}
return clone, img, nil
}
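// Typical use, as a sketch (error handling elided, image name illustrative):
//
//	ref, err := Transport.ParseReference("docker.io/library/busybox:latest")
//	if err != nil {
//		return err
//	}
//	resolved, img, err := ResolveReference(ref)
//	if err != nil {
//		return err
//	}
//	// resolved now pins img.ID: later uses either find the very same
//	// image or fail, even if the tag has been moved or removed.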


@ -0,0 +1,403 @@
//go:build !containers_image_storage_stub
// +build !containers_image_storage_stub
package storage
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"sync"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/internal/imagesource/impl"
"github.com/containers/image/v5/internal/imagesource/stubs"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/ioutils"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
type storageImageSource struct {
impl.Compat
impl.PropertyMethodsInitialize
stubs.NoGetBlobAtInitialize
imageRef storageReference
image *storage.Image
systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files
layerPosition map[digest.Digest]int // Where we are in reading a blob's layers
cachedManifest []byte // A cached copy of the manifest, if already known, or nil
getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions
SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // List of sizes of each signature slice
}
// newImageSource sets up an image for reading.
func newImageSource(sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) {
// First, locate the image.
img, err := imageRef.resolveImage(sys)
if err != nil {
return nil, err
}
// Build the reader object.
image := &storageImageSource{
PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
HasThreadSafeGetBlob: true,
}),
NoGetBlobAtInitialize: stubs.NoGetBlobAt(imageRef),
imageRef: imageRef,
systemContext: sys,
image: img,
layerPosition: make(map[digest.Digest]int),
SignatureSizes: []int{},
SignaturesSizes: make(map[digest.Digest][]int),
}
image.Compat = impl.AddCompat(image)
if img.Metadata != "" {
if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
return nil, fmt.Errorf("decoding metadata for source image: %w", err)
}
}
return image, nil
}
// Reference returns the image reference that we used to find this image.
func (s *storageImageSource) Reference() types.ImageReference {
return s.imageRef
}
// Close cleans up any resources we tied up while reading the image.
func (s *storageImageSource) Close() error {
return nil
}
// GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
// We need a valid digest value.
digest := info.Digest
err = digest.Validate()
if err != nil {
return nil, 0, err
}
if digest == image.GzippedEmptyLayerDigest {
return io.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
}
// Check if the blob corresponds to a diff that was used to initialize any layers. Our
// callers should try to retrieve layers using their uncompressed digests, so no need to
// check if they're using one of the compressed digests, which we can't reproduce anyway.
layers, _ := s.imageRef.transport.store.LayersByUncompressedDigest(digest)
// If it's not a layer, then it must be a data item.
if len(layers) == 0 {
b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, digest.String())
if err != nil {
return nil, 0, err
}
r := bytes.NewReader(b)
logrus.Debugf("exporting opaque data as blob %q", digest.String())
return io.NopCloser(r), int64(r.Len()), nil
}
// NOTE: the blob is first written to a temporary file and subsequently
// closed. The intention is to keep the time we own the storage lock
// as short as possible to allow other processes to access the storage.
rc, n, _, err = s.getBlobAndLayerID(digest, layers)
if err != nil {
return nil, 0, err
}
defer rc.Close()
tmpFile, err := tmpdir.CreateBigFileTemp(s.systemContext, "")
if err != nil {
return nil, 0, err
}
success := false
tmpFileRemovePending := true
defer func() {
if !success {
tmpFile.Close()
if tmpFileRemovePending {
os.Remove(tmpFile.Name())
}
}
}()
// On Unix and modern Windows (2022 at least) we can eagerly unlink the file to ensure it's automatically
// cleaned up on process termination (or if the caller forgets to invoke Close())
// On older versions of Windows we will have to fallback to relying on the caller to invoke Close()
if err := os.Remove(tmpFile.Name()); err != nil {
tmpFileRemovePending = false
}
if _, err := io.Copy(tmpFile, rc); err != nil {
return nil, 0, err
}
if _, err := tmpFile.Seek(0, io.SeekStart); err != nil {
return nil, 0, err
}
success = true
if tmpFileRemovePending {
return ioutils.NewReadCloserWrapper(tmpFile, func() error {
tmpFile.Close()
return os.Remove(tmpFile.Name())
}), n, nil
}
return tmpFile, n, nil
}
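// The eager-unlink idiom above, reduced to a sketch (Unix semantics: an
// unlinked file stays readable through any open descriptor until the last
// one is closed, so the kernel cleans up even on abnormal exit):
//
//	f, err := os.CreateTemp("", "blob-")
//	if err != nil {
//		return err
//	}
//	if err := os.Remove(f.Name()); err == nil {
//		// Data is reclaimed automatically when f is closed (or the
//		// process dies); no deferred cleanup needed.
//	} else {
//		// Fall back to explicit removal in the reader's Close().
//	}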
// getBlobAndLayerID reads the data blob or filesystem layer which matches the digest and size, if given.
func (s *storageImageSource) getBlobAndLayerID(digest digest.Digest, layers []storage.Layer) (rc io.ReadCloser, n int64, layerID string, err error) {
var layer storage.Layer
var diffOptions *storage.DiffOptions
// Step through the list of matching layers. Tests may want to verify that if we have multiple layers
// which claim to have the same contents, that we actually do have multiple layers, otherwise we could
// just go ahead and use the first one every time.
s.getBlobMutex.Lock()
i := s.layerPosition[digest]
s.layerPosition[digest] = i + 1
s.getBlobMutex.Unlock()
if len(layers) > 0 {
layer = layers[i%len(layers)]
}
// Force the storage layer to not try to match any compression that was used when the layer was first
// handed to it.
noCompression := archive.Uncompressed
diffOptions = &storage.DiffOptions{
Compression: &noCompression,
}
if layer.UncompressedSize < 0 {
n = -1
} else {
n = layer.UncompressedSize
}
logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, digest)
rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
if err != nil {
return nil, -1, "", err
}
return rc, n, layer.ID, err
}
// GetManifest() reads the image's manifest.
func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, mimeType string, err error) {
if instanceDigest != nil {
key := manifestBigDataKey(*instanceDigest)
blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
if err != nil {
return nil, "", fmt.Errorf("reading manifest for image instance %q: %w", *instanceDigest, err)
}
return blob, manifest.GuessMIMEType(blob), err
}
if len(s.cachedManifest) == 0 {
// The manifest is stored as a big data item.
// Prefer the manifest corresponding to the user-specified digest, if available.
if s.imageRef.named != nil {
if digested, ok := s.imageRef.named.(reference.Digested); ok {
key := manifestBigDataKey(digested.Digest())
blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key
return nil, "", err
}
if err == nil {
s.cachedManifest = blob
}
}
}
// If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest.
// Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest().
if len(s.cachedManifest) == 0 {
cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey)
if err != nil {
return nil, "", err
}
s.cachedManifest = cachedBlob
}
}
return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err
}
// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
// the image, after they've been decompressed.
func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
manifestBlob, manifestType, err := s.GetManifest(ctx, instanceDigest)
if err != nil {
return nil, fmt.Errorf("reading image manifest for %q: %w", s.image.ID, err)
}
if manifest.MIMETypeIsMultiImage(manifestType) {
return nil, errors.New("can't copy layers for a manifest list (shouldn't be attempted)")
}
man, err := manifest.FromBlob(manifestBlob, manifestType)
if err != nil {
return nil, fmt.Errorf("parsing image manifest for %q: %w", s.image.ID, err)
}
uncompressedLayerType := ""
switch manifestType {
case imgspecv1.MediaTypeImageManifest:
uncompressedLayerType = imgspecv1.MediaTypeImageLayer
case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType:
uncompressedLayerType = manifest.DockerV2SchemaLayerMediaTypeUncompressed
}
physicalBlobInfos := []types.BlobInfo{}
layerID := s.image.TopLayer
for layerID != "" {
layer, err := s.imageRef.transport.store.Layer(layerID)
if err != nil {
return nil, fmt.Errorf("reading layer %q in image %q: %w", layerID, s.image.ID, err)
}
if layer.UncompressedDigest == "" {
return nil, fmt.Errorf("uncompressed digest for layer %q is unknown", layerID)
}
if layer.UncompressedSize < 0 {
return nil, fmt.Errorf("uncompressed size for layer %q is unknown", layerID)
}
blobInfo := types.BlobInfo{
Digest: layer.UncompressedDigest,
Size: layer.UncompressedSize,
MediaType: uncompressedLayerType,
}
physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...)
layerID = layer.Parent
}
res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos)
if err != nil {
return nil, fmt.Errorf("creating LayerInfosForCopy of image %q: %w", s.image.ID, err)
}
return res, nil
}
// buildLayerInfosForCopy builds a LayerInfosForCopy return value based on manifestInfos from the original manifest,
// but using layer data which we can actually produce — physicalInfos for non-empty layers,
// and image.GzippedEmptyLayer for empty ones.
// (This is split basically only to allow easily unit-testing the part that has no dependencies on the external environment.)
func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []types.BlobInfo) ([]types.BlobInfo, error) {
nextPhysical := 0
res := make([]types.BlobInfo, len(manifestInfos))
for i, mi := range manifestInfos {
if mi.EmptyLayer {
res[i] = types.BlobInfo{
Digest: image.GzippedEmptyLayerDigest,
Size: int64(len(image.GzippedEmptyLayer)),
MediaType: mi.MediaType,
}
} else {
if nextPhysical >= len(physicalInfos) {
return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos))
}
res[i] = physicalInfos[nextPhysical] // FIXME? Should we preserve more data in manifestInfos? Notably the current approach correctly removes zstd:chunked metadata annotations.
nextPhysical++
}
}
if nextPhysical != len(physicalInfos) {
return nil, fmt.Errorf("used only %d out of %d physical layers", nextPhysical, len(physicalInfos))
}
return res, nil
}
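// Because buildLayerInfosForCopy() has no external dependencies, a usage
// sketch is straightforward (digest and size values illustrative):
//
//	mt := imgspecv1.MediaTypeImageLayer
//	manifestInfos := []manifest.LayerInfo{
//		{BlobInfo: types.BlobInfo{MediaType: mt}, EmptyLayer: true},
//		{BlobInfo: types.BlobInfo{MediaType: mt}},
//	}
//	physical := []types.BlobInfo{{Digest: uncompressedDigest, Size: 123}}
//	res, _ := buildLayerInfosForCopy(manifestInfos, physical)
//	// res[0] is the fixed GzippedEmptyLayer entry; res[1] == physical[0].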
// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
// (e.g. if the source never returns manifest lists).
func (s *storageImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
var offset int
signatureBlobs := []byte{}
signatureSizes := s.SignatureSizes
key := "signatures"
instance := "default instance"
if instanceDigest != nil {
signatureSizes = s.SignaturesSizes[*instanceDigest]
key = signatureBigDataKey(*instanceDigest)
instance = instanceDigest.Encoded()
}
if len(signatureSizes) > 0 {
data, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
if err != nil {
return nil, fmt.Errorf("looking up signatures data for image %q (%s): %w", s.image.ID, instance, err)
}
signatureBlobs = data
}
res := []signature.Signature{}
for _, length := range signatureSizes {
if offset+length > len(signatureBlobs) {
return nil, fmt.Errorf("looking up signatures data for image %q (%s): expected at least %d bytes, only found %d", s.image.ID, instance, len(signatureBlobs), offset+length)
}
sig, err := signature.FromBlob(signatureBlobs[offset : offset+length])
if err != nil {
return nil, fmt.Errorf("parsing signature at (%d, %d): %w", offset, length, err)
}
res = append(res, sig)
offset += length
}
if offset != len(signatureBlobs) {
return nil, fmt.Errorf("signatures data (%s) contained %d extra bytes", instance, len(signatureBlobs)-offset)
}
return res, nil
}
// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the
// signatures, and the uncompressed sizes of all of the image's layers.
func (s *storageImageSource) getSize() (int64, error) {
var sum int64
// Size up the data blobs.
dataNames, err := s.imageRef.transport.store.ListImageBigData(s.image.ID)
if err != nil {
return -1, fmt.Errorf("reading image %q: %w", s.image.ID, err)
}
for _, dataName := range dataNames {
bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.image.ID, dataName)
if err != nil {
return -1, fmt.Errorf("reading data blob size %q for %q: %w", dataName, s.image.ID, err)
}
sum += bigSize
}
// Add the signature sizes.
for _, sigSize := range s.SignatureSizes {
sum += int64(sigSize)
}
// Walk the layer list.
layerID := s.image.TopLayer
for layerID != "" {
layer, err := s.imageRef.transport.store.Layer(layerID)
if err != nil {
return -1, err
}
if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 {
return -1, fmt.Errorf("size for layer %q is unknown, failing getSize()", layerID)
}
sum += layer.UncompressedSize
if layer.Parent == "" {
break
}
layerID = layer.Parent
}
return sum, nil
}
// Size() adds up the sizes of the image's data blobs (which includes the configuration blob), the
// signatures, and the uncompressed sizes of all of the image's layers.
func (s *storageImageSource) Size() (int64, error) {
return s.getSize()
}


@ -0,0 +1,416 @@
//go:build !containers_image_storage_stub
// +build !containers_image_storage_stub
package storage
import (
"errors"
"fmt"
"path/filepath"
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
const (
minimumTruncatedIDLength = 3
)
func init() {
transports.Register(Transport)
}
var (
// Transport is an ImageTransport that uses either a default
// storage.Store or one that it's explicitly told to use.
Transport StoreTransport = &storageTransport{}
// ErrInvalidReference is returned when ParseReference() is passed an
// empty reference.
ErrInvalidReference = errors.New("invalid reference")
// ErrPathNotAbsolute is returned when a graph root is not an absolute
// path name.
ErrPathNotAbsolute = errors.New("path name is not absolute")
)
// StoreTransport is an ImageTransport that uses a storage.Store to parse
// references, either its own default or one that it's told to use.
type StoreTransport interface {
types.ImageTransport
// SetStore sets the default store for this transport.
SetStore(storage.Store)
// GetStoreIfSet returns the default store for this transport, or nil if not set/determined yet.
GetStoreIfSet() storage.Store
// GetImage retrieves the image from the transport's store that's named
// by the reference.
// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
// this ignores that ID; and repeated calls of GetStoreImage with the same named reference
// can return different images, with no way for the caller to "freeze" the storage.Image identity
// without discarding the name entirely.
//
// Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns
// c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown.
GetImage(types.ImageReference) (*storage.Image, error)
// GetStoreImage retrieves the image from a specified store that's named
// by the reference.
//
// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
// this ignores that ID; and repeated calls of GetStoreImage with the same named reference
// can return different images, with no way for the caller to "freeze" the storage.Image identity
// without discarding the name entirely.
//
// Also, a StoreTransport reference already contains a store, so providing another one is redundant.
//
// Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns
// c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown.
GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error)
// ParseStoreReference parses a reference, overriding any store
// specification that it may contain.
ParseStoreReference(store storage.Store, reference string) (*storageReference, error)
// NewStoreReference creates a reference for (named@ID) in store.
// either of name or ID can be unset; named must not be a reference.IsNameOnly.
NewStoreReference(store storage.Store, named reference.Named, id string) (*storageReference, error)
// SetDefaultUIDMap sets the default UID map to use when opening stores.
SetDefaultUIDMap(idmap []idtools.IDMap)
// SetDefaultGIDMap sets the default GID map to use when opening stores.
SetDefaultGIDMap(idmap []idtools.IDMap)
// DefaultUIDMap returns the default UID map used when opening stores.
DefaultUIDMap() []idtools.IDMap
// DefaultGIDMap returns the default GID map used when opening stores.
DefaultGIDMap() []idtools.IDMap
}
type storageTransport struct {
store storage.Store
defaultUIDMap []idtools.IDMap
defaultGIDMap []idtools.IDMap
}
func (s *storageTransport) Name() string {
// Still haven't really settled on a name.
return "containers-storage"
}
// SetStore sets the Store object which the Transport will use for parsing
// references when information about a Store is not directly specified as part
// of the reference. If one is not set, the library will attempt to initialize
// one with default settings when a reference needs to be parsed. Calling
// SetStore does not affect previously parsed references.
func (s *storageTransport) SetStore(store storage.Store) {
s.store = store
}
// GetStoreIfSet returns the default store for this transport, as set using SetStore() or initialized by default, or nil if not set/determined yet.
func (s *storageTransport) GetStoreIfSet() storage.Store {
return s.store
}
// SetDefaultUIDMap sets the default UID map to use when opening stores.
func (s *storageTransport) SetDefaultUIDMap(idmap []idtools.IDMap) {
s.defaultUIDMap = idmap
}
// SetDefaultGIDMap sets the default GID map to use when opening stores.
func (s *storageTransport) SetDefaultGIDMap(idmap []idtools.IDMap) {
s.defaultGIDMap = idmap
}
// DefaultUIDMap returns the default UID map used when opening stores.
func (s *storageTransport) DefaultUIDMap() []idtools.IDMap {
return s.defaultUIDMap
}
// DefaultGIDMap returns the default GID map used when opening stores.
func (s *storageTransport) DefaultGIDMap() []idtools.IDMap {
return s.defaultGIDMap
}
// ParseStoreReference takes a name or an ID, tries to figure out which it is
// relative to the given store, and returns it in a reference object.
func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) {
if ref == "" {
return nil, fmt.Errorf("%q is an empty reference: %w", ref, ErrInvalidReference)
}
if ref[0] == '[' {
// Ignore the store specifier.
closeIndex := strings.IndexRune(ref, ']')
if closeIndex < 1 {
return nil, fmt.Errorf("store specifier in %q did not end: %w", ref, ErrInvalidReference)
}
ref = ref[closeIndex+1:]
}
// The reference may end with an image ID. Image IDs and digests use the same "@" separator;
// here we only peel away an image ID, and leave digests alone.
split := strings.LastIndex(ref, "@")
id := ""
if split != -1 {
possibleID := ref[split+1:]
if possibleID == "" {
return nil, fmt.Errorf("empty trailing digest or ID in %q: %w", ref, ErrInvalidReference)
}
// If it looks like a digest, leave it alone for now.
if _, err := digest.Parse(possibleID); err != nil {
// Otherwise…
if err := validateImageID(possibleID); err == nil {
id = possibleID // … it is a full ID
} else if img, err := store.Image(possibleID); err == nil && img != nil && len(possibleID) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, possibleID) {
// … it is a truncated version of the ID of an image that's present in local storage,
// so we might as well use the expanded value.
id = img.ID
} else {
return nil, fmt.Errorf("%q does not look like an image ID or digest: %w", possibleID, ErrInvalidReference)
}
// We have recognized an image ID; peel it off.
ref = ref[:split]
}
}
// If we only have one @-delimited portion, then _maybe_ it's a truncated image ID. Only check on that if it's
// at least of what we guess is a reasonable minimum length, because we don't want a really short value
// like "a" matching an image by ID prefix when the input was actually meant to specify an image name.
if id == "" && len(ref) >= minimumTruncatedIDLength && !strings.ContainsAny(ref, "@:") {
if img, err := store.Image(ref); err == nil && img != nil && strings.HasPrefix(img.ID, ref) {
// It's a truncated version of the ID of an image that's present in local storage;
// we need to expand it.
id = img.ID
ref = ""
}
}
var named reference.Named
// Unless we have an un-named "ID" or "@ID" reference (where ID might only have been a prefix), which has been
// completely parsed above, the initial portion should be a name, possibly with a tag and/or a digest.
if ref != "" {
var err error
named, err = reference.ParseNormalizedNamed(ref)
if err != nil {
return nil, fmt.Errorf("parsing named reference %q: %w", ref, err)
}
named = reference.TagNameOnly(named)
}
result, err := s.NewStoreReference(store, named, id)
if err != nil {
return nil, err
}
logrus.Debugf("parsed reference into %q", result.StringWithinTransport())
return result, nil
}
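// A few inputs the parser above accepts (a sketch; IDs and digests are
// illustrative placeholders):
//
//	"busybox:latest"          // named docker.io/library/busybox:latest, no ID
//	"@<64-hex-id>"            // ID only, no name
//	"busybox@sha256:<digest>" // the digest stays part of the name
//	"[overlay@/var/lib/containers/storage]busybox:latest"
//	                          // the store specifier is stripped before name parsing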
// NewStoreReference creates a reference for (named@ID) in store.
// either of name or ID can be unset; named must not be a reference.IsNameOnly.
func (s *storageTransport) NewStoreReference(store storage.Store, named reference.Named, id string) (*storageReference, error) {
return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, named, id)
}
func (s *storageTransport) GetStore() (storage.Store, error) {
// Return the transport's previously-set store. If we don't have one
// of those, initialize one now.
if s.store == nil {
options, err := storage.DefaultStoreOptionsAutoDetectUID()
if err != nil {
return nil, err
}
options.UIDMap = s.defaultUIDMap
options.GIDMap = s.defaultGIDMap
store, err := storage.GetStore(options)
if err != nil {
return nil, err
}
s.store = store
}
return s.store, nil
}
// ParseReference takes a name and a tag or digest and/or ID
// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"/"_name_:_tag_@_digest_"/"_name_:_tag_@_digest_@_id_"),
// possibly prefixed with a store specifier in the form "[_graphroot_]" or
// "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or
// "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]",
// tries to figure out which it is, and returns it in a reference object.
// If _id_ is the ID of an image that's present in local storage, it can be truncated, and
// even be specified as if it were a _name_ value.
func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) {
var store storage.Store
// Check if there's a store location prefix. If there is, then it
// needs to match a store that was previously initialized using
// storage.GetStore(), or be enough to let the storage library fill out
// the rest using knowledge that it has from elsewhere.
if len(reference) > 0 && reference[0] == '[' {
closeIndex := strings.IndexRune(reference, ']')
if closeIndex < 1 {
return nil, ErrInvalidReference
}
storeSpec := reference[1:closeIndex]
reference = reference[closeIndex+1:]
// Peel off a "driver@" from the start.
driverInfo := ""
driverPart1, driverPart2, gotDriver := strings.Cut(storeSpec, "@")
if !gotDriver {
storeSpec = driverPart1
if storeSpec == "" {
return nil, ErrInvalidReference
}
} else {
driverInfo = driverPart1
if driverInfo == "" {
return nil, ErrInvalidReference
}
storeSpec = driverPart2
if storeSpec == "" {
return nil, ErrInvalidReference
}
}
// Peel off a ":options" from the end.
var options []string
storeSpec, optionsPart, gotOptions := strings.Cut(storeSpec, ":")
if gotOptions {
options = strings.Split(optionsPart, ",")
}
// Peel off a "+runroot" from the new end.
storeSpec, runRootInfo, _ := strings.Cut(storeSpec, "+") // runRootInfo is "" if there is no "+"
// The rest is our graph root.
rootInfo := storeSpec
// Check that any paths are absolute paths.
if rootInfo != "" && !filepath.IsAbs(rootInfo) {
return nil, ErrPathNotAbsolute
}
if runRootInfo != "" && !filepath.IsAbs(runRootInfo) {
return nil, ErrPathNotAbsolute
}
store2, err := storage.GetStore(storage.StoreOptions{
GraphDriverName: driverInfo,
GraphRoot: rootInfo,
RunRoot: runRootInfo,
GraphDriverOptions: options,
UIDMap: s.defaultUIDMap,
GIDMap: s.defaultGIDMap,
})
if err != nil {
return nil, err
}
store = store2
} else {
// We didn't have a store spec, so use the default.
store2, err := s.GetStore()
if err != nil {
return nil, err
}
store = store2
}
return s.ParseStoreReference(store, reference)
}
// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
// this ignores that ID; and repeated calls of GetStoreImage with the same named reference
// can return different images, with no way for the caller to "freeze" the storage.Image identity
// without discarding the name entirely.
//
// Also, a StoreTransport reference already contains a store, so providing another one is redundant.
//
// Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns
// c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown.
func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) {
dref := ref.DockerReference()
if dref != nil {
if img, err := store.Image(dref.String()); err == nil {
return img, nil
}
}
if sref, ok := ref.(*storageReference); ok {
tmpRef := *sref
if img, err := tmpRef.resolveImage(nil); err == nil {
return img, nil
}
}
return nil, storage.ErrImageUnknown
}
// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
// this ignores that ID; and repeated calls of GetStoreImage with the same named reference
// can return different images, with no way for the caller to "freeze" the storage.Image identity
// without discarding the name entirely.
//
// Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns
// c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown.
func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) {
store, err := s.GetStore()
if err != nil {
return nil, err
}
return s.GetStoreImage(store, ref)
}
func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
// Check that there's a store location prefix. Values we're passed are
// expected to come from PolicyConfigurationIdentity or
// PolicyConfigurationNamespaces, so if there's no store location,
// something's wrong.
if scope[0] != '[' {
return ErrInvalidReference
}
// Parse the store location prefix.
closeIndex := strings.IndexRune(scope, ']')
if closeIndex < 1 {
return ErrInvalidReference
}
storeSpec := scope[1:closeIndex]
scope = scope[closeIndex+1:]
storeInfo := strings.SplitN(storeSpec, "@", 2)
if len(storeInfo) == 1 && storeInfo[0] != "" {
// One component: the graph root.
if !filepath.IsAbs(storeInfo[0]) {
return ErrPathNotAbsolute
}
} else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" {
// Two components: the driver type and the graph root.
if !filepath.IsAbs(storeInfo[1]) {
return ErrPathNotAbsolute
}
} else {
// Anything else: scope specified in a form we don't
// recognize.
return ErrInvalidReference
}
// That might be all of it, and that's okay.
if scope == "" {
return nil
}
fields := strings.SplitN(scope, "@", 3)
switch len(fields) {
case 1: // name only
case 2: // name:tag@ID or name[:tag]@digest
if idErr := validateImageID(fields[1]); idErr != nil {
if _, digestErr := digest.Parse(fields[1]); digestErr != nil {
return fmt.Errorf("%v is neither a valid digest(%s) nor a valid ID(%s)", fields[1], digestErr.Error(), idErr.Error())
}
}
case 3: // name[:tag]@digest@ID
if _, err := digest.Parse(fields[1]); err != nil {
return err
}
if err := validateImageID(fields[2]); err != nil {
return err
}
default: // Coverage: This should never happen
return errors.New("Internal error: unexpected number of fields from strings.SplitN")
}
// As for fields[0], if it is non-empty at all:
// FIXME? We could be verifying the various character set and length restrictions
// from docker/distribution/reference.regexp.go, but other than that there
// are few semantically invalid strings.
return nil
}
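// Illustrative scope strings this validation accepts (paths and names are
// hypothetical; <id> stands for a full 64-hex image ID, <digest> for a value
// such as "sha256:" followed by 64 hex digits):
//
//	[/var/lib/containers/storage]                              graph root only
//	[overlay@/var/lib/containers/storage]                      driver and graph root
//	[overlay@/var/lib/containers/storage]busybox:latest        plus a name
//	[overlay@/var/lib/containers/storage]busybox:latest@<id>
//	[overlay@/var/lib/containers/storage]busybox@<digest>@<id>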
// validateImageID returns nil if id is a valid (full) image ID, or an error
func validateImageID(id string) error {
_, err := digest.Parse("sha256:" + id)
return err
}

61
vendor/github.com/containers/image/v5/tarball/doc.go generated vendored Normal file
View file

@ -0,0 +1,61 @@
// Package tarball provides a way to generate images using one or more layer
// tarballs and an optional template configuration.
//
// An example:
//
// package main
//
// import (
// "context"
//
// cp "github.com/containers/image/v5/copy"
// "github.com/containers/image/v5/signature"
// "github.com/containers/image/v5/tarball"
// "github.com/containers/image/v5/transports/alltransports"
// "github.com/containers/image/v5/types"
// imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
// )
//
// func imageFromTarball() {
// src, err := alltransports.ParseImageName("tarball:/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz")
// // - or -
// // src, err := tarball.Transport.ParseReference("/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz")
// if err != nil {
// panic(err)
// }
// updater, ok := src.(tarball.ConfigUpdater)
// if !ok {
// panic("unexpected: a tarball reference should implement tarball.ConfigUpdater")
// }
// config := imgspecv1.Image{
// Config: imgspecv1.ImageConfig{
// Cmd: []string{"/bin/bash"},
// },
// }
// annotations := make(map[string]string)
// annotations[imgspecv1.AnnotationDescription] = "test image built from a mock root cache"
// err = updater.ConfigUpdate(config, annotations)
// if err != nil {
// panic(err)
// }
// dest, err := alltransports.ParseImageName("docker-daemon:mock:latest")
// if err != nil {
// panic(err)
// }
//
// policy, err := signature.DefaultPolicy(nil)
// if err != nil {
// panic(err)
// }
//
// pc, err := signature.NewPolicyContext(policy)
// if err != nil {
// panic(err)
// }
// defer pc.Destroy()
// _, err = cp.Image(context.TODO(), pc, dest, src, nil)
// if err != nil {
// panic(err)
// }
// }
package tarball

View file

@ -0,0 +1,82 @@
package tarball
import (
"context"
"fmt"
"os"
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/maps"
)
// ConfigUpdater is an interface that ImageReferences for "tarball" images also
// implement. It can be used to set values for a configuration, and to set
// image annotations which will be present in the images returned by the
// reference's NewImage() or NewImageSource() methods.
type ConfigUpdater interface {
ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error
}
type tarballReference struct {
config imgspecv1.Image
annotations map[string]string
filenames []string
stdin []byte
}
// ConfigUpdate updates the image's default configuration and adds annotations
// which will be visible in source images created using this reference.
func (r *tarballReference) ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error {
r.config = config
if r.annotations == nil {
r.annotations = make(map[string]string)
}
maps.Copy(r.annotations, annotations)
return nil
}
func (r *tarballReference) Transport() types.ImageTransport {
return Transport
}
func (r *tarballReference) StringWithinTransport() string {
return strings.Join(r.filenames, ":")
}
func (r *tarballReference) DockerReference() reference.Named {
return nil
}
func (r *tarballReference) PolicyConfigurationIdentity() string {
return ""
}
func (r *tarballReference) PolicyConfigurationNamespaces() []string {
return nil
}
// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
// The caller must call .Close() on the returned ImageCloser.
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (r *tarballReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
return image.FromReference(ctx, sys, r)
}
func (r *tarballReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
for _, filename := range r.filenames {
if err := os.Remove(filename); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("error removing %q: %w", filename, err)
}
}
return nil
}
func (r *tarballReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
return nil, fmt.Errorf(`"tarball:" locations can only be read from, not written to`)
}

View file

@ -0,0 +1,234 @@
package tarball
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"os"
"runtime"
"strings"
"time"
"github.com/containers/image/v5/internal/imagesource/impl"
"github.com/containers/image/v5/internal/imagesource/stubs"
"github.com/containers/image/v5/types"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
imgspecs "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/maps"
)
type tarballImageSource struct {
impl.Compat
impl.PropertyMethodsInitialize
impl.NoSignatures
impl.DoesNotAffectLayerInfosForCopy
stubs.NoGetBlobAtInitialize
reference tarballReference
blobs map[digest.Digest]tarballBlob
manifest []byte
}
// tarballBlob is a blob that tarballImageSource can return from GetBlob.
type tarballBlob struct {
contents []byte // or nil to read from filename below
filename string // valid if contents == nil
size int64
}
func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
// Pick up the layer comment from the configuration's history list, if one is set.
comment := "imported from tarball"
if len(r.config.History) > 0 && r.config.History[0].Comment != "" {
comment = r.config.History[0].Comment
}
// Gather up the digests, sizes, and history information for all of the files.
blobs := map[digest.Digest]tarballBlob{}
diffIDs := []digest.Digest{}
created := time.Time{}
history := []imgspecv1.History{}
layerDescriptors := []imgspecv1.Descriptor{}
for _, filename := range r.filenames {
var reader io.Reader
var blobTime time.Time
var blob tarballBlob
if filename == "-" {
reader = bytes.NewReader(r.stdin)
blobTime = time.Now()
blob = tarballBlob{
contents: r.stdin,
size: int64(len(r.stdin)),
}
} else {
file, err := os.Open(filename)
if err != nil {
return nil, err
}
defer file.Close()
reader = file
fileinfo, err := file.Stat()
if err != nil {
return nil, fmt.Errorf("error reading size of %q: %w", filename, err)
}
blobTime = fileinfo.ModTime()
blob = tarballBlob{
filename: filename,
size: fileinfo.Size(),
}
}
// Default to assuming the layer is compressed.
layerType := imgspecv1.MediaTypeImageLayerGzip
// Set up to digest the file as it is.
blobIDdigester := digest.Canonical.Digester()
reader = io.TeeReader(reader, blobIDdigester.Hash())
// Set up to digest the file after we maybe decompress it.
diffIDdigester := digest.Canonical.Digester()
uncompressed, err := pgzip.NewReader(reader)
if err == nil {
// It is compressed, so the diffID is the digest of the uncompressed version
reader = io.TeeReader(uncompressed, diffIDdigester.Hash())
} else {
// It is not compressed, so the diffID and the blobID are going to be the same
diffIDdigester = blobIDdigester
layerType = imgspecv1.MediaTypeImageLayer
uncompressed = nil
}
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
if _, err := io.Copy(io.Discard, reader); err != nil {
return nil, fmt.Errorf("error reading %q: %v", filename, err)
}
if uncompressed != nil {
uncompressed.Close()
}
// Grab our uncompressed and possibly-compressed digests and sizes.
diffID := diffIDdigester.Digest()
blobID := blobIDdigester.Digest()
diffIDs = append(diffIDs, diffID)
blobs[blobID] = blob
history = append(history, imgspecv1.History{
Created: &blobTime,
CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffID.Hex(), os.PathSeparator),
Comment: comment,
})
// Use the mtime of the most recently modified file as the image's creation time.
if created.Before(blobTime) {
created = blobTime
}
layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{
Digest: blobID,
Size: blob.size,
MediaType: layerType,
})
}
// Pick up other defaults from the config in the reference.
config := r.config
if config.Created == nil {
config.Created = &created
}
if config.Architecture == "" {
config.Architecture = runtime.GOARCH
}
if config.OS == "" {
config.OS = runtime.GOOS
}
config.RootFS = imgspecv1.RootFS{
Type: "layers",
DiffIDs: diffIDs,
}
config.History = history
// Encode and digest the image configuration blob.
configBytes, err := json.Marshal(&config)
if err != nil {
return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err)
}
configID := digest.Canonical.FromBytes(configBytes)
blobs[configID] = tarballBlob{
contents: configBytes,
size: int64(len(configBytes)),
}
// Populate a manifest with the configuration blob and the layers.
manifest := imgspecv1.Manifest{
Versioned: imgspecs.Versioned{
SchemaVersion: 2,
},
Config: imgspecv1.Descriptor{
Digest: configID,
Size: int64(len(configBytes)),
MediaType: imgspecv1.MediaTypeImageConfig,
},
Layers: layerDescriptors,
Annotations: maps.Clone(r.annotations),
}
// Encode the manifest.
manifestBytes, err := json.Marshal(&manifest)
if err != nil {
return nil, fmt.Errorf("error generating manifest for %q: %v", strings.Join(r.filenames, separator), err)
}
// Return the image.
src := &tarballImageSource{
PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
HasThreadSafeGetBlob: false,
}),
NoGetBlobAtInitialize: stubs.NoGetBlobAt(r),
reference: *r,
blobs: blobs,
manifest: manifestBytes,
}
src.Compat = impl.AddCompat(src)
return src, nil
}
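// The compression probe above relies on a dual-digest pattern: the raw stream
// is teed into the blobID digester, and when pgzip accepts the stream the
// decompressed bytes feed a separate diffID digester; otherwise the two
// digesters are one and the same. A minimal standalone sketch of that pattern
// (using the standard library's compress/gzip here instead of pgzip):
//
//	blobDigester := digest.Canonical.Digester()
//	raw := io.TeeReader(file, blobDigester.Hash())
//	diffDigester := digest.Canonical.Digester()
//	if gz, err := gzip.NewReader(raw); err == nil {
//		_, _ = io.Copy(diffDigester.Hash(), gz) // compressed: diffID covers the uncompressed bytes
//	} else {
//		diffDigester = blobDigester // not compressed: diffID == blobID
//		_, _ = io.Copy(io.Discard, raw)
//	}
//	blobID, diffID := blobDigester.Digest(), diffDigester.Digest()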
func (is *tarballImageSource) Close() error {
return nil
}
// GetBlob returns a stream for the specified blob, and the blob's size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
blob, ok := is.blobs[blobinfo.Digest]
if !ok {
return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String())
}
if blob.contents != nil {
return io.NopCloser(bytes.NewReader(blob.contents)), int64(len(blob.contents)), nil
}
reader, err := os.Open(blob.filename)
if err != nil {
return nil, -1, err
}
return reader, blob.size, nil
}
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
func (is *tarballImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
if instanceDigest != nil {
return nil, "", fmt.Errorf("manifest lists are not supported by the %q transport", transportName)
}
return is.manifest, imgspecv1.MediaTypeImageManifest, nil
}
func (is *tarballImageSource) Reference() types.ImageReference {
return &is.reference
}

View file

@ -0,0 +1,75 @@
package tarball
import (
"errors"
"fmt"
"io"
"os"
"strings"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
)
const (
transportName = "tarball"
separator = ":"
)
var (
// Transport implements the types.ImageTransport interface for "tarball:" images,
// which are makeshift images constructed using one or more possibly-compressed tar
// archives.
Transport = &tarballTransport{}
)
type tarballTransport struct {
}
func (t *tarballTransport) Name() string {
return transportName
}
func (t *tarballTransport) ParseReference(reference string) (types.ImageReference, error) {
var stdin []byte
var err error
filenames := strings.Split(reference, separator)
for _, filename := range filenames {
if filename == "-" {
stdin, err = io.ReadAll(os.Stdin)
if err != nil {
return nil, fmt.Errorf("error buffering stdin: %v", err)
}
continue
}
f, err := os.Open(filename)
if err != nil {
return nil, fmt.Errorf("error opening %q: %v", filename, err)
}
f.Close()
}
return NewReference(filenames, stdin)
}
// NewReference creates a new "tarball:" reference for the listed fileNames.
// If any of the fileNames is "-", the contents of stdin are used instead.
func NewReference(fileNames []string, stdin []byte) (types.ImageReference, error) {
for _, path := range fileNames {
if strings.Contains(path, separator) {
return nil, fmt.Errorf("Invalid path %q: paths including the separator %q are not supported", path, separator)
}
}
return &tarballReference{
filenames: fileNames,
stdin: stdin,
}, nil
}
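// A hedged usage sketch (the file names are illustrative; a "-" entry would
// substitute the buffered stdin contents passed as the second argument):
//
//	ref, err := tarball.NewReference([]string{"/tmp/base.tar.gz", "/tmp/app.tar.gz"}, nil)
//	if err != nil {
//		return err
//	}
//	src, err := ref.NewImageSource(context.TODO(), nil)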
func (t *tarballTransport) ValidatePolicyConfigurationScope(scope string) error {
// See the explanation in daemonReference.PolicyConfigurationIdentity.
return errors.New(`tarball: does not support any scopes except the default "" one`)
}
func init() {
transports.Register(Transport)
}

View file

@ -0,0 +1,49 @@
package alltransports
import (
"fmt"
"strings"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
// Register all known transports.
// NOTE: Make sure docs/containers-transports.5.md and docs/containers-policy.json.5.md are updated when adding or updating
// a transport.
_ "github.com/containers/image/v5/directory"
_ "github.com/containers/image/v5/docker"
_ "github.com/containers/image/v5/docker/archive"
_ "github.com/containers/image/v5/oci/archive"
_ "github.com/containers/image/v5/oci/layout"
_ "github.com/containers/image/v5/openshift"
_ "github.com/containers/image/v5/sif"
_ "github.com/containers/image/v5/tarball"
// The docker-daemon transport is registered by docker_daemon*.go
// The ostree transport is registered by ostree*.go
// The storage transport is registered by storage*.go
)
// ParseImageName converts a URL-like image name to a types.ImageReference.
func ParseImageName(imgName string) (types.ImageReference, error) {
// Keep this in sync with TransportFromImageName!
transportName, withinTransport, valid := strings.Cut(imgName, ":")
if !valid {
return nil, fmt.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
}
transport := transports.Get(transportName)
if transport == nil {
return nil, fmt.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, transportName)
}
return transport.ParseReference(withinTransport)
}
// TransportFromImageName converts a URL-like image name to a types.ImageTransport, or nil when
// the transport is unknown or when the input is invalid.
func TransportFromImageName(imageName string) types.ImageTransport {
// Keep this in sync with ParseImageName!
transportName, _, valid := strings.Cut(imageName, ":")
if valid {
return transports.Get(transportName)
}
return nil
}
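// Example usage (the image names are illustrative): both helpers split on the
// first colon, so the transport prefix selects the parser for the remainder:
//
//	ref, err := alltransports.ParseImageName("docker://quay.io/libpod/alpine:latest")
//	if err != nil {
//		return err
//	}
//	_ = ref
//	if alltransports.TransportFromImageName("oci:/tmp/layout:tag") == nil {
//		// unknown transport, or no "transport:" prefix at all
//	}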

View file

@ -0,0 +1,9 @@
//go:build !containers_image_docker_daemon_stub
// +build !containers_image_docker_daemon_stub
package alltransports
import (
// Register the docker-daemon transport
_ "github.com/containers/image/v5/docker/daemon"
)

View file

@ -0,0 +1,10 @@
//go:build containers_image_docker_daemon_stub
// +build containers_image_docker_daemon_stub
package alltransports
import "github.com/containers/image/v5/transports"
func init() {
transports.Register(transports.NewStubTransport("docker-daemon"))
}

View file

@ -0,0 +1,9 @@
//go:build containers_image_ostree && linux
// +build containers_image_ostree,linux
package alltransports
import (
// Register the ostree transport
_ "github.com/containers/image/v5/ostree"
)

View file

@ -0,0 +1,10 @@
//go:build !containers_image_ostree || !linux
// +build !containers_image_ostree !linux
package alltransports
import "github.com/containers/image/v5/transports"
func init() {
transports.Register(transports.NewStubTransport("ostree"))
}

View file

@ -0,0 +1,9 @@
//go:build !containers_image_storage_stub
// +build !containers_image_storage_stub
package alltransports
import (
// Register the storage transport
_ "github.com/containers/image/v5/storage"
)

View file

@ -0,0 +1,10 @@
//go:build containers_image_storage_stub
// +build containers_image_storage_stub
package alltransports
import "github.com/containers/image/v5/transports"
func init() {
transports.Register(transports.NewStubTransport("containers-storage"))
}

View file

@ -8,7 +8,7 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 29
// VersionPatch is for backwards-compatible bug fixes
- VersionPatch = 0
+ VersionPatch = 1
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""

186
vendor/github.com/containers/storage/.cirrus.yml generated vendored Normal file
View file

@ -0,0 +1,186 @@
---
# Main collection of env. vars to set for all tasks and scripts.
env:
####
#### Global variables used for all tasks
####
# Overrides default location (/tmp/cirrus) for repo clone
CIRRUS_WORKING_DIR: "/var/tmp/go/src/github.com/containers/storage"
# Shell used to execute all script commands
CIRRUS_SHELL: "/bin/bash"
# Automation script path relative to $CIRRUS_WORKING_DIR
SCRIPT_BASE: "./contrib/cirrus"
# No need to go crazy, but grab enough to cover most PRs
CIRRUS_CLONE_DEPTH: 50
####
#### Cache-image names to test with (double-quotes around names are critical)
###
FEDORA_NAME: "fedora-39"
DEBIAN_NAME: "debian-13"
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
# VM Image built in containers/automation_images
IMAGE_SUFFIX: "c20231004t194547z-f39f38d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
####
#### Command variables to help avoid duplication
####
# Command to prefix every output line with a timestamp
# (can't do inline awk script, Cirrus-CI or YAML mangles quoting)
_TIMESTAMP: 'awk --file ${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/timestamp.awk'
_DFCMD: 'df -lhTx tmpfs'
_RAUDITCMD: 'cat /var/log/audit/audit.log'
_UAUDITCMD: 'cat /var/log/kern.log'
_JOURNALCMD: 'journalctl -b'
gcp_credentials: ENCRYPTED[c87717f04fb15499d19a3b3fa0ad2cdedecc047e82967785d101e9bc418e93219f755e662feac8390088a2df1a4d8464]
# Default timeout for each task
timeout_in: 120m
# Default VM to use unless set or modified by task
gce_instance:
image_project: "${IMAGE_PROJECT}"
zone: "us-central1-b" # Required by Cirrus for the time being
cpu: 2
memory: "4Gb"
disk: 200
image_name: "${FEDORA_CACHE_IMAGE_NAME}"
linux_testing: &linux_testing
depends_on:
- lint
gce_instance: # Only need to specify differences from defaults (above)
image_name: "${VM_IMAGE}"
# Separate scripts for separate outputs, makes debugging easier.
setup_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
build_and_test_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/build_and_test.sh |& ${_TIMESTAMP}'
always:
df_script: '${_DFCMD} || true'
rh_audit_log_script: '${_RAUDITCMD} || true'
debian_audit_log_script: '${_UAUDITCMD} || true'
journal_log_script: '${_JOURNALCMD} || true'
fedora_testing_task: &fedora_testing
<<: *linux_testing
alias: fedora_testing
name: &std_test_name "${OS_NAME} ${TEST_DRIVER}"
env:
OS_NAME: "${FEDORA_NAME}"
VM_IMAGE: "${FEDORA_CACHE_IMAGE_NAME}"
# Not all $TEST_DRIVER combinations valid for all $VM_IMAGE types.
matrix: &test_matrix
- env:
TEST_DRIVER: "vfs"
- env:
TEST_DRIVER: "overlay"
- env:
TEST_DRIVER: "overlay-transient"
- env:
TEST_DRIVER: "fuse-overlay"
- env:
TEST_DRIVER: "fuse-overlay-whiteout"
- env:
TEST_DRIVER: "btrfs"
# aufs was dropped between 20.04 and 22.04, can't test it
debian_testing_task: &debian_testing
<<: *linux_testing
alias: debian_testing
name: *std_test_name
env:
OS_NAME: "${DEBIAN_NAME}"
VM_IMAGE: "${DEBIAN_CACHE_IMAGE_NAME}"
# Not all $TEST_DRIVER combinations valid for all $VM_IMAGE types.
matrix:
- env:
TEST_DRIVER: "vfs"
- env:
TEST_DRIVER: "overlay"
- env:
TEST_DRIVER: "fuse-overlay"
- env:
TEST_DRIVER: "fuse-overlay-whiteout"
- env:
TEST_DRIVER: "btrfs"
lint_task:
env:
CIRRUS_WORKING_DIR: "/go/src/github.com/containers/storage"
container:
image: golang
modules_cache:
fingerprint_script: cat go.sum
folder: $GOPATH/pkg/mod
build_script: |
apt-get update
apt-get install -y libbtrfs-dev libdevmapper-dev
test_script: |
make TAGS=regex_precompile local-validate
make lint
make clean
# Update metadata on VM images referenced by this repository state
meta_task:
container:
image: "quay.io/libpod/imgts:latest"
cpu: 1
memory: 1
env:
# Space-separated list of images used by this repository state
IMGNAMES: |-
${FEDORA_CACHE_IMAGE_NAME}
${DEBIAN_CACHE_IMAGE_NAME}
BUILDID: "${CIRRUS_BUILD_ID}"
REPOREF: "${CIRRUS_CHANGE_IN_REPO}"
GCPJSON: ENCRYPTED[244a93fe8b386b48b96f748342bf741350e43805eee81dd04b45093bdf737e540b993fc735df41f131835fa0f9b65826]
GCPNAME: ENCRYPTED[91cf7aa421858b26b67835978d224b4a5c46afcf52a0f1ec1b69a99b248715dc8e92a1b56fde18e092acf256fa80ae9c]
GCPPROJECT: ENCRYPTED[79b0f7eb5958e25bc7095d5d368fa8d94447a43ffacb9c693de438186e2f767b7efe9563d6954297ae4730220e10aa9c]
CIRRUS_CLONE_DEPTH: 1 # source not used
script: '/usr/local/bin/entrypoint.sh |& ${_TIMESTAMP}'
vendor_task:
container:
image: golang
modules_cache:
fingerprint_script: cat go.sum
folder: $GOPATH/pkg/mod
build_script: make vendor
test_script: hack/tree_status.sh
cross_task:
container:
image: golang:1.19
build_script: make cross
# Represent overall pass/fail status from required dependent tasks
success_task:
depends_on:
- lint
- fedora_testing
- debian_testing
- meta
- vendor
- cross
container:
image: golang:1.19
clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed
script: /bin/true

3
vendor/github.com/containers/storage/.dockerignore generated vendored Normal file
View file

@ -0,0 +1,3 @@
bundles
.gopath
vendor/pkg

32
vendor/github.com/containers/storage/.gitignore generated vendored Normal file
View file

@ -0,0 +1,32 @@
# containers/storage project generated files to ignore
# if you want to ignore files created by your editor/tools,
# please consider a global .gitignore https://help.github.com/articles/ignoring-files
*.1
*.5
*.exe
*~
*.orig
*.test
.*.swp
.DS_Store
.idea*
# a .bashrc may be added to customize the build environment
.bashrc
.gopath/
docs/AWS_S3_BUCKET
docs/GITCOMMIT
docs/GIT_BRANCH
docs/VERSION
docs/_build
docs/_static
docs/_templates
docs/changed-files
# generated by man/md2man-all.sh
man/man1
man/man5
man/man8
tests/tools/build
vendor/pkg/
.vagrant
/containers-storage
/containers-storage.*

11
vendor/github.com/containers/storage/.golangci.yml generated vendored Normal file
View file

@ -0,0 +1,11 @@
---
run:
concurrency: 6
deadline: 5m
skip-dirs-use-default: true
linters:
enable:
- gofumpt
disable:
- errcheck
- staticcheck

254
vendor/github.com/containers/storage/.mailmap generated vendored Normal file
View file

@ -0,0 +1,254 @@
# Generate AUTHORS: hack/generate-authors.sh
# Tip for finding duplicates (besides scanning the output of AUTHORS for name
# duplicates that aren't also email duplicates): scan the output of:
# git log --format='%aE - %aN' | sort -uf
#
# For explanation on this file format: man git-shortlog
Patrick Stapleton <github@gdi2290.com>
Shishir Mahajan <shishir.mahajan@redhat.com> <smahajan@redhat.com>
Erwin van der Koogh <info@erronis.nl>
Ahmed Kamal <email.ahmedkamal@googlemail.com>
Tejesh Mehta <tejesh.mehta@gmail.com> <tj@init.me>
Cristian Staretu <cristian.staretu@gmail.com>
Cristian Staretu <cristian.staretu@gmail.com> <unclejacksons@gmail.com>
Cristian Staretu <cristian.staretu@gmail.com> <unclejack@users.noreply.github.com>
Marcus Linke <marcus.linke@gmx.de>
Aleksandrs Fadins <aleks@s-ko.net>
Christopher Latham <sudosurootdev@gmail.com>
Hu Keping <hukeping@huawei.com>
Wayne Chang <wayne@neverfear.org>
Chen Chao <cc272309126@gmail.com>
Daehyeok Mun <daehyeok@gmail.com>
<daehyeok@gmail.com> <daehyeok@daehyeokui-MacBook-Air.local>
<jt@yadutaf.fr> <admin@jtlebi.fr>
<jeff@docker.com> <jefferya@programmerq.net>
<charles.hooper@dotcloud.com> <chooper@plumata.com>
<daniel.mizyrycki@dotcloud.com> <daniel@dotcloud.com>
<daniel.mizyrycki@dotcloud.com> <mzdaniel@glidelink.net>
Guillaume J. Charmes <guillaume.charmes@docker.com> <charmes.guillaume@gmail.com>
<guillaume.charmes@docker.com> <guillaume@dotcloud.com>
<guillaume.charmes@docker.com> <guillaume@docker.com>
<guillaume.charmes@docker.com> <guillaume.charmes@dotcloud.com>
<guillaume.charmes@docker.com> <guillaume@charmes.net>
<kencochrane@gmail.com> <KenCochrane@gmail.com>
Thatcher Peskens <thatcher@docker.com>
Thatcher Peskens <thatcher@docker.com> <thatcher@dotcloud.com>
Thatcher Peskens <thatcher@docker.com> dhrp <thatcher@gmx.net>
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com> jpetazzo <jerome.petazzoni@dotcloud.com>
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com> <jp@enix.org>
Joffrey F <joffrey@docker.com>
Joffrey F <joffrey@docker.com> <joffrey@dotcloud.com>
Joffrey F <joffrey@docker.com> <f.joffrey@gmail.com>
Tim Terhorst <mynamewastaken+git@gmail.com>
Andy Smith <github@anarkystic.com>
<kalessin@kalessin.fr> <louis@dotcloud.com>
<victor.vieux@docker.com> <victor.vieux@dotcloud.com>
<victor.vieux@docker.com> <victor@dotcloud.com>
<victor.vieux@docker.com> <dev@vvieux.com>
<victor.vieux@docker.com> <victor@docker.com>
<victor.vieux@docker.com> <vieux@docker.com>
<victor.vieux@docker.com> <victorvieux@gmail.com>
<dominik@honnef.co> <dominikh@fork-bomb.org>
<ehanchrow@ine.com> <eric.hanchrow@gmail.com>
Walter Stanish <walter@pratyeka.org>
<daniel@gasienica.ch> <dgasienica@zynga.com>
Roberto Hashioka <roberto_hashioka@hotmail.com>
Konstantin Pelykh <kpelykh@zettaset.com>
David Sissitka <me@dsissitka.com>
Nolan Darilek <nolan@thewordnerd.info>
<mastahyeti@gmail.com> <mastahyeti@users.noreply.github.com>
Benoit Chesneau <bchesneau@gmail.com>
Jordan Arentsen <blissdev@gmail.com>
Daniel Garcia <daniel@danielgarcia.info>
Miguel Angel Fernández <elmendalerenda@gmail.com>
Bhiraj Butala <abhiraj.butala@gmail.com>
Faiz Khan <faizkhan00@gmail.com>
Victor Lyuboslavsky <victor@victoreda.com>
Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
Matthew Mueller <mattmuelle@gmail.com>
<mosoni@ebay.com> <mohitsoni1989@gmail.com>
Shih-Yuan Lee <fourdollars@gmail.com>
Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com> root <root@vagrant-ubuntu-12.10.vagrantup.com>
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
<proppy@google.com> <proppy@aminche.com>
<michael@docker.com> <michael@crosbymichael.com>
<michael@docker.com> <crosby.michael@gmail.com>
<michael@docker.com> <crosbymichael@gmail.com>
<github@developersupport.net> <github@metaliveblog.com>
<brandon@ifup.org> <brandon@ifup.co>
<dano@spotify.com> <daniel.norberg@gmail.com>
<danny@codeaholics.org> <Danny.Yates@mailonline.co.uk>
<gurjeet@singh.im> <singh.gurjeet@gmail.com>
<shawn@churchofgit.com> <shawnlandden@gmail.com>
<sjoerd-github@linuxonly.nl> <sjoerd@byte.nl>
<solomon@docker.com> <solomon.hykes@dotcloud.com>
<solomon@docker.com> <solomon@dotcloud.com>
<solomon@docker.com> <s@docker.com>
Sven Dowideit <SvenDowideit@home.org.au>
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@fosiki.com>
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@docker.com>
Sven Dowideit <SvenDowideit@home.org.au> <¨SvenDowideit@home.org.au¨>
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@home.org.au>
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@users.noreply.github.com>
Sven Dowideit <SvenDowideit@home.org.au> <sven@t440s.home.gateway>
<alexl@redhat.com> <alexander.larsson@gmail.com>
Alexander Morozov <lk4d4@docker.com> <lk4d4math@gmail.com>
Alexander Morozov <lk4d4@docker.com>
<git.nivoc@neverbox.com> <kuehnle@online.de>
O.S. Tezer <ostezer@gmail.com>
<ostezer@gmail.com> <ostezer@users.noreply.github.com>
Roberto G. Hashioka <roberto.hashioka@docker.com> <roberto_hashioka@hotmail.com>
<justin.p.simonelis@gmail.com> <justin.simonelis@PTS-JSIMON2.toronto.exclamation.com>
<taim@bosboot.org> <maztaim@users.noreply.github.com>
<viktor.vojnovski@amadeus.com> <vojnovski@gmail.com>
<vbatts@redhat.com> <vbatts@hashbangbash.com>
<altsysrq@gmail.com> <iamironbob@gmail.com>
Sridhar Ratnakumar <sridharr@activestate.com>
Sridhar Ratnakumar <sridharr@activestate.com> <github@srid.name>
Liang-Chi Hsieh <viirya@gmail.com>
Aleksa Sarai <asarai@suse.de>
Aleksa Sarai <asarai@suse.de> <asarai@suse.com>
Aleksa Sarai <asarai@suse.de> <cyphar@cyphar.com>
Will Weaver <monkey@buildingbananas.com>
Timothy Hobbs <timothyhobbs@seznam.cz>
Nathan LeClaire <nathan.leclaire@docker.com> <nathan.leclaire@gmail.com>
Nathan LeClaire <nathan.leclaire@docker.com> <nathanleclaire@gmail.com>
<github@hollensbe.org> <erik+github@hollensbe.org>
<github@albersweb.de> <albers@users.noreply.github.com>
<lsm5@fedoraproject.org> <lsm5@redhat.com>
<marc@marc-abramowitz.com> <msabramo@gmail.com>
Matthew Heon <mheon@redhat.com> <mheon@mheonlaptop.redhat.com>
<bernat@luffy.cx> <vincent@bernat.im>
<bernat@luffy.cx> <Vincent.Bernat@exoscale.ch>
<p@pwaller.net> <peter@scraperwiki.com>
<andrew.weiss@outlook.com> <andrew.weiss@microsoft.com>
Francisco Carriedo <fcarriedo@gmail.com>
<julienbordellier@gmail.com> <git@julienbordellier.com>
<ahmetb@microsoft.com> <ahmetalpbalkan@gmail.com>
<arnaud.porterie@docker.com> <icecrime@gmail.com>
<baloo@gandi.net> <superbaloo+registrations.github@superbaloo.net>
Brian Goff <cpuguy83@gmail.com>
<cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.home>
<eric@windisch.us> <ewindisch@docker.com>
<frank.rosquin+github@gmail.com> <frank.rosquin@gmail.com>
Hollie Teal <hollie@docker.com>
<hollie@docker.com> <hollie.teal@docker.com>
<hollie@docker.com> <hollietealok@users.noreply.github.com>
<huu@prismskylabs.com> <whoshuu@gmail.com>
Jessica Frazelle <jess@mesosphere.com>
Jessica Frazelle <jess@mesosphere.com> <jfrazelle@users.noreply.github.com>
Jessica Frazelle <jess@mesosphere.com> <acidburn@docker.com>
Jessica Frazelle <jess@mesosphere.com> <jess@docker.com>
Jessica Frazelle <jess@mesosphere.com> <princess@docker.com>
<konrad.wilhelm.kleine@gmail.com> <kwk@users.noreply.github.com>
<tintypemolly@gmail.com> <tintypemolly@Ohui-MacBook-Pro.local>
<estesp@linux.vnet.ibm.com> <estesp@gmail.com>
<github@gone.nl> <thaJeztah@users.noreply.github.com>
Thomas LEVEIL <thomasleveil@gmail.com> Thomas LÉVEIL <thomasleveil@users.noreply.github.com>
<oi@truffles.me.uk> <timruffles@googlemail.com>
<Vincent.Bernat@exoscale.ch> <bernat@luffy.cx>
Antonio Murdaca <antonio.murdaca@gmail.com> <amurdaca@redhat.com>
Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@redhat.com>
Antonio Murdaca <antonio.murdaca@gmail.com> <me@runcom.ninja>
Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@linux.com>
Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@users.noreply.github.com>
Darren Shepherd <darren.s.shepherd@gmail.com> <darren@rancher.com>
Deshi Xiao <dxiao@redhat.com> <dsxiao@dataman-inc.com>
Deshi Xiao <dxiao@redhat.com> <xiaods@gmail.com>
Doug Davis <dug@us.ibm.com> <duglin@users.noreply.github.com>
Jacob Atzen <jacob@jacobatzen.dk> <jatzen@gmail.com>
Jeff Nickoloff <jeff.nickoloff@gmail.com> <jeff@allingeek.com>
John Howard (VM) <John.Howard@microsoft.com> <jhowardmsft@users.noreply.github.com>
John Howard (VM) <John.Howard@microsoft.com>
John Howard (VM) <John.Howard@microsoft.com> <john.howard@microsoft.com>
John Howard (VM) <John.Howard@microsoft.com> <jhoward@microsoft.com>
Madhu Venugopal <madhu@socketplane.io> <madhu@docker.com>
Mary Anthony <mary.anthony@docker.com> <mary@docker.com>
Mary Anthony <mary.anthony@docker.com> moxiegirl <mary@docker.com>
Mary Anthony <mary.anthony@docker.com> <moxieandmore@gmail.com>
mattyw <mattyw@me.com> <gh@mattyw.net>
resouer <resouer@163.com> <resouer@gmail.com>
AJ Bowen <aj@gandi.net> soulshake <amy@gandi.net>
AJ Bowen <aj@gandi.net> soulshake <aj@gandi.net>
Tibor Vass <teabee89@gmail.com> <tibor@docker.com>
Tibor Vass <teabee89@gmail.com> <tiborvass@users.noreply.github.com>
Vincent Bernat <bernat@luffy.cx> <Vincent.Bernat@exoscale.ch>
Yestin Sun <sunyi0804@gmail.com> <yestin.sun@polyera.com>
bin liu <liubin0329@users.noreply.github.com> <liubin0329@gmail.com>
John Howard (VM) <John.Howard@microsoft.com> jhowardmsft <jhoward@microsoft.com>
Ankush Agarwal <ankushagarwal11@gmail.com> <ankushagarwal@users.noreply.github.com>
Tangi COLIN <tangicolin@gmail.com> tangicolin <tangicolin@gmail.com>
Allen Sun <allen.sun@daocloud.io>
Adrien Gallouët <adrien@gallouet.fr> <angt@users.noreply.github.com>
<aanm90@gmail.com> <martins@noironetworks.com>
Anuj Bahuguna <anujbahuguna.dev@gmail.com>
Anusha Ragunathan <anusha.ragunathan@docker.com> <anusha@docker.com>
Avi Miller <avi.miller@oracle.com> <avi.miller@gmail.com>
Brent Salisbury <brent.salisbury@docker.com> <brent@docker.com>
Chander G <chandergovind@gmail.com>
Chun Chen <ramichen@tencent.com> <chenchun.feed@gmail.com>
Ying Li <cyli@twistedmatrix.com>
Daehyeok Mun <daehyeok@gmail.com> <daehyeok@daehyeok-ui-MacBook-Air.local>
<dqminh@cloudflare.com> <dqminh89@gmail.com>
Daniel, Dao Quang Minh <dqminh@cloudflare.com>
Daniel Nephin <dnephin@docker.com> <dnephin@gmail.com>
Dave Tucker <dt@docker.com> <dave@dtucker.co.uk>
Doug Tangren <d.tangren@gmail.com>
Frederick F. Kautz IV <fkautz@redhat.com> <fkautz@alumni.cmu.edu>
Ben Golub <ben.golub@dotcloud.com>
Harold Cooper <hrldcpr@gmail.com>
hsinko <21551195@zju.edu.cn> <hsinko@users.noreply.github.com>
Josh Hawn <josh.hawn@docker.com> <jlhawn@berkeley.edu>
Justin Cormack <justin.cormack@docker.com>
<justin.cormack@docker.com> <justin.cormack@unikernel.com>
<justin.cormack@docker.com> <justin@specialbusservice.com>
Kamil Domański <kamil@domanski.co>
Lei Jitang <leijitang@huawei.com>
<leijitang@huawei.com> <leijitang@gmail.com>
Linus Heckemann <lheckemann@twig-world.com>
<lheckemann@twig-world.com> <anonymouse2048@gmail.com>
Lynda O'Leary <lyndaoleary29@gmail.com>
<lyndaoleary29@gmail.com> <lyndaoleary@hotmail.com>
Marianna Tessel <mtesselh@gmail.com>
Michael Huettermann <michael@huettermann.net>
Moysés Borges <moysesb@gmail.com>
<moysesb@gmail.com> <moyses.furtado@wplex.com.br>
Nigel Poulton <nigelpoulton@hotmail.com>
Qiang Huang <h.huangqiang@huawei.com>
<h.huangqiang@huawei.com> <qhuang@10.0.2.15>
Boaz Shuster <ripcurld.github@gmail.com>
Shuwei Hao <haosw@cn.ibm.com>
<haosw@cn.ibm.com> <haoshuwei24@gmail.com>
Soshi Katsuta <soshi.katsuta@gmail.com>
<soshi.katsuta@gmail.com> <katsuta_soshi@cyberagent.co.jp>
Stefan Berger <stefanb@linux.vnet.ibm.com>
<stefanb@linux.vnet.ibm.com> <stefanb@us.ibm.com>
Stephen Day <stephen.day@docker.com>
<stephen.day@docker.com> <stevvooe@users.noreply.github.com>
Toli Kuznets <toli@docker.com>
Tristan Carel <tristan@cogniteev.com>
<tristan@cogniteev.com> <tristan.carel@gmail.com>
Vincent Demeester <vincent@sbr.pm>
<vincent@sbr.pm> <vincent+github@demeester.fr>
Vishnu Kannan <vishnuk@google.com>
xlgao-zju <xlgao@zju.edu.cn> xlgao <xlgao@zju.edu.cn>
yuchangchun <yuchangchun1@huawei.com> y00277921 <yuchangchun1@huawei.com>
<zij@case.edu> <zjaffee@us.ibm.com>
<anujbahuguna.dev@gmail.com> <abahuguna@fiberlink.com>
<eungjun.yi@navercorp.com> <semtlenori@gmail.com>
<haosw@cn.ibm.com> <haoshuwei1989@163.com>
Hao Shu Wei <haosw@cn.ibm.com>
<matt.bentley@docker.com> <mbentley@mbentley.net>
<MihaiBorob@gmail.com> <MihaiBorobocea@gmail.com>
<redmond.martin@gmail.com> <xgithub@redmond5.com>
<redmond.martin@gmail.com> <martin@tinychat.com>
<srbrahma@us.ibm.com> <sbrahma@us.ibm.com>
<suda.akihiro@lab.ntt.co.jp> <suda.kyoto@gmail.com>
<thomas@gazagnaire.org> <thomas@gazagnaire.com>
Shengbo Song <thomassong@tencent.com> mYmNeo <mymneo@163.com>
Shengbo Song <thomassong@tencent.com>
<sylvain@ascribe.io> <sylvain.bellemare@ezeep.com>
Sylvain Bellemare <sylvain@ascribe.io>

View file

@ -0,0 +1,3 @@
## The Containers Storage Project Community Code of Conduct
The Containers Storage project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).

144
vendor/github.com/containers/storage/CONTRIBUTING.md generated vendored Normal file
View file

@ -0,0 +1,144 @@
# Contributing to Containers/Storage
We'd love to have you join the community! Below summarizes the processes
that we follow.
## Topics
* [Reporting Issues](#reporting-issues)
* [Submitting Pull Requests](#submitting-pull-requests)
* [Communications](#communications)
<!--
* [Becoming a Maintainer](#becoming-a-maintainer)
-->
## Reporting Issues
Before reporting an issue, check our backlog of
[open issues](https://github.com/containers/storage/issues)
to see if someone else has already reported it. If so, feel free to add
your scenario, or additional information, to the discussion. Or simply
"subscribe" to it to be notified when it is updated.
If you find a new issue with the project we'd love to hear about it! The most
important aspect of a bug report is that it includes enough information for
us to reproduce it. So, please include as much detail as possible and try
to remove the extra stuff that doesn't really relate to the issue itself.
The easier it is for us to reproduce it, the faster it'll be fixed!
Please don't include any private/sensitive information in your issue!
## Submitting Pull Requests
No Pull Request (PR) is too small! Typos, additional comments in the code,
new testcases, bug fixes, new features, more documentation, ... it's all
welcome!
While bug fixes can first be identified via an "issue", that is not required.
It's ok to just open up a PR with the fix, but make sure you include the same
information you would have included in an issue - like how to reproduce it.
PRs for new features should include some background on what use cases the
new code is trying to address. When possible and when it makes sense, try to break up
larger PRs into smaller ones - it's easier to review smaller
code changes. But only if those smaller ones make sense as stand-alone PRs.
Regardless of the type of PR, all PRs should include:
* well documented code changes
* additional testcases. Ideally, they should fail w/o your code change applied
* documentation changes
Squash your commits into logical pieces of work that might want to be reviewed
separate from the rest of the PRs. But, squashing down to just one commit is ok
too since in the end the entire PR will be reviewed anyway. When in doubt,
squash.
PRs that fix issues should include a reference like `Closes #XXXX` in the
commit message so that github will automatically close the referenced issue
when the PR is merged.
<!--
All PRs require at least two LGTMs (Looks Good To Me) from maintainers.
-->
### Sign your PRs
The sign-off is a line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
Then you just add a line to every git commit message:
Signed-off-by: Joe Smith <joe.smith@email.com>
Use your real name (sorry, no pseudonyms or anonymous contributions.)
If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.
## Communications
For general questions, or discussions, please use the
IRC group on `irc.freenode.net` called `container-projects`
that has been set up.
For discussions around issues/bugs and features, you can use the github
[issues](https://github.com/containers/storage/issues)
and
[PRs](https://github.com/containers/storage/pulls)
tracking system.
<!--
## Becoming a Maintainer
To become a maintainer you must first be nominated by an existing maintainer.
If a majority (>50%) of maintainers agree then the proposal is adopted and
you will be added to the list.
Removing a maintainer requires at least 75% of the remaining maintainers
approval, or if the person requests to be removed then it is automatic.
Normally, a maintainer will only be removed if they are considered to be
inactive for a long period of time or are viewed as disruptive to the community.
The current list of maintainers can be found in the
[MAINTAINERS](MAINTAINERS) file.
-->

94
vendor/github.com/containers/storage/Makefile generated vendored Normal file
View file

@ -0,0 +1,94 @@
.PHONY: \
all \
binary \
clean \
codespell \
containers-storage \
cross \
default \
docs \
gccgo \
help \
install \
install.docs \
install.tools \
lint \
local-binary \
local-cross \
local-gccgo \
local-test \
local-test-integration \
local-test-unit \
local-validate \
test-integration \
test-unit \
validate \
vendor \
vendor-in-container
NATIVETAGS :=
AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libdm_tag.sh) $(shell ./hack/libsubid_tag.sh)
BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS)
GO ?= go
TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /dev/null && echo -race)
default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs
clean: ## remove all built files
$(RM) -f containers-storage containers-storage.* docs/*.1 docs/*.5
containers-storage: ## build using gc on the host
$(GO) build -compiler gc $(BUILDFLAGS) ./cmd/containers-storage
codespell:
codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L worl,flate,uint,iff,od,ERRO -w
binary local-binary: containers-storage
local-gccgo gccgo: ## build using gccgo on the host
GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage
local-cross cross: ## cross build the binaries for arm, darwin, and freebsd
@for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le linux/riscv64 linux/s390x linux/mips linux/mipsle linux/mips64 linux/mips64le darwin/amd64 windows/amd64 freebsd/amd64 freebsd/arm64 ; do \
os=`echo $${target} | cut -f1 -d/` ; \
arch=`echo $${target} | cut -f2 -d/` ; \
suffix=$${os}.$${arch} ; \
echo env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage ; \
env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage || exit 1 ; \
done
docs: install.tools ## build the docs on the host
$(MAKE) -C docs docs
local-test: local-binary local-test-unit local-test-integration ## build the binaries and run the tests
local-test-unit test-unit: local-binary ## run the unit tests on the host (requires\nsuperuser privileges)
@$(GO) test -count 1 $(BUILDFLAGS) $(TESTFLAGS) ./...
local-test-integration test-integration: local-binary ## run the integration tests on the host (requires\nsuperuser privileges)
@cd tests; ./test_runner.bash
local-validate validate: install.tools ## validate DCO on the host
@./hack/git-validation.sh
install.tools:
$(MAKE) -C tests/tools
install.docs: docs
$(MAKE) -C docs install
install: install.docs
lint: install.tools
tests/tools/build/golangci-lint run --build-tags="$(AUTOTAGS) $(TAGS)"
help: ## this help
@awk 'BEGIN {FS = ":.*?## "} /^[a-z A-Z_-]+:.*?## / {gsub(" ",",",$$1);gsub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-21s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
vendor-in-container:
podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src golang make vendor
vendor:
$(GO) mod tidy
$(GO) mod vendor
$(GO) mod verify

32
vendor/github.com/containers/storage/OWNERS generated vendored Normal file
View file

@ -0,0 +1,32 @@
approvers:
- Luap99
- TomSweeneyRedHat
- cevich
- edsantiago
- flouthoc
- giuseppe
- haircommander
- kolyshkin
- mrunalp
- mtrmac
- nalind
- rhatdan
- saschagrunert
- umohnani8
- vrothberg
reviewers:
- Luap99
- TomSweeneyRedHat
- cevich
- edsantiago
- flouthoc
- giuseppe
- haircommander
- kolyshkin
- mrunalp
- mtrmac
- nalind
- rhatdan
- saschagrunert
- umohnani8
- vrothberg

46
vendor/github.com/containers/storage/README.md generated vendored Normal file
View file

@ -0,0 +1,46 @@
`storage` is a Go library which aims to provide methods for storing filesystem
layers, container images, and containers. A `containers-storage` CLI wrapper
is also included for manual and scripting use.
To build the CLI wrapper, use 'make binary'.
Operations which use VMs expect to launch them using 'vagrant', defaulting to
using its 'libvirt' provider. The boxes used are also available for the
'virtualbox' provider, and can be selected by setting $VAGRANT_PROVIDER to
'virtualbox' before kicking off the build.
The library manages three types of items: layers, images, and containers.
A *layer* is a copy-on-write filesystem which is notionally stored as a set of
changes relative to its *parent* layer, if it has one. A given layer can only
have one parent, but any layer can be the parent of multiple layers. Layers
which are parents of other layers should be treated as read-only.
An *image* is a reference to a particular layer (its _top_ layer), along with
other information which the library can manage for the convenience of its
caller. This information typically includes configuration templates for
running a binary contained within the image's layers, and may include
cryptographic signatures. Multiple images can reference the same layer, as the
differences between two images may not be in their layer contents.
A *container* is a read-write layer which is a child of an image's top layer,
along with information which the library can manage for the convenience of its
caller. This information typically includes configuration information for
running the specific container. Multiple containers can be derived from a
single image.
Layers, images, and containers are represented primarily by 32 character
hexadecimal IDs, but items of each kind can also have one or more arbitrary
names attached to them, which the library will automatically resolve to IDs
when they are passed in to API calls which expect IDs.
The library can store what it calls *metadata* for each of these types of
items. This is expected to be a small piece of data, since it is cached in
memory and stored along with the library's own bookkeeping information.
Additionally, the library can store one or more of what it calls *big data* for
images and containers. This is a named chunk of larger data, which is only in
memory when it is being read from or being written to its own disk file.
**[Contributing](CONTRIBUTING.md)**
Information about contributing to this project.

3
vendor/github.com/containers/storage/SECURITY.md generated vendored Normal file
View file

@ -0,0 +1,3 @@
## Security and Disclosure Information Policy for the Containers Storage Project
The Containers Storage Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects.

1
vendor/github.com/containers/storage/VERSION generated vendored Normal file
View file

@ -0,0 +1 @@
1.51.0

1153
vendor/github.com/containers/storage/check.go generated vendored Normal file

File diff suppressed because it is too large

981
vendor/github.com/containers/storage/containers.go generated vendored Normal file
View file

@ -0,0 +1,981 @@
package storage
import (
"errors"
"fmt"
"os"
"path/filepath"
"sync"
"time"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
type containerLocations uint8
// The backing store is split into two JSON files: the volatile one is
// written without fsync(), so it is less robust to unclean shutdown
const (
stableContainerLocation containerLocations = 1 << iota
volatileContainerLocation
numContainerLocationIndex = iota
)
func containerLocationFromIndex(index int) containerLocations {
return 1 << index
}
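// A sketch (inferred from the constants above, not an excerpt from this
// file): each location index maps to one bit flag and one entry in the
// store's jsonPath array, so visiting both backing files looks roughly like:
//
//	for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ {
//		location := containerLocationFromIndex(locationIndex) // stable first, then volatile
//		rpath := r.jsonPath[locationIndex]
//		_, _ = location, rpath
//	}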
// A Container is a reference to a read-write layer with metadata.
type Container struct {
// ID is either one which was specified at create-time, or a random
// value which was generated by the library.
ID string `json:"id"`
// Names is an optional set of user-defined convenience values. The
// container can be referred to by its ID or any of its names. Names
// are unique among containers.
Names []string `json:"names,omitempty"`
// ImageID is the ID of the image which was used to create the container.
ImageID string `json:"image"`
// LayerID is the ID of the read-write layer for the container itself.
// It is assumed that the image's top layer is the parent of the container's
// read-write layer.
LayerID string `json:"layer"`
// Metadata is data we keep for the convenience of the caller. It is not
// expected to be large, since it is kept in memory.
Metadata string `json:"metadata,omitempty"`
// BigDataNames is a list of names of data items that we keep for the
// convenience of the caller. They can be large, and are only in
// memory when being read from or written to disk.
BigDataNames []string `json:"big-data-names,omitempty"`
// BigDataSizes maps the names in BigDataNames to the sizes of the data
// that has been stored, if they're known.
BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"`
// BigDataDigests maps the names in BigDataNames to the digests of the
// data that has been stored, if they're known.
BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"`
// Created is the datestamp for when this container was created. Older
// versions of the library did not track this information, so callers
// will likely want to use the IsZero() method to verify that a value
// is set before using it.
Created time.Time `json:"created,omitempty"`
// UIDMap and GIDMap are used for setting up a container's root
// filesystem for use inside of a user namespace where UID mapping is
// being used.
UIDMap []idtools.IDMap `json:"uidmap,omitempty"`
GIDMap []idtools.IDMap `json:"gidmap,omitempty"`
Flags map[string]interface{} `json:"flags,omitempty"`
// volatileStore is true if the container is from the volatile json file
volatileStore bool `json:"-"`
}
// rwContainerStore provides bookkeeping for information about Containers.
type rwContainerStore interface {
metadataStore
containerBigDataStore
flaggableStore
// startWriting makes sure the store is fresh, and locks it for writing.
// If this succeeds, the caller MUST call stopWriting().
startWriting() error
// stopWriting releases locks obtained by startWriting.
stopWriting()
// startReading makes sure the store is fresh, and locks it for reading.
// If this succeeds, the caller MUST call stopReading().
startReading() error
// stopReading releases locks obtained by startReading.
stopReading()
// create creates a container that has a specified ID (or generates a
// random one if an empty value is supplied) and optional names,
// based on the specified image, using the specified layer as its
// read-write layer.
// The maps in the container's options structure are recorded for the
// convenience of the caller, nothing more.
create(id string, names []string, image, layer string, options *ContainerOptions) (*Container, error)
// updateNames modifies names associated with a container based on (op, names).
updateNames(id string, names []string, op updateNameOperation) error
// Get retrieves information about a container given an ID or name.
Get(id string) (*Container, error)
// Exists checks if there is a container with the given ID or name.
Exists(id string) bool
// Delete removes the record of the container.
Delete(id string) error
// Wipe removes records of all containers.
Wipe() error
// Lookup attempts to translate a name to an ID. Most methods do this
// implicitly.
Lookup(name string) (string, error)
// Containers returns a slice enumerating the known containers.
Containers() ([]Container, error)
// Clean up unreferenced datadirs
GarbageCollect() error
}
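// An illustrative use of the locking contract documented on the interface
// above (a sketch, not code from this file): readers bracket their access
// with startReading/stopReading, writers with startWriting/stopWriting:
//
//	if err := store.startReading(); err != nil {
//		return err
//	}
//	defer store.stopReading()
//	containers, err := store.Containers()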
type containerStore struct {
// The following fields are only set when constructing containerStore, and must never be modified afterwards.
// They are safe to access without any other locking.
lockfile *lockfile.LockFile // Synchronizes readers vs. writers of the _filesystem data_, both cross-process and in-process.
dir string
jsonPath [numContainerLocationIndex]string
inProcessLock sync.RWMutex // Can _only_ be obtained with lockfile held.
// The following fields can only be read/written with read/write ownership of inProcessLock, respectively.
// Almost all users should use startReading() or startWriting().
lastWrite lockfile.LastWrite
containers []*Container
idindex *truncindex.TruncIndex
byid map[string]*Container
bylayer map[string]*Container
byname map[string]*Container
}
func copyContainer(c *Container) *Container {
return &Container{
ID: c.ID,
Names: copyStringSlice(c.Names),
ImageID: c.ImageID,
LayerID: c.LayerID,
Metadata: c.Metadata,
BigDataNames: copyStringSlice(c.BigDataNames),
BigDataSizes: copyStringInt64Map(c.BigDataSizes),
BigDataDigests: copyStringDigestMap(c.BigDataDigests),
Created: c.Created,
UIDMap: copyIDMap(c.UIDMap),
GIDMap: copyIDMap(c.GIDMap),
Flags: copyStringInterfaceMap(c.Flags),
volatileStore: c.volatileStore,
}
}
func (c *Container) MountLabel() string {
if label, ok := c.Flags[mountLabelFlag].(string); ok {
return label
}
return ""
}
func (c *Container) ProcessLabel() string {
if label, ok := c.Flags[processLabelFlag].(string); ok {
return label
}
return ""
}
func (c *Container) MountOpts() []string {
switch value := c.Flags[mountOptsFlag].(type) {
case []string:
return value
case []interface{}:
var mountOpts []string
for _, v := range value {
if flag, ok := v.(string); ok {
mountOpts = append(mountOpts, flag)
}
}
return mountOpts
default:
return nil
}
}
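// The []interface{} arm in MountOpts is needed because Flags round-trips
// through JSON: a []string written at create time comes back as
// []interface{} when the store is reloaded from disk. A minimal sketch of
// that standard JSON unmarshaling behavior (illustrative only):
func exampleMountOptsRoundTrip() (bool, bool) {
	data, _ := json.Marshal(map[string]interface{}{mountOptsFlag: []string{"nodev"}})
	var flags map[string]interface{}
	_ = json.Unmarshal(data, &flags)
	_, isStrings := flags[mountOptsFlag].([]string)  // false after the round trip
	_, isAny := flags[mountOptsFlag].([]interface{}) // true after the round trip
	return isStrings, isAny
}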
// The caller must hold r.inProcessLock for reading.
func containerLocation(c *Container) containerLocations {
if c.volatileStore {
return volatileContainerLocation
}
return stableContainerLocation
}
// startWritingWithReload makes sure the store is fresh if canReload, and locks it for writing.
// If this succeeds, the caller MUST call stopWriting().
//
// This is an internal implementation detail of containerStore construction, every other caller
// should use startWriting() instead.
func (r *containerStore) startWritingWithReload(canReload bool) error {
r.lockfile.Lock()
r.inProcessLock.Lock()
succeeded := false
defer func() {
if !succeeded {
r.inProcessLock.Unlock()
r.lockfile.Unlock()
}
}()
if canReload {
if _, err := r.reloadIfChanged(true); err != nil {
return err
}
}
succeeded = true
return nil
}
// startWriting makes sure the store is fresh, and locks it for writing.
// If this succeeds, the caller MUST call stopWriting().
func (r *containerStore) startWriting() error {
return r.startWritingWithReload(true)
}
// stopWriting releases locks obtained by startWriting.
func (r *containerStore) stopWriting() {
r.inProcessLock.Unlock()
r.lockfile.Unlock()
}
// startReading makes sure the store is fresh, and locks it for reading.
// If this succeeds, the caller MUST call stopReading().
func (r *containerStore) startReading() error {
// inProcessLocked calls the nested function with r.inProcessLock held for writing.
inProcessLocked := func(fn func() error) error {
r.inProcessLock.Lock()
defer r.inProcessLock.Unlock()
return fn()
}
r.lockfile.RLock()
unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil.
defer func() {
if unlockFn != nil {
unlockFn()
}
}()
r.inProcessLock.RLock()
unlockFn = r.stopReading
// If we are lucky, we can just hold the read locks, check that we are fresh, and continue.
_, modified, err := r.modified()
if err != nil {
return err
}
if modified {
// We are unlucky, and need to reload.
// NOTE: Multiple goroutines can get to this place approximately simultaneously.
r.inProcessLock.RUnlock()
unlockFn = r.lockfile.Unlock
// r.lastWrite can change at this point if another goroutine reloads the store before us. That's why we don't unconditionally
// trigger a load below; we (lock and) reloadIfChanged() again.
// First try reloading with r.lockfile held for reading.
// r.inProcessLock will serialize all goroutines that got here;
// each will re-check on-disk state vs. r.lastWrite, and the first one will actually reload the data.
var tryLockedForWriting bool
if err := inProcessLocked(func() error {
// We could optimize this further: The r.lockfile.GetLastWrite() value shouldn't change as long as we hold r.lockfile,
// so if r.lastWrite was already updated, we don't need to actually read the on-filesystem lock.
var err error
tryLockedForWriting, err = r.reloadIfChanged(false)
return err
}); err != nil {
if !tryLockedForWriting {
return err
}
// Not good enough, we need r.lockfile held for writing. So, let's do that.
unlockFn()
unlockFn = nil
r.lockfile.Lock()
unlockFn = r.lockfile.Unlock
if err := inProcessLocked(func() error {
_, err := r.reloadIfChanged(true)
return err
}); err != nil {
return err
}
unlockFn()
unlockFn = nil
r.lockfile.RLock()
unlockFn = r.lockfile.Unlock
// We need to check for a reload once more because the on-disk state could have been modified
// after we released the lock.
// If that, _again_, finds inconsistent state, just give up.
// We could, plausibly, retry a few times, but that inconsistent state (duplicate container names)
// shouldn't be saved (by correct implementations) in the first place.
if err := inProcessLocked(func() error {
_, err := r.reloadIfChanged(false)
return err
}); err != nil {
return fmt.Errorf("(even after successfully cleaning up once:) %w", err)
}
}
// NOTE that we hold neither a read nor write inProcessLock at this point. That's fine in ordinary operation, because
// the on-filesystem r.lockfile should protect us against (cooperating) writers, and any use of r.inProcessLock
// protects us against in-process writers modifying data.
// In presence of non-cooperating writers, we just ensure that 1) the in-memory data is not clearly out-of-date
// and 2) access to the in-memory data is not racy;
// but we can't protect against those out-of-process writers modifying _files_ while we are assuming they are in a consistent state.
r.inProcessLock.RLock()
}
unlockFn = nil
return nil
}
// stopReading releases locks obtained by startReading.
func (r *containerStore) stopReading() {
r.inProcessLock.RUnlock()
r.lockfile.Unlock()
}
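// Stripped of the store specifics, startReading follows a common
// "optimistic read, upgrade on staleness" shape. A simplified sketch of
// just that shape (illustrative; the real code above must additionally
// coordinate the cross-process file lock, which is why it is more involved):
func exampleOptimisticReload(mu *sync.RWMutex, stale func() bool, reload func()) {
	mu.RLock()
	if !stale() {
		mu.RUnlock()
		return // fast path: data was fresh under the read lock
	}
	// RWMutex cannot be upgraded in place: drop the read lock, take the
	// write lock, and re-check, since another goroutine may reload first.
	mu.RUnlock()
	mu.Lock()
	if stale() {
		reload()
	}
	mu.Unlock()
}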
// modified returns true if the on-disk state has changed (i.e. if reloadIfChanged may need to modify the store),
// and a lockfile.LastWrite value for that update.
//
// The caller must hold r.lockfile for reading _or_ writing.
// The caller must hold r.inProcessLock for reading or writing.
func (r *containerStore) modified() (lockfile.LastWrite, bool, error) {
return r.lockfile.ModifiedSince(r.lastWrite)
}
// reloadIfChanged reloads the contents of the store from disk if it is changed.
//
// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
// if it is held for writing.
//
// The caller must hold r.inProcessLock for WRITING.
//
// If !lockedForWriting and this function fails, the return value indicates whether
// reloadIfChanged() with lockedForWriting could succeed.
func (r *containerStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
lastWrite, modified, err := r.modified()
if err != nil {
return false, err
}
// We require callers to always hold r.inProcessLock for WRITING, even if they might not end up calling r.load()
// and modify no fields, to ensure they see fresh data:
// r.lockfile.Modified() only returns true once per change. Without an exclusive lock,
// one goroutine might see r.lockfile.Modified() == true and decide to load, and in the meanwhile another one could
// see r.lockfile.Modified() == false and proceed to use in-memory data without noticing it is stale.
if modified {
if tryLockedForWriting, err := r.load(lockedForWriting); err != nil {
return tryLockedForWriting, err // r.lastWrite is unchanged, so we will load the next time again.
}
r.lastWrite = lastWrite
}
return false, nil
}
// Requires startReading or startWriting.
func (r *containerStore) Containers() ([]Container, error) {
containers := make([]Container, len(r.containers))
for i := range r.containers {
containers[i] = *copyContainer(r.containers[i])
}
return containers, nil
}
// This looks for datadirs in the store directory that are not referenced
// by the json file and removes them. These can happen in the case of unclean
// shutdowns or regular restarts in transient store mode.
// Requires startReading.
func (r *containerStore) GarbageCollect() error {
entries, err := os.ReadDir(r.dir)
if err != nil {
// Unexpected, don't try any GC
return err
}
for _, entry := range entries {
id := entry.Name()
// Does it look like a datadir directory?
if !entry.IsDir() || stringid.ValidateID(id) != nil {
continue
}
// Should the id be there?
if r.byid[id] != nil {
continue
}
// Otherwise remove datadir
logrus.Debugf("removing %q", filepath.Join(r.dir, id))
moreErr := os.RemoveAll(filepath.Join(r.dir, id))
// Propagate first error
if moreErr != nil && err == nil {
err = moreErr
}
}
return err
}
func (r *containerStore) datadir(id string) string {
return filepath.Join(r.dir, id)
}
func (r *containerStore) datapath(id, key string) string {
return filepath.Join(r.datadir(id), makeBigDataBaseName(key))
}
// load reloads the contents of the store from disk.
//
// Most callers should call reloadIfChanged() instead, to avoid overhead and to correctly
// manage r.lastWrite.
//
// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
// if it is held for writing.
// The caller must hold r.inProcessLock for WRITING.
//
// If !lockedForWriting and this function fails, the return value indicates whether
// retrying with lockedForWriting could succeed.
func (r *containerStore) load(lockedForWriting bool) (bool, error) {
var modifiedLocations containerLocations
containers := []*Container{}
ids := make(map[string]*Container)
for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ {
location := containerLocationFromIndex(locationIndex)
rpath := r.jsonPath[locationIndex]
data, err := os.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
return false, err
}
locationContainers := []*Container{}
if len(data) != 0 {
if err := json.Unmarshal(data, &locationContainers); err != nil {
return false, fmt.Errorf("loading %q: %w", rpath, err)
}
}
for _, container := range locationContainers {
// There should be no duplicated ids between json files, but let's check to be sure
if ids[container.ID] != nil {
continue // skip invalid duplicated container
}
// Remember where the container came from
if location == volatileContainerLocation {
container.volatileStore = true
}
containers = append(containers, container)
ids[container.ID] = container
}
}
idlist := make([]string, 0, len(containers))
layers := make(map[string]*Container)
names := make(map[string]*Container)
var errorToResolveBySaving error // == nil
for n, container := range containers {
idlist = append(idlist, container.ID)
layers[container.LayerID] = containers[n]
for _, name := range container.Names {
if conflict, ok := names[name]; ok {
r.removeName(conflict, name)
errorToResolveBySaving = errors.New("container store is inconsistent and the current caller does not hold a write lock")
modifiedLocations |= containerLocation(container)
}
names[name] = containers[n]
}
}
r.containers = containers
r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store.
r.byid = ids
r.bylayer = layers
r.byname = names
if errorToResolveBySaving != nil {
if !lockedForWriting {
return true, errorToResolveBySaving
}
return false, r.save(modifiedLocations)
}
return false, nil
}
// save saves the contents of the store to disk.
// The caller must hold r.lockfile locked for writing.
// The caller must hold r.inProcessLock for reading (but usually holds it for writing in order to make the desired changes).
func (r *containerStore) save(saveLocations containerLocations) error {
r.lockfile.AssertLockedForWriting()
// This must be done before we write the file, because the process could be terminated
// after the file is written but before the lock file is updated.
lw, err := r.lockfile.RecordWrite()
if err != nil {
return err
}
r.lastWrite = lw
for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ {
location := containerLocationFromIndex(locationIndex)
if location&saveLocations == 0 {
continue
}
rpath := r.jsonPath[locationIndex]
if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil {
return err
}
subsetContainers := make([]*Container, 0, len(r.containers))
for _, container := range r.containers {
if containerLocation(container) == location {
subsetContainers = append(subsetContainers, container)
}
}
jdata, err := json.Marshal(&subsetContainers)
if err != nil {
return err
}
var opts *ioutils.AtomicFileWriterOptions
if location == volatileContainerLocation {
opts = &ioutils.AtomicFileWriterOptions{
NoSync: true,
}
}
if err := ioutils.AtomicWriteFileWithOpts(rpath, jdata, 0o600, opts); err != nil {
return err
}
}
return nil
}
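// AtomicWriteFileWithOpts is the write-to-temp-then-rename technique, so a
// crash mid-write never leaves readers with a truncated JSON file. A minimal
// sketch of that technique, assuming POSIX rename semantics (illustrative;
// the real implementation lives in pkg/ioutils):
func exampleAtomicWrite(path string, data []byte) error {
	tmp, err := os.CreateTemp(filepath.Dir(path), ".tmp-*")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // best-effort cleanup; fails harmlessly after a successful rename
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Sync(); err != nil { // flush contents before publishing the name
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), path) // atomically replaces the old file
}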
// saveFor saves the contents of the store relevant for modifiedContainer to disk.
// The caller must hold r.lockfile locked for writing.
// The caller must hold r.inProcessLock for reading (but usually holds it for writing in order to make the desired changes).
func (r *containerStore) saveFor(modifiedContainer *Container) error {
return r.save(containerLocation(modifiedContainer))
}
func newContainerStore(dir string, runDir string, transient bool) (rwContainerStore, error) {
if err := os.MkdirAll(dir, 0o700); err != nil {
return nil, err
}
volatileDir := dir
if transient {
if err := os.MkdirAll(runDir, 0o700); err != nil {
return nil, err
}
volatileDir = runDir
}
lockfile, err := lockfile.GetLockFile(filepath.Join(volatileDir, "containers.lock"))
if err != nil {
return nil, err
}
cstore := containerStore{
lockfile: lockfile,
dir: dir,
jsonPath: [numContainerLocationIndex]string{
filepath.Join(dir, "containers.json"),
filepath.Join(volatileDir, "volatile-containers.json"),
},
containers: []*Container{},
byid: make(map[string]*Container),
bylayer: make(map[string]*Container),
byname: make(map[string]*Container),
}
if err := cstore.startWritingWithReload(false); err != nil {
return nil, err
}
cstore.lastWrite, err = cstore.lockfile.GetLastWrite()
if err != nil {
return nil, err
}
defer cstore.stopWriting()
if _, err := cstore.load(true); err != nil {
return nil, err
}
return &cstore, nil
}
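// A hypothetical end-to-end sketch of the lifecycle (paths and IDs are
// illustrative; the real caller is the higher-level Store in this module):
//
//	cs, err := newContainerStore(dir, runDir, false)
//	if err != nil {
//		return err
//	}
//	if err := cs.startWriting(); err != nil {
//		return err
//	}
//	defer cs.stopWriting()
//	ctr, err := cs.create("", []string{"web"}, imageID, layerID, nil)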
// Requires startReading or startWriting.
func (r *containerStore) lookup(id string) (*Container, bool) {
if container, ok := r.byid[id]; ok {
return container, ok
} else if container, ok := r.byname[id]; ok {
return container, ok
} else if container, ok := r.bylayer[id]; ok {
return container, ok
} else if longid, err := r.idindex.Get(id); err == nil {
if container, ok := r.byid[longid]; ok {
return container, ok
}
}
return nil, false
}
// Requires startWriting.
func (r *containerStore) ClearFlag(id string, flag string) error {
container, ok := r.lookup(id)
if !ok {
return ErrContainerUnknown
}
delete(container.Flags, flag)
return r.saveFor(container)
}
// Requires startWriting.
func (r *containerStore) SetFlag(id string, flag string, value interface{}) error {
container, ok := r.lookup(id)
if !ok {
return ErrContainerUnknown
}
if container.Flags == nil {
container.Flags = make(map[string]interface{})
}
container.Flags[flag] = value
return r.saveFor(container)
}
// Requires startWriting.
func (r *containerStore) create(id string, names []string, image, layer string, options *ContainerOptions) (container *Container, err error) {
if options == nil {
options = &ContainerOptions{}
}
if id == "" {
id = stringid.GenerateRandomID()
_, idInUse := r.byid[id]
for idInUse {
id = stringid.GenerateRandomID()
_, idInUse = r.byid[id]
}
}
if _, idInUse := r.byid[id]; idInUse {
return nil, ErrDuplicateID
}
names = dedupeStrings(names)
for _, name := range names {
if _, nameInUse := r.byname[name]; nameInUse {
return nil, fmt.Errorf("the container name %q is already in use by %s. You have to remove that container to be able to reuse that name: %w", name, r.byname[name].ID, ErrDuplicateName)
}
}
if err := hasOverlappingRanges(options.UIDMap); err != nil {
return nil, err
}
if err := hasOverlappingRanges(options.GIDMap); err != nil {
return nil, err
}
container = &Container{
ID: id,
Names: names,
ImageID: image,
LayerID: layer,
Metadata: options.Metadata,
BigDataNames: []string{},
BigDataSizes: make(map[string]int64),
BigDataDigests: make(map[string]digest.Digest),
Created: time.Now().UTC(),
Flags: copyStringInterfaceMap(options.Flags),
UIDMap: copyIDMap(options.UIDMap),
GIDMap: copyIDMap(options.GIDMap),
volatileStore: options.Volatile,
}
if options.MountOpts != nil {
container.Flags[mountOptsFlag] = append([]string{}, options.MountOpts...)
}
if options.Volatile {
container.Flags[volatileFlag] = true
}
r.containers = append(r.containers, container)
// This can only fail on duplicate IDs, which shouldn't happen — and in
// that case the index is already in the desired state anyway.
// Implementing recovery from an unlikely and unimportant failure here
// would be too risky.
_ = r.idindex.Add(id)
r.byid[id] = container
r.bylayer[layer] = container
for _, name := range names {
r.byname[name] = container
}
defer func() {
if err != nil {
// now that the in-memory structures know about the new
// record, we can use regular Delete() to clean up if
// anything breaks from here on out
if e := r.Delete(id); e != nil {
logrus.Debugf("while cleaning up partially-created container %q we failed to create: %v", id, e)
}
}
}()
err = r.saveFor(container)
if err != nil {
return nil, err
}
for _, item := range options.BigData {
if err = r.SetBigData(id, item.Key, item.Data); err != nil {
return nil, err
}
}
container = copyContainer(container)
return container, err
}
// Requires startReading or startWriting.
func (r *containerStore) Metadata(id string) (string, error) {
if container, ok := r.lookup(id); ok {
return container.Metadata, nil
}
return "", ErrContainerUnknown
}
// Requires startWriting.
func (r *containerStore) SetMetadata(id, metadata string) error {
if container, ok := r.lookup(id); ok {
container.Metadata = metadata
return r.saveFor(container)
}
return ErrContainerUnknown
}
// The caller must hold r.inProcessLock for writing.
func (r *containerStore) removeName(container *Container, name string) {
container.Names = stringSliceWithoutValue(container.Names, name)
}
// Requires startWriting.
func (r *containerStore) updateNames(id string, names []string, op updateNameOperation) error {
container, ok := r.lookup(id)
if !ok {
return ErrContainerUnknown
}
oldNames := container.Names
names, err := applyNameOperation(oldNames, names, op)
if err != nil {
return err
}
for _, name := range oldNames {
delete(r.byname, name)
}
for _, name := range names {
if otherContainer, ok := r.byname[name]; ok {
r.removeName(otherContainer, name)
}
r.byname[name] = container
}
container.Names = names
return r.saveFor(container)
}
// Requires startWriting.
func (r *containerStore) Delete(id string) error {
container, ok := r.lookup(id)
if !ok {
return ErrContainerUnknown
}
id = container.ID
toDeleteIndex := -1
for i, candidate := range r.containers {
if candidate.ID == id {
toDeleteIndex = i
break
}
}
delete(r.byid, id)
// This can only fail if the ID is already missing, which shouldn't happen — and in that case the index is already in the desired state anyway.
// The store's Delete method is used on various paths to recover from failures, so this should be robust against partially missing data.
_ = r.idindex.Delete(id)
delete(r.bylayer, container.LayerID)
for _, name := range container.Names {
delete(r.byname, name)
}
if toDeleteIndex != -1 {
// delete the container at toDeleteIndex
if toDeleteIndex == len(r.containers)-1 {
r.containers = r.containers[:len(r.containers)-1]
} else {
r.containers = append(r.containers[:toDeleteIndex], r.containers[toDeleteIndex+1:]...)
}
}
if err := r.saveFor(container); err != nil {
return err
}
if err := os.RemoveAll(r.datadir(id)); err != nil {
return err
}
return nil
}
// Requires startReading or startWriting.
func (r *containerStore) Get(id string) (*Container, error) {
if container, ok := r.lookup(id); ok {
return copyContainer(container), nil
}
return nil, ErrContainerUnknown
}
// Requires startReading or startWriting.
func (r *containerStore) Lookup(name string) (id string, err error) {
if container, ok := r.lookup(name); ok {
return container.ID, nil
}
return "", ErrContainerUnknown
}
// Requires startReading or startWriting.
func (r *containerStore) Exists(id string) bool {
_, ok := r.lookup(id)
return ok
}
// Requires startReading or startWriting.
func (r *containerStore) BigData(id, key string) ([]byte, error) {
if key == "" {
return nil, fmt.Errorf("can't retrieve container big data value for empty name: %w", ErrInvalidBigDataName)
}
c, ok := r.lookup(id)
if !ok {
return nil, ErrContainerUnknown
}
return os.ReadFile(r.datapath(c.ID, key))
}
// Requires startWriting. Yes, really, WRITING (see SetBigData).
func (r *containerStore) BigDataSize(id, key string) (int64, error) {
if key == "" {
return -1, fmt.Errorf("can't retrieve size of container big data with empty name: %w", ErrInvalidBigDataName)
}
c, ok := r.lookup(id)
if !ok {
return -1, ErrContainerUnknown
}
if size, ok := c.BigDataSizes[key]; ok { // This is valid, and returns ok == false, for BigDataSizes == nil.
return size, nil
}
if data, err := r.BigData(id, key); err == nil && data != nil {
if err = r.SetBigData(id, key, data); err == nil {
c, ok := r.lookup(id)
if !ok {
return -1, ErrContainerUnknown
}
if size, ok := c.BigDataSizes[key]; ok {
return size, nil
}
} else {
return -1, err
}
}
return -1, ErrSizeUnknown
}
// Requires startWriting. Yes, really, WRITING (see SetBigData).
func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
if key == "" {
return "", fmt.Errorf("can't retrieve digest of container big data value with empty name: %w", ErrInvalidBigDataName)
}
c, ok := r.lookup(id)
if !ok {
return "", ErrContainerUnknown
}
if d, ok := c.BigDataDigests[key]; ok { // This is valid, and returns ok == false, for BigDataDigests == nil.
return d, nil
}
if data, err := r.BigData(id, key); err == nil && data != nil {
if err = r.SetBigData(id, key, data); err == nil {
c, ok := r.lookup(id)
if !ok {
return "", ErrContainerUnknown
}
if d, ok := c.BigDataDigests[key]; ok {
return d, nil
}
} else {
return "", err
}
}
return "", ErrDigestUnknown
}
// Requires startReading or startWriting.
func (r *containerStore) BigDataNames(id string) ([]string, error) {
c, ok := r.lookup(id)
if !ok {
return nil, ErrContainerUnknown
}
return copyStringSlice(c.BigDataNames), nil
}
// Requires startWriting.
func (r *containerStore) SetBigData(id, key string, data []byte) error {
if key == "" {
return fmt.Errorf("can't set empty name for container big data item: %w", ErrInvalidBigDataName)
}
c, ok := r.lookup(id)
if !ok {
return ErrContainerUnknown
}
if err := os.MkdirAll(r.datadir(c.ID), 0o700); err != nil {
return err
}
err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0o600)
if err == nil {
save := false
if c.BigDataSizes == nil {
c.BigDataSizes = make(map[string]int64)
}
oldSize, sizeOk := c.BigDataSizes[key]
c.BigDataSizes[key] = int64(len(data))
if c.BigDataDigests == nil {
c.BigDataDigests = make(map[string]digest.Digest)
}
oldDigest, digestOk := c.BigDataDigests[key]
newDigest := digest.Canonical.FromBytes(data)
c.BigDataDigests[key] = newDigest
if !sizeOk || oldSize != c.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
save = true
}
addName := true
for _, name := range c.BigDataNames {
if name == key {
addName = false
break
}
}
if addName {
c.BigDataNames = append(c.BigDataNames, key)
save = true
}
if save {
err = r.saveFor(c)
}
}
return err
}
// Requires startWriting.
func (r *containerStore) Wipe() error {
ids := make([]string, 0, len(r.byid))
for id := range r.byid {
ids = append(ids, id)
}
for _, id := range ids {
if err := r.Delete(id); err != nil {
return err
}
}
return nil
}

216
vendor/github.com/containers/storage/deprecated.go generated vendored Normal file

@ -0,0 +1,216 @@
package storage
import (
"io"
"time"
drivers "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
digest "github.com/opencontainers/go-digest"
)
// The type definitions in this file exist ONLY to maintain formal API compatibility.
// DO NOT ADD ANY NEW METHODS TO THESE INTERFACES.
// ROFileBasedStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type ROFileBasedStore interface {
Locker
Load() error
ReloadIfChanged() error
}
// RWFileBasedStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type RWFileBasedStore interface {
Save() error
}
// FileBasedStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type FileBasedStore interface {
ROFileBasedStore
RWFileBasedStore
}
// ROMetadataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type ROMetadataStore interface {
Metadata(id string) (string, error)
}
// RWMetadataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type RWMetadataStore interface {
SetMetadata(id, metadata string) error
}
// MetadataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type MetadataStore interface {
ROMetadataStore
RWMetadataStore
}
// ROBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type ROBigDataStore interface {
BigData(id, key string) ([]byte, error)
BigDataSize(id, key string) (int64, error)
BigDataDigest(id, key string) (digest.Digest, error)
BigDataNames(id string) ([]string, error)
}
// RWImageBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type RWImageBigDataStore interface {
SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
}
// ContainerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type ContainerBigDataStore interface {
ROBigDataStore
SetBigData(id, key string, data []byte) error
}
// ROLayerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type ROLayerBigDataStore interface {
BigData(id, key string) (io.ReadCloser, error)
BigDataNames(id string) ([]string, error)
}
// RWLayerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type RWLayerBigDataStore interface {
SetBigData(id, key string, data io.Reader) error
}
// LayerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type LayerBigDataStore interface {
ROLayerBigDataStore
RWLayerBigDataStore
}
// FlaggableStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type FlaggableStore interface {
ClearFlag(id string, flag string) error
SetFlag(id string, flag string, value interface{}) error
}
// ContainerStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type ContainerStore interface {
FileBasedStore
MetadataStore
ContainerBigDataStore
FlaggableStore
Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error)
SetNames(id string, names []string) error
AddNames(id string, names []string) error
RemoveNames(id string, names []string) error
Get(id string) (*Container, error)
Exists(id string) bool
Delete(id string) error
Wipe() error
Lookup(name string) (string, error)
Containers() ([]Container, error)
}
// ROImageStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type ROImageStore interface {
ROFileBasedStore
ROMetadataStore
ROBigDataStore
Exists(id string) bool
Get(id string) (*Image, error)
Lookup(name string) (string, error)
Images() ([]Image, error)
ByDigest(d digest.Digest) ([]*Image, error)
}
// ImageStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type ImageStore interface {
ROImageStore
RWFileBasedStore
RWMetadataStore
RWImageBigDataStore
FlaggableStore
Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error)
SetNames(id string, names []string) error
AddNames(id string, names []string) error
RemoveNames(id string, names []string) error
Delete(id string) error
Wipe() error
}
// ROLayerStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type ROLayerStore interface {
ROFileBasedStore
ROMetadataStore
ROLayerBigDataStore
Exists(id string) bool
Get(id string) (*Layer, error)
Status() ([][2]string, error)
Changes(from, to string) ([]archive.Change, error)
Diff(from, to string, options *DiffOptions) (io.ReadCloser, error)
DiffSize(from, to string) (int64, error)
Size(name string) (int64, error)
Lookup(name string) (string, error)
LayersByCompressedDigest(d digest.Digest) ([]Layer, error)
LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)
Layers() ([]Layer, error)
}
// LayerStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type LayerStore interface {
ROLayerStore
RWFileBasedStore
RWMetadataStore
FlaggableStore
RWLayerBigDataStore
Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (*Layer, error)
CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error)
Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error)
SetNames(id string, names []string) error
AddNames(id string, names []string) error
RemoveNames(id string, names []string) error
Delete(id string) error
Wipe() error
Mount(id string, options drivers.MountOpts) (string, error)
Unmount(id string, force bool) (bool, error)
Mounted(id string) (int, error)
ParentOwners(id string) (uids, gids []int, err error)
ApplyDiff(to string, diff io.Reader) (int64, error)
ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
CleanupStagingDirectory(stagingDirectory string) error
ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error
DifferTarget(id string) (string, error)
LoadLocked() error
PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error)
}


@ -0,0 +1,781 @@
//go:build linux
// +build linux
/*
aufs driver directory structure
.
layers // Metadata of layers
1
2
3
diff // Content of the layer
1 // Contains layers that need to be mounted for the id
2
3
mnt // Mount points for the rw layers to be mounted
1
2
3
*/
package aufs
import (
"bufio"
"errors"
"fmt"
"io"
"io/fs"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"sync"
"time"
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/locker"
mountpk "github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/sirupsen/logrus"
"github.com/vbatts/tar-split/tar/storage"
"golang.org/x/sys/unix"
)
var (
// ErrAufsNotSupported is returned if aufs is not supported by the host.
ErrAufsNotSupported = fmt.Errorf("aufs was not found in /proc/filesystems")
// ErrAufsNested means aufs cannot be used because we are in a user namespace
ErrAufsNested = fmt.Errorf("aufs cannot be used in non-init user namespace")
backingFs = "<unknown>"
enableDirpermLock sync.Once
enableDirperm bool
)
const defaultPerms = os.FileMode(0o555)
func init() {
graphdriver.MustRegister("aufs", Init)
}
// Driver contains information about the filesystem mounted.
type Driver struct {
sync.Mutex
root string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
ctr *graphdriver.RefCounter
pathCacheLock sync.Mutex
pathCache map[string]string
naiveDiff graphdriver.DiffDriver
locker *locker.Locker
mountOptions string
}
// Init returns a new AUFS driver.
// An error is returned if AUFS is not supported.
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
// Try to load the aufs kernel module
if err := supportsAufs(); err != nil {
return nil, fmt.Errorf("kernel does not support aufs: %w", graphdriver.ErrNotSupported)
}
fsMagic, err := graphdriver.GetFSMagic(home)
if err != nil {
return nil, err
}
if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
backingFs = fsName
}
switch fsMagic {
case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs:
logrus.Errorf("AUFS is not supported over %s", backingFs)
return nil, fmt.Errorf("aufs is not supported over %q: %w", backingFs, graphdriver.ErrIncompatibleFS)
}
var mountOptions string
for _, option := range options.DriverOptions {
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil {
return nil, err
}
key = strings.ToLower(key)
switch key {
case "aufs.mountopt":
mountOptions = val
default:
return nil, fmt.Errorf("option %s not supported", option)
}
}
paths := []string{
"mnt",
"diff",
"layers",
}
a := &Driver{
root: home,
uidMaps: options.UIDMaps,
gidMaps: options.GIDMaps,
pathCache: make(map[string]string),
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)),
locker: locker.New(),
mountOptions: mountOptions,
}
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
if err != nil {
return nil, err
}
// Create the root aufs driver dir and return
// if it already exists. If not, populate the
// dir structure.
if err := idtools.MkdirAllAs(home, 0o700, rootUID, rootGID); err != nil {
if os.IsExist(err) {
return a, nil
}
return nil, err
}
if err := mountpk.MakePrivate(home); err != nil {
return nil, err
}
// Populate the dir structure
for _, p := range paths {
if err := idtools.MkdirAllAs(path.Join(home, p), 0o700, rootUID, rootGID); err != nil {
return nil, err
}
}
logger := logrus.WithFields(logrus.Fields{
"module": "graphdriver",
"driver": "aufs",
})
for _, path := range []string{"mnt", "diff"} {
p := filepath.Join(home, path)
entries, err := os.ReadDir(p)
if err != nil {
logger.WithError(err).WithField("dir", p).Error("error reading dir entries")
continue
}
for _, entry := range entries {
if !entry.IsDir() {
continue
}
if strings.HasSuffix(entry.Name(), "-removing") {
logger.WithField("dir", entry.Name()).Debug("Cleaning up stale layer dir")
if err := system.EnsureRemoveAll(filepath.Join(p, entry.Name())); err != nil {
logger.WithField("dir", entry.Name()).WithError(err).Error("Error removing stale layer dir")
}
}
}
}
a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, a)
return a, nil
}
// Return a nil error if the kernel supports aufs.
// We cannot modprobe because inside dind modprobe fails
// to run.
func supportsAufs() error {
// We can try to modprobe aufs first before looking at
// proc/filesystems for when aufs is supported
exec.Command("modprobe", "aufs").Run()
if unshare.IsRootless() {
return ErrAufsNested
}
f, err := os.Open("/proc/filesystems")
if err != nil {
return err
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
if strings.Contains(s.Text(), "aufs") {
return nil
}
}
return ErrAufsNotSupported
}
func (a *Driver) rootPath() string {
return a.root
}
func (*Driver) String() string {
return "aufs"
}
// Status returns current information about the filesystem such as root directory, number of directories mounted, etc.
func (a *Driver) Status() [][2]string {
ids, _ := loadIds(path.Join(a.rootPath(), "layers"))
return [][2]string{
{"Root Dir", a.rootPath()},
{"Backing Filesystem", backingFs},
{"Dirs", fmt.Sprintf("%d", len(ids))},
{"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())},
}
}
// Metadata not implemented
func (a *Driver) Metadata(id string) (map[string]string, error) {
return nil, nil //nolint: nilnil
}
// Exists returns true if the given id is registered with
// this driver
func (a *Driver) Exists(id string) bool {
if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil {
return false
}
return true
}
// ListLayers returns all of the layers known to the driver.
func (a *Driver) ListLayers() ([]string, error) {
diffsDir := filepath.Join(a.rootPath(), "diff")
entries, err := os.ReadDir(diffsDir)
if err != nil {
return nil, err
}
results := make([]string, 0, len(entries))
for _, entry := range entries {
if !entry.IsDir() {
continue
}
results = append(results, entry.Name())
}
return results, nil
}
// AdditionalImageStores returns additional image stores supported by the driver
func (a *Driver) AdditionalImageStores() []string {
return nil
}
// CreateFromTemplate creates a layer with the same contents and parent as another layer.
func (a *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
if opts == nil {
opts = &graphdriver.CreateOpts{}
}
return graphdriver.NaiveCreateFromTemplate(a, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite)
}
// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
return a.Create(id, parent, opts)
}
// Create three folders for each id:
// mnt, layers, and diff
func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
if opts != nil && len(opts.StorageOpt) != 0 {
return fmt.Errorf("--storage-opt is not supported for aufs")
}
if err := a.createDirsFor(id, parent); err != nil {
return err
}
// Write the layers metadata
f, err := os.Create(path.Join(a.rootPath(), "layers", id))
if err != nil {
return err
}
defer f.Close()
if parent != "" {
ids, err := getParentIDs(a.rootPath(), parent)
if err != nil {
return err
}
if _, err := fmt.Fprintln(f, parent); err != nil {
return err
}
for _, i := range ids {
if _, err := fmt.Fprintln(f, i); err != nil {
return err
}
}
}
return nil
}
// createDirsFor creates two directories for the given id:
// mnt and diff
func (a *Driver) createDirsFor(id, parent string) error {
paths := []string{
"mnt",
"diff",
}
// Directory permission is 0555.
// The paths of the directories are <aufs_root_path>/mnt/<image_id>
// and <aufs_root_path>/diff/<image_id>
for _, p := range paths {
rootPair := idtools.NewIDMappingsFromMaps(a.uidMaps, a.gidMaps).RootPair()
rootPerms := defaultPerms
if parent != "" {
st, err := system.Stat(path.Join(a.rootPath(), p, parent))
if err != nil {
return err
}
rootPerms = os.FileMode(st.Mode())
rootPair.UID = int(st.UID())
rootPair.GID = int(st.GID())
}
if err := idtools.MkdirAllAndChownNew(path.Join(a.rootPath(), p, id), rootPerms, rootPair); err != nil {
return err
}
}
return nil
}
// Remove will unmount and remove the given id.
func (a *Driver) Remove(id string) error {
a.locker.Lock(id)
defer a.locker.Unlock(id)
a.pathCacheLock.Lock()
mountpoint, exists := a.pathCache[id]
a.pathCacheLock.Unlock()
if !exists {
mountpoint = a.getMountpoint(id)
}
logger := logrus.WithFields(logrus.Fields{
"module": "graphdriver",
"driver": "aufs",
"layer": id,
})
var retries int
for {
mounted, err := a.mounted(mountpoint)
if err != nil {
if os.IsNotExist(err) {
break
}
return err
}
if !mounted {
break
}
err = a.unmount(mountpoint)
if err == nil {
break
}
if err != unix.EBUSY {
return fmt.Errorf("aufs: unmount error: %s: %w", mountpoint, err)
}
if retries >= 5 {
return fmt.Errorf("aufs: unmount error after retries: %s: %w", mountpoint, err)
}
// If unmount returns EBUSY, it could be a transient error. Sleep and retry.
retries++
logger.Warnf("unmount failed due to EBUSY: retry count: %d", retries)
time.Sleep(100 * time.Millisecond)
}
// Remove the layers file for the id
if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("removing layers dir for %s: %w", id, err)
}
if err := atomicRemove(a.getDiffPath(id)); err != nil {
return fmt.Errorf("could not remove diff path for id %s: %w", id, err)
}
// Atomically remove each directory in turn by first moving it out of the
// way (so that the container runtime doesn't find it anymore) before removing
// the whole tree.
if err := atomicRemove(mountpoint); err != nil {
if errors.Is(err, unix.EBUSY) {
logger.WithField("dir", mountpoint).WithError(err).Warn("error performing atomic remove due to EBUSY")
}
return fmt.Errorf("could not remove mountpoint for id %s: %w", id, err)
}
a.pathCacheLock.Lock()
delete(a.pathCache, id)
a.pathCacheLock.Unlock()
return nil
}
func atomicRemove(source string) error {
target := source + "-removing"
err := os.Rename(source, target)
switch {
case err == nil, os.IsNotExist(err):
case os.IsExist(err):
// Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove
if _, e := os.Stat(source); !os.IsNotExist(e) {
return fmt.Errorf("target rename dir '%s' exists but should not, this needs to be manually cleaned up: %w", target, err)
}
default:
return fmt.Errorf("preparing atomic delete: %w", err)
}
return system.EnsureRemoveAll(target)
}
// Get returns the rootfs path for the id.
// This will mount the dir at its given path
func (a *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
a.locker.Lock(id)
defer a.locker.Unlock(id)
parents, err := a.getParentLayerPaths(id)
if err != nil && !os.IsNotExist(err) {
return "", err
}
a.pathCacheLock.Lock()
m, exists := a.pathCache[id]
a.pathCacheLock.Unlock()
if !exists {
m = a.getDiffPath(id)
if len(parents) > 0 {
m = a.getMountpoint(id)
}
}
if count := a.ctr.Increment(m); count > 1 {
return m, nil
}
// If a dir does not have a parent (no layers), do not try to mount;
// just return the diff path to the data
if len(parents) > 0 {
if err := a.mount(id, m, parents, options); err != nil {
return "", err
}
}
a.pathCacheLock.Lock()
a.pathCache[id] = m
a.pathCacheLock.Unlock()
return m, nil
}
// Put unmounts and updates list of active mounts.
func (a *Driver) Put(id string) error {
a.locker.Lock(id)
defer a.locker.Unlock(id)
a.pathCacheLock.Lock()
m, exists := a.pathCache[id]
if !exists {
m = a.getMountpoint(id)
a.pathCache[id] = m
}
a.pathCacheLock.Unlock()
if count := a.ctr.Decrement(m); count > 0 {
return nil
}
err := a.unmount(m)
if err != nil {
logrus.Debugf("Failed to unmount %s aufs: %v", id, err)
}
return err
}
// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
// For AUFS, it queries the mountpoint for this ID.
func (a *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
a.locker.Lock(id)
defer a.locker.Unlock(id)
a.pathCacheLock.Lock()
m, exists := a.pathCache[id]
if !exists {
m = a.getMountpoint(id)
a.pathCache[id] = m
}
a.pathCacheLock.Unlock()
return directory.Usage(m)
}
// isParent returns whether the passed-in parent is the direct parent of the passed-in layer
func (a *Driver) isParent(id, parent string) bool {
parents, _ := getParentIDs(a.rootPath(), id)
if parent == "" && len(parents) > 0 {
return false
}
return !(len(parents) > 0 && parent != parents[0])
}
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
func (a *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) {
if !a.isParent(id, parent) {
return a.naiveDiff.Diff(id, idMappings, parent, parentMappings, mountLabel)
}
if idMappings == nil {
idMappings = &idtools.IDMappings{}
}
// AUFS doesn't need the parent layer to produce a diff.
return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
Compression: archive.Uncompressed,
ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir},
UIDMaps: idMappings.UIDs(),
GIDMaps: idMappings.GIDs(),
})
}
type fileGetNilCloser struct {
storage.FileGetter
}
func (f fileGetNilCloser) Close() error {
return nil
}
// DiffGetter returns a FileGetCloser that can read files from the directory that
// contains files for the layer differences. Used for direct access for tar-split.
func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
p := path.Join(a.rootPath(), "diff", id)
return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil
}
func (a *Driver) applyDiff(id string, idMappings *idtools.IDMappings, diff io.Reader) error {
if idMappings == nil {
idMappings = &idtools.IDMappings{}
}
return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
UIDMaps: idMappings.UIDs(),
GIDMaps: idMappings.GIDs(),
})
}
// DiffSize calculates the changes between the specified id
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
func (a *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) {
if !a.isParent(id, parent) {
return a.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel)
}
// AUFS doesn't need the parent layer to calculate the diff size.
return directory.Size(path.Join(a.rootPath(), "diff", id))
}
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
func (a *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (size int64, err error) {
if !a.isParent(id, parent) {
return a.naiveDiff.ApplyDiff(id, parent, options)
}
// AUFS doesn't need the parent id to apply the diff if it is the direct parent.
if err = a.applyDiff(id, options.Mappings, options.Diff); err != nil {
return
}
return directory.Size(path.Join(a.rootPath(), "diff", id))
}
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
func (a *Driver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) {
if !a.isParent(id, parent) {
return a.naiveDiff.Changes(id, idMappings, parent, parentMappings, mountLabel)
}
// AUFS doesn't have snapshots, so we need to get changes from all parent
// layers.
layers, err := a.getParentLayerPaths(id)
if err != nil {
return nil, err
}
return archive.Changes(layers, path.Join(a.rootPath(), "diff", id))
}
func (a *Driver) getParentLayerPaths(id string) ([]string, error) {
parentIds, err := getParentIDs(a.rootPath(), id)
if err != nil {
return nil, err
}
layers := make([]string, len(parentIds))
// Get the diff paths for all the parent ids
for i, p := range parentIds {
layers[i] = path.Join(a.rootPath(), "diff", p)
}
return layers, nil
}
func (a *Driver) mount(id string, target string, layers []string, options graphdriver.MountOpts) error {
a.Lock()
defer a.Unlock()
// If the id is mounted, or we get an error, return
if mounted, err := a.mounted(target); err != nil || mounted {
return err
}
rw := a.getDiffPath(id)
if err := a.aufsMount(layers, rw, target, options); err != nil {
return fmt.Errorf("creating aufs mount to %s: %w", target, err)
}
return nil
}
func (a *Driver) unmount(mountPath string) error {
a.Lock()
defer a.Unlock()
if mounted, err := a.mounted(mountPath); err != nil || !mounted {
return err
}
if err := Unmount(mountPath); err != nil {
return err
}
return nil
}
func (a *Driver) mounted(mountpoint string) (bool, error) {
return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint)
}
// Cleanup aufs and unmount all mountpoints
func (a *Driver) Cleanup() error {
var dirs []string
if err := filepath.WalkDir(a.mntPath(), func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if !d.IsDir() {
return nil
}
dirs = append(dirs, path)
return nil
}); err != nil {
return err
}
for _, m := range dirs {
if err := a.unmount(m); err != nil {
logrus.Debugf("aufs error unmounting %s: %s", m, err)
}
}
return mountpk.Unmount(a.root)
}
func (a *Driver) aufsMount(ro []string, rw, target string, options graphdriver.MountOpts) (err error) {
defer func() {
if err != nil {
Unmount(target)
}
}()
// Mount options are clipped to the page size (4096 bytes). If there are more
// layers, then these are remounted individually using append.
offset := 54
if useDirperm() {
offset += len(",dirperm1")
}
b := make([]byte, unix.Getpagesize()-len(options.MountLabel)-offset) // room for xino & mountLabel
bp := copy(b, fmt.Sprintf("br:%s=rw", rw))
index := 0
for ; index < len(ro); index++ {
layer := fmt.Sprintf(":%s=ro+wh", ro[index])
if bp+len(layer) > len(b) {
break
}
bp += copy(b[bp:], layer)
}
opts := "dio,xino=/dev/shm/aufs.xino"
mountOptions := a.mountOptions
if len(options.Options) > 0 {
mountOptions = strings.Join(options.Options, ",")
}
if mountOptions != "" {
opts += fmt.Sprintf(",%s", mountOptions)
}
if useDirperm() {
opts += ",dirperm1"
}
data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), options.MountLabel)
if err = mount("none", target, "aufs", 0, data); err != nil {
return
}
for ; index < len(ro); index++ {
layer := fmt.Sprintf(":%s=ro+wh", ro[index])
data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), options.MountLabel)
if err = mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil {
return
}
}
return
}
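// The branch-packing logic in aufsMount can be read in isolation: greedily
// pack ":<layer>=ro+wh" entries into one page-sized mount(2) data string and
// hand back whatever did not fit, to be applied via follow-up remounts. A
// simplified sketch (the real code above also reserves room for the xino
// options and the mount label):
func examplePackBranches(rw string, ro []string, budget int) (first string, rest []string) {
	b := make([]byte, budget)
	bp := copy(b, fmt.Sprintf("br:%s=rw", rw))
	i := 0
	for ; i < len(ro); i++ {
		layer := fmt.Sprintf(":%s=ro+wh", ro[i])
		if bp+len(layer) > len(b) {
			break // no room left: this and later layers need remounts
		}
		bp += copy(b[bp:], layer)
	}
	return string(b[:bp]), ro[i:]
}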
// useDirperm checks whether the dirperm1 mount option can be used with the
// current version of aufs.
func useDirperm() bool {
enableDirpermLock.Do(func() {
base, err := os.MkdirTemp("", "storage-aufs-base")
if err != nil {
logrus.Errorf("Checking dirperm1: %v", err)
return
}
defer os.RemoveAll(base)
union, err := os.MkdirTemp("", "storage-aufs-union")
if err != nil {
logrus.Errorf("Checking dirperm1: %v", err)
return
}
defer os.RemoveAll(union)
opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base)
if err := mount("none", union, "aufs", 0, opts); err != nil {
return
}
enableDirperm = true
if err := Unmount(union); err != nil {
logrus.Errorf("Checking dirperm1: failed to unmount %v", err)
}
})
return enableDirperm
}
// UpdateLayerIDMap updates ID mappings in a layer, changing them from the ones
// specified by toContainer to those specified by toHost.
func (a *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
return fmt.Errorf("aufs doesn't support changing ID mappings")
}
// SupportsShifting tells whether the driver supports shifting of the UIDs/GIDs in a userNS
func (a *Driver) SupportsShifting() bool {
return false
}


@ -0,0 +1,64 @@
//go:build linux
// +build linux
package aufs
import (
"bufio"
"os"
"path"
)
// Return the ids of all layers (the non-directory entries under root)
func loadIds(root string) ([]string, error) {
dirs, err := os.ReadDir(root)
if err != nil {
return nil, err
}
out := []string{}
for _, d := range dirs {
if !d.IsDir() {
out = append(out, d.Name())
}
}
return out, nil
}
// Read the layers file for the current id and return all the
// layers represented by new lines in the file
//
// If there are no lines in the file then the id has no parent
// and an empty slice is returned.
func getParentIDs(root, id string) ([]string, error) {
f, err := os.Open(path.Join(root, "layers", id))
if err != nil {
return nil, err
}
defer f.Close()
out := []string{}
s := bufio.NewScanner(f)
for s.Scan() {
if t := s.Text(); t != "" {
out = append(out, s.Text())
}
}
return out, s.Err()
}
func (a *Driver) getMountpoint(id string) string {
return path.Join(a.mntPath(), id)
}
func (a *Driver) mntPath() string {
return path.Join(a.rootPath(), "mnt")
}
func (a *Driver) getDiffPath(id string) string {
return path.Join(a.diffPath(), id)
}
func (a *Driver) diffPath() string {
return path.Join(a.rootPath(), "diff")
}


@ -0,0 +1,22 @@
//go:build linux
// +build linux
package aufs
import (
"os/exec"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
// Unmount the target specified.
func Unmount(target string) error {
if err := exec.Command("auplink", target, "flush").Run(); err != nil {
logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err)
}
if err := unix.Unmount(target, 0); err != nil {
return err
}
return nil
}


@ -0,0 +1,7 @@
package aufs
import "golang.org/x/sys/unix"
func mount(source string, target string, fstype string, flags uintptr, data string) error {
return unix.Mount(source, target, fstype, flags, data)
}


@ -0,0 +1,695 @@
//go:build linux && cgo
// +build linux,cgo
package btrfs
/*
#include <stdlib.h>
#include <dirent.h>
// keep struct field name compatible with btrfs-progs < 6.1.
#define max_referenced max_rfer
#include <btrfs/ioctl.h>
#include <btrfs/ctree.h>
static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct, const char* value) {
snprintf(btrfs_struct->name, BTRFS_SUBVOL_NAME_MAX, "%s", value);
}
*/
import "C"
import (
"fmt"
"io/fs"
"math"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"unsafe"
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/system"
"github.com/docker/go-units"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
const defaultPerms = os.FileMode(0o555)
func init() {
graphdriver.MustRegister("btrfs", Init)
}
type btrfsOptions struct {
minSpace uint64
size uint64
}
// Init returns a new BTRFS driver.
// An error is returned if BTRFS is not supported.
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
fsMagic, err := graphdriver.GetFSMagic(home)
if err != nil {
return nil, err
}
if fsMagic != graphdriver.FsMagicBtrfs {
return nil, fmt.Errorf("%q is not on a btrfs filesystem: %w", home, graphdriver.ErrPrerequisites)
}
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
if err != nil {
return nil, err
}
if err := idtools.MkdirAllAs(filepath.Join(home, "subvolumes"), 0o700, rootUID, rootGID); err != nil {
return nil, err
}
if err := mount.MakePrivate(home); err != nil {
return nil, err
}
opt, userDiskQuota, err := parseOptions(options.DriverOptions)
if err != nil {
return nil, err
}
driver := &Driver{
home: home,
uidMaps: options.UIDMaps,
gidMaps: options.GIDMaps,
options: opt,
}
if userDiskQuota {
if err := driver.enableQuota(); err != nil {
return nil, err
}
}
return graphdriver.NewNaiveDiffDriver(driver, graphdriver.NewNaiveLayerIDMapUpdater(driver)), nil
}
func parseOptions(opt []string) (btrfsOptions, bool, error) {
var options btrfsOptions
userDiskQuota := false
for _, option := range opt {
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil {
return options, userDiskQuota, err
}
key = strings.ToLower(key)
switch key {
case "btrfs.min_space":
minSpace, err := units.RAMInBytes(val)
if err != nil {
return options, userDiskQuota, err
}
userDiskQuota = true
options.minSpace = uint64(minSpace)
case "btrfs.mountopt":
return options, userDiskQuota, fmt.Errorf("btrfs driver does not support mount options")
default:
return options, userDiskQuota, fmt.Errorf("unknown option %s (%q)", key, option)
}
}
return options, userDiskQuota, nil
}
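// Illustrative sketch (added, not part of the vendored file): driver options
// arrive as plain "key=value" strings, so enabling the quota floor looks like:
//
//	opts, userDiskQuota, err := parseOptions([]string{"btrfs.min_space=10G"})
//	// opts.minSpace == 10 * 1024 * 1024 * 1024, userDiskQuota == true, err == nil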
// Driver contains information about the filesystem mounted.
type Driver struct {
// root of the file system
home string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
options btrfsOptions
quotaEnabled bool
once sync.Once
}
// String prints the name of the driver (btrfs).
func (d *Driver) String() string {
return "btrfs"
}
// Status returns current driver information in a two dimensional string array.
// Output contains "Build Version" and "Library Version" of the btrfs libraries used.
// Version information can be used to check compatibility with your kernel.
func (d *Driver) Status() [][2]string {
status := [][2]string{}
if bv := btrfsBuildVersion(); bv != "-" {
status = append(status, [2]string{"Build Version", bv})
}
if lv := btrfsLibVersion(); lv != -1 {
status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)})
}
return status
}
// Metadata returns empty metadata for this driver.
func (d *Driver) Metadata(id string) (map[string]string, error) {
return nil, nil //nolint: nilnil
}
// Cleanup unmounts the home directory.
func (d *Driver) Cleanup() error {
return mount.Unmount(d.home)
}
func free(p *C.char) {
C.free(unsafe.Pointer(p))
}
func openDir(path string) (*C.DIR, error) {
Cpath := C.CString(path)
defer free(Cpath)
dir := C.opendir(Cpath)
if dir == nil {
return nil, fmt.Errorf("can't open dir %s", path)
}
return dir, nil
}
func closeDir(dir *C.DIR) {
if dir != nil {
C.closedir(dir)
}
}
func getDirFd(dir *C.DIR) uintptr {
return uintptr(C.dirfd(dir))
}
func subvolCreate(path, name string) error {
dir, err := openDir(path)
if err != nil {
return err
}
defer closeDir(dir)
var args C.struct_btrfs_ioctl_vol_args
for i, c := range []byte(name) {
args.name[i] = C.char(c)
}
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("failed to create btrfs subvolume: %w", errno)
}
return nil
}
func subvolSnapshot(src, dest, name string) error {
srcDir, err := openDir(src)
if err != nil {
return err
}
defer closeDir(srcDir)
destDir, err := openDir(dest)
if err != nil {
return err
}
defer closeDir(destDir)
var args C.struct_btrfs_ioctl_vol_args_v2
args.fd = C.__s64(getDirFd(srcDir))
cs := C.CString(name)
C.set_name_btrfs_ioctl_vol_args_v2(&args, cs)
C.free(unsafe.Pointer(cs))
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("failed to create btrfs snapshot: %w", errno)
}
return nil
}
func isSubvolume(p string) (bool, error) {
var bufStat unix.Stat_t
if err := unix.Lstat(p, &bufStat); err != nil {
return false, err
}
// return true if it is a btrfs subvolume
return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil
}
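// Note (added): BTRFS_FIRST_FREE_OBJECTID is 256, and the root directory of
// every btrfs subvolume has inode number 256, so a single lstat is enough to
// detect a subvolume boundary.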
func subvolDelete(dirpath, name string, quotaEnabled bool) error {
dir, err := openDir(dirpath)
if err != nil {
return err
}
defer closeDir(dir)
fullPath := path.Join(dirpath, name)
var args C.struct_btrfs_ioctl_vol_args
// walk the btrfs subvolumes
walkSubvolumes := func(p string, d fs.DirEntry, err error) error {
if err != nil {
if os.IsNotExist(err) && p != fullPath {
// missing most likely because the path was a subvolume that got removed in the previous iteration
// since it's gone anyway, we don't care
return nil
}
return fmt.Errorf("walking subvolumes: %w", err)
}
// we want to check children only so skip itself
// it will be removed after the filepath walk anyways
if d.IsDir() && p != fullPath {
sv, err := isSubvolume(p)
if err != nil {
return fmt.Errorf("failed to test if %s is a btrfs subvolume: %w", p, err)
}
if sv {
if err := subvolDelete(path.Dir(p), d.Name(), quotaEnabled); err != nil {
return fmt.Errorf("failed to destroy btrfs child subvolume (%s) of parent (%s): %w", p, dirpath, err)
}
}
}
return nil
}
if err := filepath.WalkDir(path.Join(dirpath, name), walkSubvolumes); err != nil {
return fmt.Errorf("recursively walking subvolumes for %s failed: %w", dirpath, err)
}
if quotaEnabled {
if qgroupid, err := subvolLookupQgroup(fullPath); err == nil {
var args C.struct_btrfs_ioctl_qgroup_create_args
args.qgroupid = C.__u64(qgroupid)
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_CREATE,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
logrus.Errorf("Failed to delete btrfs qgroup %v for %s: %v", qgroupid, fullPath, errno.Error())
}
} else {
logrus.Errorf("Failed to lookup btrfs qgroup for %s: %v", fullPath, err.Error())
}
}
// all subvolumes have been removed
// now remove the one originally passed in
for i, c := range []byte(name) {
args.name[i] = C.char(c)
}
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("failed to destroy btrfs snapshot %s for %s: %w", dirpath, name, errno)
}
return nil
}
func (d *Driver) updateQuotaStatus() {
d.once.Do(func() {
if !d.quotaEnabled {
// In case quotaEnabled is not set, check qgroup and update quotaEnabled as needed
if err := qgroupStatus(d.home); err != nil {
// quota is still not enabled
return
}
d.quotaEnabled = true
}
})
}
func (d *Driver) enableQuota() error {
d.updateQuotaStatus()
if d.quotaEnabled {
return nil
}
dir, err := openDir(d.home)
if err != nil {
return err
}
defer closeDir(dir)
var args C.struct_btrfs_ioctl_quota_ctl_args
args.cmd = C.BTRFS_QUOTA_CTL_ENABLE
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("failed to enable btrfs quota for %s: %w", dir, errno)
}
d.quotaEnabled = true
return nil
}
func (d *Driver) subvolRescanQuota() error {
d.updateQuotaStatus()
if !d.quotaEnabled {
return nil
}
dir, err := openDir(d.home)
if err != nil {
return err
}
defer closeDir(dir)
var args C.struct_btrfs_ioctl_quota_rescan_args
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("failed to rescan btrfs quota for %s: %w", dir, errno)
}
return nil
}
func subvolLimitQgroup(path string, size uint64) error {
dir, err := openDir(path)
if err != nil {
return err
}
defer closeDir(dir)
var args C.struct_btrfs_ioctl_qgroup_limit_args
args.lim.max_rfer = C.__u64(size)
args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("failed to limit qgroup for %s: %w", dir, errno)
}
return nil
}
// qgroupStatus performs a BTRFS_IOC_TREE_SEARCH on the root path
// with search key of BTRFS_QGROUP_STATUS_KEY.
// In case qgroup is enabled, the returned key type will match BTRFS_QGROUP_STATUS_KEY.
// For more details please see https://github.com/kdave/btrfs-progs/blob/v4.9/qgroup.c#L1035
func qgroupStatus(path string) error {
dir, err := openDir(path)
if err != nil {
return err
}
defer closeDir(dir)
var args C.struct_btrfs_ioctl_search_args
args.key.tree_id = C.BTRFS_QUOTA_TREE_OBJECTID
args.key.min_type = C.BTRFS_QGROUP_STATUS_KEY
args.key.max_type = C.BTRFS_QGROUP_STATUS_KEY
args.key.max_objectid = C.__u64(math.MaxUint64)
args.key.max_offset = C.__u64(math.MaxUint64)
args.key.max_transid = C.__u64(math.MaxUint64)
args.key.nr_items = 4096
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_TREE_SEARCH,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("failed to search qgroup for %s: %w", path, errno)
}
sh := (*C.struct_btrfs_ioctl_search_header)(unsafe.Pointer(&args.buf))
if sh._type != C.BTRFS_QGROUP_STATUS_KEY {
return fmt.Errorf("invalid qgroup search header type for %s: %v", path, sh._type)
}
return nil
}
func subvolLookupQgroup(path string) (uint64, error) {
dir, err := openDir(path)
if err != nil {
return 0, err
}
defer closeDir(dir)
var args C.struct_btrfs_ioctl_ino_lookup_args
args.objectid = C.BTRFS_FIRST_FREE_OBJECTID
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_INO_LOOKUP,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return 0, fmt.Errorf("failed to lookup qgroup for %s: %w", dir, errno)
}
if args.treeid == 0 {
return 0, fmt.Errorf("invalid qgroup id for %s: 0", dir)
}
return uint64(args.treeid), nil
}
func (d *Driver) subvolumesDir() string {
return path.Join(d.home, "subvolumes")
}
func (d *Driver) subvolumesDirID(id string) string {
return path.Join(d.subvolumesDir(), id)
}
func (d *Driver) quotasDir() string {
return path.Join(d.home, "quotas")
}
func (d *Driver) quotasDirID(id string) string {
return path.Join(d.quotasDir(), id)
}
// CreateFromTemplate creates a layer with the same contents and parent as another layer.
func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
return d.Create(id, template, opts)
}
// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
return d.Create(id, parent, opts)
}
// Create the filesystem with given id.
func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
quotas := d.quotasDir()
subvolumes := d.subvolumesDir()
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
return err
}
if err := idtools.MkdirAllAs(subvolumes, 0o700, rootUID, rootGID); err != nil {
return err
}
if parent == "" {
if err := subvolCreate(subvolumes, id); err != nil {
return err
}
if err := os.Chmod(path.Join(subvolumes, id), defaultPerms); err != nil {
return err
}
} else {
parentDir := d.subvolumesDirID(parent)
st, err := os.Stat(parentDir)
if err != nil {
return err
}
if !st.IsDir() {
return fmt.Errorf("%s: not a directory", parentDir)
}
if err := subvolSnapshot(parentDir, subvolumes, id); err != nil {
return err
}
}
var storageOpt map[string]string
if opts != nil {
storageOpt = opts.StorageOpt
}
if _, ok := storageOpt["size"]; ok {
driver := &Driver{}
if err := d.parseStorageOpt(storageOpt, driver); err != nil {
return err
}
if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil {
return err
}
if err := idtools.MkdirAllAs(quotas, 0o700, rootUID, rootGID); err != nil {
return err
}
if err := os.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0o644); err != nil {
return err
}
}
// if we have a remapped root (user namespaces enabled), change the created snapshot
// dir ownership to match
if rootUID != 0 || rootGID != 0 {
if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil {
return err
}
}
mountLabel := ""
if opts != nil {
mountLabel = opts.MountLabel
}
return label.Relabel(path.Join(subvolumes, id), mountLabel, false)
}
// Parse btrfs storage options
func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error {
// Read size to change the subvolume disk quota per container
for key, val := range storageOpt {
key := strings.ToLower(key)
switch key {
case "size":
size, err := units.RAMInBytes(val)
if err != nil {
return err
}
driver.options.size = uint64(size)
default:
return fmt.Errorf("unknown option %s (%q)", key, storageOpt)
}
}
return nil
}
// Set btrfs storage size
func (d *Driver) setStorageSize(dir string, driver *Driver) error {
if driver.options.size <= 0 {
return fmt.Errorf("btrfs: invalid storage size: %s", units.HumanSize(float64(driver.options.size)))
}
if d.options.minSpace > 0 && driver.options.size < d.options.minSpace {
return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace)))
}
if err := d.enableQuota(); err != nil {
return err
}
if err := subvolLimitQgroup(dir, driver.options.size); err != nil {
return err
}
return nil
}
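// Illustrative sketch (added, not part of the vendored file; subvolPath is a
// hypothetical path): a per-container quota flows from the "size" storage
// option, through units.RAMInBytes, into a qgroup limit on the subvolume:
//
//	tmp := &Driver{}
//	_ = d.parseStorageOpt(map[string]string{"size": "20G"}, tmp) // tmp.options.size = 20 GiB
//	_ = d.setStorageSize(subvolPath, tmp)                        // enableQuota + subvolLimitQgroup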
// Remove the filesystem with given id.
func (d *Driver) Remove(id string) error {
dir := d.subvolumesDirID(id)
if _, err := os.Stat(dir); err != nil {
return err
}
quotasDir := d.quotasDirID(id)
if _, err := os.Stat(quotasDir); err == nil {
if err := os.Remove(quotasDir); err != nil {
return err
}
} else if !os.IsNotExist(err) {
return err
}
// Call updateQuotaStatus() to refresh d.quotaEnabled before the delete.
d.updateQuotaStatus()
if err := subvolDelete(d.subvolumesDir(), id, d.quotaEnabled); err != nil {
if d.quotaEnabled {
return err
}
// If quota is not enabled, fallback to rmdir syscall to delete subvolumes.
// This would allow unprivileged user to delete their owned subvolumes
// in kernel >= 4.18 without the user_subvol_rm_allowed mount option.
}
if err := system.EnsureRemoveAll(dir); err != nil {
return err
}
if err := d.subvolRescanQuota(); err != nil {
return err
}
return nil
}
// Get the requested filesystem id.
func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
dir := d.subvolumesDirID(id)
st, err := os.Stat(dir)
if err != nil {
return "", err
}
for _, opt := range options.Options {
if opt == "ro" {
// ignore "ro" option
continue
}
return "", fmt.Errorf("btrfs driver does not support mount options")
}
if !st.IsDir() {
return "", fmt.Errorf("%s: not a directory", dir)
}
if quota, err := os.ReadFile(d.quotasDirID(id)); err == nil {
if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace {
if err := d.enableQuota(); err != nil {
return "", err
}
if err := subvolLimitQgroup(dir, size); err != nil {
return "", err
}
}
}
return dir, nil
}
// Put is not implemented for BTRFS as there is no cleanup required for the id.
func (d *Driver) Put(id string) error {
// Get() creates no runtime resources (like e.g. mounts)
// so this doesn't need to do anything.
return nil
}
// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
// For BTRFS, it queries the subvolumes path for this ID.
func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
return directory.Usage(d.subvolumesDirID(id))
}
// Exists checks if the id exists in the filesystem.
func (d *Driver) Exists(id string) bool {
dir := d.subvolumesDirID(id)
_, err := os.Stat(dir)
return err == nil
}
// List all of the layers known to the driver.
func (d *Driver) ListLayers() ([]string, error) {
entries, err := os.ReadDir(d.subvolumesDir())
if err != nil {
return nil, err
}
results := make([]string, 0, len(entries))
for _, entry := range entries {
if !entry.IsDir() {
continue
}
results = append(results, entry.Name())
}
return results, nil
}
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
return nil
}

View file

@ -0,0 +1,4 @@
//go:build !linux || !cgo
// +build !linux !cgo
package btrfs

View file

@ -0,0 +1,27 @@
//go:build linux && !btrfs_noversion && cgo
// +build linux,!btrfs_noversion,cgo
package btrfs
/*
#include <btrfs/version.h>
// around version 3.16, they did not define lib version yet
#ifndef BTRFS_LIB_VERSION
#define BTRFS_LIB_VERSION -1
#endif
// upstream had removed it, but now it will be coming back
#ifndef BTRFS_BUILD_VERSION
#define BTRFS_BUILD_VERSION "-"
#endif
*/
import "C"
func btrfsBuildVersion() string {
return string(C.BTRFS_BUILD_VERSION)
}
func btrfsLibVersion() int {
return int(C.BTRFS_LIB_VERSION)
}

View file

@ -0,0 +1,15 @@
//go:build linux && btrfs_noversion && cgo
// +build linux,btrfs_noversion,cgo
package btrfs
// TODO(vbatts) remove this work-around once supported linux distros are on
// btrfs utilities of >= 3.16.1
func btrfsBuildVersion() string {
return "-"
}
func btrfsLibVersion() int {
return -1
}

135 vendor/github.com/containers/storage/drivers/chown.go generated vendored Normal file
View file

@ -0,0 +1,135 @@
package graphdriver
import (
"bytes"
"errors"
"fmt"
"os"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/reexec"
"github.com/opencontainers/selinux/pkg/pwalk"
)
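// Note (added, an assumption based on the containers/storage layout): the
// json helper used below is the package-level jsoniter alias declared in a
// sibling jsoniter.go file, not encoding/json.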
const (
chownByMapsCmd = "storage-chown-by-maps"
)
func init() {
reexec.Register(chownByMapsCmd, chownByMapsMain)
}
func chownByMapsMain() {
if len(os.Args) < 2 {
fmt.Fprintf(os.Stderr, "requires mapping configuration on stdin and directory path")
os.Exit(1)
}
// Read and decode our configuration.
discreteMaps := [4][]idtools.IDMap{}
config := bytes.Buffer{}
if _, err := config.ReadFrom(os.Stdin); err != nil {
fmt.Fprintf(os.Stderr, "error reading configuration: %v", err)
os.Exit(1)
}
if err := json.Unmarshal(config.Bytes(), &discreteMaps); err != nil {
fmt.Fprintf(os.Stderr, "error decoding configuration: %v", err)
os.Exit(1)
}
// Try to chroot. This may not be possible, and on some systems that
// means we just Chdir() to the directory, so from here on we should be
// using relative paths.
if err := chrootOrChdir(os.Args[1]); err != nil {
fmt.Fprintf(os.Stderr, "error chrooting to %q: %v", os.Args[1], err)
os.Exit(1)
}
// Build the mapping objects.
toContainer := idtools.NewIDMappingsFromMaps(discreteMaps[0], discreteMaps[1])
if len(toContainer.UIDs()) == 0 && len(toContainer.GIDs()) == 0 {
toContainer = nil
}
toHost := idtools.NewIDMappingsFromMaps(discreteMaps[2], discreteMaps[3])
if len(toHost.UIDs()) == 0 && len(toHost.GIDs()) == 0 {
toHost = nil
}
chowner := newLChowner()
chown := func(path string, info os.FileInfo, _ error) error {
if path == "." {
return nil
}
return chowner.LChown(path, info, toHost, toContainer)
}
if err := pwalk.Walk(".", chown); err != nil {
fmt.Fprintf(os.Stderr, "error during chown: %v", err)
os.Exit(1)
}
os.Exit(0)
}
// ChownPathByMaps walks the filesystem tree, changing the ownership
// information using the toContainer and toHost mappings, using them to replace
// on-disk owner UIDs and GIDs which are "host" values in the first map with
// UIDs and GIDs for "host" values from the second map which correspond to the
// same "container" IDs.
func ChownPathByMaps(path string, toContainer, toHost *idtools.IDMappings) error {
if toContainer == nil {
toContainer = &idtools.IDMappings{}
}
if toHost == nil {
toHost = &idtools.IDMappings{}
}
config, err := json.Marshal([4][]idtools.IDMap{toContainer.UIDs(), toContainer.GIDs(), toHost.UIDs(), toHost.GIDs()})
if err != nil {
return err
}
cmd := reexec.Command(chownByMapsCmd, path)
cmd.Stdin = bytes.NewReader(config)
output, err := cmd.CombinedOutput()
if len(output) > 0 && err != nil {
return fmt.Errorf("%s: %w", string(output), err)
}
if err != nil {
return err
}
if len(output) > 0 {
return errors.New(string(output))
}
return nil
}
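// Illustrative sketch (added, not part of the vendored file; IDs and path are
// hypothetical): remap a tree written for host base 100000 so it is owned
// relative to host base 200000:
//
//	toContainer := idtools.NewIDMappingsFromMaps(
//		[]idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
//		[]idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}})
//	toHost := idtools.NewIDMappingsFromMaps(
//		[]idtools.IDMap{{ContainerID: 0, HostID: 200000, Size: 65536}},
//		[]idtools.IDMap{{ContainerID: 0, HostID: 200000, Size: 65536}})
//	err := ChownPathByMaps("/var/lib/containers/storage/<layer>/diff", toContainer, toHost)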
type naiveLayerIDMapUpdater struct {
ProtoDriver
}
// NewNaiveLayerIDMapUpdater wraps the ProtoDriver in a LayerIDMapUpdater that
// uses ChownPathByMaps to update the ownerships in a layer's filesystem tree.
func NewNaiveLayerIDMapUpdater(driver ProtoDriver) LayerIDMapUpdater {
return &naiveLayerIDMapUpdater{ProtoDriver: driver}
}
// UpdateLayerIDMap walks the layer's filesystem tree, changing the ownership
// information using the toContainer and toHost mappings, using them to replace
// on-disk owner UIDs and GIDs which are "host" values in the first map with
// UIDs and GIDs for "host" values from the second map which correspond to the
// same "container" IDs.
func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) (retErr error) {
driver := n.ProtoDriver
options := MountOpts{
MountLabel: mountLabel,
}
layerFs, err := driver.Get(id, options)
if err != nil {
return err
}
defer driverPut(driver, id, &retErr)
return ChownPathByMaps(layerFs, toContainer, toHost)
}
// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS
func (n *naiveLayerIDMapUpdater) SupportsShifting() bool {
return false
}

View file

@ -0,0 +1,109 @@
//go:build darwin
// +build darwin
package graphdriver
import (
"errors"
"fmt"
"os"
"sync"
"syscall"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
)
type inode struct {
Dev uint64
Ino uint64
}
type platformChowner struct {
mutex sync.Mutex
inodes map[inode]bool
}
func newLChowner() *platformChowner {
return &platformChowner{
inodes: make(map[inode]bool),
}
}
func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
st, ok := info.Sys().(*syscall.Stat_t)
if !ok {
return nil
}
i := inode{
Dev: uint64(st.Dev),
Ino: uint64(st.Ino),
}
c.mutex.Lock()
_, found := c.inodes[i]
if !found {
c.inodes[i] = true
}
c.mutex.Unlock()
if found {
return nil
}
// Map an on-disk UID/GID pair from host to container
// using the first map, then back to the host using the
// second map. Skip that first step if they're 0, to
// compensate for cases where a parent layer should
// have had a mapped value, but didn't.
uid, gid := int(st.Uid), int(st.Gid)
if toContainer != nil {
pair := idtools.IDPair{
UID: uid,
GID: gid,
}
mappedUID, mappedGID, err := toContainer.ToContainer(pair)
if err != nil {
if (uid != 0) || (gid != 0) {
return fmt.Errorf("mapping host ID pair %#v for %q to container: %w", pair, path, err)
}
mappedUID, mappedGID = uid, gid
}
uid, gid = mappedUID, mappedGID
}
if toHost != nil {
pair := idtools.IDPair{
UID: uid,
GID: gid,
}
mappedPair, err := toHost.ToHostOverflow(pair)
if err != nil {
return fmt.Errorf("mapping container ID pair %#v for %q to host: %w", pair, path, err)
}
uid, gid = mappedPair.UID, mappedPair.GID
}
if uid != int(st.Uid) || gid != int(st.Gid) {
capability, err := system.Lgetxattr(path, "security.capability")
if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
return fmt.Errorf("%s: %w", os.Args[0], err)
}
// Make the change.
if err := system.Lchown(path, uid, gid); err != nil {
return fmt.Errorf("%s: %w", os.Args[0], err)
}
// Restore the SUID and SGID bits if they were originally set.
if (info.Mode()&os.ModeSymlink == 0) && info.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 {
if err := system.Chmod(path, info.Mode()); err != nil {
return fmt.Errorf("%s: %w", os.Args[0], err)
}
}
if capability != nil {
if err := system.Lsetxattr(path, "security.capability", capability, 0); err != nil {
return fmt.Errorf("%s: %w", os.Args[0], err)
}
}
}
return nil
}

View file

@ -0,0 +1,127 @@
//go:build !windows && !darwin
// +build !windows,!darwin
package graphdriver
import (
"errors"
"fmt"
"os"
"sync"
"syscall"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
)
type inode struct {
Dev uint64
Ino uint64
}
type platformChowner struct {
mutex sync.Mutex
inodes map[inode]string
}
func newLChowner() *platformChowner {
return &platformChowner{
inodes: make(map[inode]string),
}
}
func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
st, ok := info.Sys().(*syscall.Stat_t)
if !ok {
return nil
}
i := inode{
Dev: uint64(st.Dev),
Ino: uint64(st.Ino),
}
c.mutex.Lock()
oldTarget, found := c.inodes[i]
if !found {
c.inodes[i] = path
}
// If we are dealing with a file with multiple links then keep the lock until the file is
// chowned to avoid a race where we link to the old version if the file is copied up.
if found || st.Nlink > 1 {
defer c.mutex.Unlock()
} else {
c.mutex.Unlock()
}
if found {
// If the dev/inode was already chowned then create a link to the old target instead
// of chowning it again. This is necessary when the underlying file system breaks
// inodes on copy-up (as it is with overlay with index=off) to maintain the original
// link and correct file ownership.
// The target already exists so remove it before creating the link to the new target.
if err := os.Remove(path); err != nil {
return err
}
return os.Link(oldTarget, path)
}
// Map an on-disk UID/GID pair from host to container
// using the first map, then back to the host using the
// second map. Skip that first step if they're 0, to
// compensate for cases where a parent layer should
// have had a mapped value, but didn't.
uid, gid := int(st.Uid), int(st.Gid)
if toContainer != nil {
pair := idtools.IDPair{
UID: uid,
GID: gid,
}
mappedUID, mappedGID, err := toContainer.ToContainer(pair)
if err != nil {
if (uid != 0) || (gid != 0) {
return fmt.Errorf("mapping host ID pair %#v for %q to container: %w", pair, path, err)
}
mappedUID, mappedGID = uid, gid
}
uid, gid = mappedUID, mappedGID
}
if toHost != nil {
pair := idtools.IDPair{
UID: uid,
GID: gid,
}
mappedPair, err := toHost.ToHostOverflow(pair)
if err != nil {
return fmt.Errorf("mapping container ID pair %#v for %q to host: %w", pair, path, err)
}
uid, gid = mappedPair.UID, mappedPair.GID
}
if uid != int(st.Uid) || gid != int(st.Gid) {
capability, err := system.Lgetxattr(path, "security.capability")
if err != nil && !errors.Is(err, system.EOPNOTSUPP) && !errors.Is(err, system.EOVERFLOW) && err != system.ErrNotSupportedPlatform {
return fmt.Errorf("%s: %w", os.Args[0], err)
}
// Make the change.
if err := system.Lchown(path, uid, gid); err != nil {
return fmt.Errorf("%s: %w", os.Args[0], err)
}
// Restore the SUID and SGID bits if they were originally set.
if (info.Mode()&os.ModeSymlink == 0) && info.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 {
if err := system.Chmod(path, info.Mode()); err != nil {
return fmt.Errorf("%s: %w", os.Args[0], err)
}
}
if capability != nil {
if err := system.Lsetxattr(path, "security.capability", capability, 0); err != nil {
return fmt.Errorf("%s: %w", os.Args[0], err)
}
}
}
return nil
}
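// Worked example (added, hypothetical IDs): with toContainer mapping host
// 100000-165535 to container 0-65535 and toHost mapping container 0-65535 to
// host 200000-265535, a file owned by host UID 100000 maps to container UID 0
// and is chowned to host UID 200000. A file owned by host UID 0 fails the
// first mapping but is tolerated as container UID 0 (the "parent layer should
// have had a mapped value" case above) and likewise ends up at host UID 200000.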

View file

@ -0,0 +1,21 @@
//go:build windows
// +build windows
package graphdriver
import (
"os"
"syscall"
"github.com/containers/storage/pkg/idtools"
)
type platformChowner struct{}
func newLChowner() *platformChowner {
return &platformChowner{}
}
func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
return &os.PathError{"lchown", path, syscall.EWINDOWS}
}

View file

@ -0,0 +1,22 @@
//go:build linux || darwin || freebsd || solaris
// +build linux darwin freebsd solaris
package graphdriver
import (
"fmt"
"os"
"syscall"
)
// chrootOrChdir() is either a chdir() to the specified path, or a chroot() to the
// specified path followed by chdir() to the new root directory
func chrootOrChdir(path string) error {
if err := syscall.Chroot(path); err != nil {
return fmt.Errorf("chrooting to %q: %w", path, err)
}
if err := syscall.Chdir(string(os.PathSeparator)); err != nil {
return fmt.Errorf("changing to %q: %w", path, err)
}
return nil
}

View file

@ -0,0 +1,15 @@
package graphdriver
import (
"fmt"
"syscall"
)
// chrootOrChdir() is either a chdir() to the specified path, or a chroot() to the
// specified path followed by chdir() to the new root directory
func chrootOrChdir(path string) error {
if err := syscall.Chdir(path); err != nil {
return fmt.Errorf("changing to %q: %w", path, err)
}
return nil
}

View file

@ -0,0 +1,307 @@
//go:build cgo
// +build cgo
package copy
/*
#include <linux/fs.h>
#ifndef FICLONE
#define FICLONE _IOW(0x94, 9, int)
#endif
*/
import "C"
import (
"container/list"
"errors"
"fmt"
"io"
"net"
"os"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/pools"
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/unshare"
"golang.org/x/sys/unix"
)
// Mode indicates whether to use hardlink or copy content
type Mode int
const (
// Content creates a new file, and copies the content of the file
Content Mode = iota
// Hardlink creates a new hardlink to the existing file
Hardlink
)
// CopyRegularToFile copies the content of a file to another
func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { // nolint: revive,golint
srcFile, err := os.Open(srcPath)
if err != nil {
return err
}
defer srcFile.Close()
if *copyWithFileClone {
_, _, err = unix.Syscall(unix.SYS_IOCTL, dstFile.Fd(), C.FICLONE, srcFile.Fd())
if err == nil {
return nil
}
*copyWithFileClone = false
if err == unix.EXDEV {
*copyWithFileRange = false
}
}
if *copyWithFileRange {
err = doCopyWithFileRange(srcFile, dstFile, fileinfo)
// Trying the file_clone may not have caught the exdev case
// as the ioctl may not have been available (therefore EINVAL)
if err == unix.EXDEV || err == unix.ENOSYS {
*copyWithFileRange = false
} else {
return err
}
}
return legacyCopy(srcFile, dstFile)
}
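// Note (added): the copy strategy degrades in three steps: the FICLONE ioctl
// (reflink, same filesystem only), then copy_file_range, then a plain
// userspace read/write via legacyCopy. When called from DirCopy, the two
// booleans are shared across the whole run, so a failed mechanism is only
// probed once per copy operation.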
// CopyRegular copies the content of a file to another
func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { // nolint: revive,golint
// If the destination file already exists, we shouldn't blow it away
dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, fileinfo.Mode())
if err != nil {
return err
}
defer dstFile.Close()
return CopyRegularToFile(srcPath, dstFile, fileinfo, copyWithFileRange, copyWithFileClone)
}
func doCopyWithFileRange(srcFile, dstFile *os.File, fileinfo os.FileInfo) error {
amountLeftToCopy := fileinfo.Size()
for amountLeftToCopy > 0 {
n, err := unix.CopyFileRange(int(srcFile.Fd()), nil, int(dstFile.Fd()), nil, int(amountLeftToCopy), 0)
if err != nil {
return err
}
amountLeftToCopy = amountLeftToCopy - int64(n)
}
return nil
}
func legacyCopy(srcFile io.Reader, dstFile io.Writer) error {
_, err := pools.Copy(dstFile, srcFile)
return err
}
func copyXattr(srcPath, dstPath, attr string) error {
data, err := system.Lgetxattr(srcPath, attr)
if err != nil && !errors.Is(err, unix.EOPNOTSUPP) {
return err
}
if data != nil {
if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil {
return err
}
}
return nil
}
type fileID struct {
dev uint64
ino uint64
}
type dirMtimeInfo struct {
dstPath *string
stat *syscall.Stat_t
}
// DirCopy copies or hardlinks the contents of one directory to another,
// properly handling xattrs, and soft links
//
// Copying xattrs can be opted out of by passing false for copyXattrs.
func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
copyWithFileRange := true
copyWithFileClone := true
// This is a map of source file inodes to dst file paths
copiedFiles := make(map[fileID]string)
dirsToSetMtimes := list.New()
err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
relPath, err := filepath.Rel(srcDir, srcPath)
if err != nil {
return err
}
dstPath := filepath.Join(dstDir, relPath)
stat, ok := f.Sys().(*syscall.Stat_t)
if !ok {
return fmt.Errorf("unable to get raw syscall.Stat_t data for %s", srcPath)
}
isHardlink := false
switch mode := f.Mode(); {
case mode.IsRegular():
id := fileID{dev: uint64(stat.Dev), ino: stat.Ino}
if copyMode == Hardlink {
isHardlink = true
if err2 := os.Link(srcPath, dstPath); err2 != nil {
return err2
}
} else if hardLinkDstPath, ok := copiedFiles[id]; ok {
if err2 := os.Link(hardLinkDstPath, dstPath); err2 != nil {
return err2
}
} else {
if err2 := CopyRegular(srcPath, dstPath, f, &copyWithFileRange, &copyWithFileClone); err2 != nil {
return err2
}
copiedFiles[id] = dstPath
}
case mode.IsDir():
if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) {
return err
}
case mode&os.ModeSymlink != 0:
link, err := os.Readlink(srcPath)
if err != nil {
return err
}
if err := os.Symlink(link, dstPath); err != nil {
return err
}
case mode&os.ModeNamedPipe != 0:
if err := unix.Mkfifo(dstPath, stat.Mode); err != nil {
return err
}
case mode&os.ModeSocket != 0:
s, err := net.Listen("unix", dstPath)
if err != nil {
return err
}
s.Close()
case mode&os.ModeDevice != 0:
if unshare.IsRootless() {
// cannot create a device if running in user namespace
return nil
}
if err := unix.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil {
return err
}
default:
return fmt.Errorf("unknown file type with mode %v for %s", mode, srcPath)
}
// Everything below is copying metadata from src to dst. All this metadata
// already shares an inode for hardlinks.
if isHardlink {
return nil
}
if err := idtools.SafeLchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
return err
}
if copyXattrs {
if err := doCopyXattrs(srcPath, dstPath); err != nil {
return err
}
}
isSymlink := f.Mode()&os.ModeSymlink != 0
// There is no LChmod, so ignore mode for symlink. Also, this
// must happen after chown, as that can modify the file mode
if !isSymlink {
if err := os.Chmod(dstPath, f.Mode()); err != nil {
return err
}
}
// system.Chtimes doesn't support a NOFOLLOW flag atm
// nolint: unconvert
if f.IsDir() {
dirsToSetMtimes.PushFront(&dirMtimeInfo{dstPath: &dstPath, stat: stat})
} else if !isSymlink {
aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec))
if err := system.Chtimes(dstPath, aTime, mTime); err != nil {
return err
}
} else {
ts := []syscall.Timespec{stat.Atim, stat.Mtim}
if err := system.LUtimesNano(dstPath, ts); err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
for e := dirsToSetMtimes.Front(); e != nil; e = e.Next() {
mtimeInfo := e.Value.(*dirMtimeInfo)
ts := []syscall.Timespec{mtimeInfo.stat.Atim, mtimeInfo.stat.Mtim}
if err := system.LUtimesNano(*mtimeInfo.dstPath, ts); err != nil {
return err
}
}
return nil
}
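// Illustrative sketch (added, not part of the vendored file; paths are
// hypothetical): copy a layer directory with xattrs preserved, using content
// copies rather than hardlinks:
//
//	if err := DirCopy("/var/lib/storage/src", "/var/lib/storage/dst", Content, true); err != nil {
//		// handle error
//	}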
func doCopyXattrs(srcPath, dstPath string) error {
if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
return err
}
xattrs, err := system.Llistxattr(srcPath)
if err != nil && !errors.Is(err, unix.EOPNOTSUPP) {
return err
}
for _, key := range xattrs {
if strings.HasPrefix(key, "user.") {
if err := copyXattr(srcPath, dstPath, key); err != nil {
return err
}
}
}
if unshare.IsRootless() {
return nil
}
// We need to copy this attribute if it appears in an overlay upper layer, as
// this function is used to copy those. It is set by overlay if a directory
// is removed and then re-created and should not inherit anything from the
// same dir in the lower dir.
return copyXattr(srcPath, dstPath, "trusted.overlay.opaque")
}

View file

@ -0,0 +1,41 @@
//go:build !linux || !cgo
// +build !linux !cgo
package copy //nolint: predeclared
import (
"io"
"os"
"github.com/containers/storage/pkg/chrootarchive"
)
// Mode indicates whether to use hardlink or copy content
type Mode int
const (
// Content creates a new file, and copies the content of the file
Content Mode = iota
)
// DirCopy copies or hardlinks the contents of one directory to another,
// properly handling soft links
func DirCopy(srcDir, dstDir string, _ Mode, _ bool) error {
return chrootarchive.NewArchiver(nil).CopyWithTar(srcDir, dstDir)
}
// CopyRegularToFile copies the content of a file to another
func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint: revive,golint // "func name will be used as copy.CopyRegularToFile by other packages, and that stutters"
f, err := os.Open(srcPath)
if err != nil {
return err
}
defer f.Close()
_, err = io.Copy(dstFile, f)
return err
}
// CopyRegular copies the content of a file to another
func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint:revive,golint // "func name will be used as copy.CopyRegular by other packages, and that stutters"
return chrootarchive.NewArchiver(nil).CopyWithTar(srcPath, dstPath)
}

View file

@ -0,0 +1,68 @@
package graphdriver
import "sync"
type minfo struct {
check bool
count int
}
// RefCounter is a generic counter for use by graphdriver Get/Put calls
type RefCounter struct {
counts map[string]*minfo
mu sync.Mutex
checker Checker
}
// NewRefCounter returns a new RefCounter
func NewRefCounter(c Checker) *RefCounter {
return &RefCounter{
checker: c,
counts: make(map[string]*minfo),
}
}
// Increment increases the ref count for the given id and returns the current count
func (c *RefCounter) Increment(path string) int {
return c.incdec(path, func(minfo *minfo) {
minfo.count++
})
}
// Decrement decreases the ref count for the given id and returns the current count
func (c *RefCounter) Decrement(path string) int {
return c.incdec(path, func(minfo *minfo) {
minfo.count--
})
}
func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int {
c.mu.Lock()
m := c.counts[path]
if m == nil {
m = &minfo{}
c.counts[path] = m
}
// if we are checking this path for the first time check to make sure
// if it was already mounted on the system and make sure we have a correct ref
// count if it is mounted as it is in use.
if !m.check {
m.check = true
if c.checker.IsMounted(path) {
m.count++
}
} else if !c.checker.IsMounted(path) {
// if the unmount was performed outside of this process (e.g. conmon cleanup)
// the ref counter would lose track of it. Check if it is still mounted.
m.count = 0
}
infoOp(m)
count := m.count
if count <= 0 {
// If the mounted path has been decremented enough to have no references,
// then its entry can be removed.
delete(c.counts, path)
}
c.mu.Unlock()
return count
}
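// Illustrative sketch (added, not part of the vendored file): drivers pair
// Increment in Get() with Decrement in Put() so the real mount and unmount
// only happen for the first and last user:
//
//	ctr := NewRefCounter(NewDefaultChecker())
//	if count := ctr.Increment(mountpoint); count > 1 {
//		return mountpoint, nil // already mounted by an earlier Get()
//	}
//	// ... perform the actual mount ...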

View file

@ -0,0 +1,254 @@
//go:build linux && cgo
// +build linux,cgo
package devmapper
import (
"bufio"
"bytes"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/sirupsen/logrus"
)
type directLVMConfig struct {
Device string
ThinpPercent uint64
ThinpMetaPercent uint64
AutoExtendPercent uint64
AutoExtendThreshold uint64
MetaDataSize string
}
var (
errThinpPercentMissing = errors.New("must set both `dm.thinp_percent` and `dm.thinp_metapercent` if either is specified")
errThinpPercentTooBig = errors.New("combined `dm.thinp_percent` and `dm.thinp_metapercent` must not be greater than 100")
errMissingSetupDevice = errors.New("must provide device path in `dm.directlvm_device` in order to configure direct-lvm")
)
func validateLVMConfig(cfg directLVMConfig) error {
if cfg.Device == "" {
return errMissingSetupDevice
}
if (cfg.ThinpPercent > 0 && cfg.ThinpMetaPercent == 0) || cfg.ThinpMetaPercent > 0 && cfg.ThinpPercent == 0 {
return errThinpPercentMissing
}
if cfg.ThinpPercent+cfg.ThinpMetaPercent > 100 {
return errThinpPercentTooBig
}
return nil
}
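// Illustrative sketch (added, not part of the vendored file):
//
//	cfg := directLVMConfig{Device: "/dev/xvdf", ThinpPercent: 95, ThinpMetaPercent: 1}
//	err := validateLVMConfig(cfg) // nil: both thinp values set, sum <= 100
//	cfg.ThinpMetaPercent = 0
//	err = validateLVMConfig(cfg) // errThinpPercentMissing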
func checkDevAvailable(dev string) error {
lvmScan, err := exec.LookPath("lvmdiskscan")
if err != nil {
logrus.Debugf("could not find lvmdiskscan: %v", err)
return nil
}
out, err := exec.Command(lvmScan).CombinedOutput()
if err != nil {
logrus.WithError(err).Error(string(out))
return nil
}
if !bytes.Contains(out, []byte(dev)) {
return fmt.Errorf("%s is not available for use with devicemapper", dev)
}
return nil
}
func checkDevInVG(dev string) error {
pvDisplay, err := exec.LookPath("pvdisplay")
if err != nil {
logrus.Debugf("could not find pvdisplay: %v", err)
return nil
}
out, err := exec.Command(pvDisplay, dev).CombinedOutput()
if err != nil {
logrus.WithError(err).Error(string(out))
return nil
}
scanner := bufio.NewScanner(bytes.NewReader(bytes.TrimSpace(out)))
for scanner.Scan() {
fields := strings.SplitAfter(strings.TrimSpace(scanner.Text()), "VG Name")
if len(fields) > 1 {
// got "VG Name" line"
vg := strings.TrimSpace(fields[1])
if len(vg) > 0 {
return fmt.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg)
}
logrus.Error(fields)
break
}
}
return nil
}
func checkDevHasFS(dev string) error {
blkid, err := exec.LookPath("blkid")
if err != nil {
logrus.Debugf("could not find blkid %v", err)
return nil
}
out, err := exec.Command(blkid, dev).CombinedOutput()
if err != nil {
logrus.WithError(err).Error(string(out))
return nil
}
fields := bytes.Fields(out)
for _, f := range fields {
kv := bytes.Split(f, []byte{'='})
if bytes.Equal(kv[0], []byte("TYPE")) {
v := bytes.Trim(kv[1], "\"")
if len(v) > 0 {
return fmt.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev)
}
return nil
}
}
return nil
}
func verifyBlockDevice(dev string, force bool) error {
absPath, err := filepath.Abs(dev)
if err != nil {
return fmt.Errorf("unable to get absolute path for %s: %s", dev, err)
}
realPath, err := filepath.EvalSymlinks(absPath)
if err != nil {
return fmt.Errorf("failed to canonicalise path for %s: %s", dev, err)
}
if err := checkDevAvailable(absPath); err != nil {
logrus.Infof("block device '%s' not available, checking '%s'", absPath, realPath)
if err := checkDevAvailable(realPath); err != nil {
return fmt.Errorf("neither '%s' nor '%s' are in the output of lvmdiskscan, can't use device", absPath, realPath)
}
}
if err := checkDevInVG(realPath); err != nil {
return err
}
if force {
return nil
}
if err := checkDevHasFS(realPath); err != nil {
return err
}
return nil
}
func readLVMConfig(root string) (directLVMConfig, error) {
var cfg directLVMConfig
p := filepath.Join(root, "setup-config.json")
b, err := os.ReadFile(p)
if err != nil {
if os.IsNotExist(err) {
return cfg, nil
}
return cfg, fmt.Errorf("reading existing setup config: %w", err)
}
// check if this is just an empty file, no need to produce a json error later if so
if len(b) == 0 {
return cfg, nil
}
if err := json.Unmarshal(b, &cfg); err != nil {
return cfg, fmt.Errorf("unmarshaling previous device setup config: %w", err)
}
return cfg, nil
}
func writeLVMConfig(root string, cfg directLVMConfig) error {
p := filepath.Join(root, "setup-config.json")
b, err := json.Marshal(cfg)
if err != nil {
return fmt.Errorf("marshalling direct lvm config: %w", err)
}
if err := os.WriteFile(p, b, 0o600); err != nil {
return fmt.Errorf("writing direct lvm config to file: %w", err)
}
return nil
}
func setupDirectLVM(cfg directLVMConfig) error {
lvmProfileDir := "/etc/lvm/profile"
binaries := []string{"pvcreate", "vgcreate", "lvcreate", "lvconvert", "lvchange", "thin_check"}
for _, bin := range binaries {
if _, err := exec.LookPath(bin); err != nil {
return fmt.Errorf("looking up command `"+bin+"` while setting up direct lvm: %w", err)
}
}
err := os.MkdirAll(lvmProfileDir, 0o755)
if err != nil {
return fmt.Errorf("creating lvm profile directory: %w", err)
}
if cfg.AutoExtendPercent == 0 {
cfg.AutoExtendPercent = 20
}
if cfg.AutoExtendThreshold == 0 {
cfg.AutoExtendThreshold = 80
}
if cfg.ThinpPercent == 0 {
cfg.ThinpPercent = 95
}
if cfg.ThinpMetaPercent == 0 {
cfg.ThinpMetaPercent = 1
}
if cfg.MetaDataSize == "" {
cfg.MetaDataSize = "128k"
}
out, err := exec.Command("pvcreate", "--metadatasize", cfg.MetaDataSize, "-f", cfg.Device).CombinedOutput()
if err != nil {
return fmt.Errorf("%v: %w", string(out), err)
}
out, err = exec.Command("vgcreate", "storage", cfg.Device).CombinedOutput()
if err != nil {
return fmt.Errorf("%v: %w", string(out), err)
}
out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpool", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput()
if err != nil {
return fmt.Errorf("%v: %w", string(out), err)
}
out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpoolmeta", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpMetaPercent)).CombinedOutput()
if err != nil {
return fmt.Errorf("%v: %w", string(out), err)
}
out, err = exec.Command("lvconvert", "-y", "--zero", "n", "-c", "512K", "--thinpool", "storage/thinpool", "--poolmetadata", "storage/thinpoolmeta").CombinedOutput()
if err != nil {
return fmt.Errorf("%v: %w", string(out), err)
}
profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent)
err = os.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0o600)
if err != nil {
return fmt.Errorf("writing storage thinp autoextend profile: %w", err)
}
out, err = exec.Command("lvchange", "--metadataprofile", "storage-thinpool", "storage/thinpool").CombinedOutput()
if err != nil {
return fmt.Errorf("%s: %w", string(out), err)
}
return nil
}
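// Note (added): with the defaults filled in, the sequence above is roughly
// equivalent to running
//
//	pvcreate --metadatasize 128k -f <device>
//	vgcreate storage <device>
//	lvcreate --wipesignatures y -n thinpool storage --extents 95%VG
//	lvcreate --wipesignatures y -n thinpoolmeta storage --extents 1%VG
//	lvconvert -y --zero n -c 512K --thinpool storage/thinpool --poolmetadata storage/thinpoolmeta
//	lvchange --metadataprofile storage-thinpool storage/thinpool
//
// plus writing the thin-pool autoextend profile.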

File diff suppressed because it is too large

View file

@ -0,0 +1,109 @@
//go:build linux && cgo
// +build linux,cgo
package devmapper
// Definition of struct dm_task and sub structures (from lvm2)
//
// struct dm_ioctl {
// /*
// * The version number is made up of three parts:
// * major - no backward or forward compatibility,
// * minor - only backwards compatible,
// * patch - both backwards and forwards compatible.
// *
// * All clients of the ioctl interface should fill in the
// * version number of the interface that they were
// * compiled with.
// *
// * All recognized ioctl commands (ie. those that don't
// * return -ENOTTY) fill out this field, even if the
// * command failed.
// */
// uint32_t version[3]; /* in/out */
// uint32_t data_size; /* total size of data passed in
// * including this struct */
// uint32_t data_start; /* offset to start of data
// * relative to start of this struct */
// uint32_t target_count; /* in/out */
// int32_t open_count; /* out */
// uint32_t flags; /* in/out */
// /*
// * event_nr holds either the event number (input and output) or the
// * udev cookie value (input only).
// * The DM_DEV_WAIT ioctl takes an event number as input.
// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls
// * use the field as a cookie to return in the DM_COOKIE
// * variable with the uevents they issue.
// * For output, the ioctls return the event number, not the cookie.
// */
// uint32_t event_nr; /* in/out */
// uint32_t padding;
// uint64_t dev; /* in/out */
// char name[DM_NAME_LEN]; /* device name */
// char uuid[DM_UUID_LEN]; /* unique identifier for
// * the block device */
// char data[7]; /* padding or data */
// };
// struct target {
// uint64_t start;
// uint64_t length;
// char *type;
// char *params;
// struct target *next;
// };
// typedef enum {
// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */
// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */
// } dm_add_node_t;
// struct dm_task {
// int type;
// char *dev_name;
// char *mangled_dev_name;
// struct target *head, *tail;
// int read_only;
// uint32_t event_nr;
// int major;
// int minor;
// int allow_default_major_fallback;
// uid_t uid;
// gid_t gid;
// mode_t mode;
// uint32_t read_ahead;
// uint32_t read_ahead_flags;
// union {
// struct dm_ioctl *v4;
// } dmi;
// char *newname;
// char *message;
// char *geometry;
// uint64_t sector;
// int no_flush;
// int no_open_count;
// int skip_lockfs;
// int query_inactive_table;
// int suppress_identical_reload;
// dm_add_node_t add_node;
// uint64_t existing_table_size;
// int cookie_set;
// int new_uuid;
// int secure_data;
// int retry_remove;
// int enable_checks;
// int expected_errno;
// char *uuid;
// char *mangled_uuid;
// };
//

View file

@ -0,0 +1,271 @@
//go:build linux && cgo
// +build linux,cgo
package devmapper
import (
"fmt"
"os"
"path"
"strconv"
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/devicemapper"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/locker"
"github.com/containers/storage/pkg/mount"
units "github.com/docker/go-units"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
const defaultPerms = os.FileMode(0o555)
func init() {
graphdriver.MustRegister("devicemapper", Init)
}
// Driver contains the device set mounted and the home directory
type Driver struct {
*DeviceSet
home string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
ctr *graphdriver.RefCounter
locker *locker.Locker
}
// Init creates a driver with the given home and the set of options.
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
deviceSet, err := NewDeviceSet(home, true, options.DriverOptions, options.UIDMaps, options.GIDMaps)
if err != nil {
return nil, err
}
if err := mount.MakePrivate(home); err != nil {
return nil, err
}
d := &Driver{
DeviceSet: deviceSet,
home: home,
uidMaps: options.UIDMaps,
gidMaps: options.GIDMaps,
ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
locker: locker.New(),
}
return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil
}
func (d *Driver) String() string {
return "devicemapper"
}
// Status returns the status about the driver in a printable format.
// Information returned contains Pool Name, Data File, Metadata file, disk usage by
// the data and metadata, etc.
func (d *Driver) Status() [][2]string {
s := d.DeviceSet.Status()
status := [][2]string{
{"Pool Name", s.PoolName},
{"Pool Blocksize", units.HumanSize(float64(s.SectorSize))},
{"Base Device Size", units.HumanSize(float64(s.BaseDeviceSize))},
{"Backing Filesystem", s.BaseDeviceFS},
{"Data file", s.DataFile},
{"Metadata file", s.MetadataFile},
{"Data Space Used", units.HumanSize(float64(s.Data.Used))},
{"Data Space Total", units.HumanSize(float64(s.Data.Total))},
{"Data Space Available", units.HumanSize(float64(s.Data.Available))},
{"Metadata Space Used", units.HumanSize(float64(s.Metadata.Used))},
{"Metadata Space Total", units.HumanSize(float64(s.Metadata.Total))},
{"Metadata Space Available", units.HumanSize(float64(s.Metadata.Available))},
{"Thin Pool Minimum Free Space", units.HumanSize(float64(s.MinFreeSpace))},
{"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)},
{"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)},
{"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)},
{"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)},
}
if len(s.DataLoopback) > 0 {
status = append(status, [2]string{"Data loop file", s.DataLoopback})
}
if len(s.MetadataLoopback) > 0 {
status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback})
}
if vStr, err := devicemapper.GetLibraryVersion(); err == nil {
status = append(status, [2]string{"Library Version", vStr})
}
return status
}
// Metadata returns a map of information about the device.
func (d *Driver) Metadata(id string) (map[string]string, error) {
m, err := d.DeviceSet.exportDeviceMetadata(id)
if err != nil {
return nil, err
}
metadata := make(map[string]string)
metadata["DeviceId"] = strconv.Itoa(m.deviceID)
metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10)
metadata["DeviceName"] = m.deviceName
return metadata, nil
}
// Cleanup unmounts a device.
func (d *Driver) Cleanup() error {
err := d.DeviceSet.Shutdown(d.home)
umountErr := mount.Unmount(d.home)
// in case we have two errors, prefer the one from Shutdown()
if err != nil {
return err
}
return umountErr
}
// CreateFromTemplate creates a layer with the same contents and parent as another layer.
func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
return d.Create(id, template, opts)
}
// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
return d.Create(id, parent, opts)
}
// Create adds a device with a given id and the parent.
func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
var storageOpt map[string]string
if opts != nil {
storageOpt = opts.StorageOpt
}
if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil {
return err
}
return nil
}
// Remove removes a device with a given id, unmounts the filesystem, and removes the mount point.
func (d *Driver) Remove(id string) error {
d.locker.Lock(id)
defer d.locker.Unlock(id)
if !d.DeviceSet.HasDevice(id) {
// Consider removing a non-existing device a no-op
// This is useful to be able to progress on container removal
// if the underlying device has gone away due to earlier errors
return nil
}
// This assumes the device has been properly Get/Put:ed and thus is unmounted
if err := d.DeviceSet.DeleteDevice(id, false); err != nil {
return fmt.Errorf("failed to remove device %s: %v", id, err)
}
// Most probably the mount point is already removed on Put()
// (see DeviceSet.UnmountDevice()), but just in case it was not
// let's try to remove it here as well, ignoring errors as
// an older kernel can return EBUSY if e.g. the mount was leaked
// to other mount namespaces. A failure to remove the container's
// mount point is not important and should not be treated
// as a failure to remove the container.
mp := path.Join(d.home, "mnt", id)
err := unix.Rmdir(mp)
if err != nil && !os.IsNotExist(err) {
logrus.WithField("storage-driver", "devicemapper").Warnf("unable to remove mount point %q: %s", mp, err)
}
return nil
}
// Get mounts a device with given id into the root filesystem
func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
d.locker.Lock(id)
defer d.locker.Unlock(id)
mp := path.Join(d.home, "mnt", id)
rootFs := path.Join(mp, "rootfs")
if count := d.ctr.Increment(mp); count > 1 {
return rootFs, nil
}
uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
d.ctr.Decrement(mp)
return "", err
}
// Create the target directories if they don't exist
if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0o755, uid, gid); err != nil {
d.ctr.Decrement(mp)
return "", err
}
if err := idtools.MkdirAs(mp, 0o755, uid, gid); err != nil && !os.IsExist(err) {
d.ctr.Decrement(mp)
return "", err
}
// Mount the device
if err := d.DeviceSet.MountDevice(id, mp, options); err != nil {
d.ctr.Decrement(mp)
return "", err
}
if err := idtools.MkdirAllAs(rootFs, defaultPerms, uid, gid); err != nil {
d.ctr.Decrement(mp)
d.DeviceSet.UnmountDevice(id, mp)
return "", err
}
idFile := path.Join(mp, "id")
if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) {
// Create an "id" file with the container/image id in it to help reconstruct this in case
// of later problems
if err := os.WriteFile(idFile, []byte(id), 0o600); err != nil {
d.ctr.Decrement(mp)
d.DeviceSet.UnmountDevice(id, mp)
return "", err
}
}
return rootFs, nil
}
// Put unmounts a device and removes it.
func (d *Driver) Put(id string) error {
d.locker.Lock(id)
defer d.locker.Unlock(id)
mp := path.Join(d.home, "mnt", id)
if count := d.ctr.Decrement(mp); count > 0 {
return nil
}
err := d.DeviceSet.UnmountDevice(id, mp)
if err != nil {
logrus.Errorf("devmapper: Error unmounting device %s: %v", id, err)
}
return err
}
// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
// For devmapper, it queries the mnt path for this ID.
func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
d.locker.Lock(id)
defer d.locker.Unlock(id)
return directory.Usage(path.Join(d.home, "mnt", id))
}
// Exists checks to see if the device exists.
func (d *Driver) Exists(id string) bool {
return d.DeviceSet.HasDevice(id)
}
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
return nil
}

View file

@ -0,0 +1,8 @@
//go:build linux && cgo
// +build linux,cgo
package devmapper
import jsoniter "github.com/json-iterator/go"
var json = jsoniter.ConfigCompatibleWithStandardLibrary

View file

@ -0,0 +1,89 @@
//go:build linux && cgo
// +build linux,cgo
package devmapper
import (
"bytes"
"fmt"
"os"
"path/filepath"
"golang.org/x/sys/unix"
)
// FIXME: this is copy-pasted from the aufs driver.
// It should be moved into the core.
// Mounted returns true if a mount point exists.
func Mounted(mountpoint string) (bool, error) {
var mntpointSt unix.Stat_t
if err := unix.Stat(mountpoint, &mntpointSt); err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
var parentSt unix.Stat_t
if err := unix.Stat(filepath.Join(mountpoint, ".."), &parentSt); err != nil {
return false, err
}
return mntpointSt.Dev != parentSt.Dev, nil
}
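// Note (added): this works because a mount point and its parent directory
// report different st_dev values, so no /proc/self/mountinfo parsing is
// needed. It does not detect bind mounts that stay on the same device.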
type probeData struct {
fsName string
magic string
offset uint64
}
// ProbeFsType returns the filesystem name for the given device id.
func ProbeFsType(device string) (string, error) {
probes := []probeData{
{"btrfs", "_BHRfS_M", 0x10040},
{"ext4", "\123\357", 0x438},
{"xfs", "XFSB", 0},
}
maxLen := uint64(0)
for _, p := range probes {
l := p.offset + uint64(len(p.magic))
if l > maxLen {
maxLen = l
}
}
file, err := os.Open(device)
if err != nil {
return "", err
}
defer file.Close()
buffer := make([]byte, maxLen)
l, err := file.Read(buffer)
if err != nil {
return "", err
}
if uint64(l) != maxLen {
return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device)
}
for _, p := range probes {
if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) {
return p.fsName, nil
}
}
return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device)
}
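// Note (added): the magic strings are the on-disk superblock magics: "_BHRfS_M"
// at 0x10040 for btrfs, "\123\357" (octal for the little-endian ext magic
// 0xEF53) at 0x438 for ext4, and "XFSB" at offset 0 for XFS. Usage sketch
// (device path is hypothetical):
//
//	fsName, err := ProbeFsType("/dev/mapper/storage-base")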
func joinMountOptions(a, b string) string {
if a == "" {
return b
}
if b == "" {
return a
}
return a + "," + b
}

472 vendor/github.com/containers/storage/drivers/driver.go generated vendored Normal file
View file

@ -0,0 +1,472 @@
package graphdriver
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
"github.com/vbatts/tar-split/tar/storage"
)
// FsMagic unsigned id of the filesystem in use.
type FsMagic uint32
const (
// FsMagicUnsupported is a predefined constant value other than a valid filesystem id.
FsMagicUnsupported = FsMagic(0x00000000)
)
var (
// All registered drivers
drivers map[string]InitFunc
// ErrNotSupported returned when driver is not supported.
ErrNotSupported = errors.New("driver not supported")
// ErrPrerequisites returned when driver does not meet prerequisites.
ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
// ErrIncompatibleFS returned when file system is not supported.
ErrIncompatibleFS = errors.New("backing file system is unsupported for this graph driver")
// ErrLayerUnknown returned when the specified layer is unknown by the driver.
ErrLayerUnknown = errors.New("unknown layer")
)
// CreateOpts contains optional arguments for Create() and CreateReadWrite()
// methods.
type CreateOpts struct {
MountLabel string
StorageOpt map[string]string
*idtools.IDMappings
ignoreChownErrors bool
}
// MountOpts contains optional arguments for Driver.Get() methods.
type MountOpts struct {
// MountLabel is the MAC label to assign to the mount point (SELinux)
MountLabel string
// UidMaps & GidMaps are the User Namespace mappings to be assigned to content in the mount point
UidMaps []idtools.IDMap //nolint: revive,golint
GidMaps []idtools.IDMap //nolint: revive,golint
Options []string
// Volatile specifies whether the container storage can be optimized
// at the cost of not syncing all the dirty files in memory.
Volatile bool
// DisableShifting forces the driver to not do any ID shifting at runtime.
DisableShifting bool
}
// ApplyDiffOpts contains optional arguments for ApplyDiff methods.
type ApplyDiffOpts struct {
Diff io.Reader
Mappings *idtools.IDMappings
MountLabel string
IgnoreChownErrors bool
ForceMask *os.FileMode
}
// InitFunc initializes the storage driver.
type InitFunc func(homedir string, options Options) (Driver, error)
// ProtoDriver defines the basic capabilities of a driver.
// This interface exists solely to be a minimum set of methods
// for client code which choose not to implement the entire Driver
// interface and use the NaiveDiffDriver wrapper constructor.
//
// Use of ProtoDriver directly by client code is not recommended.
type ProtoDriver interface {
// String returns a string representation of this driver.
String() string
// CreateReadWrite creates a new, empty filesystem layer that is ready
// to be used as the storage for a container. Additional options can
// be passed in opts. parent may be "" and opts may be nil.
CreateReadWrite(id, parent string, opts *CreateOpts) error
// Create creates a new, empty, filesystem layer with the
// specified id and parent and options passed in opts. Parent
// may be "" and opts may be nil.
Create(id, parent string, opts *CreateOpts) error
// CreateFromTemplate creates a new filesystem layer with the specified id
// and parent, with contents identical to the specified template layer.
CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *CreateOpts, readWrite bool) error
// Remove attempts to remove the filesystem layer with this id.
Remove(id string) error
// Get returns the mountpoint for the layered filesystem referred
// to by this id. You can optionally specify a mountLabel or "".
// Optionally it gets the mappings used to create the layer.
// Returns the absolute path to the mounted layered filesystem.
Get(id string, options MountOpts) (dir string, err error)
// Put releases the system resources for the specified id,
// e.g, unmounting layered filesystem.
Put(id string) error
// Exists returns whether a filesystem layer with the specified
// ID exists on this driver.
Exists(id string) bool
// Returns a list of layer ids that exist on this driver (does not include
// additional storage layers). Not supported by all backends.
// If the driver requires that layers be removed in a particular order,
// usually due to parent-child relationships that it cares about, the
// list should be sorted well enough so that if all layers need to be
// removed, they can be removed in the order in which they're returned.
ListLayers() ([]string, error)
// Status returns a set of key-value pairs which give low
// level diagnostic status about this driver.
Status() [][2]string
// Returns a set of key-value pairs which give low level information
// about the image/container the driver is managing.
Metadata(id string) (map[string]string, error)
// ReadWriteDiskUsage returns the disk usage of the writable directory for the specified ID.
ReadWriteDiskUsage(id string) (*directory.DiskUsage, error)
// Cleanup performs necessary tasks to release resources
// held by the driver, e.g., unmounting all layered filesystems
// known to this driver.
Cleanup() error
// AdditionalImageStores returns additional image stores supported by the driver
// This API is experimental and can be changed without bumping the major version number.
AdditionalImageStores() []string
}
// DiffDriver is the interface to use to implement graph diffs
type DiffDriver interface {
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
Diff(id string, idMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error)
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
Changes(id string, idMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error)
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
// The io.Reader must be an uncompressed stream.
ApplyDiff(id string, parent string, options ApplyDiffOpts) (size int64, err error)
// DiffSize calculates the changes between the specified id
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, mountLabel string) (size int64, err error)
}
// LayerIDMapUpdater is the interface that implements ID map changes for layers.
type LayerIDMapUpdater interface {
// UpdateLayerIDMap walks the layer's filesystem tree, changing the ownership
// information using the toContainer and toHost mappings, using them to replace
// on-disk owner UIDs and GIDs which are "host" values in the first map with
// UIDs and GIDs for "host" values from the second map which correspond to the
// same "container" IDs. This method should only be called after a layer is
// first created and populated, and before it is mounted, as other changes made
// relative to a parent layer, but before this method is called, may be discarded
// by Diff().
UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error
// SupportsShifting tells whether the driver supports shifting of the UIDs/GIDs in an
// image, so that it is not required to chown the files when running in a user namespace.
SupportsShifting() bool
}
// Driver is the interface for layered/snapshot file system drivers.
type Driver interface {
ProtoDriver
DiffDriver
LayerIDMapUpdater
}
// DriverWithDifferOutput is the result of ApplyDiffWithDiffer
// This API is experimental and can be changed without bumping the major version number.
type DriverWithDifferOutput struct {
Differ Differ
Target string
Size int64
UIDs []uint32
GIDs []uint32
UncompressedDigest digest.Digest
Metadata string
BigData map[string][]byte
TarSplit []byte
TOCDigest digest.Digest
// Artifacts is a collection of additional artifacts
// generated by the differ that the storage driver can use.
Artifacts map[string]interface{}
}
type DifferOutputFormat int
const (
// DifferOutputFormatDir means the output is a directory and it will
// keep the original layout.
DifferOutputFormatDir = iota
// DifferOutputFormatFlat will store the files by their checksum, in the form
// checksum[0:2]/checksum[2:]
DifferOutputFormatFlat
)
// DifferOptions overrides how the differ works
type DifferOptions struct {
// Format defines the destination directory layout format
Format DifferOutputFormat
}
// Differ defines the interface for using a custom differ.
// This API is experimental and can be changed without bumping the major version number.
type Differ interface {
ApplyDiff(dest string, options *archive.TarOptions, differOpts *DifferOptions) (DriverWithDifferOutput, error)
}
// DriverWithDiffer is the interface for direct diff access.
// This API is experimental and can be changed without bumping the major version number.
type DriverWithDiffer interface {
Driver
// ApplyDiffWithDiffer applies the changes using the callback function.
// If id is empty, then a staging directory is created. The staging directory is guaranteed to be usable with ApplyDiffFromStagingDirectory.
ApplyDiffWithDiffer(id, parent string, options *ApplyDiffOpts, differ Differ) (output DriverWithDifferOutput, err error)
// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *DriverWithDifferOutput, options *ApplyDiffOpts) error
// CleanupStagingDirectory cleans up the staging directory. It can be used to clean up the staging directory on errors
CleanupStagingDirectory(stagingDirectory string) error
// DifferTarget gets the location where files are stored for the layer.
DifferTarget(id string) (string, error)
}
// Capabilities defines a list of capabilities a driver may implement.
// These capabilities are not required; however, they do determine how a
// graphdriver can be used.
type Capabilities struct {
// Flags that this driver is capable of reproducing exactly equivalent
// diffs for read-only layers. If set, clients can rely on the driver
// for consistent tar streams, and avoid extra processing to account
// for potential differences (eg: the layer store's use of tar-split).
ReproducesExactDiffs bool
}
// CapabilityDriver is the interface for layered file system drivers that
// can report on their Capabilities.
type CapabilityDriver interface {
Capabilities() Capabilities
}
// AdditionalLayer represents a layer that is stored in the additional layer store
// This API is experimental and can be changed without bumping the major version number.
type AdditionalLayer interface {
// CreateAs creates a new layer from this additional layer
CreateAs(id, parent string) error
// Info returns arbitrary information stored along with this layer (i.e. `info` file)
Info() (io.ReadCloser, error)
// Blob returns a reader of the raw contents of this layer.
Blob() (io.ReadCloser, error)
// Release tells the additional layer store that we don't use this handler.
Release()
}
// AdditionalLayerStoreDriver is the interface for driver that supports
// additional layer store functionality.
// This API is experimental and can be changed without bumping the major version number.
type AdditionalLayerStoreDriver interface {
Driver
// LookupAdditionalLayer looks up additional layer store by the specified
// digest and ref and returns an object representing that layer.
LookupAdditionalLayer(d digest.Digest, ref string) (AdditionalLayer, error)
// LookupAdditionalLayerByID looks up additional layer store by the specified
// ID and returns an object representing that layer.
LookupAdditionalLayerByID(id string) (AdditionalLayer, error)
}
// DiffGetterDriver is the interface for layered file system drivers that
// provide a specialized function for getting file contents for tar-split.
type DiffGetterDriver interface {
Driver
// DiffGetter returns an interface to efficiently retrieve the contents
// of files in a layer.
DiffGetter(id string) (FileGetCloser, error)
}
// FileGetCloser extends the storage.FileGetter interface with a Close method
// for cleaning up.
type FileGetCloser interface {
storage.FileGetter
// Close cleans up any resources associated with the FileGetCloser.
Close() error
}
// Checker makes checks on specified filesystems.
type Checker interface {
// IsMounted returns true if the provided path is mounted for the specific checker
IsMounted(path string) bool
}
func init() {
drivers = make(map[string]InitFunc)
}
// MustRegister registers an InitFunc for the driver, or panics.
// It is suitable for packages init() sections.
func MustRegister(name string, initFunc InitFunc) {
if err := Register(name, initFunc); err != nil {
panic(fmt.Sprintf("failed to register containers/storage graph driver %q: %v", name, err))
}
}
// Register registers an InitFunc for the driver.
func Register(name string, initFunc InitFunc) error {
if _, exists := drivers[name]; exists {
return fmt.Errorf("name already registered %s", name)
}
drivers[name] = initFunc
return nil
}
// GetDriver initializes and returns the registered driver
func GetDriver(name string, config Options) (Driver, error) {
if initFunc, exists := drivers[name]; exists {
return initFunc(filepath.Join(config.Root, name), config)
}
logrus.Errorf("Failed to GetDriver graph %s %s", name, config.Root)
return nil, fmt.Errorf("failed to GetDriver graph %s %s: %w", name, config.Root, ErrNotSupported)
}
// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins
func getBuiltinDriver(name, home string, options Options) (Driver, error) {
if initFunc, exists := drivers[name]; exists {
return initFunc(filepath.Join(home, name), options)
}
logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home)
return nil, fmt.Errorf("failed to built-in GetDriver graph %s %s: %w", name, home, ErrNotSupported)
}
// Options is used to initialize a graphdriver
type Options struct {
Root string
RunRoot string
ImageStore string
DriverPriority []string
DriverOptions []string
UIDMaps []idtools.IDMap
GIDMaps []idtools.IDMap
ExperimentalEnabled bool
}
// New creates the driver and initializes it at the specified root.
func New(name string, config Options) (Driver, error) {
if name != "" {
logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver
return GetDriver(name, config)
}
// Guess for prior driver
driversMap := ScanPriorDrivers(config.Root)
// use the supplied priority list unless it is empty
prioList := config.DriverPriority
if len(prioList) == 0 {
prioList = Priority
}
for _, name := range prioList {
if name == "vfs" && len(config.DriverPriority) == 0 {
// don't use vfs even if there is state present and vfs
// has not been explicitly added to the override driver
// priority list
continue
}
if _, prior := driversMap[name]; prior {
// of the state found from prior drivers, check in order of our priority
// which we would prefer
driver, err := getBuiltinDriver(name, config.Root, config)
if err != nil {
// unlike below, we will return error here, because there is prior
// state, and now it is no longer supported/prereq/compatible, so
// something changed and needs attention. Otherwise the daemon's
// images would just "disappear".
logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err)
return nil, err
}
// abort starting when there are other prior configured drivers
// to ensure the user explicitly selects the driver to load
if len(driversMap)-1 > 0 {
var driversSlice []string
for name := range driversMap {
driversSlice = append(driversSlice, name)
}
return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s <DRIVER>)", config.Root, strings.Join(driversSlice, ", "))
}
logrus.Infof("[graphdriver] using prior storage driver: %s", name)
return driver, nil
}
}
// Check for priority drivers first
for _, name := range prioList {
driver, err := getBuiltinDriver(name, config.Root, config)
if err != nil {
if isDriverNotSupported(err) {
continue
}
return nil, err
}
return driver, nil
}
// Check all registered drivers if no priority driver is found
for name, initFunc := range drivers {
driver, err := initFunc(filepath.Join(config.Root, name), config)
if err != nil {
if isDriverNotSupported(err) {
continue
}
return nil, err
}
return driver, nil
}
return nil, fmt.Errorf("no supported storage backend found")
}
// isDriverNotSupported returns true if the error initializing
// the graph driver is a non-supported error.
func isDriverNotSupported(err error) bool {
return errors.Is(err, ErrNotSupported) || errors.Is(err, ErrPrerequisites) || errors.Is(err, ErrIncompatibleFS)
}
// ScanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
func ScanPriorDrivers(root string) map[string]bool {
driversMap := make(map[string]bool)
for driver := range drivers {
p := filepath.Join(root, driver)
if _, err := os.Stat(p); err == nil {
driversMap[driver] = true
}
}
return driversMap
}
// driverPut is driver.Put, but errors are handled either by updating mainErr or just logging.
// Typical usage:
//
// func …(…) (err error) {
// …
// defer driverPut(driver, id, &err)
// }
func driverPut(driver ProtoDriver, id string, mainErr *error) {
if err := driver.Put(id); err != nil {
err = fmt.Errorf("unmounting layer %s: %w", id, err)
if *mainErr == nil {
*mainErr = err
} else {
logrus.Errorf(err.Error())
}
}
}
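
A hedged sketch of how an out-of-tree backend would plug into this registry; the package name, driver name, and Init body are hypothetical:

package demo // hypothetical out-of-tree driver

import (
	graphdriver "github.com/containers/storage/drivers"
)

func init() {
	// MustRegister panics on duplicate names, which is what a
	// package-level registration wants.
	graphdriver.MustRegister("demo", Init)
}

// Init is the InitFunc; graphdriver.New hands it <graphroot>/demo as homedir.
func Init(homedir string, options graphdriver.Options) (graphdriver.Driver, error) {
	return nil, graphdriver.ErrNotSupported // placeholder: construct the real driver here
}

Passing an empty name to graphdriver.New then scans the graphroot for prior driver state and falls back to the platform Priority list, as the function above shows.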


@ -0,0 +1,12 @@
package graphdriver
// Slice of drivers that should be used in order
var Priority = []string{
"vfs",
}
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
// Note it is OK to return FsMagicUnsupported on Windows.
return FsMagicUnsupported, nil
}


@ -0,0 +1,48 @@
package graphdriver
import (
"golang.org/x/sys/unix"
"github.com/containers/storage/pkg/mount"
)
const (
// FsMagicZfs filesystem id for Zfs
FsMagicZfs = FsMagic(0x2fc12fc1)
)
var (
// Slice of drivers that should be used in order
Priority = []string{
"zfs",
"vfs",
}
// FsNames maps filesystem id to name of the filesystem.
FsNames = map[FsMagic]string{
FsMagicZfs: "zfs",
}
)
// NewDefaultChecker returns a check that parses /proc/mountinfo to check
// if the specified path is mounted.
// No-op on FreeBSD.
func NewDefaultChecker() Checker {
return &defaultChecker{}
}
type defaultChecker struct{}
func (c *defaultChecker) IsMounted(path string) bool {
m, _ := mount.Mounted(path)
return m
}
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf unix.Statfs_t
if err := unix.Statfs(mountPath, &buf); err != nil {
return false, err
}
return FsMagic(buf.Type) == fsType, nil
}


@ -0,0 +1,202 @@
//go:build linux
// +build linux
package graphdriver
import (
"path/filepath"
"github.com/containers/storage/pkg/mount"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
const (
// FsMagicAufs filesystem id for Aufs
FsMagicAufs = FsMagic(0x61756673)
// FsMagicBtrfs filesystem id for Btrfs
FsMagicBtrfs = FsMagic(0x9123683E)
// FsMagicCramfs filesystem id for Cramfs
FsMagicCramfs = FsMagic(0x28cd3d45)
// FsMagicEcryptfs filesystem id for eCryptfs
FsMagicEcryptfs = FsMagic(0xf15f)
// FsMagicExtfs filesystem id for Extfs
FsMagicExtfs = FsMagic(0x0000EF53)
// FsMagicF2fs filesystem id for F2fs
FsMagicF2fs = FsMagic(0xF2F52010)
// FsMagicGPFS filesystem id for GPFS
FsMagicGPFS = FsMagic(0x47504653)
// FsMagicJffs2Fs filesystem id for Jffs2Fs
FsMagicJffs2Fs = FsMagic(0x000072b6)
// FsMagicJfs filesystem id for Jfs
FsMagicJfs = FsMagic(0x3153464a)
// FsMagicNfsFs filesystem id for NfsFs
FsMagicNfsFs = FsMagic(0x00006969)
// FsMagicRAMFs filesystem id for RamFs
FsMagicRAMFs = FsMagic(0x858458f6)
// FsMagicReiserFs filesystem id for ReiserFs
FsMagicReiserFs = FsMagic(0x52654973)
// FsMagicSmbFs filesystem id for SmbFs
FsMagicSmbFs = FsMagic(0x0000517B)
// FsMagicSquashFs filesystem id for SquashFs
FsMagicSquashFs = FsMagic(0x73717368)
// FsMagicTmpFs filesystem id for TmpFs
FsMagicTmpFs = FsMagic(0x01021994)
// FsMagicVxFS filesystem id for VxFs
FsMagicVxFS = FsMagic(0xa501fcf5)
// FsMagicXfs filesystem id for Xfs
FsMagicXfs = FsMagic(0x58465342)
// FsMagicZfs filesystem id for Zfs
FsMagicZfs = FsMagic(0x2fc12fc1)
// FsMagicOverlay filesystem id for overlay
FsMagicOverlay = FsMagic(0x794C7630)
// FsMagicFUSE filesystem id for FUSE
FsMagicFUSE = FsMagic(0x65735546)
// FsMagicAcfs filesystem id for Acfs
FsMagicAcfs = FsMagic(0x61636673)
// FsMagicAfs filesystem id for Afs
FsMagicAfs = FsMagic(0x5346414f)
// FsMagicCephFs filesystem id for Ceph
FsMagicCephFs = FsMagic(0x00C36400)
// FsMagicCIFS filesystem id for CIFS
FsMagicCIFS = FsMagic(0xFF534D42)
// FsMagicEROFS filesystem id for EROFS
FsMagicEROFS = FsMagic(0xE0F5E1E2)
// FsMagicFHGFSFs filesystem id for FHGFS
FsMagicFHGFSFs = FsMagic(0x19830326)
// FsMagicIBRIX filesystem id for IBRIX
FsMagicIBRIX = FsMagic(0x013111A8)
// FsMagicKAFS filesystem id for KAFS
FsMagicKAFS = FsMagic(0x6B414653)
// FsMagicLUSTRE filesystem id for LUSTRE
FsMagicLUSTRE = FsMagic(0x0BD00BD0)
// FsMagicNCP filesystem id for NCP
FsMagicNCP = FsMagic(0x564C)
// FsMagicNFSD filesystem id for NFSD
FsMagicNFSD = FsMagic(0x6E667364)
// FsMagicOCFS2 filesystem id for OCFS2
FsMagicOCFS2 = FsMagic(0x7461636F)
// FsMagicPANFS filesystem id for PANFS
FsMagicPANFS = FsMagic(0xAAD7AAEA)
// FsMagicPRLFS filesystem id for PRLFS
FsMagicPRLFS = FsMagic(0x7C7C6673)
// FsMagicSMB2 filesystem id for SMB2
FsMagicSMB2 = FsMagic(0xFE534D42)
// FsMagicSNFS filesystem id for SNFS
FsMagicSNFS = FsMagic(0xBEEFDEAD)
// FsMagicVBOXSF filesystem id for VBOXSF
FsMagicVBOXSF = FsMagic(0x786F4256)
// FsMagicVXFS filesystem id for VXFS
FsMagicVXFS = FsMagic(0xA501FCF5)
)
var (
// Slice of drivers that should be used in order
Priority = []string{
"overlay",
// We don't support devicemapper without configuration
// "devicemapper",
"aufs",
"btrfs",
"zfs",
"vfs",
}
// FsNames maps filesystem id to name of the filesystem.
FsNames = map[FsMagic]string{
FsMagicAufs: "aufs",
FsMagicBtrfs: "btrfs",
FsMagicCramfs: "cramfs",
FsMagicEcryptfs: "ecryptfs",
FsMagicEROFS: "erofs",
FsMagicExtfs: "extfs",
FsMagicF2fs: "f2fs",
FsMagicGPFS: "gpfs",
FsMagicJffs2Fs: "jffs2",
FsMagicJfs: "jfs",
FsMagicNfsFs: "nfs",
FsMagicOverlay: "overlayfs",
FsMagicRAMFs: "ramfs",
FsMagicReiserFs: "reiserfs",
FsMagicSmbFs: "smb",
FsMagicSquashFs: "squashfs",
FsMagicTmpFs: "tmpfs",
FsMagicUnsupported: "unsupported",
FsMagicVxFS: "vxfs",
FsMagicXfs: "xfs",
FsMagicZfs: "zfs",
}
)
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
var buf unix.Statfs_t
path := filepath.Dir(rootpath)
if err := unix.Statfs(path, &buf); err != nil {
return 0, err
}
if _, ok := FsNames[FsMagic(buf.Type)]; !ok {
logrus.Debugf("Unknown filesystem type %#x reported for %s", buf.Type, path)
}
return FsMagic(buf.Type), nil
}
// NewFsChecker returns a checker configured for the provided FsMagic
func NewFsChecker(t FsMagic) Checker {
return &fsChecker{
t: t,
}
}
type fsChecker struct {
t FsMagic
}
func (c *fsChecker) IsMounted(path string) bool {
m, _ := Mounted(c.t, path)
return m
}
// NewDefaultChecker returns a check that parses /proc/mountinfo to check
// if the specified path is mounted.
func NewDefaultChecker() Checker {
return &defaultChecker{}
}
type defaultChecker struct{}
func (c *defaultChecker) IsMounted(path string) bool {
m, _ := mount.Mounted(path)
return m
}
// isMountPoint checks that the given path is a mount point
func isMountPoint(mountPath string) (bool, error) {
// it is already the root
if mountPath == "/" {
return true, nil
}
var s1, s2 unix.Stat_t
if err := unix.Stat(mountPath, &s1); err != nil {
return true, err
}
if err := unix.Stat(filepath.Dir(mountPath), &s2); err != nil {
return true, err
}
return s1.Dev != s2.Dev, nil
}
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf unix.Statfs_t
if err := unix.Statfs(mountPath, &buf); err != nil {
return false, err
}
if FsMagic(buf.Type) != fsType {
return false, nil
}
return isMountPoint(mountPath)
}
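
A short sketch of the two checker flavors above; the path is illustrative. The statfs-based checker matches a specific filesystem magic, while the default checker only asks whether the path appears in the mount table:

func alreadyMounted(home string) bool {
	// statfs-based: does the path report the overlay filesystem magic?
	if NewFsChecker(FsMagicOverlay).IsMounted(home) {
		return true
	}
	// mount-table-based: is the path listed in /proc/mountinfo at all?
	return NewDefaultChecker().IsMounted(home)
}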


@ -0,0 +1,96 @@
//go:build solaris && cgo
// +build solaris,cgo
package graphdriver
/*
#include <sys/statvfs.h>
#include <stdlib.h>
static inline struct statvfs *getstatfs(char *s) {
struct statvfs *buf;
int err;
buf = (struct statvfs *)malloc(sizeof(struct statvfs));
err = statvfs(s, buf);
return buf;
}
*/
import "C"
import (
"path/filepath"
"unsafe"
"github.com/containers/storage/pkg/mount"
"github.com/sirupsen/logrus"
)
const (
// FsMagicZfs filesystem id for Zfs
FsMagicZfs = FsMagic(0x2fc12fc1)
)
var (
// Slice of drivers that should be used in order
Priority = []string{
"zfs",
}
// FsNames maps filesystem id to name of the filesystem.
FsNames = map[FsMagic]string{
FsMagicZfs: "zfs",
}
)
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
return 0, nil
}
type fsChecker struct {
t FsMagic
}
func (c *fsChecker) IsMounted(path string) bool {
m, _ := Mounted(c.t, path)
return m
}
// NewFsChecker returns a checker configured for the provided FsMagic
func NewFsChecker(t FsMagic) Checker {
return &fsChecker{
t: t,
}
}
// NewDefaultChecker returns a check that parses /proc/mountinfo to check
// if the specified path is mounted.
// No-op on Solaris.
func NewDefaultChecker() Checker {
return &defaultChecker{}
}
type defaultChecker struct{}
func (c *defaultChecker) IsMounted(path string) bool {
m, _ := mount.Mounted(path)
return m
}
// Mounted checks if the given path is mounted as the fs type
// Solaris supports only ZFS for now
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
cs := C.CString(filepath.Dir(mountPath))
defer C.free(unsafe.Pointer(cs))
buf := C.getstatfs(cs)
defer C.free(unsafe.Pointer(buf))
// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
(buf.f_basetype[3] != 0) {
logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
return false, ErrPrerequisites
}
return true, nil
}


@ -0,0 +1,14 @@
//go:build !linux && !windows && !freebsd && !solaris && !darwin
// +build !linux,!windows,!freebsd,!solaris,!darwin
package graphdriver
// Slice of drivers that should be used in order
var Priority = []string{
"unsupported",
}
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
return FsMagicUnsupported, nil
}


@ -0,0 +1,12 @@
package graphdriver
// Slice of drivers that should be used in order
var Priority = []string{
"windowsfilter",
}
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
// Note it is OK to return FsMagicUnsupported on Windows.
return FsMagicUnsupported, nil
}

vendor/github.com/containers/storage/drivers/fsdiff.go

@ -0,0 +1,229 @@
package graphdriver
import (
"io"
"os"
"runtime"
"time"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/unshare"
"github.com/sirupsen/logrus"
)
// ApplyUncompressedLayer defines the unpack method used by the graph
// driver.
var ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer
// NaiveDiffDriver takes a ProtoDriver and adds the
// capability of the Diffing methods which it may or may not
// support on its own. See the comment on the exported
// NewNaiveDiffDriver function below.
// Notably, the AUFS driver doesn't need to be wrapped like this.
type NaiveDiffDriver struct {
ProtoDriver
LayerIDMapUpdater
}
// NewNaiveDiffDriver returns a fully functional driver that wraps the
// given ProtoDriver and adds the capability of the following methods which
// it may or may not support on its own:
//
// Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error)
// Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error)
// ApplyDiff(id, parent string, options ApplyDiffOpts) (size int64, err error)
// DiffSize(id string, idMappings *idtools.IDMappings, parent, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error)
func NewNaiveDiffDriver(driver ProtoDriver, updater LayerIDMapUpdater) Driver {
return &NaiveDiffDriver{ProtoDriver: driver, LayerIDMapUpdater: updater}
}
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (arch io.ReadCloser, err error) {
startTime := time.Now()
driver := gdw.ProtoDriver
if idMappings == nil {
idMappings = &idtools.IDMappings{}
}
if parentMappings == nil {
parentMappings = &idtools.IDMappings{}
}
options := MountOpts{
MountLabel: mountLabel,
Options: []string{"ro"},
}
layerFs, err := driver.Get(id, options)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
driverPut(driver, id, &err)
}
}()
if parent == "" {
archive, err := archive.TarWithOptions(layerFs, &archive.TarOptions{
Compression: archive.Uncompressed,
UIDMaps: idMappings.UIDs(),
GIDMaps: idMappings.GIDs(),
})
if err != nil {
return nil, err
}
return ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
driverPut(driver, id, &err)
return err
}), nil
}
options.Options = append(options.Options, "ro")
parentFs, err := driver.Get(parent, options)
if err != nil {
return nil, err
}
defer driverPut(driver, parent, &err)
changes, err := archive.ChangesDirs(layerFs, idMappings, parentFs, parentMappings)
if err != nil {
return nil, err
}
archive, err := archive.ExportChanges(layerFs, changes, idMappings.UIDs(), idMappings.GIDs())
if err != nil {
return nil, err
}
return ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
driverPut(driver, id, &err)
// NaiveDiffDriver compares file metadata with parent layers. Parent layers
// are extracted from tars with full second precision on modified time.
// We need this hack here to make sure calls within same second receive
// correct result.
time.Sleep(time.Until(startTime.Truncate(time.Second).Add(time.Second)))
return err
}), nil
}
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (_ []archive.Change, retErr error) {
driver := gdw.ProtoDriver
if idMappings == nil {
idMappings = &idtools.IDMappings{}
}
if parentMappings == nil {
parentMappings = &idtools.IDMappings{}
}
options := MountOpts{
MountLabel: mountLabel,
}
layerFs, err := driver.Get(id, options)
if err != nil {
return nil, err
}
defer driverPut(driver, id, &retErr)
parentFs := ""
if parent != "" {
options := MountOpts{
MountLabel: mountLabel,
Options: []string{"ro"},
}
parentFs, err = driver.Get(parent, options)
if err != nil {
return nil, err
}
defer driverPut(driver, parent, &retErr)
}
return archive.ChangesDirs(layerFs, idMappings, parentFs, parentMappings)
}
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts) (size int64, err error) {
driver := gdw.ProtoDriver
if options.Mappings == nil {
options.Mappings = &idtools.IDMappings{}
}
// Mount the root filesystem so we can apply the diff/layer.
mountOpts := MountOpts{
MountLabel: options.MountLabel,
}
layerFs, err := driver.Get(id, mountOpts)
if err != nil {
return
}
defer driverPut(driver, id, &err)
defaultForceMask := os.FileMode(0o700)
var forceMask *os.FileMode // = nil
if runtime.GOOS == "darwin" {
forceMask = &defaultForceMask
}
tarOptions := &archive.TarOptions{
InUserNS: unshare.IsRootless(),
IgnoreChownErrors: options.IgnoreChownErrors,
ForceMask: forceMask,
}
if options.Mappings != nil {
tarOptions.UIDMaps = options.Mappings.UIDs()
tarOptions.GIDMaps = options.Mappings.GIDs()
}
start := time.Now().UTC()
logrus.Debug("Start untar layer")
if size, err = ApplyUncompressedLayer(layerFs, options.Diff, tarOptions); err != nil {
logrus.Errorf("While applying layer: %s", err)
return
}
logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
return
}
// DiffSize calculates the changes between the specified layer
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
func (gdw *NaiveDiffDriver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) {
driver := gdw.ProtoDriver
if idMappings == nil {
idMappings = &idtools.IDMappings{}
}
if parentMappings == nil {
parentMappings = &idtools.IDMappings{}
}
changes, err := gdw.Changes(id, idMappings, parent, parentMappings, mountLabel)
if err != nil {
return
}
options := MountOpts{
MountLabel: mountLabel,
}
layerFs, err := driver.Get(id, options)
if err != nil {
return
}
defer driverPut(driver, id, &err)
return archive.ChangesSize(layerFs, changes), nil
}
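
A minimal sketch of how a ProtoDriver-only backend (the vfs driver works this way) gets promoted to a full Driver via this wrapper; newDemoProto and its type are hypothetical, assumed to implement both graphdriver.ProtoDriver and graphdriver.LayerIDMapUpdater:

func newDemoDriver(home string, options graphdriver.Options) (graphdriver.Driver, error) {
	d, err := newDemoProto(home, options) // hypothetical constructor
	if err != nil {
		return nil, err
	}
	// The same value serves as both the ProtoDriver and the updater, so the
	// wrapper fills in Diff/Changes/ApplyDiff/DiffSize naively via mounts.
	return graphdriver.NewNaiveDiffDriver(d, d), nil
}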


@ -0,0 +1,5 @@
package graphdriver
import jsoniter "github.com/json-iterator/go"
var json = jsoniter.ConfigCompatibleWithStandardLibrary


@ -0,0 +1,310 @@
//go:build linux
// +build linux
package overlay
import (
"errors"
"fmt"
"os"
"path"
"path/filepath"
"syscall"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idmap"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/unshare"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
// doesSupportNativeDiff checks whether the filesystem has a bug
// which copies up the opaque flag when copying up an opaque
// directory, or whether the kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled.
// In either case the naive diff should be used.
func doesSupportNativeDiff(d, mountOpts string) error {
td, err := os.MkdirTemp(d, "opaque-bug-check")
if err != nil {
return err
}
defer func() {
if err := os.RemoveAll(td); err != nil {
logrus.Warnf("Failed to remove check directory %v: %v", td, err)
}
}()
// Make directories l1/d, l1/d1, l2/d, l3, work, merged
if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0o755); err != nil {
return err
}
if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0o755); err != nil {
return err
}
if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0o755); err != nil {
return err
}
if err := os.Mkdir(filepath.Join(td, "l3"), 0o755); err != nil {
return err
}
if err := os.Mkdir(filepath.Join(td, "work"), 0o755); err != nil {
return err
}
if err := os.Mkdir(filepath.Join(td, "merged"), 0o755); err != nil {
return err
}
// Mark l2/d as opaque
if err := system.Lsetxattr(filepath.Join(td, "l2", "d"), archive.GetOverlayXattrName("opaque"), []byte("y"), 0); err != nil {
return fmt.Errorf("failed to set opaque flag on middle layer: %w", err)
}
mountFlags := "lowerdir=%s:%s,upperdir=%s,workdir=%s"
if unshare.IsRootless() {
mountFlags = mountFlags + ",userxattr"
}
opts := fmt.Sprintf(mountFlags, path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work"))
flags, data := mount.ParseOptions(mountOpts)
if data != "" {
opts = fmt.Sprintf("%s,%s", opts, data)
}
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil {
return fmt.Errorf("failed to mount overlay: %w", err)
}
defer func() {
if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err)
}
}()
// Touch file in d to force copy up of opaque directory "d" from "l2" to "l3"
if err := os.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0o644); err != nil {
return fmt.Errorf("failed to write to merged directory: %w", err)
}
// Check l3/d does not have opaque flag
xattrOpaque, err := system.Lgetxattr(filepath.Join(td, "l3", "d"), archive.GetOverlayXattrName("opaque"))
if err != nil {
return fmt.Errorf("failed to read opaque flag on upper layer: %w", err)
}
if string(xattrOpaque) == "y" {
return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix")
}
// rename "d1" to "d2"
if err := os.Rename(filepath.Join(td, "merged", "d1"), filepath.Join(td, "merged", "d2")); err != nil {
// if rename failed with syscall.EXDEV, the kernel doesn't have CONFIG_OVERLAY_FS_REDIRECT_DIR enabled
if err.(*os.LinkError).Err == syscall.EXDEV {
return nil
}
return fmt.Errorf("failed to rename dir in merged directory: %w", err)
}
// get the xattr of "d2"
xattrRedirect, err := system.Lgetxattr(filepath.Join(td, "l3", "d2"), archive.GetOverlayXattrName("redirect"))
if err != nil {
return fmt.Errorf("failed to read redirect flag on upper layer: %w", err)
}
if string(xattrRedirect) == "d1" {
return errors.New("kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled")
}
return nil
}
// doesMetacopy checks if the filesystem is going to optimize changes to
// metadata by using nodes marked with an "overlay.metacopy" attribute to avoid
// copying up a file from a lower layer unless/until its contents are being
// modified
func doesMetacopy(d, mountOpts string) (bool, error) {
td, err := os.MkdirTemp(d, "metacopy-check")
if err != nil {
return false, err
}
defer func() {
if err := os.RemoveAll(td); err != nil {
logrus.Warnf("Failed to remove check directory %v: %v", td, err)
}
}()
// Make directories l1, l2, work, merged
if err := os.MkdirAll(filepath.Join(td, "l1"), 0o755); err != nil {
return false, err
}
if err := ioutils.AtomicWriteFile(filepath.Join(td, "l1", "f"), []byte{0xff}, 0o700); err != nil {
return false, err
}
if err := os.MkdirAll(filepath.Join(td, "l2"), 0o755); err != nil {
return false, err
}
if err := os.Mkdir(filepath.Join(td, "work"), 0o755); err != nil {
return false, err
}
if err := os.Mkdir(filepath.Join(td, "merged"), 0o755); err != nil {
return false, err
}
// Mount using the mandatory options and configured options
opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", path.Join(td, "l1"), path.Join(td, "l2"), path.Join(td, "work"))
if unshare.IsRootless() {
opts = fmt.Sprintf("%s,userxattr", opts)
}
flags, data := mount.ParseOptions(mountOpts)
if data != "" {
opts = fmt.Sprintf("%s,%s", opts, data)
}
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil {
if errors.Is(err, unix.EINVAL) {
logrus.Infof("overlay: metacopy option not supported on this kernel, checked using options %q", mountOpts)
return false, nil
}
return false, fmt.Errorf("failed to mount overlay for metacopy check with %q options: %w", mountOpts, err)
}
defer func() {
if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err)
}
}()
// Make a change that only impacts the inode, and check if the pulled-up copy is marked
// as a metadata-only copy
if err := os.Chmod(filepath.Join(td, "merged", "f"), 0o600); err != nil {
return false, fmt.Errorf("changing permissions on file for metacopy check: %w", err)
}
metacopy, err := system.Lgetxattr(filepath.Join(td, "l2", "f"), archive.GetOverlayXattrName("metacopy"))
if err != nil {
if errors.Is(err, unix.ENOTSUP) {
logrus.Info("metacopy option not supported")
return false, nil
}
return false, fmt.Errorf("metacopy flag was not set on file in upper layer: %w", err)
}
return metacopy != nil, nil
}
// doesVolatile checks if the filesystem supports the "volatile" mount option
func doesVolatile(d string) (bool, error) {
td, err := os.MkdirTemp(d, "volatile-check")
if err != nil {
return false, err
}
defer func() {
if err := os.RemoveAll(td); err != nil {
logrus.Warnf("Failed to remove check directory %v: %v", td, err)
}
}()
if err := os.MkdirAll(filepath.Join(td, "lower"), 0o755); err != nil {
return false, err
}
if err := os.MkdirAll(filepath.Join(td, "upper"), 0o755); err != nil {
return false, err
}
if err := os.Mkdir(filepath.Join(td, "work"), 0o755); err != nil {
return false, err
}
if err := os.Mkdir(filepath.Join(td, "merged"), 0o755); err != nil {
return false, err
}
// Mount using the mandatory options and configured options
opts := fmt.Sprintf("volatile,lowerdir=%s,upperdir=%s,workdir=%s", path.Join(td, "lower"), path.Join(td, "upper"), path.Join(td, "work"))
if unshare.IsRootless() {
opts = fmt.Sprintf("%s,userxattr", opts)
}
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil {
return false, fmt.Errorf("failed to mount overlay for volatile check: %w", err)
}
defer func() {
if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err)
}
}()
return true, nil
}
// supportsIdmappedLowerLayers checks if the kernel supports mounting overlay on top of
// an idmapped lower layer.
func supportsIdmappedLowerLayers(home string) (bool, error) {
layerDir, err := os.MkdirTemp(home, "compat")
if err != nil {
return false, err
}
defer func() {
_ = os.RemoveAll(layerDir)
}()
mergedDir := filepath.Join(layerDir, "merged")
lowerDir := filepath.Join(layerDir, "lower")
lowerMappedDir := filepath.Join(layerDir, "lower-mapped")
upperDir := filepath.Join(layerDir, "upper")
workDir := filepath.Join(layerDir, "work")
_ = idtools.MkdirAs(mergedDir, 0o700, 0, 0)
_ = idtools.MkdirAs(lowerDir, 0o700, 0, 0)
_ = idtools.MkdirAs(lowerMappedDir, 0o700, 0, 0)
_ = idtools.MkdirAs(upperDir, 0o700, 0, 0)
_ = idtools.MkdirAs(workDir, 0o700, 0, 0)
mapping := []idtools.IDMap{
{
ContainerID: 0,
HostID: 0,
Size: 1,
},
}
pid, cleanupFunc, err := idmap.CreateUsernsProcess(mapping, mapping)
if err != nil {
return false, err
}
defer cleanupFunc()
if err := idmap.CreateIDMappedMount(lowerDir, lowerMappedDir, int(pid)); err != nil {
return false, fmt.Errorf("create mapped mount: %w", err)
}
defer unix.Unmount(lowerMappedDir, unix.MNT_DETACH)
opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerMappedDir, upperDir, workDir)
flags := uintptr(0)
if err := unix.Mount("overlay", mergedDir, "overlay", flags, opts); err != nil {
return false, err
}
defer func() {
_ = unix.Unmount(mergedDir, unix.MNT_DETACH)
}()
return true, nil
}
// supportsDataOnlyLayers checks if the kernel supports mounting an overlay file system
// that uses data-only layers.
func supportsDataOnlyLayers(home string) (bool, error) {
layerDir, err := os.MkdirTemp(home, "compat")
if err != nil {
return false, err
}
defer func() {
_ = os.RemoveAll(layerDir)
}()
mergedDir := filepath.Join(layerDir, "merged")
lowerDir := filepath.Join(layerDir, "lower")
lowerDirDataOnly := filepath.Join(layerDir, "lower-data")
upperDir := filepath.Join(layerDir, "upper")
workDir := filepath.Join(layerDir, "work")
_ = idtools.MkdirAs(mergedDir, 0o700, 0, 0)
_ = idtools.MkdirAs(lowerDir, 0o700, 0, 0)
_ = idtools.MkdirAs(lowerDirDataOnly, 0o700, 0, 0)
_ = idtools.MkdirAs(upperDir, 0o700, 0, 0)
_ = idtools.MkdirAs(workDir, 0o700, 0, 0)
opts := fmt.Sprintf("lowerdir=%s::%s,upperdir=%s,workdir=%s,metacopy=on", lowerDir, lowerDirDataOnly, upperDir, workDir)
flags := uintptr(0)
if err := unix.Mount("overlay", mergedDir, "overlay", flags, opts); err != nil {
return false, err
}
_ = unix.Unmount(mergedDir, unix.MNT_DETACH)
return true, nil
}
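
A hedged sketch of how these probes might be consulted during driver init. The Driver fields used here (usingMetacopy, supportsVolatile, useNaiveDiff) are assumed names; the real vendored driver stores equivalent state:

func applyFeatureProbes(d *Driver, home, mountOpts string) error {
	var err error
	if d.usingMetacopy, err = doesMetacopy(home, mountOpts); err != nil {
		return err
	}
	if d.supportsVolatile, err = doesVolatile(home); err != nil {
		d.supportsVolatile = false // a failed probe means "not supported"
	}
	if err := doesSupportNativeDiff(home, mountOpts); err != nil {
		logrus.Warnf("Not using native diff for overlay: %v", err)
		d.useNaiveDiff = true
	}
	return nil
}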


@ -0,0 +1,45 @@
//go:build linux
// +build linux
package overlay
import (
"errors"
"io/fs"
"path/filepath"
"strings"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/system"
"golang.org/x/sys/unix"
)
func scanForMountProgramIndicators(home string) (detected bool, err error) {
err = filepath.WalkDir(home, func(path string, d fs.DirEntry, err error) error {
if detected {
return fs.SkipDir
}
if err != nil {
return err
}
basename := filepath.Base(path)
if strings.HasPrefix(basename, archive.WhiteoutPrefix) {
detected = true
return fs.SkipDir
}
if d.IsDir() {
xattrs, err := system.Llistxattr(path)
if err != nil && !errors.Is(err, unix.EOPNOTSUPP) {
return err
}
for _, xattr := range xattrs {
if strings.HasPrefix(xattr, "user.fuseoverlayfs.") || strings.HasPrefix(xattr, "user.containers.") {
detected = true
return fs.SkipDir
}
}
}
return nil
})
return detected, err
}


@ -0,0 +1,24 @@
//go:build !linux || !composefs || !cgo
// +build !linux !composefs !cgo
package overlay
import (
"fmt"
)
func composeFsSupported() bool {
return false
}
func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error {
return fmt.Errorf("composefs is not supported")
}
func mountComposefsBlob(dataDir, mountPoint string) error {
return fmt.Errorf("composefs is not supported")
}
func enableVerityRecursive(path string) (map[string]string, error) {
return nil, fmt.Errorf("composefs is not supported")
}


@ -0,0 +1,219 @@
//go:build linux && composefs && cgo
// +build linux,composefs,cgo
package overlay
import (
"encoding/binary"
"errors"
"fmt"
"io/fs"
"os"
"os/exec"
"path/filepath"
"sync"
"syscall"
"unsafe"
"github.com/containers/storage/pkg/chunked/dump"
"github.com/containers/storage/pkg/loopback"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
var (
composeFsHelperOnce sync.Once
composeFsHelperPath string
composeFsHelperErr error
)
func getComposeFsHelper() (string, error) {
composeFsHelperOnce.Do(func() {
composeFsHelperPath, composeFsHelperErr = exec.LookPath("mkcomposefs")
})
return composeFsHelperPath, composeFsHelperErr
}
func composeFsSupported() bool {
_, err := getComposeFsHelper()
return err == nil
}
func enableVerity(description string, fd int) error {
enableArg := unix.FsverityEnableArg{
Version: 1,
Hash_algorithm: unix.FS_VERITY_HASH_ALG_SHA256,
Block_size: 4096,
}
_, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_ENABLE_VERITY), uintptr(unsafe.Pointer(&enableArg)))
if e1 != 0 && !errors.Is(e1, unix.EEXIST) {
return fmt.Errorf("failed to enable verity for %q: %w", description, e1)
}
return nil
}
type verityDigest struct {
Fsv unix.FsverityDigest
Buf [64]byte
}
func measureVerity(description string, fd int) (string, error) {
var digest verityDigest
digest.Fsv.Size = 64
_, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_MEASURE_VERITY), uintptr(unsafe.Pointer(&digest)))
if e1 != 0 {
return "", fmt.Errorf("failed to measure verity for %q: %w", description, e1)
}
return fmt.Sprintf("%x", digest.Buf[:digest.Fsv.Size]), nil
}
func enableVerityRecursive(root string) (map[string]string, error) {
digests := make(map[string]string)
walkFn := func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if !d.Type().IsRegular() {
return nil
}
f, err := os.Open(path)
if err != nil {
return err
}
defer f.Close()
if err := enableVerity(path, int(f.Fd())); err != nil {
return err
}
verity, err := measureVerity(path, int(f.Fd()))
if err != nil {
return err
}
relPath, err := filepath.Rel(root, path)
if err != nil {
return err
}
digests[relPath] = verity
return nil
}
err := filepath.WalkDir(root, walkFn)
return digests, err
}
func getComposefsBlob(dataDir string) string {
return filepath.Join(dataDir, "composefs.blob")
}
func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error {
if err := os.MkdirAll(composefsDir, 0o700); err != nil {
return err
}
dumpReader, err := dump.GenerateDump(toc, verityDigests)
if err != nil {
return err
}
destFile := getComposefsBlob(composefsDir)
writerJson, err := getComposeFsHelper()
if err != nil {
return fmt.Errorf("failed to find mkcomposefs: %w", err)
}
fd, err := unix.Openat(unix.AT_FDCWD, destFile, unix.O_WRONLY|unix.O_CREAT|unix.O_TRUNC|unix.O_EXCL|unix.O_CLOEXEC, 0o644)
if err != nil {
return fmt.Errorf("failed to open output file %q: %w", destFile, err)
}
outFd := os.NewFile(uintptr(fd), "outFd")
fd, err = unix.Open(fmt.Sprintf("/proc/self/fd/%d", outFd.Fd()), unix.O_RDONLY|unix.O_CLOEXEC, 0)
if err != nil {
outFd.Close()
return fmt.Errorf("failed to dup output file: %w", err)
}
newFd := os.NewFile(uintptr(fd), "newFd")
defer newFd.Close()
err = func() error {
// a scope to close outFd before setting fsverity on the read-only fd.
defer outFd.Close()
cmd := exec.Command(writerJson, "--from-file", "-", "/proc/self/fd/3")
cmd.ExtraFiles = []*os.File{outFd}
cmd.Stderr = os.Stderr
cmd.Stdin = dumpReader
if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to convert json to erofs: %w", err)
}
return nil
}()
if err != nil {
return err
}
if err := enableVerity("manifest file", int(newFd.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) {
logrus.Warningf("%s", err)
}
return nil
}
/*
typedef enum {
LCFS_EROFS_FLAGS_HAS_ACL = (1 << 0),
} lcfs_erofs_flag_t;
struct lcfs_erofs_header_s {
uint32_t magic;
uint32_t version;
uint32_t flags;
uint32_t unused[5];
} __attribute__((__packed__));
*/
// hasACL returns true if the erofs blob has ACLs enabled
func hasACL(path string) (bool, error) {
const LCFS_EROFS_FLAGS_HAS_ACL = (1 << 0)
fd, err := unix.Openat(unix.AT_FDCWD, path, unix.O_RDONLY|unix.O_CLOEXEC, 0)
if err != nil {
return false, err
}
defer unix.Close(fd)
// do not worry about checking the magic number, if the file is invalid
// we will fail to mount it anyway
flags := make([]byte, 4)
nread, err := unix.Pread(fd, flags, 8)
if err != nil {
return false, err
}
if nread != 4 {
return false, fmt.Errorf("failed to read flags from %q", path)
}
return binary.LittleEndian.Uint32(flags)&LCFS_EROFS_FLAGS_HAS_ACL != 0, nil
}
func mountComposefsBlob(dataDir, mountPoint string) error {
blobFile := getComposefsBlob(dataDir)
loop, err := loopback.AttachLoopDeviceRO(blobFile)
if err != nil {
return err
}
defer loop.Close()
hasACL, err := hasACL(blobFile)
if err != nil {
return err
}
mountOpts := "ro"
if !hasACL {
mountOpts += ",noacl"
}
return unix.Mount(loop.Name(), mountPoint, "erofs", unix.MS_RDONLY, mountOpts)
}
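
Tying the three entry points above together, a sketch of how a caller might gate the composefs path on the helper being available before generating and mounting the blob; all four inputs are assumed to come from layer extraction and the driver's on-disk layout:

func mountWithComposefs(verityDigests map[string]string, toc interface{}, composefsDir, mountPoint string) error {
	if !composeFsSupported() {
		// mkcomposefs is not on PATH; the caller would fall back to a
		// plain overlay mount instead.
		return fmt.Errorf("mkcomposefs not found")
	}
	if err := generateComposeFsBlob(verityDigests, toc, composefsDir); err != nil {
		return err
	}
	return mountComposefsBlob(composefsDir, mountPoint)
}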


@ -0,0 +1,8 @@
//go:build linux
// +build linux
package overlay
import jsoniter "github.com/json-iterator/go"
var json = jsoniter.ConfigCompatibleWithStandardLibrary


@ -0,0 +1,189 @@
//go:build linux
// +build linux
package overlay
import (
"bytes"
"flag"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/containers/storage/pkg/reexec"
"golang.org/x/sys/unix"
)
func init() {
reexec.Register("storage-mountfrom", mountOverlayFromMain)
}
func fatal(err error) {
fmt.Fprint(os.Stderr, err)
os.Exit(1)
}
type mountOptions struct {
Device string
Target string
Type string
Label string
Flag uint32
}
func mountOverlayFrom(dir, device, target, mType string, flags uintptr, label string) error {
options := &mountOptions{
Device: device,
Target: target,
Type: mType,
Flag: uint32(flags),
Label: label,
}
cmd := reexec.Command("storage-mountfrom", dir)
w, err := cmd.StdinPipe()
if err != nil {
return fmt.Errorf("mountfrom error on pipe creation: %w", err)
}
output := bytes.NewBuffer(nil)
cmd.Stdout = output
cmd.Stderr = output
if err := cmd.Start(); err != nil {
w.Close()
return fmt.Errorf("mountfrom error on re-exec cmd: %w", err)
}
// write the options to the pipe for the untar exec to read
if err := json.NewEncoder(w).Encode(options); err != nil {
w.Close()
return fmt.Errorf("mountfrom json encode to pipe failed: %w", err)
}
w.Close()
if err := cmd.Wait(); err != nil {
return fmt.Errorf("mountfrom re-exec output: %s: error: %w", output, err)
}
return nil
}
// mountOverlayFromMain is the entry-point for storage-mountfrom on re-exec.
func mountOverlayFromMain() {
runtime.LockOSThread()
flag.Parse()
var options *mountOptions
if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil {
fatal(err)
}
// Mount the arguments passed from the specified directory. Some of the
// paths mentioned in the values we pass to the kernel are relative to
// the specified directory.
homedir := flag.Arg(0)
if err := os.Chdir(homedir); err != nil {
fatal(err)
}
pageSize := unix.Getpagesize()
if len(options.Label) < pageSize {
if err := unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil {
fatal(err)
}
os.Exit(0)
}
// Those arguments still took up too much space. Open the diff
// directories and use their descriptor numbers as lowers, using
// /proc/self/fd as the current directory.
// Split out the various options, since we need to manipulate the
// paths, but we don't want to mess with other options.
var upperk, upperv, workk, workv, lowerk, lowerv, labelk, labelv, others string
for _, arg := range strings.Split(options.Label, ",") {
kv := strings.SplitN(arg, "=", 2)
switch kv[0] {
case "upperdir":
upperk = "upperdir="
upperv = kv[1]
case "workdir":
workk = "workdir="
workv = kv[1]
case "lowerdir":
lowerk = "lowerdir="
lowerv = kv[1]
case "label":
labelk = "label="
labelv = kv[1]
default:
if others == "" {
others = arg
} else {
others = others + "," + arg
}
}
}
// Make sure upperdir, workdir, and the target are absolute paths.
if upperv != "" && !filepath.IsAbs(upperv) {
upperv = filepath.Join(homedir, upperv)
}
if workv != "" && !filepath.IsAbs(workv) {
workv = filepath.Join(homedir, workv)
}
if !filepath.IsAbs(options.Target) {
options.Target = filepath.Join(homedir, options.Target)
}
// Get a descriptor for each lower, and use that descriptor's name as
// the new value for the list of lowers, because it's shorter.
if lowerv != "" {
lowers := strings.Split(lowerv, ":")
var newLowers []string
dataOnly := false
for _, lowerPath := range lowers {
if lowerPath == "" {
dataOnly = true
continue
}
lowerFd, err := unix.Open(lowerPath, unix.O_RDONLY, 0)
if err != nil {
fatal(err)
}
var lower string
if dataOnly {
lower = fmt.Sprintf(":%d", lowerFd)
dataOnly = false
} else {
lower = fmt.Sprintf("%d", lowerFd)
}
newLowers = append(newLowers, lower)
}
lowerv = strings.Join(newLowers, ":")
}
// Reconstruct the Label field.
options.Label = upperk + upperv + "," + workk + workv + "," + lowerk + lowerv + "," + labelk + labelv + "," + others
options.Label = strings.ReplaceAll(options.Label, ",,", ",")
// Okay, try this, if we managed to make the arguments fit.
var err error
if len(options.Label) < pageSize {
if err := os.Chdir("/proc/self/fd"); err != nil {
fatal(err)
}
err = unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label)
} else {
err = fmt.Errorf("cannot mount layer, mount data %q too large %d >= page size %d", options.Label, len(options.Label), pageSize)
}
// Clean up.
if err != nil {
fmt.Fprintf(os.Stderr, "creating overlay mount to %s, mount_data=%q\n", options.Target, options.Label)
fatal(err)
}
os.Exit(0)
}
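
The re-exec dance exists because mount(2) rejects option strings of a page size or larger. A sketch of the decision a driver might make when its assembled lowerdir list gets long; the method, its parameters, and the d.home field are assumed names:

func (d *Driver) mountMerged(mergedDir, lowers, upper, work string) error {
	mountData := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowers, upper, work)
	if len(mountData) >= unix.Getpagesize() {
		// Too long for mount(2): re-exec in d.home so the child can shorten
		// the paths (relative paths first, then fd-based lowers).
		return mountOverlayFrom(d.home, "overlay", mergedDir, "overlay", 0, mountData)
	}
	return unix.Mount("overlay", mergedDir, "overlay", 0, mountData)
}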

File diff suppressed because it is too large

@ -0,0 +1,22 @@
//go:build linux && cgo
// +build linux,cgo
package overlay
import (
"path"
"github.com/containers/storage/pkg/directory"
)
// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
// For Overlay, it attempts to check the XFS quota for size, and falls back to
// finding the size of the "diff" directory.
func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
usage := &directory.DiskUsage{}
if d.quotaCtl != nil {
err := d.quotaCtl.GetDiskUsage(d.dir(id), usage)
return usage, err
}
return directory.Usage(path.Join(d.dir(id), "diff"))
}


@ -0,0 +1,17 @@
//go:build linux && !cgo
// +build linux,!cgo
package overlay
import (
"path"
"github.com/containers/storage/pkg/directory"
)
// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
// For Overlay, it attempts to check the XFS quota for size, and falls back to
// finding the size of the "diff" directory.
func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
return directory.Usage(path.Join(d.dir(id), "diff"))
}


@ -0,0 +1,8 @@
//go:build !linux
// +build !linux
package overlay
func SupportsNativeOverlay(graphroot, rundir string) (bool, error) {
return false, nil
}


@ -0,0 +1,82 @@
//go:build linux
// +build linux
package overlay
import (
"crypto/rand"
"encoding/base32"
"fmt"
"io"
"os"
"syscall"
"time"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
// generateID creates a new random string identifier with the given length
func generateID(l int) string {
const (
// ensures we back off for less than 450ms total. Use the following to
// select a new value, in units of 10ms:
// n*(n+1)/2 = d -> n^2 + n - 2d = 0 -> n = (sqrt(8d + 1) - 1)/2
maxretries = 9
backoff = time.Millisecond * 10
)
var (
totalBackoff time.Duration
count int
retries int
size = (l*5 + 7) / 8
u = make([]byte, size)
)
// TODO: Include time component, counter component, random component
for {
// This should never block but the read may fail. Because of this,
// we just try to read the random number generator until we get
// something. This is a very rare condition but may happen.
b := time.Duration(retries) * backoff
time.Sleep(b)
totalBackoff += b
n, err := io.ReadFull(rand.Reader, u[count:])
if err != nil {
if retryOnError(err) && retries < maxretries {
count += n
retries++
logrus.Errorf("Generating random ID, retrying: %v", err)
continue
}
// Any other errors represent a system problem. What did someone
// do to /dev/urandom?
panic(fmt.Errorf("reading random number generator, retried for %v: %w", totalBackoff.String(), err))
}
break
}
s := base32.StdEncoding.EncodeToString(u)
return s[:l]
}
// retryOnError tries to detect whether or not retrying would be fruitful.
func retryOnError(err error) bool {
switch err := err.(type) {
case *os.PathError:
return retryOnError(err.Err) // unpack the target error
case syscall.Errno:
if err == unix.EPERM {
// EPERM represents an entropy pool exhaustion, a condition under
// which we backoff and retry.
return true
}
}
return false
}
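As a check on the backoff comment above: with a budget of d = 45 units of 10ms, n = (sqrt(8*45 + 1) - 1)/2 = (19 - 1)/2 = 9, and the sleeps of 0ms, 10ms, up to 90ms sum to exactly 450ms. A tiny sketch verifying the bound:

package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		maxretries = 9
		backoff    = 10 * time.Millisecond
	)
	var total time.Duration
	// Mirrors generateID: one sleep per attempt, growing linearly.
	for retries := 0; retries <= maxretries; retries++ {
		total += time.Duration(retries) * backoff
	}
	fmt.Println(total) // 450ms
}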

View file

@@ -0,0 +1,20 @@
//go:build linux
// +build linux

package overlayutils
import (
"fmt"
graphdriver "github.com/containers/storage/drivers"
)
// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type.
func ErrDTypeNotSupported(driver, backingFs string) error {
msg := fmt.Sprintf("%s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.", driver, backingFs)
if backingFs == "xfs" {
msg += " Reformat the filesystem with ftype=1 to enable d_type support."
}
msg += " Running without d_type is not supported."
return fmt.Errorf("%s: %w", msg, graphdriver.ErrNotSupported)
}
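Since the message wraps graphdriver.ErrNotSupported via %w, callers can test for the sentinel with errors.Is instead of string matching; a minimal sketch:

package main

import (
	"errors"
	"fmt"

	graphdriver "github.com/containers/storage/drivers"
	"github.com/containers/storage/drivers/overlayutils"
)

func main() {
	err := overlayutils.ErrDTypeNotSupported("overlay", "xfs")
	// errors.Is sees through the fmt.Errorf %w wrapping.
	if errors.Is(err, graphdriver.ErrNotSupported) {
		fmt.Println("driver not supported on this backing filesystem:", err)
	}
}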

View file

@@ -0,0 +1,453 @@
//go:build linux && !exclude_disk_quota && cgo
// +build linux,!exclude_disk_quota,cgo

//
// projectquota.go - implements XFS project quota controls
// for setting quota limits on a newly created directory.
// It currently supports the legacy XFS specific ioctls.
//
// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR
// for both xfs/ext4 for kernel version >= v4.5
//
package quota
/*
#include <stdlib.h>
#include <dirent.h>
#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#ifndef FS_XFLAG_PROJINHERIT
struct fsxattr {
__u32 fsx_xflags;
__u32 fsx_extsize;
__u32 fsx_nextents;
__u32 fsx_projid;
unsigned char fsx_pad[12];
};
#define FS_XFLAG_PROJINHERIT 0x00000200
#endif
#ifndef FS_IOC_FSGETXATTR
#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr)
#endif
#ifndef FS_IOC_FSSETXATTR
#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr)
#endif
#ifndef PRJQUOTA
#define PRJQUOTA 2
#endif
#ifndef FS_PROJ_QUOTA
#define FS_PROJ_QUOTA 2
#endif
#ifndef Q_XSETPQLIM
#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA)
#endif
#ifndef Q_XGETPQUOTA
#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA)
#endif
*/
import "C"
import (
"errors"
"fmt"
"math"
"os"
"path"
"path/filepath"
"sync"
"syscall"
"unsafe"
"github.com/containers/storage/pkg/directory"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
const projectIDsAllocatedPerQuotaHome = 10000
// BackingFsBlockDeviceLink is the name of a file that we place in
// the home directory of a driver that uses this package.
const BackingFsBlockDeviceLink = "backingFsBlockDev"
// Quota limit params - currently we only control blocks hard limit and inodes
type Quota struct {
Size uint64
Inodes uint64
}
// Control - Context to be used by storage driver (e.g. overlay)
// who wants to apply project quotas to container dirs
type Control struct {
backingFsBlockDev string
nextProjectID uint32
quotas *sync.Map
basePath string
}
// Attempt to generate a unique project id. Multiple directories
// per file system can have quotas, and each needs a group of unique
// ids. This function attempts to allocate at least
// projectIDsAllocatedPerQuotaHome (10000) unique project ids, based
// on the inode of the basepath.
func generateUniqueProjectID(path string) (uint32, error) {
fileinfo, err := os.Stat(path)
if err != nil {
return 0, err
}
stat, ok := fileinfo.Sys().(*syscall.Stat_t)
if !ok {
return 0, fmt.Errorf("not a syscall.Stat_t %s", path)
}
projectID := projectIDsAllocatedPerQuotaHome + (stat.Ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome)
return uint32(projectID), nil
}
// NewControl - initialize project quota support.
// Test to make sure that quota can be set on a test dir and find
// the first project id to be used for the next container create.
//
// Returns nil (and error) if project quota is not supported.
//
// First get the project id of the basePath directory.
// This test will fail if the backing fs is not xfs.
//
// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.:
// echo 100000:/var/lib/containers/storage/overlay >> /etc/projects
// echo 200000:/var/lib/containers/storage/volumes >> /etc/projects
// echo storage:100000 >> /etc/projid
// echo volumes:200000 >> /etc/projid
// xfs_quota -x -c 'project -s storage volumes' /<xfs mount point>
//
// In the example above, the storage directory project id will be used as a
// "start offset" and all containers will be assigned larger project ids
// (e.g. >= 100000). Then the volumes directory project id will be used as a
// "start offset" and all volumes will be assigned larger project ids
// (e.g. >= 200000).
// This is a way to prevent xfs_quota management from conflicting with
// containers/storage.
// Then try to create a test directory with the next project id and set a quota
// on it. If that works, continue to scan existing containers to map allocated
// project ids.
func NewControl(basePath string) (*Control, error) {
//
// Get project id of parent dir as minimal id to be used by driver
//
minProjectID, err := getProjectID(basePath)
if err != nil {
return nil, err
}
if minProjectID == 0 {
// Indicates the storage was never initialized
// Generate a unique range of Projectids for this basepath
minProjectID, err = generateUniqueProjectID(basePath)
if err != nil {
return nil, err
}
}
//
// create backing filesystem device node
//
backingFsBlockDev, err := makeBackingFsDev(basePath)
if err != nil {
return nil, err
}
//
// Test if filesystem supports project quotas by trying to set
// a quota on the first available project id
//
quota := Quota{
Size: 0,
Inodes: 0,
}
q := Control{
backingFsBlockDev: backingFsBlockDev,
nextProjectID: minProjectID + 1,
quotas: &sync.Map{},
basePath: basePath,
}
if err := q.setProjectQuota(minProjectID, quota); err != nil {
return nil, err
}
//
// get first project id to be used for next container
//
err = q.findNextProjectID()
if err != nil {
return nil, err
}
logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID)
return &q, nil
}
// SetQuota - assign a unique project id to directory and set the quota limits
// for that project id
func (q *Control) SetQuota(targetPath string, quota Quota) error {
var projectID uint32
value, ok := q.quotas.Load(targetPath)
if ok {
projectID, ok = value.(uint32)
}
if !ok {
projectID = q.nextProjectID
//
// assign project id to new container directory
//
err := setProjectID(targetPath, projectID)
if err != nil {
return err
}
q.quotas.Store(targetPath, projectID)
q.nextProjectID++
}
//
// set the quota limit for the container's project id
//
logrus.Debugf("SetQuota path=%s, size=%d, inodes=%d, projectID=%d", targetPath, quota.Size, quota.Inodes, projectID)
return q.setProjectQuota(projectID, quota)
}
// ClearQuota removes the map entry in the quotas map for targetPath.
// It does so to prevent the map leaking entries as directories are deleted.
func (q *Control) ClearQuota(targetPath string) {
q.quotas.Delete(targetPath)
}
// setProjectQuota - set the quota for project id on xfs block device
func (q *Control) setProjectQuota(projectID uint32, quota Quota) error {
var d C.fs_disk_quota_t
d.d_version = C.FS_DQUOT_VERSION
d.d_id = C.__u32(projectID)
d.d_flags = C.FS_PROJ_QUOTA
if quota.Size > 0 {
d.d_fieldmask = d.d_fieldmask | C.FS_DQ_BHARD | C.FS_DQ_BSOFT
d.d_blk_hardlimit = C.__u64(quota.Size / 512)
d.d_blk_softlimit = d.d_blk_hardlimit
}
if quota.Inodes > 0 {
d.d_fieldmask = d.d_fieldmask | C.FS_DQ_IHARD | C.FS_DQ_ISOFT
d.d_ino_hardlimit = C.__u64(quota.Inodes)
d.d_ino_softlimit = d.d_ino_hardlimit
}
cs := C.CString(q.backingFsBlockDev)
defer C.free(unsafe.Pointer(cs))
runQuotactl := func() syscall.Errno {
_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM,
uintptr(unsafe.Pointer(cs)), uintptr(d.d_id),
uintptr(unsafe.Pointer(&d)), 0, 0)
return errno
}
errno := runQuotactl()
// If the backingFsBlockDev does not exist any more then try to recreate it.
if errors.Is(errno, unix.ENOENT) {
if _, err := makeBackingFsDev(q.basePath); err != nil {
return fmt.Errorf(
"failed to recreate missing backingFsBlockDev %s for projid %d: %w",
q.backingFsBlockDev, projectID, err,
)
}
if errno := runQuotactl(); errno != 0 {
return fmt.Errorf("failed to set quota limit for projid %d on %s after backingFsBlockDev recreation: %w",
projectID, q.backingFsBlockDev, errno)
}
} else if errno != 0 {
return fmt.Errorf("failed to set quota limit for projid %d on %s: %w",
projectID, q.backingFsBlockDev, errno)
}
return nil
}
// GetQuota - get the quota limits of a directory that was configured with SetQuota
func (q *Control) GetQuota(targetPath string, quota *Quota) error {
d, err := q.fsDiskQuotaFromPath(targetPath)
if err != nil {
return err
}
quota.Size = uint64(d.d_blk_hardlimit) * 512
quota.Inodes = uint64(d.d_ino_hardlimit)
return nil
}
// GetDiskUsage - get the current disk usage of a directory that was configured with SetQuota
func (q *Control) GetDiskUsage(targetPath string, usage *directory.DiskUsage) error {
d, err := q.fsDiskQuotaFromPath(targetPath)
if err != nil {
return err
}
usage.Size = int64(d.d_bcount) * 512
usage.InodeCount = int64(d.d_icount)
return nil
}
func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, error) {
var d C.fs_disk_quota_t
var projectID uint32
value, ok := q.quotas.Load(targetPath)
if ok {
projectID, ok = value.(uint32)
}
if !ok {
return d, fmt.Errorf("quota not found for path : %s", targetPath)
}
//
// get the quota limit for the container's project id
//
cs := C.CString(q.backingFsBlockDev)
defer C.free(unsafe.Pointer(cs))
_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA,
uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)),
uintptr(unsafe.Pointer(&d)), 0, 0)
if errno != 0 {
return d, fmt.Errorf("failed to get quota limit for projid %d on %s: %w",
projectID, q.backingFsBlockDev, errno)
}
return d, nil
}
// getProjectID - get the project id of path on xfs
func getProjectID(targetPath string) (uint32, error) {
dir, err := openDir(targetPath)
if err != nil {
return 0, err
}
defer closeDir(dir)
var fsx C.struct_fsxattr
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
return 0, fmt.Errorf("failed to get projid for %s: %w", targetPath, errno)
}
return uint32(fsx.fsx_projid), nil
}
// setProjectID - set the project id of path on xfs
func setProjectID(targetPath string, projectID uint32) error {
dir, err := openDir(targetPath)
if err != nil {
return err
}
defer closeDir(dir)
var fsx C.struct_fsxattr
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
return fmt.Errorf("failed to get projid for %s: %w", targetPath, errno)
}
fsx.fsx_projid = C.__u32(projectID)
fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT
_, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
return fmt.Errorf("failed to set projid for %s: %w", targetPath, errno)
}
return nil
}
// findNextProjectID - find the next project id to be used for containers
// by scanning driver home directory to find used project ids
func (q *Control) findNextProjectID() error {
files, err := os.ReadDir(q.basePath)
if err != nil {
return fmt.Errorf("read directory failed : %s", q.basePath)
}
for _, file := range files {
if !file.IsDir() {
continue
}
path := filepath.Join(q.basePath, file.Name())
projid, err := getProjectID(path)
if err != nil {
return err
}
if projid > 0 {
q.quotas.Store(path, projid)
}
if q.nextProjectID <= projid {
q.nextProjectID = projid + 1
}
}
return nil
}
func free(p *C.char) {
C.free(unsafe.Pointer(p))
}
func openDir(path string) (*C.DIR, error) {
Cpath := C.CString(path)
defer free(Cpath)
dir, errno := C.opendir(Cpath)
if dir == nil {
return nil, fmt.Errorf("can't open dir %v: %w", Cpath, errno)
}
return dir, nil
}
func closeDir(dir *C.DIR) {
if dir != nil {
C.closedir(dir)
}
}
func getDirFd(dir *C.DIR) uintptr {
return uintptr(C.dirfd(dir))
}
// Get the backing block device of the driver home directory
// and create a block device node under the home directory
// to be used by quotactl commands
func makeBackingFsDev(home string) (string, error) {
var stat unix.Stat_t
if err := unix.Stat(home, &stat); err != nil {
return "", err
}
backingFsBlockDev := path.Join(home, BackingFsBlockDeviceLink)
backingFsBlockDevTmp := backingFsBlockDev + ".tmp"
// Re-create just in case someone copied the home directory over to a new device
if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil {
if !errors.Is(err, unix.EEXIST) {
return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err)
}
// On EEXIST, try again after unlinking any potential leftover.
_ = unix.Unlink(backingFsBlockDevTmp)
if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil {
return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err)
}
}
if err := unix.Rename(backingFsBlockDevTmp, backingFsBlockDev); err != nil {
return "", fmt.Errorf("failed to rename %s to %s: %w", backingFsBlockDevTmp, backingFsBlockDev, err)
}
return backingFsBlockDev, nil
}
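Putting the pieces together, the intended call sequence is one Control per driver home directory, then per-directory limits. A minimal usage sketch, assuming an XFS home mounted with prjquota (paths and sizes hypothetical):

package main

import (
	"fmt"

	"github.com/containers/storage/drivers/quota"
	"github.com/containers/storage/pkg/directory"
)

func main() {
	ctl, err := quota.NewControl("/var/lib/containers/storage/overlay")
	if err != nil {
		panic(err) // e.g. backing fs is not XFS or prjquota is off
	}
	// 1 GiB / 10000-inode limit on a hypothetical container directory.
	q := quota.Quota{Size: 1 << 30, Inodes: 10000}
	if err := ctl.SetQuota("/var/lib/containers/storage/overlay/abc123", q); err != nil {
		panic(err)
	}
	var usage directory.DiskUsage
	if err := ctl.GetDiskUsage("/var/lib/containers/storage/overlay/abc123", &usage); err != nil {
		panic(err)
	}
	fmt.Printf("used %d bytes of %d\n", usage.Size, q.Size)
}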

View file

@@ -0,0 +1,37 @@
//go:build !linux || exclude_disk_quota || !cgo
// +build !linux exclude_disk_quota !cgo

package quota
import (
"errors"
)
// Quota limit params - currently we only control blocks hard limit and inodes
type Quota struct {
Size uint64
Inodes uint64
}
// Control - Context to be used by storage driver (e.g. overlay)
// who wants to apply project quotas to container dirs
type Control struct{}
func NewControl(basePath string) (*Control, error) {
return nil, errors.New("filesystem does not support quotas, or quotas are not enabled")
}
// SetQuota - assign a unique project id to directory and set the quota limits
// for that project id
func (q *Control) SetQuota(targetPath string, quota Quota) error {
return errors.New("filesystem does not support quotas, or quotas are not enabled")
}
// GetQuota - get the quota limits of a directory that was configured with SetQuota
func (q *Control) GetQuota(targetPath string, quota *Quota) error {
return errors.New("filesystem does not support quotas, or quotas are not enabled")
}
// ClearQuota removes the map entry in the quotas map for targetPath.
// It does so to prevent the map leaking entries as directories are deleted.
func (q *Control) ClearQuota(targetPath string) {}

Some files were not shown because too many files have changed in this diff