go.mod: bump osbuild/images to 0.55

Sanne Raymaekers 2024-04-13 15:47:23 +02:00
parent eab44ca8a8
commit 22140aa7c9
700 changed files with 30353 additions and 27556 deletions


@@ -17,13 +17,13 @@ env:
####
#### Cache-image names to test with (double-quotes around names are critical)
###
FEDORA_NAME: "fedora-39ß"
FEDORA_NAME: "fedora-39"
DEBIAN_NAME: "debian-13"
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
# VM Image built in containers/automation_images
IMAGE_SUFFIX: "c20231004t194547z-f39f38d13"
IMAGE_SUFFIX: "c20240102t155643z-f39f38d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
@@ -167,7 +167,7 @@ vendor_task:
cross_task:
container:
image: golang:1.19
image: golang:1.20
build_script: make cross
@@ -181,6 +181,6 @@ success_task:
- vendor
- cross
container:
image: golang:1.19
image: golang:1.20
clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed
script: /bin/true


@@ -41,7 +41,7 @@ containers-storage: ## build using gc on the host
$(GO) build -compiler gc $(BUILDFLAGS) ./cmd/containers-storage
codespell:
codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L worl,flate,uint,iff,od,ERRO -w
codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L plack,worl,flate,uint,iff,od,ERRO -w
binary local-binary: containers-storage


@@ -1 +1 @@
1.51.0
1.53.0


@@ -73,6 +73,13 @@ type ApplyDiffOpts struct {
ForceMask *os.FileMode
}
// ApplyDiffWithDifferOpts contains optional arguments for ApplyDiffWithDiffer methods.
type ApplyDiffWithDifferOpts struct {
ApplyDiffOpts
Flags map[string]interface{}
}
// InitFunc initializes the storage driver.
type InitFunc func(homedir string, options Options) (Driver, error)
@@ -189,6 +196,8 @@ type DriverWithDifferOutput struct {
BigData map[string][]byte
TarSplit []byte
TOCDigest digest.Digest
// RootDirMode is the mode of the root directory of the layer, if specified.
RootDirMode *os.FileMode
// Artifacts is a collection of additional artifacts
// generated by the differ that the storage driver can use.
Artifacts map[string]interface{}
@@ -205,10 +214,26 @@ const (
DifferOutputFormatFlat
)
type DifferFsVerity int
const (
// DifferFsVerityDisabled means no fs-verity is used
DifferFsVerityDisabled = iota
// DifferFsVerityEnabled means fs-verity is used when supported
DifferFsVerityEnabled
// DifferFsVerityRequired means fs-verity is required
DifferFsVerityRequired
)
// DifferOptions overrides how the differ works
type DifferOptions struct {
// Format defines the destination directory layout format
Format DifferOutputFormat
// UseFsVerity defines whether fs-verity is used
UseFsVerity DifferFsVerity
}
// Differ defines the interface for using a custom differ.
@@ -223,9 +248,9 @@ type DriverWithDiffer interface {
Driver
// ApplyDiffWithDiffer applies the changes using the callback function.
// If id is empty, then a staging directory is created. The staging directory is guaranteed to be usable with ApplyDiffFromStagingDirectory.
ApplyDiffWithDiffer(id, parent string, options *ApplyDiffOpts, differ Differ) (output DriverWithDifferOutput, err error)
// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *DriverWithDifferOutput, options *ApplyDiffOpts) error
ApplyDiffWithDiffer(id, parent string, options *ApplyDiffWithDifferOpts, differ Differ) (output DriverWithDifferOutput, err error)
// ApplyDiffFromStagingDirectory applies the changes using the diffOutput target directory.
ApplyDiffFromStagingDirectory(id, parent string, diffOutput *DriverWithDifferOutput, options *ApplyDiffWithDifferOpts) error
// CleanupStagingDirectory cleans up the staging directory. It can be used to clean up the staging directory on errors
CleanupStagingDirectory(stagingDirectory string) error
// DifferTarget gets the location where files are stored for the layer.
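Editor's note: the hunks above replace *ApplyDiffOpts with the new *ApplyDiffWithDifferOpts in the differ API and route the staging directory through DriverWithDifferOutput. A minimal caller-side sketch of the new flow, assuming only the signatures visible in this diff; applyStaged and its arguments are hypothetical names:

package sketch

import (
	graphdriver "github.com/containers/storage/drivers"
)

// applyStaged stages a diff with the new options type, then commits it.
// Passing an empty id asks the driver for a staging directory.
func applyStaged(d graphdriver.DriverWithDiffer, differ graphdriver.Differ) error {
	opts := &graphdriver.ApplyDiffWithDifferOpts{
		ApplyDiffOpts: graphdriver.ApplyDiffOpts{}, // embedded legacy options
		Flags:         map[string]interface{}{},    // new free-form flags
	}
	out, err := d.ApplyDiffWithDiffer("", "", opts, differ)
	if err != nil {
		return err
	}
	// The staging directory now travels inside the output rather than as a
	// separate string argument (see the new signature above).
	return d.ApplyDiffFromStagingDirectory("some-layer-id", "", &out, opts)
}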


@@ -1,5 +1,5 @@
//go:build linux && composefs && cgo
// +build linux,composefs,cgo
//go:build linux && cgo
// +build linux,cgo
package overlay
@@ -7,15 +7,13 @@ import (
"encoding/binary"
"errors"
"fmt"
"io/fs"
"os"
"os/exec"
"path/filepath"
"sync"
"syscall"
"unsafe"
"github.com/containers/storage/pkg/chunked/dump"
"github.com/containers/storage/pkg/fsverity"
"github.com/containers/storage/pkg/loopback"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
@@ -34,77 +32,6 @@ func getComposeFsHelper() (string, error) {
return composeFsHelperPath, composeFsHelperErr
}
func composeFsSupported() bool {
_, err := getComposeFsHelper()
return err == nil
}
func enableVerity(description string, fd int) error {
enableArg := unix.FsverityEnableArg{
Version: 1,
Hash_algorithm: unix.FS_VERITY_HASH_ALG_SHA256,
Block_size: 4096,
}
_, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_ENABLE_VERITY), uintptr(unsafe.Pointer(&enableArg)))
if e1 != 0 && !errors.Is(e1, unix.EEXIST) {
return fmt.Errorf("failed to enable verity for %q: %w", description, e1)
}
return nil
}
type verityDigest struct {
Fsv unix.FsverityDigest
Buf [64]byte
}
func measureVerity(description string, fd int) (string, error) {
var digest verityDigest
digest.Fsv.Size = 64
_, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_MEASURE_VERITY), uintptr(unsafe.Pointer(&digest)))
if e1 != 0 {
return "", fmt.Errorf("failed to measure verity for %q: %w", description, e1)
}
return fmt.Sprintf("%x", digest.Buf[:digest.Fsv.Size]), nil
}
func enableVerityRecursive(root string) (map[string]string, error) {
digests := make(map[string]string)
walkFn := func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if !d.Type().IsRegular() {
return nil
}
f, err := os.Open(path)
if err != nil {
return err
}
defer f.Close()
if err := enableVerity(path, int(f.Fd())); err != nil {
return err
}
verity, err := measureVerity(path, int(f.Fd()))
if err != nil {
return err
}
relPath, err := filepath.Rel(root, path)
if err != nil {
return err
}
digests[relPath] = verity
return nil
}
err := filepath.WalkDir(root, walkFn)
return digests, err
}
func getComposefsBlob(dataDir string) string {
return filepath.Join(dataDir, "composefs.blob")
}
@@ -156,7 +83,7 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, com
return err
}
if err := enableVerity("manifest file", int(newFd.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) {
if err := fsverity.EnableVerity("manifest file", int(newFd.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) {
logrus.Warningf("%s", err)
}
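Editor's note: the enableVerity/measureVerity ioctl helpers deleted above now live in the new github.com/containers/storage/pkg/fsverity package, per the new import and the fsverity.EnableVerity call in this hunk. A minimal sketch of calling the relocated helper, assuming only the signature visible at the call site, EnableVerity(description string, fd int) error; sealFile and its path are hypothetical:

package sketch

import (
	"os"

	"github.com/containers/storage/pkg/fsverity"
)

// sealFile enables fs-verity on one file, mirroring the call above; callers
// are expected to tolerate ENOTSUP/ENOTTY on filesystems without fs-verity.
func sealFile(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	return fsverity.EnableVerity(path, int(f.Fd()))
}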


@@ -1,24 +0,0 @@
//go:build !linux || !composefs || !cgo
// +build !linux !composefs !cgo
package overlay
import (
"fmt"
)
func composeFsSupported() bool {
return false
}
func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error {
return fmt.Errorf("composefs is not supported")
}
func mountComposefsBlob(dataDir, mountPoint string) error {
return fmt.Errorf("composefs is not supported")
}
func enableVerityRecursive(path string) (map[string]string, error) {
return nil, fmt.Errorf("composefs is not supported")
}


@@ -82,7 +82,8 @@ const (
lowerFile = "lower"
maxDepth = 500
tocArtifact = "toc"
tocArtifact = "toc"
fsVerityDigestsArtifact = "fs-verity-digests"
// idLength represents the number of random characters
// which can be used to create the unique link identifier
@@ -105,6 +106,7 @@ type overlayOptions struct {
mountOptions string
ignoreChownErrors bool
forceMask *os.FileMode
useComposefs bool
}
// Driver contains information about the home directory and the list of active mounts that are created using this driver.
@@ -122,6 +124,7 @@ type Driver struct {
supportsDType bool
supportsVolatile *bool
usingMetacopy bool
usingComposefs bool
supportsIDMappedMounts *bool
}
@@ -293,7 +296,7 @@ func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool {
// a bunch of network file systems...
case graphdriver.FsMagicNfsFs, graphdriver.FsMagicSmbFs, graphdriver.FsMagicAcfs,
graphdriver.FsMagicAfs, graphdriver.FsMagicCephFs, graphdriver.FsMagicCIFS,
graphdriver.FsMagicFHGFSFs, graphdriver.FsMagicGPFS, graphdriver.FsMagicIBRIX,
graphdriver.FsMagicGPFS, graphdriver.FsMagicIBRIX,
graphdriver.FsMagicKAFS, graphdriver.FsMagicLUSTRE, graphdriver.FsMagicNCP,
graphdriver.FsMagicNFSD, graphdriver.FsMagicOCFS2, graphdriver.FsMagicPANFS,
graphdriver.FsMagicPRLFS, graphdriver.FsMagicSMB2, graphdriver.FsMagicSNFS,
@@ -307,16 +310,6 @@ func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool {
// If overlay filesystem is not supported on the host, a wrapped graphdriver.ErrNotSupported is returned as error.
// If an overlay filesystem is not supported over an existing filesystem then a wrapped graphdriver.ErrIncompatibleFS is returned.
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
// If custom --imagestore is selected never
// ditch the original graphRoot, instead add it as
// additionalImageStore so its images can still be
// read and used.
if options.ImageStore != "" {
graphRootAsAdditionalStore := fmt.Sprintf("AdditionalImageStore=%s", options.ImageStore)
options.DriverOptions = append(options.DriverOptions, graphRootAsAdditionalStore)
// complete base name with driver name included
options.ImageStore = filepath.Join(options.ImageStore, "overlay")
}
opts, err := parseOptions(options.DriverOptions)
if err != nil {
return nil, err
@@ -387,6 +380,22 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
}
}
if opts.useComposefs {
if unshare.IsRootless() {
return nil, fmt.Errorf("composefs is not supported in user namespaces")
}
supportsDataOnly, err := supportsDataOnlyLayersCached(home, runhome)
if err != nil {
return nil, err
}
if !supportsDataOnly {
return nil, fmt.Errorf("composefs is not supported on this kernel: %w", graphdriver.ErrIncompatibleFS)
}
if _, err := getComposeFsHelper(); err != nil {
return nil, fmt.Errorf("composefs helper program not found: %w", err)
}
}
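Editor's note: the block above gates the new use_composefs option on three conditions before Init proceeds. A standalone restatement of that gate, with hypothetical function arguments standing in for unshare.IsRootless, supportsDataOnlyLayersCached, and getComposeFsHelper:

package sketch

import "errors"

// composefsUsable mirrors the checks added to Init above: composefs needs
// root, kernel support for data-only lower layers, and the helper binary.
func composefsUsable(rootless bool, dataOnly func() (bool, error), helper func() (string, error)) error {
	if rootless {
		return errors.New("composefs is not supported in user namespaces")
	}
	ok, err := dataOnly()
	if err != nil {
		return err
	}
	if !ok {
		return errors.New("composefs is not supported on this kernel")
	}
	if _, err := helper(); err != nil {
		return errors.New("composefs helper program not found")
	}
	return nil
}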
var usingMetacopy bool
var supportsDType bool
var supportsVolatile *bool
@@ -448,6 +457,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
supportsDType: supportsDType,
usingMetacopy: usingMetacopy,
supportsVolatile: supportsVolatile,
usingComposefs: opts.useComposefs,
options: *opts,
}
@@ -555,6 +565,12 @@ func parseOptions(options []string) (*overlayOptions, error) {
withReference: withReference,
})
}
case "use_composefs":
logrus.Debugf("overlay: use_composefs=%s", val)
o.useComposefs, err = strconv.ParseBool(val)
if err != nil {
return nil, err
}
case "mount_program":
logrus.Debugf("overlay: mount_program=%s", val)
if val != "" {
@@ -782,7 +798,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
}
func (d *Driver) useNaiveDiff() bool {
if d.useComposeFs() {
if d.usingComposefs {
return true
}
@@ -837,22 +853,15 @@ func (d *Driver) Status() [][2]string {
// Metadata returns meta data about the overlay driver such as
// LowerDir, UpperDir, WorkDir and MergeDir used to store data.
func (d *Driver) Metadata(id string) (map[string]string, error) {
dir, imagestore, _ := d.dir2(id)
dir := d.dir(id)
if _, err := os.Stat(dir); err != nil {
return nil, err
}
workDirBase := dir
if imagestore != "" {
if _, err := os.Stat(dir); err != nil {
return nil, err
}
workDirBase = imagestore
}
metadata := map[string]string{
"WorkDir": path.Join(workDirBase, "work"),
"MergedDir": path.Join(workDirBase, "merged"),
"UpperDir": path.Join(workDirBase, "diff"),
"WorkDir": path.Join(dir, "work"),
"MergedDir": path.Join(dir, "merged"),
"UpperDir": path.Join(dir, "diff"),
}
lowerDirs, err := d.getLowerDirs(id)
@@ -870,7 +879,7 @@ func (d *Driver) Metadata(id string) (map[string]string, error) {
// is being shutdown. For now, we just have to unmount the bind mounted
// we had created.
func (d *Driver) Cleanup() error {
_ = os.RemoveAll(d.getStagingDir())
_ = os.RemoveAll(filepath.Join(d.home, stagingDir))
return mount.Unmount(d.home)
}
@@ -966,8 +975,10 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr
return d.create(id, parent, opts, true)
}
func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disableQuota bool) (retErr error) {
dir, imageStore, _ := d.dir2(id)
func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnly bool) (retErr error) {
dir, homedir, _ := d.dir2(id, readOnly)
disableQuota := readOnly
uidMaps := d.uidMaps
gidMaps := d.gidMaps
@@ -978,7 +989,7 @@
}
// Make the link directory if it does not exist
if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0o755, 0, 0); err != nil {
if err := idtools.MkdirAllAs(path.Join(homedir, linkDir), 0o755, 0, 0); err != nil {
return err
}
@@ -995,20 +1006,8 @@
if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0o755, idPair); err != nil {
return err
}
workDirBase := dir
if imageStore != "" {
workDirBase = imageStore
if err := idtools.MkdirAllAndChownNew(path.Dir(imageStore), 0o755, idPair); err != nil {
return err
}
}
if parent != "" {
parentBase, parentImageStore, inAdditionalStore := d.dir2(parent)
// If parentBase path is additional image store, select the image contained in parentBase.
// See https://github.com/containers/podman/issues/19748
if parentImageStore != "" && !inAdditionalStore {
parentBase = parentImageStore
}
parentBase := d.dir(parent)
st, err := system.Stat(filepath.Join(parentBase, "diff"))
if err != nil {
return err
@@ -1029,11 +1028,6 @@
if err := idtools.MkdirAllAndChownNew(dir, 0o700, idPair); err != nil {
return err
}
if imageStore != "" {
if err := idtools.MkdirAllAndChownNew(imageStore, 0o700, idPair); err != nil {
return err
}
}
defer func() {
// Clean up on failure
@@ -1041,11 +1035,6 @@
if err2 := os.RemoveAll(dir); err2 != nil {
logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", dir, err2)
}
if imageStore != "" {
if err2 := os.RemoveAll(workDirBase); err2 != nil {
logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", workDirBase, err2)
}
}
}
}()
@@ -1068,11 +1057,6 @@
if err := d.quotaCtl.SetQuota(dir, quota); err != nil {
return err
}
if imageStore != "" {
if err := d.quotaCtl.SetQuota(imageStore, quota); err != nil {
return err
}
}
}
perms := defaultPerms
@@ -1081,12 +1065,7 @@
}
if parent != "" {
parentBase, parentImageStore, inAdditionalStore := d.dir2(parent)
// If parentBase path is additional image store, select the image contained in parentBase.
// See https://github.com/containers/podman/issues/19748
if parentImageStore != "" && !inAdditionalStore {
parentBase = parentImageStore
}
parentBase := d.dir(parent)
st, err := system.Stat(filepath.Join(parentBase, "diff"))
if err != nil {
return err
@@ -1094,17 +1073,14 @@
perms = os.FileMode(st.Mode())
}
if err := idtools.MkdirAs(path.Join(workDirBase, "diff"), perms, rootUID, rootGID); err != nil {
if err := idtools.MkdirAs(path.Join(dir, "diff"), perms, rootUID, rootGID); err != nil {
return err
}
lid := generateID(idLength)
linkBase := path.Join("..", id, "diff")
if imageStore != "" {
linkBase = path.Join(imageStore, "diff")
}
if err := os.Symlink(linkBase, path.Join(d.home, linkDir, lid)); err != nil {
if err := os.Symlink(linkBase, path.Join(homedir, linkDir, lid)); err != nil {
return err
}
@@ -1113,10 +1089,10 @@
return err
}
if err := idtools.MkdirAs(path.Join(workDirBase, "work"), 0o700, rootUID, rootGID); err != nil {
if err := idtools.MkdirAs(path.Join(dir, "work"), 0o700, rootUID, rootGID); err != nil {
return err
}
if err := idtools.MkdirAs(path.Join(workDirBase, "merged"), 0o700, rootUID, rootGID); err != nil {
if err := idtools.MkdirAs(path.Join(dir, "merged"), 0o700, rootUID, rootGID); err != nil {
return err
}
@@ -1198,26 +1174,39 @@ func (d *Driver) getLower(parent string) (string, error) {
}
func (d *Driver) dir(id string) string {
p, _, _ := d.dir2(id)
p, _, _ := d.dir2(id, false)
return p
}
func (d *Driver) dir2(id string) (string, string, bool) {
newpath := path.Join(d.home, id)
imageStore := ""
func (d *Driver) getAllImageStores() []string {
additionalImageStores := d.AdditionalImageStores()
if d.imageStore != "" {
imageStore = path.Join(d.imageStore, id)
additionalImageStores = append([]string{d.imageStore}, additionalImageStores...)
}
return additionalImageStores
}
func (d *Driver) dir2(id string, useImageStore bool) (string, string, bool) {
var homedir string
if useImageStore && d.imageStore != "" {
homedir = path.Join(d.imageStore, d.name)
} else {
homedir = d.home
}
newpath := path.Join(homedir, id)
if _, err := os.Stat(newpath); err != nil {
for _, p := range d.AdditionalImageStores() {
for _, p := range d.getAllImageStores() {
l := path.Join(p, d.name, id)
_, err = os.Stat(l)
if err == nil {
return l, imageStore, true
return l, homedir, true
}
}
}
return newpath, imageStore, false
return newpath, homedir, false
}
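Editor's note: dir2 now takes a useImageStore flag to pick the home directory, and getAllImageStores puts the writable image store ahead of the read-only additional stores. A condensed sketch of the resulting lookup order; resolveLayerDir, driverName, and the inputs are hypothetical:

package sketch

import (
	"os"
	"path"
)

// resolveLayerDir mirrors dir2 above: try the selected home first, then
// every image store, reporting whether the layer lives in another store.
func resolveLayerDir(home, driverName, id string, imageStores []string) (string, bool) {
	newpath := path.Join(home, id)
	if _, err := os.Stat(newpath); err == nil {
		return newpath, false
	}
	for _, p := range imageStores {
		l := path.Join(p, driverName, id)
		if _, err := os.Stat(l); err == nil {
			return l, true
		}
	}
	return newpath, false
}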
func (d *Driver) getLowerDirs(id string) ([]string, error) {
@@ -1427,14 +1416,11 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
}
func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) {
dir, imageStore, inAdditionalStore := d.dir2(id)
dir, _, inAdditionalStore := d.dir2(id, false)
if _, err := os.Stat(dir); err != nil {
return "", err
}
workDirBase := dir
if imageStore != "" {
workDirBase = imageStore
}
readWrite := !inAdditionalStore
if !d.SupportsShifting() || options.DisableShifting {
@@ -1539,7 +1525,7 @@
}()
composeFsLayers := []string{}
composeFsLayersDir := filepath.Join(workDirBase, "composefs-layers")
composeFsLayersDir := filepath.Join(dir, "composefs-layers")
maybeAddComposefsMount := func(lowerID string, i int, readWrite bool) (string, error) {
composefsBlob := d.getComposefsData(lowerID)
_, err = os.Stat(composefsBlob)
@@ -1573,7 +1559,7 @@
return dest, nil
}
diffDir := path.Join(workDirBase, "diff")
diffDir := path.Join(dir, "diff")
if dest, err := maybeAddComposefsMount(id, 0, readWrite); err != nil {
return "", err
@@ -1591,7 +1577,7 @@
lower := ""
newpath := path.Join(d.home, l)
if st, err := os.Stat(newpath); err != nil {
for _, p := range d.AdditionalImageStores() {
for _, p := range d.getAllImageStores() {
lower = path.Join(p, d.name, l)
if st2, err2 := os.Stat(lower); err2 == nil {
if !permsKnown {
@@ -1659,21 +1645,27 @@
optsList = append(optsList, "metacopy=on", "redirect_dir=on")
}
if len(absLowers) == 0 {
absLowers = append(absLowers, path.Join(dir, "empty"))
}
// user namespace requires this to move a directory from lower to upper.
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UidMaps, options.GidMaps)
if err != nil {
return "", err
}
if err := idtools.MkdirAllAs(diffDir, perms, rootUID, rootGID); err != nil {
return "", err
if len(absLowers) == 0 {
absLowers = append(absLowers, path.Join(dir, "empty"))
}
mergedDir := path.Join(workDirBase, "merged")
if err := idtools.MkdirAllAs(diffDir, perms, rootUID, rootGID); err != nil {
if !inAdditionalStore {
return "", err
}
// if it is in an additional store, do not fail if the directory already exists
if _, err2 := os.Stat(diffDir); err2 != nil {
return "", err
}
}
mergedDir := path.Join(dir, "merged")
// Create the driver merged dir
if err := idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) {
return "", err
@@ -1691,7 +1683,7 @@
}
}()
workdir := path.Join(workDirBase, "work")
workdir := path.Join(dir, "work")
if d.options.mountProgram == "" && unshare.IsRootless() {
optsList = append(optsList, "userxattr")
@@ -1841,7 +1833,7 @@
// Put unmounts the mount path created for the given id.
func (d *Driver) Put(id string) error {
dir := d.dir(id)
dir, _, inAdditionalStore := d.dir2(id, false)
if _, err := os.Stat(dir); err != nil {
return err
}
@@ -1902,11 +1894,27 @@
}
}
if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) {
logrus.Debugf("Failed to remove mountpoint %s overlay: %s - %v", id, mountpoint, err)
return fmt.Errorf("removing mount point %q: %w", mountpoint, err)
}
if !inAdditionalStore {
uid, gid := int(0), int(0)
fi, err := os.Stat(mountpoint)
if err != nil {
return err
}
if stat, ok := fi.Sys().(*syscall.Stat_t); ok {
uid, gid = int(stat.Uid), int(stat.Gid)
}
tmpMountpoint := path.Join(dir, "merged.1")
if err := idtools.MkdirAs(tmpMountpoint, 0o700, uid, gid); err != nil && !errors.Is(err, os.ErrExist) {
return err
}
// rename(2) can be used on an empty directory, as it is the mountpoint after umount, and it retains
// its atomic semantics. In this way the "merged" directory is never removed.
if err := unix.Rename(tmpMountpoint, mountpoint); err != nil {
logrus.Debugf("Failed to replace mountpoint %s overlay: %s - %v", id, mountpoint, err)
return fmt.Errorf("replacing mount point %q: %w", mountpoint, err)
}
}
return nil
}
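Editor's note: Put now refuses to leave a missing "merged" directory behind; instead of removing the mountpoint it creates a sibling "merged.1" and renames it over the empty mountpoint, relying on rename(2) being atomic. The same idea in isolation; swapEmptyDir and its arguments are hypothetical:

package sketch

import (
	"errors"
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

// swapEmptyDir atomically replaces the empty directory at target with a
// freshly created one, so the path never disappears from the namespace.
func swapEmptyDir(target string, mode os.FileMode) error {
	tmp := filepath.Join(filepath.Dir(target), filepath.Base(target)+".1")
	if err := os.Mkdir(tmp, mode); err != nil && !errors.Is(err, os.ErrExist) {
		return err
	}
	// rename(2) succeeds over an empty directory and swaps it atomically.
	return unix.Rename(tmp, target)
}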
@@ -1994,14 +2002,18 @@ func (g *overlayFileGetter) Close() error {
return nil
}
func (d *Driver) getStagingDir() string {
return filepath.Join(d.home, stagingDir)
func (d *Driver) getStagingDir(id string) string {
_, homedir, _ := d.dir2(id, d.imageStore != "")
return filepath.Join(homedir, stagingDir)
}
// DiffGetter returns a FileGetCloser that can read files from the directory that
// contains files for the layer differences, either for this layer, or one of our
// lowers if we're just a template directory. Used for direct access for tar-split.
func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
if d.usingComposefs {
return nil, nil
}
p, err := d.getDiffPath(id)
if err != nil {
return nil, err
@@ -2018,9 +2030,9 @@ func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error {
return os.RemoveAll(stagingDirectory)
}
func (d *Driver) supportsDataOnlyLayers() (bool, error) {
func supportsDataOnlyLayersCached(home, runhome string) (bool, error) {
feature := "dataonly-layers"
overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(d.runhome, feature)
overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature)
if err == nil {
if overlayCacheResult {
logrus.Debugf("Cached value indicated that data-only layers for overlay are supported")
@@ -2029,27 +2041,15 @@
logrus.Debugf("Cached value indicated that data-only layers for overlay are not supported")
return false, errors.New(overlayCacheText)
}
supportsDataOnly, err := supportsDataOnlyLayers(d.home)
if err2 := cachedFeatureRecord(d.runhome, feature, supportsDataOnly, ""); err2 != nil {
supportsDataOnly, err := supportsDataOnlyLayers(home)
if err2 := cachedFeatureRecord(runhome, feature, supportsDataOnly, ""); err2 != nil {
return false, fmt.Errorf("recording overlay data-only layers support status: %w", err2)
}
return supportsDataOnly, err
}
func (d *Driver) useComposeFs() bool {
if !composeFsSupported() || unshare.IsRootless() {
return false
}
supportsDataOnlyLayers, err := d.supportsDataOnlyLayers()
if err != nil {
logrus.Debugf("Check for data-only layers failed with: %v", err)
return false
}
return supportsDataOnlyLayers
}
// ApplyDiff applies the changes in the new layer using the specified function
func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, err error) {
func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, err error) {
var idMappings *idtools.IDMappings
if options != nil {
idMappings = options.Mappings
@@ -2061,15 +2061,22 @@
var applyDir string
if id == "" {
err := os.MkdirAll(d.getStagingDir(), 0o700)
stagingDir := d.getStagingDir(id)
err := os.MkdirAll(stagingDir, 0o700)
if err != nil && !os.IsExist(err) {
return graphdriver.DriverWithDifferOutput{}, err
}
applyDir, err = os.MkdirTemp(d.getStagingDir(), "")
applyDir, err = os.MkdirTemp(stagingDir, "")
if err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
perms := defaultPerms
if d.options.forceMask != nil {
perms = *d.options.forceMask
}
if err := os.Chmod(applyDir, perms); err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
} else {
var err error
applyDir, err = d.getDiffPath(id)
@@ -2083,8 +2090,9 @@
differOptions := graphdriver.DifferOptions{
Format: graphdriver.DifferOutputFormatDir,
}
if d.useComposeFs() {
if d.usingComposefs {
differOptions.Format = graphdriver.DifferOutputFormatFlat
differOptions.UseFsVerity = graphdriver.DifferFsVerityEnabled
}
out, err := differ.ApplyDiff(applyDir, &archive.TarOptions{
UIDMaps: idMappings.UIDs(),
@@ -2100,33 +2108,42 @@
}
// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffOpts) error {
if filepath.Dir(stagingDirectory) != d.getStagingDir() {
func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffWithDifferOpts) error {
stagingDirectory := diffOutput.Target
if filepath.Dir(stagingDirectory) != d.getStagingDir(id) {
return fmt.Errorf("%q is not a staging directory", stagingDirectory)
}
if d.useComposeFs() {
// FIXME: move this logic into the differ so we don't have to open
// the file twice.
verityDigests, err := enableVerityRecursive(stagingDirectory)
if err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) {
logrus.Warningf("%s", err)
}
toc := diffOutput.Artifacts[tocArtifact]
if err := generateComposeFsBlob(verityDigests, toc, d.getComposefsData(id)); err != nil {
return err
}
}
diffPath, err := d.getDiffPath(id)
if err != nil {
return err
}
// If the current layer doesn't set the mode for the parent, override it with the parent layer's mode.
if d.options.forceMask == nil && diffOutput.RootDirMode == nil && parent != "" {
parentDiffPath, err := d.getDiffPath(parent)
if err != nil {
return err
}
parentSt, err := os.Stat(parentDiffPath)
if err != nil {
return err
}
if err := os.Chmod(stagingDirectory, parentSt.Mode()); err != nil {
return err
}
}
if d.usingComposefs {
toc := diffOutput.Artifacts[tocArtifact]
verityDigests := diffOutput.Artifacts[fsVerityDigestsArtifact].(map[string]string)
if err := generateComposeFsBlob(verityDigests, toc, d.getComposefsData(id)); err != nil {
return err
}
}
if err := os.RemoveAll(diffPath); err != nil && !os.IsNotExist(err) {
return err
}
diffOutput.UncompressedDigest = diffOutput.TOCDigest
return os.Rename(stagingDirectory, diffPath)
}
@@ -2179,12 +2196,8 @@ func (d *Driver) getComposefsData(id string) string {
}
func (d *Driver) getDiffPath(id string) (string, error) {
dir, imagestore, _ := d.dir2(id)
base := dir
if imagestore != "" {
base = imagestore
}
return redirectDiffIfAdditionalLayer(path.Join(base, "diff"))
dir := d.dir(id)
return redirectDiffIfAdditionalLayer(path.Join(dir, "diff"))
}
func (d *Driver) getLowerDiffPaths(id string) ([]string, error) {
@@ -2275,12 +2288,8 @@ func (d *Driver) AdditionalImageStores() []string {
// by toContainer to those specified by toHost.
func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
var err error
dir, imagestore, _ := d.dir2(id)
base := dir
if imagestore != "" {
base = imagestore
}
diffDir := filepath.Join(base, "diff")
dir := d.dir(id)
diffDir := filepath.Join(dir, "diff")
rootUID, rootGID := 0, 0
if toHost != nil {


@@ -4,6 +4,7 @@
package overlay
import (
"fmt"
"path"
"github.com/containers/storage/pkg/directory"
@@ -15,3 +16,15 @@ import (
func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
return directory.Usage(path.Join(d.dir(id), "diff"))
}
func getComposeFsHelper() (string, error) {
return "", fmt.Errorf("composefs not supported on this build")
}
func mountComposefsBlob(dataDir, mountPoint string) error {
return fmt.Errorf("composefs not supported on this build")
}
func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error {
return fmt.Errorf("composefs not supported on this build")
}


@@ -1,453 +1,5 @@
//go:build linux && !exclude_disk_quota && cgo
// +build linux,!exclude_disk_quota,cgo
//
// projectquota.go - implements XFS project quota controls
// for setting quota limits on a newly created directory.
// It currently supports the legacy XFS specific ioctls.
//
// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR
// for both xfs/ext4 for kernel version >= v4.5
//
package quota
/*
#include <stdlib.h>
#include <dirent.h>
#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#ifndef FS_XFLAG_PROJINHERIT
struct fsxattr {
__u32 fsx_xflags;
__u32 fsx_extsize;
__u32 fsx_nextents;
__u32 fsx_projid;
unsigned char fsx_pad[12];
};
#define FS_XFLAG_PROJINHERIT 0x00000200
#endif
#ifndef FS_IOC_FSGETXATTR
#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr)
#endif
#ifndef FS_IOC_FSSETXATTR
#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr)
#endif
#ifndef PRJQUOTA
#define PRJQUOTA 2
#endif
#ifndef FS_PROJ_QUOTA
#define FS_PROJ_QUOTA 2
#endif
#ifndef Q_XSETPQLIM
#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA)
#endif
#ifndef Q_XGETPQUOTA
#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA)
#endif
*/
import "C"
import (
"errors"
"fmt"
"math"
"os"
"path"
"path/filepath"
"sync"
"syscall"
"unsafe"
"github.com/containers/storage/pkg/directory"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
const projectIDsAllocatedPerQuotaHome = 10000
// BackingFsBlockDeviceLink is the name of a file that we place in
// the home directory of a driver that uses this package.
const BackingFsBlockDeviceLink = "backingFsBlockDev"
// Quota limit params - currently we only control blocks hard limit and inodes
type Quota struct {
Size uint64
Inodes uint64
}
// Control - Context to be used by storage driver (e.g. overlay)
// who wants to apply project quotas to container dirs
type Control struct {
backingFsBlockDev string
nextProjectID uint32
quotas *sync.Map
basePath string
}
// Attempt to generate a unique projectid. Multiple directories
// per file system can have quota and they need a group of unique
// ids. This function attempts to allocate at least projectIDsAllocatedPerQuotaHome(10000)
// unique projectids, based on the inode of the basepath.
func generateUniqueProjectID(path string) (uint32, error) {
fileinfo, err := os.Stat(path)
if err != nil {
return 0, err
}
stat, ok := fileinfo.Sys().(*syscall.Stat_t)
if !ok {
return 0, fmt.Errorf("not a syscall.Stat_t %s", path)
}
projectID := projectIDsAllocatedPerQuotaHome + (stat.Ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome)
return uint32(projectID), nil
}
// NewControl - initialize project quota support.
// Test to make sure that quota can be set on a test dir and find
// the first project id to be used for the next container create.
//
// Returns nil (and error) if project quota is not supported.
//
// First get the project id of the basePath directory.
// This test will fail if the backing fs is not xfs.
//
// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.:
// echo 100000:/var/lib/containers/storage/overlay >> /etc/projects
// echo 200000:/var/lib/containers/storage/volumes >> /etc/projects
// echo storage:100000 >> /etc/projid
// echo volumes:200000 >> /etc/projid
// xfs_quota -x -c 'project -s storage volumes' /<xfs mount point>
//
// In the example above, the storage directory project id will be used as a
// "start offset" and all containers will be assigned larger project ids
// (e.g. >= 100000). Then the volumes directory project id will be used as a
// "start offset" and all volumes will be assigned larger project ids
// (e.g. >= 200000).
// This is a way to prevent xfs_quota management from conflicting with
// containers/storage.
// Then try to create a test directory with the next project id and set a quota
// on it. If that works, continue to scan existing containers to map allocated
// project ids.
func NewControl(basePath string) (*Control, error) {
//
// Get project id of parent dir as minimal id to be used by driver
//
minProjectID, err := getProjectID(basePath)
if err != nil {
return nil, err
}
if minProjectID == 0 {
// Indicates the storage was never initialized
// Generate a unique range of Projectids for this basepath
minProjectID, err = generateUniqueProjectID(basePath)
if err != nil {
return nil, err
}
}
//
// create backing filesystem device node
//
backingFsBlockDev, err := makeBackingFsDev(basePath)
if err != nil {
return nil, err
}
//
// Test if filesystem supports project quotas by trying to set
// a quota on the first available project id
//
quota := Quota{
Size: 0,
Inodes: 0,
}
q := Control{
backingFsBlockDev: backingFsBlockDev,
nextProjectID: minProjectID + 1,
quotas: &sync.Map{},
basePath: basePath,
}
if err := q.setProjectQuota(minProjectID, quota); err != nil {
return nil, err
}
//
// get first project id to be used for next container
//
err = q.findNextProjectID()
if err != nil {
return nil, err
}
logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID)
return &q, nil
}
// SetQuota - assign a unique project id to directory and set the quota limits
// for that project id
func (q *Control) SetQuota(targetPath string, quota Quota) error {
var projectID uint32
value, ok := q.quotas.Load(targetPath)
if ok {
projectID, ok = value.(uint32)
}
if !ok {
projectID = q.nextProjectID
//
// assign project id to new container directory
//
err := setProjectID(targetPath, projectID)
if err != nil {
return err
}
q.quotas.Store(targetPath, projectID)
q.nextProjectID++
}
//
// set the quota limit for the container's project id
//
logrus.Debugf("SetQuota path=%s, size=%d, inodes=%d, projectID=%d", targetPath, quota.Size, quota.Inodes, projectID)
return q.setProjectQuota(projectID, quota)
}
// ClearQuota removes the map entry in the quotas map for targetPath.
// It does so to prevent the map leaking entries as directories are deleted.
func (q *Control) ClearQuota(targetPath string) {
q.quotas.Delete(targetPath)
}
// setProjectQuota - set the quota for project id on xfs block device
func (q *Control) setProjectQuota(projectID uint32, quota Quota) error {
var d C.fs_disk_quota_t
d.d_version = C.FS_DQUOT_VERSION
d.d_id = C.__u32(projectID)
d.d_flags = C.FS_PROJ_QUOTA
if quota.Size > 0 {
d.d_fieldmask = d.d_fieldmask | C.FS_DQ_BHARD | C.FS_DQ_BSOFT
d.d_blk_hardlimit = C.__u64(quota.Size / 512)
d.d_blk_softlimit = d.d_blk_hardlimit
}
if quota.Inodes > 0 {
d.d_fieldmask = d.d_fieldmask | C.FS_DQ_IHARD | C.FS_DQ_ISOFT
d.d_ino_hardlimit = C.__u64(quota.Inodes)
d.d_ino_softlimit = d.d_ino_hardlimit
}
cs := C.CString(q.backingFsBlockDev)
defer C.free(unsafe.Pointer(cs))
runQuotactl := func() syscall.Errno {
_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM,
uintptr(unsafe.Pointer(cs)), uintptr(d.d_id),
uintptr(unsafe.Pointer(&d)), 0, 0)
return errno
}
errno := runQuotactl()
// If the backingFsBlockDev does not exist any more then try to recreate it.
if errors.Is(errno, unix.ENOENT) {
if _, err := makeBackingFsDev(q.basePath); err != nil {
return fmt.Errorf(
"failed to recreate missing backingFsBlockDev %s for projid %d: %w",
q.backingFsBlockDev, projectID, err,
)
}
if errno := runQuotactl(); errno != 0 {
return fmt.Errorf("failed to set quota limit for projid %d on %s after backingFsBlockDev recreation: %w",
projectID, q.backingFsBlockDev, errno)
}
} else if errno != 0 {
return fmt.Errorf("failed to set quota limit for projid %d on %s: %w",
projectID, q.backingFsBlockDev, errno)
}
return nil
}
// GetQuota - get the quota limits of a directory that was configured with SetQuota
func (q *Control) GetQuota(targetPath string, quota *Quota) error {
d, err := q.fsDiskQuotaFromPath(targetPath)
if err != nil {
return err
}
quota.Size = uint64(d.d_blk_hardlimit) * 512
quota.Inodes = uint64(d.d_ino_hardlimit)
return nil
}
// GetDiskUsage - get the current disk usage of a directory that was configured with SetQuota
func (q *Control) GetDiskUsage(targetPath string, usage *directory.DiskUsage) error {
d, err := q.fsDiskQuotaFromPath(targetPath)
if err != nil {
return err
}
usage.Size = int64(d.d_bcount) * 512
usage.InodeCount = int64(d.d_icount)
return nil
}
func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, error) {
var d C.fs_disk_quota_t
var projectID uint32
value, ok := q.quotas.Load(targetPath)
if ok {
projectID, ok = value.(uint32)
}
if !ok {
return d, fmt.Errorf("quota not found for path : %s", targetPath)
}
//
// get the quota limit for the container's project id
//
cs := C.CString(q.backingFsBlockDev)
defer C.free(unsafe.Pointer(cs))
_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA,
uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)),
uintptr(unsafe.Pointer(&d)), 0, 0)
if errno != 0 {
return d, fmt.Errorf("failed to get quota limit for projid %d on %s: %w",
projectID, q.backingFsBlockDev, errno)
}
return d, nil
}
// getProjectID - get the project id of path on xfs
func getProjectID(targetPath string) (uint32, error) {
dir, err := openDir(targetPath)
if err != nil {
return 0, err
}
defer closeDir(dir)
var fsx C.struct_fsxattr
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
return 0, fmt.Errorf("failed to get projid for %s: %w", targetPath, errno)
}
return uint32(fsx.fsx_projid), nil
}
// setProjectID - set the project id of path on xfs
func setProjectID(targetPath string, projectID uint32) error {
dir, err := openDir(targetPath)
if err != nil {
return err
}
defer closeDir(dir)
var fsx C.struct_fsxattr
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
return fmt.Errorf("failed to get projid for %s: %w", targetPath, errno)
}
fsx.fsx_projid = C.__u32(projectID)
fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT
_, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
return fmt.Errorf("failed to set projid for %s: %w", targetPath, errno)
}
return nil
}
// findNextProjectID - find the next project id to be used for containers
// by scanning driver home directory to find used project ids
func (q *Control) findNextProjectID() error {
files, err := os.ReadDir(q.basePath)
if err != nil {
return fmt.Errorf("read directory failed : %s", q.basePath)
}
for _, file := range files {
if !file.IsDir() {
continue
}
path := filepath.Join(q.basePath, file.Name())
projid, err := getProjectID(path)
if err != nil {
return err
}
if projid > 0 {
q.quotas.Store(path, projid)
}
if q.nextProjectID <= projid {
q.nextProjectID = projid + 1
}
}
return nil
}
func free(p *C.char) {
C.free(unsafe.Pointer(p))
}
func openDir(path string) (*C.DIR, error) {
Cpath := C.CString(path)
defer free(Cpath)
dir, errno := C.opendir(Cpath)
if dir == nil {
return nil, fmt.Errorf("can't open dir %v: %w", Cpath, errno)
}
return dir, nil
}
func closeDir(dir *C.DIR) {
if dir != nil {
C.closedir(dir)
}
}
func getDirFd(dir *C.DIR) uintptr {
return uintptr(C.dirfd(dir))
}
// Get the backing block device of the driver home directory
// and create a block device node under the home directory
// to be used by quotactl commands
func makeBackingFsDev(home string) (string, error) {
var stat unix.Stat_t
if err := unix.Stat(home, &stat); err != nil {
return "", err
}
backingFsBlockDev := path.Join(home, BackingFsBlockDeviceLink)
backingFsBlockDevTmp := backingFsBlockDev + ".tmp"
// Re-create just in case someone copied the home directory over to a new device
if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil {
if !errors.Is(err, unix.EEXIST) {
return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err)
}
// On EEXIST, try again after unlinking any potential leftover.
_ = unix.Unlink(backingFsBlockDevTmp)
if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil {
return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err)
}
}
if err := unix.Rename(backingFsBlockDevTmp, backingFsBlockDev); err != nil {
return "", fmt.Errorf("failed to rename %s to %s: %w", backingFsBlockDevTmp, backingFsBlockDev, err)
}
return backingFsBlockDev, nil
}


@@ -0,0 +1,449 @@
//go:build linux && !exclude_disk_quota && cgo
// +build linux,!exclude_disk_quota,cgo
//
// projectquota.go - implements XFS project quota controls
// for setting quota limits on a newly created directory.
// It currently supports the legacy XFS specific ioctls.
//
// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR
// for both xfs/ext4 for kernel version >= v4.5
//
package quota
/*
#include <stdlib.h>
#include <dirent.h>
#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#ifndef FS_XFLAG_PROJINHERIT
struct fsxattr {
__u32 fsx_xflags;
__u32 fsx_extsize;
__u32 fsx_nextents;
__u32 fsx_projid;
unsigned char fsx_pad[12];
};
#define FS_XFLAG_PROJINHERIT 0x00000200
#endif
#ifndef FS_IOC_FSGETXATTR
#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr)
#endif
#ifndef FS_IOC_FSSETXATTR
#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr)
#endif
#ifndef PRJQUOTA
#define PRJQUOTA 2
#endif
#ifndef FS_PROJ_QUOTA
#define FS_PROJ_QUOTA 2
#endif
#ifndef Q_XSETPQLIM
#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA)
#endif
#ifndef Q_XGETPQUOTA
#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA)
#endif
*/
import "C"
import (
"errors"
"fmt"
"math"
"os"
"path"
"path/filepath"
"sync"
"syscall"
"unsafe"
"github.com/containers/storage/pkg/directory"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
const projectIDsAllocatedPerQuotaHome = 10000
// Quota limit params - currently we only control blocks hard limit and inodes
type Quota struct {
Size uint64
Inodes uint64
}
// Control - Context to be used by storage driver (e.g. overlay)
// who wants to apply project quotas to container dirs
type Control struct {
backingFsBlockDev string
nextProjectID uint32
quotas *sync.Map
basePath string
}
// Attempt to generate a unique projectid. Multiple directories
// per file system can have quota and they need a group of unique
// ids. This function attempts to allocate at least projectIDsAllocatedPerQuotaHome(10000)
// unique projectids, based on the inode of the basepath.
func generateUniqueProjectID(path string) (uint32, error) {
fileinfo, err := os.Stat(path)
if err != nil {
return 0, err
}
stat, ok := fileinfo.Sys().(*syscall.Stat_t)
if !ok {
return 0, fmt.Errorf("not a syscall.Stat_t %s", path)
}
projectID := projectIDsAllocatedPerQuotaHome + (stat.Ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome)
return uint32(projectID), nil
}
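Editor's note: a worked example of the formula above, with projectIDsAllocatedPerQuotaHome = 10000 and a hypothetical inode number. For Ino = 123456: 123456 * 10000 = 1234560000, and 1234560000 mod (4294967295 - 10000) = 1234560000, so the function returns 10000 + 1234560000 = 1234570000. Two base paths therefore land in well-separated project-id ranges unless their inode products happen to collide modulo 4294957295.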
// NewControl - initialize project quota support.
// Test to make sure that quota can be set on a test dir and find
// the first project id to be used for the next container create.
//
// Returns nil (and error) if project quota is not supported.
//
// First get the project id of the basePath directory.
// This test will fail if the backing fs is not xfs.
//
// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.:
// echo 100000:/var/lib/containers/storage/overlay >> /etc/projects
// echo 200000:/var/lib/containers/storage/volumes >> /etc/projects
// echo storage:100000 >> /etc/projid
// echo volumes:200000 >> /etc/projid
// xfs_quota -x -c 'project -s storage volumes' /<xfs mount point>
//
// In the example above, the storage directory project id will be used as a
// "start offset" and all containers will be assigned larger project ids
// (e.g. >= 100000). Then the volumes directory project id will be used as a
// "start offset" and all volumes will be assigned larger project ids
// (e.g. >= 200000).
// This is a way to prevent xfs_quota management from conflicting with
// containers/storage.
// Then try to create a test directory with the next project id and set a quota
// on it. If that works, continue to scan existing containers to map allocated
// project ids.
func NewControl(basePath string) (*Control, error) {
//
// Get project id of parent dir as minimal id to be used by driver
//
minProjectID, err := getProjectID(basePath)
if err != nil {
return nil, err
}
if minProjectID == 0 {
// Indicates the storage was never initialized
// Generate a unique range of Projectids for this basepath
minProjectID, err = generateUniqueProjectID(basePath)
if err != nil {
return nil, err
}
}
//
// create backing filesystem device node
//
backingFsBlockDev, err := makeBackingFsDev(basePath)
if err != nil {
return nil, err
}
//
// Test if filesystem supports project quotas by trying to set
// a quota on the first available project id
//
quota := Quota{
Size: 0,
Inodes: 0,
}
q := Control{
backingFsBlockDev: backingFsBlockDev,
nextProjectID: minProjectID + 1,
quotas: &sync.Map{},
basePath: basePath,
}
if err := q.setProjectQuota(minProjectID, quota); err != nil {
return nil, err
}
//
// get first project id to be used for next container
//
err = q.findNextProjectID()
if err != nil {
return nil, err
}
logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID)
return &q, nil
}
// SetQuota - assign a unique project id to directory and set the quota limits
// for that project id
func (q *Control) SetQuota(targetPath string, quota Quota) error {
var projectID uint32
value, ok := q.quotas.Load(targetPath)
if ok {
projectID, ok = value.(uint32)
}
if !ok {
projectID = q.nextProjectID
//
// assign project id to new container directory
//
err := setProjectID(targetPath, projectID)
if err != nil {
return err
}
q.quotas.Store(targetPath, projectID)
q.nextProjectID++
}
//
// set the quota limit for the container's project id
//
logrus.Debugf("SetQuota path=%s, size=%d, inodes=%d, projectID=%d", targetPath, quota.Size, quota.Inodes, projectID)
return q.setProjectQuota(projectID, quota)
}
// ClearQuota removes the map entry in the quotas map for targetPath.
// It does so to prevent the map leaking entries as directories are deleted.
func (q *Control) ClearQuota(targetPath string) {
q.quotas.Delete(targetPath)
}
// setProjectQuota - set the quota for project id on xfs block device
func (q *Control) setProjectQuota(projectID uint32, quota Quota) error {
var d C.fs_disk_quota_t
d.d_version = C.FS_DQUOT_VERSION
d.d_id = C.__u32(projectID)
d.d_flags = C.FS_PROJ_QUOTA
if quota.Size > 0 {
d.d_fieldmask = d.d_fieldmask | C.FS_DQ_BHARD | C.FS_DQ_BSOFT
d.d_blk_hardlimit = C.__u64(quota.Size / 512)
d.d_blk_softlimit = d.d_blk_hardlimit
}
if quota.Inodes > 0 {
d.d_fieldmask = d.d_fieldmask | C.FS_DQ_IHARD | C.FS_DQ_ISOFT
d.d_ino_hardlimit = C.__u64(quota.Inodes)
d.d_ino_softlimit = d.d_ino_hardlimit
}
cs := C.CString(q.backingFsBlockDev)
defer C.free(unsafe.Pointer(cs))
runQuotactl := func() syscall.Errno {
_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM,
uintptr(unsafe.Pointer(cs)), uintptr(d.d_id),
uintptr(unsafe.Pointer(&d)), 0, 0)
return errno
}
errno := runQuotactl()
// If the backingFsBlockDev does not exist any more then try to recreate it.
if errors.Is(errno, unix.ENOENT) {
if _, err := makeBackingFsDev(q.basePath); err != nil {
return fmt.Errorf(
"failed to recreate missing backingFsBlockDev %s for projid %d: %w",
q.backingFsBlockDev, projectID, err,
)
}
if errno := runQuotactl(); errno != 0 {
return fmt.Errorf("failed to set quota limit for projid %d on %s after backingFsBlockDev recreation: %w",
projectID, q.backingFsBlockDev, errno)
}
} else if errno != 0 {
return fmt.Errorf("failed to set quota limit for projid %d on %s: %w",
projectID, q.backingFsBlockDev, errno)
}
return nil
}
// GetQuota - get the quota limits of a directory that was configured with SetQuota
func (q *Control) GetQuota(targetPath string, quota *Quota) error {
d, err := q.fsDiskQuotaFromPath(targetPath)
if err != nil {
return err
}
quota.Size = uint64(d.d_blk_hardlimit) * 512
quota.Inodes = uint64(d.d_ino_hardlimit)
return nil
}
// GetDiskUsage - get the current disk usage of a directory that was configured with SetQuota
func (q *Control) GetDiskUsage(targetPath string, usage *directory.DiskUsage) error {
d, err := q.fsDiskQuotaFromPath(targetPath)
if err != nil {
return err
}
usage.Size = int64(d.d_bcount) * 512
usage.InodeCount = int64(d.d_icount)
return nil
}
func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, error) {
var d C.fs_disk_quota_t
var projectID uint32
value, ok := q.quotas.Load(targetPath)
if ok {
projectID, ok = value.(uint32)
}
if !ok {
return d, fmt.Errorf("quota not found for path : %s", targetPath)
}
//
// get the quota limit for the container's project id
//
cs := C.CString(q.backingFsBlockDev)
defer C.free(unsafe.Pointer(cs))
_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA,
uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)),
uintptr(unsafe.Pointer(&d)), 0, 0)
if errno != 0 {
return d, fmt.Errorf("failed to get quota limit for projid %d on %s: %w",
projectID, q.backingFsBlockDev, errno)
}
return d, nil
}
// getProjectID - get the project id of path on xfs
func getProjectID(targetPath string) (uint32, error) {
dir, err := openDir(targetPath)
if err != nil {
return 0, err
}
defer closeDir(dir)
var fsx C.struct_fsxattr
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
return 0, fmt.Errorf("failed to get projid for %s: %w", targetPath, errno)
}
return uint32(fsx.fsx_projid), nil
}
// setProjectID - set the project id of path on xfs
func setProjectID(targetPath string, projectID uint32) error {
dir, err := openDir(targetPath)
if err != nil {
return err
}
defer closeDir(dir)
var fsx C.struct_fsxattr
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
return fmt.Errorf("failed to get projid for %s: %w", targetPath, errno)
}
fsx.fsx_projid = C.__u32(projectID)
fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT
_, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
return fmt.Errorf("failed to set projid for %s: %w", targetPath, errno)
}
return nil
}
// findNextProjectID - find the next project id to be used for containers
// by scanning driver home directory to find used project ids
func (q *Control) findNextProjectID() error {
files, err := os.ReadDir(q.basePath)
if err != nil {
return fmt.Errorf("read directory failed : %s", q.basePath)
}
for _, file := range files {
if !file.IsDir() {
continue
}
path := filepath.Join(q.basePath, file.Name())
projid, err := getProjectID(path)
if err != nil {
return err
}
if projid > 0 {
q.quotas.Store(path, projid)
}
if q.nextProjectID <= projid {
q.nextProjectID = projid + 1
}
}
return nil
}
func free(p *C.char) {
C.free(unsafe.Pointer(p))
}
func openDir(path string) (*C.DIR, error) {
Cpath := C.CString(path)
defer free(Cpath)
dir, errno := C.opendir(Cpath)
if dir == nil {
return nil, fmt.Errorf("can't open dir %v: %w", Cpath, errno)
}
return dir, nil
}
func closeDir(dir *C.DIR) {
if dir != nil {
C.closedir(dir)
}
}
func getDirFd(dir *C.DIR) uintptr {
return uintptr(C.dirfd(dir))
}
// Get the backing block device of the driver home directory
// and create a block device node under the home directory
// to be used by quotactl commands
func makeBackingFsDev(home string) (string, error) {
var stat unix.Stat_t
if err := unix.Stat(home, &stat); err != nil {
return "", err
}
backingFsBlockDev := path.Join(home, BackingFsBlockDeviceLink)
backingFsBlockDevTmp := backingFsBlockDev + ".tmp"
// Re-create just in case someone copied the home directory over to a new device
if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil {
if !errors.Is(err, unix.EEXIST) {
return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err)
}
// On EEXIST, try again after unlinking any potential leftover.
_ = unix.Unlink(backingFsBlockDevTmp)
if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil {
return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err)
}
}
if err := unix.Rename(backingFsBlockDevTmp, backingFsBlockDev); err != nil {
return "", fmt.Errorf("failed to rename %s to %s: %w", backingFsBlockDevTmp, backingFsBlockDev, err)
}
return backingFsBlockDev, nil
}


@@ -31,8 +31,9 @@ func init() {
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
d := &Driver{
name: "vfs",
homes: []string{home},
home: home,
idMappings: idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
imageStore: options.ImageStore,
}
rootIDs := d.idMappings.RootPair()
@@ -47,7 +48,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
key = strings.ToLower(key)
switch key {
case "vfs.imagestore", ".imagestore":
d.homes = append(d.homes, strings.Split(val, ",")...)
d.additionalHomes = append(d.additionalHomes, strings.Split(val, ",")...)
continue
case "vfs.mountopt":
return nil, fmt.Errorf("vfs driver does not support mount options")
@@ -62,12 +63,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
return nil, fmt.Errorf("vfs driver does not support %s options", key)
}
}
// If --imagestore is provided, lets add writable graphRoot
// to vfs's additional image store, as it is done for
// `overlay` driver.
if options.ImageStore != "" {
d.homes = append(d.homes, options.ImageStore)
}
d.updater = graphdriver.NewNaiveLayerIDMapUpdater(d)
d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, d.updater)
@@ -80,11 +76,13 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver
type Driver struct {
name string
homes []string
home string
additionalHomes []string
idMappings *idtools.IDMappings
ignoreChownErrors bool
naiveDiff graphdriver.DiffDriver
updater graphdriver.LayerIDMapUpdater
imageStore string
}
func (d *Driver) String() string {
@@ -158,7 +156,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
idMappings = opts.IDMappings
}
dir := d.dir(id)
dir := d.dir2(id, ro)
rootIDs := idMappings.RootPair()
if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0o700, rootIDs); err != nil {
return err
@@ -204,18 +202,32 @@
return nil
}
func (d *Driver) dir(id string) string {
for i, home := range d.homes {
if i > 0 {
home = filepath.Join(home, d.String())
func (d *Driver) dir2(id string, useImageStore bool) string {
var homedir string
if useImageStore && d.imageStore != "" {
homedir = filepath.Join(d.imageStore, d.String(), "dir", filepath.Base(id))
} else {
homedir = filepath.Join(d.home, "dir", filepath.Base(id))
}
if _, err := os.Stat(homedir); err != nil {
additionalHomes := d.additionalHomes[:]
if d.imageStore != "" {
additionalHomes = append(additionalHomes, d.imageStore)
}
candidate := filepath.Join(home, "dir", filepath.Base(id))
fi, err := os.Stat(candidate)
if err == nil && fi.IsDir() {
return candidate
for _, home := range additionalHomes {
candidate := filepath.Join(home, d.String(), "dir", filepath.Base(id))
fi, err := os.Stat(candidate)
if err == nil && fi.IsDir() {
return candidate
}
}
}
return filepath.Join(d.homes[0], "dir", filepath.Base(id))
return homedir
}
func (d *Driver) dir(id string) string {
return d.dir2(id, false)
}
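The net effect of dir2 is a fixed resolution order: the primary home (or the image store, for read-only layers) wins, and the image store plus any additional stores are only consulted when the layer already exists there. A self-contained sketch of that order for the writable case, with illustrative paths and the driver name assumed to be "vfs":

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// layerDir mirrors dir2's resolution order: the primary home is preferred, and
// other stores are only used when the layer already exists in one of them.
// All paths are illustrative assumptions.
func layerDir(home, imageStore string, additional []string, id string) string {
	primary := filepath.Join(home, "dir", filepath.Base(id))
	if _, err := os.Stat(primary); err == nil {
		return primary
	}
	homes := append(append([]string(nil), additional...), imageStore)
	for _, h := range homes {
		if h == "" {
			continue
		}
		candidate := filepath.Join(h, "vfs", "dir", filepath.Base(id))
		if fi, err := os.Stat(candidate); err == nil && fi.IsDir() {
			return candidate
		}
	}
	return primary
}

func main() {
	fmt.Println(layerDir("/var/lib/containers/storage/vfs", "/mnt/imagestore", nil, "abc123"))
}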
// Remove deletes the content from the directory for a given id.
@ -263,7 +275,7 @@ func (d *Driver) Exists(id string) bool {
// List layers (not including additional image stores)
func (d *Driver) ListLayers() ([]string, error) {
entries, err := os.ReadDir(filepath.Join(d.homes[0], "dir"))
entries, err := os.ReadDir(filepath.Join(d.home, "dir"))
if err != nil {
return nil, err
}
@ -285,8 +297,8 @@ func (d *Driver) ListLayers() ([]string, error) {
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
if len(d.homes) > 1 {
return d.homes[1:]
if len(d.additionalHomes) > 0 {
return d.additionalHomes
}
return nil
}

View file

@ -126,6 +126,13 @@ type Layer struct {
// as a DiffID.
UncompressedDigest digest.Digest `json:"diff-digest,omitempty"`
// TOCDigest represents the digest of the Table of Contents (TOC) of the blob.
// This digest is utilized when the UncompressedDigest is not
// validated during the partial image pull process, but the
// TOC itself is validated.
// It serves as an alternative reference under these specific conditions.
TOCDigest digest.Digest `json:"toc-digest,omitempty"`
// UncompressedSize is the length of the blob that was last passed to
// ApplyDiff() or create(), after we decompressed it. If
// UncompressedDigest is not set, this should be treated as if it were
@ -174,6 +181,13 @@ type DiffOptions struct {
Compression *archive.Compression
}
// stagedLayerOptions are the options passed to .create to populate a staged
// layer
type stagedLayerOptions struct {
DiffOutput *drivers.DriverWithDifferOutput
DiffOptions *drivers.ApplyDiffWithDifferOpts
}
// roLayerStore wraps a graph driver, adding the ability to refer to layers by
// name, and keeping track of parent-child relationships, along with a list of
// all known layers.
@ -228,6 +242,10 @@ type roLayerStore interface {
// specified uncompressed digest value recorded for them.
LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)
// LayersByTOCDigest returns a slice of the layers with the
// specified TOC digest value recorded for them.
LayersByTOCDigest(d digest.Digest) ([]Layer, error)
// Layers returns a slice of the known layers.
Layers() ([]Layer, error)
}
@ -256,7 +274,7 @@ type rwLayerStore interface {
// underlying drivers do not themselves distinguish between writeable
// and read-only layers. Returns the new layer structure and the size of the
// diff which was applied to its parent to initialize its contents.
create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader) (*Layer, int64, error)
create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error)
// updateNames modifies names associated with a layer based on (op, names).
updateNames(id string, names []string, op updateNameOperation) error
@ -296,13 +314,13 @@ type rwLayerStore interface {
// ApplyDiffWithDiffer applies the changes through the differ callback function.
// If to is the empty string, then a staging directory is created by the driver.
ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
// CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors
CleanupStagingDirectory(stagingDirectory string) error
// ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff.
ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error
// applyDiffFromStagingDirectory uses diffOutput.Target to create the diff.
applyDiffFromStagingDirectory(id string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error
// DifferTarget gets the location where files are stored for the layer.
DifferTarget(id string) (string, error)
@ -316,10 +334,71 @@ type rwLayerStore interface {
GarbageCollect() error
}
type multipleLockFile struct {
lockfiles []*lockfile.LockFile
}
func (l multipleLockFile) Lock() {
for _, lock := range l.lockfiles {
lock.Lock()
}
}
func (l multipleLockFile) RLock() {
for _, lock := range l.lockfiles {
lock.RLock()
}
}
func (l multipleLockFile) Unlock() {
for _, lock := range l.lockfiles {
lock.Unlock()
}
}
func (l multipleLockFile) ModifiedSince(lastWrite lockfile.LastWrite) (lockfile.LastWrite, bool, error) {
// Look up only the first lockfile, since this is the value returned by RecordWrite().
return l.lockfiles[0].ModifiedSince(lastWrite)
}
func (l multipleLockFile) AssertLockedForWriting() {
for _, lock := range l.lockfiles {
lock.AssertLockedForWriting()
}
}
func (l multipleLockFile) GetLastWrite() (lockfile.LastWrite, error) {
return l.lockfiles[0].GetLastWrite()
}
func (l multipleLockFile) RecordWrite() (lockfile.LastWrite, error) {
var lastWrite *lockfile.LastWrite
for _, lock := range l.lockfiles {
lw, err := lock.RecordWrite()
if err != nil {
return lw, err
}
// Return the first value we get so we know that
// all the locks have a write time >= this one.
if lastWrite == nil {
lastWrite = &lw
}
}
return *lastWrite, nil
}
func (l multipleLockFile) IsReadWrite() bool {
return l.lockfiles[0].IsReadWrite()
}
func newMultipleLockFile(l ...*lockfile.LockFile) *multipleLockFile {
return &multipleLockFile{lockfiles: l}
}
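Inside this package, a layer store that spans both a graph root and a split image store can then treat the two layers.lock files as one lock; a hedged sketch (paths are illustrative, and newMultipleLockFile is unexported, so this only works package-internally):

func lockBothStores() error {
	graphLock, err := lockfile.GetLockFile("/var/lib/containers/storage/overlay-layers/layers.lock")
	if err != nil {
		return err
	}
	imageStoreLock, err := lockfile.GetLockFile("/mnt/imagestore/overlay-layers/layers.lock")
	if err != nil {
		return err
	}
	multi := newMultipleLockFile(graphLock, imageStoreLock)
	multi.Lock()
	defer multi.Unlock()
	// RecordWrite touches every underlying lock but returns the first
	// LastWrite, which is the value ModifiedSince later compares against.
	_, err = multi.RecordWrite()
	return err
}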
type layerStore struct {
// The following fields are only set when constructing layerStore, and must never be modified afterwards.
// They are safe to access without any other locking.
lockfile *lockfile.LockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only layer stores.
lockfile *multipleLockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only layer stores.
mountsLockfile *lockfile.LockFile // Can _only_ be obtained with inProcessLock held.
rundir string
jsonPath [numLayerLocationIndex]string
@ -337,6 +416,7 @@ type layerStore struct {
bymount map[string]*Layer
bycompressedsum map[digest.Digest][]string
byuncompressedsum map[digest.Digest][]string
bytocsum map[digest.Digest][]string
layerspathsModified [numLayerLocationIndex]time.Time
// FIXME: This field is only set when constructing layerStore, but locking rules of the driver
@ -366,6 +446,7 @@ func copyLayer(l *Layer) *Layer {
CompressedSize: l.CompressedSize,
UncompressedDigest: l.UncompressedDigest,
UncompressedSize: l.UncompressedSize,
TOCDigest: l.TOCDigest,
CompressionType: l.CompressionType,
ReadOnly: l.ReadOnly,
volatileStore: l.volatileStore,
@ -745,6 +826,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
names := make(map[string]*Layer)
compressedsums := make(map[digest.Digest][]string)
uncompressedsums := make(map[digest.Digest][]string)
tocsums := make(map[digest.Digest][]string)
var errorToResolveBySaving error // == nil; if there are multiple errors, this is one of them.
if r.lockfile.IsReadWrite() {
selinux.ClearLabels()
@ -765,6 +847,9 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
if layer.UncompressedDigest != "" {
uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID)
}
if layer.TOCDigest != "" {
tocsums[layer.TOCDigest] = append(tocsums[layer.TOCDigest], layer.ID)
}
if layer.MountLabel != "" {
selinux.ReserveLabel(layer.MountLabel)
}
@ -792,6 +877,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
r.byname = names
r.bycompressedsum = compressedsums
r.byuncompressedsum = uncompressedsums
r.bytocsum = tocsums
// Load and merge information about which layers are mounted, and where.
if r.lockfile.IsReadWrite() {
@ -998,22 +1084,37 @@ func (r *layerStore) saveMounts() error {
return r.loadMounts()
}
func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver, transient bool) (rwLayerStore, error) {
func (s *store) newLayerStore(rundir, layerdir, imagedir string, driver drivers.Driver, transient bool) (rwLayerStore, error) {
if err := os.MkdirAll(rundir, 0o700); err != nil {
return nil, err
}
if err := os.MkdirAll(layerdir, 0o700); err != nil {
return nil, err
}
if imagedir != "" {
if err := os.MkdirAll(imagedir, 0o700); err != nil {
return nil, err
}
}
// Note: while the containers.lock file lives in rundir for transient stores,
// we don't want to do the same for layers.lock here, because the non-transient
// layers in layers.json might be used externally as a read-only layer (using
// e.g. additionalimagestores), and that would look for the lockfile in the
// same directory
var lockFiles []*lockfile.LockFile
lockFile, err := lockfile.GetLockFile(filepath.Join(layerdir, "layers.lock"))
if err != nil {
return nil, err
}
lockFiles = append(lockFiles, lockFile)
if imagedir != "" {
lockFile, err := lockfile.GetLockFile(filepath.Join(imagedir, "layers.lock"))
if err != nil {
return nil, err
}
lockFiles = append(lockFiles, lockFile)
}
mountsLockfile, err := lockfile.GetLockFile(filepath.Join(rundir, "mountpoints.lock"))
if err != nil {
return nil, err
@ -1023,7 +1124,7 @@ func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Dri
volatileDir = rundir
}
rlstore := layerStore{
lockfile: lockFile,
lockfile: newMultipleLockFile(lockFiles...),
mountsLockfile: mountsLockfile,
rundir: rundir,
jsonPath: [numLayerLocationIndex]string{
@ -1060,7 +1161,7 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roL
return nil, err
}
rlstore := layerStore{
lockfile: lockfile,
lockfile: newMultipleLockFile(lockfile),
mountsLockfile: nil,
rundir: rundir,
jsonPath: [numLayerLocationIndex]string{
@ -1112,7 +1213,7 @@ func (r *layerStore) Size(name string) (int64, error) {
// We use the presence of a non-empty digest as an indicator that the size value was intentionally set, and that
// a zero value is not just present because it was never set to anything else (which can happen if the layer was
// created by a version of this library that didn't keep track of digest and size information).
if layer.UncompressedDigest != "" {
if layer.TOCDigest != "" || layer.UncompressedDigest != "" {
return layer.UncompressedSize, nil
}
return -1, nil
@ -1201,6 +1302,9 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
if layer.UncompressedDigest != "" {
r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
}
if layer.TOCDigest != "" {
r.bytocsum[layer.TOCDigest] = append(r.bytocsum[layer.TOCDigest], layer.ID)
}
if err := r.saveFor(layer); err != nil {
if e := r.Delete(layer.ID); e != nil {
logrus.Errorf("While recovering from a failure to save layers, error deleting layer %#v: %v", id, e)
@ -1211,7 +1315,7 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
}
// Requires startWriting.
func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader) (layer *Layer, size int64, err error) {
func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader, slo *stagedLayerOptions) (layer *Layer, size int64, err error) {
if moreOptions == nil {
moreOptions = &LayerOptions{}
}
@ -1251,6 +1355,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
templateCompressedDigest digest.Digest
templateCompressedSize int64
templateUncompressedDigest digest.Digest
templateTOCDigest digest.Digest
templateUncompressedSize int64
templateCompressionType archive.Compression
templateUIDs, templateGIDs []uint32
@ -1263,6 +1368,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
}
templateMetadata = templateLayer.Metadata
templateIDMappings = idtools.NewIDMappingsFromMaps(templateLayer.UIDMap, templateLayer.GIDMap)
templateTOCDigest = templateLayer.TOCDigest
templateCompressedDigest, templateCompressedSize = templateLayer.CompressedDigest, templateLayer.CompressedSize
templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize
templateCompressionType = templateLayer.CompressionType
@ -1291,6 +1397,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
CompressedDigest: templateCompressedDigest,
CompressedSize: templateCompressedSize,
UncompressedDigest: templateUncompressedDigest,
TOCDigest: templateTOCDigest,
UncompressedSize: templateUncompressedSize,
CompressionType: templateCompressionType,
UIDs: templateUIDs,
@ -1402,6 +1509,11 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
cleanupFailureContext = "applying layer diff"
return nil, -1, err
}
} else if slo != nil {
if err := r.applyDiffFromStagingDirectory(layer.ID, slo.DiffOutput, slo.DiffOptions); err != nil {
cleanupFailureContext = "applying staged directory diff"
return nil, -1, err
}
} else {
// applyDiffWithOptions() would have updated r.bycompressedsum
// and r.byuncompressedsum for us, but if we used a template
@ -1413,6 +1525,9 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
if layer.UncompressedDigest != "" {
r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
}
if layer.TOCDigest != "" {
r.bytocsum[layer.TOCDigest] = append(r.bytocsum[layer.TOCDigest], layer.ID)
}
}
delete(layer.Flags, incompleteFlag)
@ -2007,9 +2122,16 @@ func (s *simpleGetCloser) Close() error {
// LOCKING BUG: See the comments in layerStore.Diff
func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) {
if getter, ok := r.driver.(drivers.DiffGetterDriver); ok {
return getter.DiffGetter(id)
fgc, err := getter.DiffGetter(id)
if err != nil {
return nil, err
}
if fgc != nil {
return fgc, nil
}
}
path, err := r.Mount(id, drivers.MountOpts{})
path, err := r.Mount(id, drivers.MountOpts{Options: []string{"ro"}})
if err != nil {
return nil, err
}
@ -2197,6 +2319,25 @@ func (r *layerStore) DiffSize(from, to string) (size int64, err error) {
return r.driver.DiffSize(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel)
}
func updateDigestMap(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) {
var newList []string
if oldvalue != "" {
for _, value := range (*m)[oldvalue] {
if value != id {
newList = append(newList, value)
}
}
if len(newList) > 0 {
(*m)[oldvalue] = newList
} else {
delete(*m, oldvalue)
}
}
if newvalue != "" {
(*m)[newvalue] = append((*m)[newvalue], id)
}
}
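updateDigestMap moves a layer ID between digest buckets, dropping the old bucket entirely once it empties; a package-internal sketch with illustrative digests (assuming fmt and strings are imported):

func exampleUpdateDigestMap() {
	oldD := digest.Digest("sha256:" + strings.Repeat("11", 32))
	newD := digest.Digest("sha256:" + strings.Repeat("22", 32))
	m := map[digest.Digest][]string{oldD: {"layer-a", "layer-b"}}
	// Move layer-a from the oldD bucket into the newD bucket.
	updateDigestMap(&m, oldD, newD, "layer-a")
	fmt.Println(m[oldD]) // [layer-b]
	fmt.Println(m[newD]) // [layer-a]
}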
// Requires startWriting.
func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error) {
return r.applyDiffWithOptions(to, nil, diff)
@ -2233,7 +2374,7 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
if layerOptions != nil && layerOptions.UncompressedDigest != "" &&
layerOptions.UncompressedDigest.Algorithm() == digest.Canonical {
uncompressedDigest = layerOptions.UncompressedDigest
} else {
} else if compression != archive.Uncompressed {
uncompressedDigester = digest.Canonical.Digester()
}
@ -2312,28 +2453,17 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
if uncompressedDigester != nil {
uncompressedDigest = uncompressedDigester.Digest()
}
updateDigestMap := func(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) {
var newList []string
if oldvalue != "" {
for _, value := range (*m)[oldvalue] {
if value != id {
newList = append(newList, value)
}
}
if len(newList) > 0 {
(*m)[oldvalue] = newList
} else {
delete(*m, oldvalue)
}
}
if newvalue != "" {
(*m)[newvalue] = append((*m)[newvalue], id)
}
if uncompressedDigest == "" && compression == archive.Uncompressed {
uncompressedDigest = compressedDigest
}
updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest, layer.ID)
layer.CompressedDigest = compressedDigest
layer.CompressedSize = compressedCounter.Count
if layerOptions != nil && layerOptions.OriginalDigest != "" && layerOptions.OriginalSize != nil {
layer.CompressedSize = *layerOptions.OriginalSize
} else {
layer.CompressedSize = compressedCounter.Count
}
updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest, layer.ID)
layer.UncompressedDigest = uncompressedDigest
layer.UncompressedSize = uncompressedCounter.Count
@ -2372,7 +2502,7 @@ func (r *layerStore) DifferTarget(id string) (string, error) {
}
// Requires startWriting.
func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error {
func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error {
ddriver, ok := r.driver.(drivers.DriverWithDiffer)
if !ok {
return ErrNotSupported
@ -2382,20 +2512,39 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
return ErrLayerUnknown
}
if options == nil {
options = &drivers.ApplyDiffOpts{
Mappings: r.layerMappings(layer),
MountLabel: layer.MountLabel,
options = &drivers.ApplyDiffWithDifferOpts{
ApplyDiffOpts: drivers.ApplyDiffOpts{
Mappings: r.layerMappings(layer),
MountLabel: layer.MountLabel,
},
Flags: nil,
}
}
err := ddriver.ApplyDiffFromStagingDirectory(layer.ID, layer.Parent, stagingDirectory, diffOutput, options)
err := ddriver.ApplyDiffFromStagingDirectory(layer.ID, layer.Parent, diffOutput, options)
if err != nil {
return err
}
layer.UIDs = diffOutput.UIDs
layer.GIDs = diffOutput.GIDs
updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, diffOutput.UncompressedDigest, layer.ID)
layer.UncompressedDigest = diffOutput.UncompressedDigest
updateDigestMap(&r.bytocsum, diffOutput.TOCDigest, diffOutput.TOCDigest, layer.ID)
layer.TOCDigest = diffOutput.TOCDigest
layer.UncompressedSize = diffOutput.Size
layer.Metadata = diffOutput.Metadata
if options != nil && options.Flags != nil {
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
}
for k, v := range options.Flags {
layer.Flags[k] = v
}
}
if err = r.saveFor(layer); err != nil {
return err
}
if len(diffOutput.TarSplit) != 0 {
tsdata := bytes.Buffer{}
compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
@ -2425,14 +2574,11 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
return err
}
}
if err = r.saveFor(layer); err != nil {
return err
}
return err
}
// Requires startWriting.
func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
ddriver, ok := r.driver.(drivers.DriverWithDiffer)
if !ok {
return nil, ErrNotSupported
@ -2448,9 +2594,11 @@ func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOp
return nil, ErrLayerUnknown
}
if options == nil {
options = &drivers.ApplyDiffOpts{
Mappings: r.layerMappings(layer),
MountLabel: layer.MountLabel,
options = &drivers.ApplyDiffWithDifferOpts{
ApplyDiffOpts: drivers.ApplyDiffOpts{
Mappings: r.layerMappings(layer),
MountLabel: layer.MountLabel,
},
}
}
output, err := ddriver.ApplyDiffWithDiffer(layer.ID, layer.Parent, options, differ)
@ -2494,6 +2642,11 @@ func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error
return r.layersByDigestMap(r.byuncompressedsum, d)
}
// Requires startReading or startWriting.
func (r *layerStore) LayersByTOCDigest(d digest.Digest) ([]Layer, error) {
return r.layersByDigestMap(r.bytocsum, d)
}
func closeAll(closes ...func() error) (rErr error) {
for _, f := range closes {
if err := f(); err != nil {

View file

@ -339,12 +339,43 @@ func (compression *Compression) Extension() string {
return ""
}
// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to
// prevent tar.FileInfoHeader from introspecting it and potentially calling into
// glibc.
type nosysFileInfo struct {
os.FileInfo
}
func (fi nosysFileInfo) Sys() interface{} {
// A Sys value of type *tar.Header is safe as it is system-independent.
// The tar.FileInfoHeader function copies the fields into the returned
// header without performing any OS lookups.
if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok {
return sys
}
return nil
}
// sysStatOverride, if non-nil, populates hdr from system-dependent fields of fi.
var sysStatOverride func(fi os.FileInfo, hdr *tar.Header) error
func fileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) {
if sysStatOverride == nil {
return tar.FileInfoHeader(fi, link)
}
hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link)
if err != nil {
return nil, err
}
return hdr, sysStatOverride(fi, hdr)
}
// FileInfoHeader creates a populated Header from fi.
// Compared to archive pkg this function fills in more information.
// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR),
// which the standard archive/tar stopped setting as of Go 1.9.
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
hdr, err := tar.FileInfoHeader(fi, link)
hdr, err := fileInfoHeaderNoLookups(fi, link)
if err != nil {
return nil, err
}
@ -385,7 +416,7 @@ func ReadUserXattrToTarHeader(path string, hdr *tar.Header) error {
return err
}
for _, key := range xattrs {
if strings.HasPrefix(key, "user.") {
if strings.HasPrefix(key, "user.") && !strings.HasPrefix(key, "user.overlay.") {
value, err := system.Lgetxattr(path, key)
if err != nil {
if errors.Is(err, system.E2BIG) {
@ -477,7 +508,7 @@ func (ta *tarAppender) addTarFile(path, name string) error {
}
}
if fi.Mode()&os.ModeSocket != 0 {
logrus.Warnf("archive: skipping %q since it is a socket", path)
logrus.Infof("archive: skipping %q since it is a socket", path)
return nil
}
@ -534,6 +565,10 @@ func (ta *tarAppender) addTarFile(path, name string) error {
if ta.ChownOpts != nil {
hdr.Uid = ta.ChownOpts.UID
hdr.Gid = ta.ChownOpts.GID
// Don't expose the user names from the local system; they probably don't match the ta.ChownOpts value anyway,
// and they unnecessarily give recipients of the tar file potentially private data.
hdr.Uname = ""
hdr.Gname = ""
}
maybeTruncateHeaderModTime(hdr)

View file

@ -15,6 +15,31 @@ import (
"golang.org/x/sys/unix"
)
func init() {
sysStatOverride = statUnix
}
// statUnix populates hdr from system-dependent fields of fi without performing
// any OS lookups.
// Adapted from Moby.
func statUnix(fi os.FileInfo, hdr *tar.Header) error {
s, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return nil
}
hdr.Uid = int(s.Uid)
hdr.Gid = int(s.Gid)
if s.Mode&unix.S_IFBLK != 0 ||
s.Mode&unix.S_IFCHR != 0 {
hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert
hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert
}
return nil
}
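With this override installed by init, the package's FileInfoHeader fills hdr.Uid and hdr.Gid straight from the Stat_t, so building a header never triggers a passwd/group lookup through glibc. A minimal sketch against any existing file (file path is illustrative):

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	fi, err := os.Lstat("/etc/hosts")
	if err != nil {
		panic(err)
	}
	hdr, err := archive.FileInfoHeader("etc/hosts", fi, "")
	if err != nil {
		panic(err)
	}
	// Uid/Gid came straight from the Stat_t via statUnix; no NSS lookup happened.
	fmt.Println(hdr.Uid, hdr.Gid, hdr.Mode)
}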
// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {

View file

@ -25,7 +25,7 @@ import (
const (
cacheKey = "chunked-manifest-cache"
cacheVersion = 1
cacheVersion = 2
digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
)
@ -207,9 +207,9 @@ func calculateHardLinkFingerprint(f *internal.FileMetadata) (string, error) {
return string(digester.Digest()), nil
}
// generateFileLocation generates a file location in the form $OFFSET@$PATH
func generateFileLocation(path string, offset uint64) []byte {
return []byte(fmt.Sprintf("%d@%s", offset, path))
// generateFileLocation generates a file location in the form $OFFSET:$LEN:$PATH
func generateFileLocation(path string, offset, len uint64) []byte {
return []byte(fmt.Sprintf("%d:%d:%s", offset, len, path))
}
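The new $OFFSET:$LEN:$PATH form is what findDigestInternal later splits on ':'; a package-internal sketch of a round trip with illustrative values (assuming fmt, strconv, and strings are imported):

func exampleFileLocation() {
	loc := generateFileLocation("usr/bin/env", 4096, 1024)
	// loc == []byte("4096:1024:usr/bin/env")
	parts := strings.SplitN(string(loc), ":", 3)
	off, _ := strconv.ParseInt(parts[0], 10, 64)  // 4096: offset within the layer
	size, _ := strconv.ParseInt(parts[1], 10, 64) // 1024: chunk length (currently unused on lookup)
	fmt.Println(off, size, parts[2])              // 4096 1024 usr/bin/env
}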
// generateTag generates a tag in the form $DIGEST$OFFSET@LEN.
@ -245,7 +245,7 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin
var tags []string
for _, k := range toc {
if k.Digest != "" {
location := generateFileLocation(k.Name, 0)
location := generateFileLocation(k.Name, 0, uint64(k.Size))
off := uint64(vdata.Len())
l := uint64(len(location))
@ -276,7 +276,7 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin
digestLen = len(k.Digest)
}
if k.ChunkDigest != "" {
location := generateFileLocation(k.Name, uint64(k.ChunkOffset))
location := generateFileLocation(k.Name, uint64(k.ChunkOffset), uint64(k.ChunkSize))
off := uint64(vdata.Len())
l := uint64(len(location))
d := generateTag(k.ChunkDigest, off, l)
@ -490,7 +490,9 @@ func findTag(digest string, metadata *metadata) (string, uint64, uint64) {
if digest == d {
startOff := i*metadata.tagLen + metadata.digestLen
parts := strings.Split(string(metadata.tags[startOff:(i+1)*metadata.tagLen]), "@")
off, _ := strconv.ParseInt(parts[0], 10, 64)
len, _ := strconv.ParseInt(parts[1], 10, 64)
return digest, uint64(off), uint64(len)
}
@ -507,12 +509,16 @@ func (c *layersCache) findDigestInternal(digest string) (string, string, int64,
defer c.mutex.RUnlock()
for _, layer := range c.layers {
digest, off, len := findTag(digest, layer.metadata)
digest, off, tagLen := findTag(digest, layer.metadata)
if digest != "" {
position := string(layer.metadata.vdata[off : off+len])
parts := strings.SplitN(position, "@", 2)
position := string(layer.metadata.vdata[off : off+tagLen])
parts := strings.SplitN(position, ":", 3)
if len(parts) != 3 {
continue
}
offFile, _ := strconv.ParseInt(parts[0], 10, 64)
return layer.target, parts[1], offFile, nil
// parts[1] is the chunk length, currently unused.
return layer.target, parts[2], offFile, nil
}
}
@ -578,7 +584,10 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
return byteSliceAsString(buf.Bytes()[from:to])
}
iter = jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
pool := iter.Pool()
pool.ReturnIterator(iter)
iter = pool.BorrowIterator(manifest)
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
if strings.ToLower(field) == "version" {
toc.Version = iter.ReadInt()
@ -657,8 +666,17 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
}
toc.Entries = append(toc.Entries, m)
}
break
}
// validate there is no extra data in the provided input. This is a security measure to ensure
// that the digest we calculate for the TOC does not also cover trailing data after the TOC document.
if iter.Error != nil && iter.Error != io.EOF {
return nil, iter.Error
}
if iter.WhatIsNext() != jsoniter.InvalidValue || !errors.Is(iter.Error, io.EOF) {
return nil, fmt.Errorf("unexpected data after manifest")
}
toc.StringsBuf = buf
return &toc, nil
}

View file

@ -257,8 +257,8 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, ann
return decodedBlob, decodedTarSplit, int64(footerData.Offset), err
}
func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedUncompressedChecksum string) ([]byte, error) {
d, err := digest.Parse(expectedUncompressedChecksum)
func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedCompressedChecksum string) ([]byte, error) {
d, err := digest.Parse(expectedCompressedChecksum)
if err != nil {
return nil, err
}

View file

@ -420,6 +420,14 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
zstdWriter.Close()
return err
}
// make sure the entire tarball is flushed to the output as it might contain
// some trailing zeros that affect the checksum.
if _, err := io.Copy(zstdWriter, its); err != nil {
zstdWriter.Close()
return err
}
if err := zstdWriter.Flush(); err != nil {
zstdWriter.Close()
return err
@ -452,12 +460,12 @@ type zstdChunkedWriter struct {
}
func (w zstdChunkedWriter) Close() error {
err := <-w.tarSplitErr
if err != nil {
w.tarSplitOut.Close()
errClose := w.tarSplitOut.Close()
if err := <-w.tarSplitErr; err != nil && err != io.EOF {
return err
}
return w.tarSplitOut.Close()
return errClose
}
func (w zstdChunkedWriter) Write(p []byte) (int, error) {

View file

@ -4,6 +4,7 @@ import (
"bufio"
"fmt"
"io"
"path/filepath"
"strings"
"time"
"unicode"
@ -93,13 +94,18 @@ func getStMode(mode uint32, typ string) (uint32, error) {
return mode, nil
}
func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error {
path := entry.Name
if path == "" {
func sanitizeName(name string) string {
path := filepath.Clean(name)
if path == "." {
path = "/"
} else if path[0] != '/' {
path = "/" + path
}
return path
}
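sanitizeName canonicalizes every TOC name to an absolute, cleaned path, so the comparisons below never have to special-case "", ".", or duplicate separators; a few illustrative cases in a package-internal sketch (assuming fmt is imported):

func exampleSanitizeName() {
	fmt.Println(sanitizeName(""))            // "/"
	fmt.Println(sanitizeName("."))           // "/"
	fmt.Println(sanitizeName("./usr//bin/")) // "/usr/bin"
	fmt.Println(sanitizeName("a/b/../c"))    // "/a/c"
}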
func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error {
path := sanitizeName(entry.Name)
if _, err := fmt.Fprint(out, escaped(path, ESCAPE_STANDARD)); err != nil {
return err
@ -133,9 +139,10 @@ func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]stri
var payload string
if entry.Linkname != "" {
payload = entry.Linkname
if entry.Type == internal.TypeLink && payload[0] != '/' {
payload = "/" + payload
if entry.Type == internal.TypeSymlink {
payload = entry.Linkname
} else {
payload = sanitizeName(entry.Linkname)
}
} else {
if len(entry.Digest) > 10 {
@ -198,10 +205,13 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader,
if e.Linkname == "" {
continue
}
if e.Type == internal.TypeSymlink {
continue
}
links[e.Linkname] = links[e.Linkname] + 1
}
if len(toc.Entries) == 0 || (toc.Entries[0].Name != "" && toc.Entries[0].Name != "/") {
if len(toc.Entries) == 0 || (sanitizeName(toc.Entries[0].Name) != "/") {
root := &internal.FileMetadata{
Name: "/",
Type: internal.TypeDir,

View file

@ -25,6 +25,7 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chunked/compressor"
"github.com/containers/storage/pkg/chunked/internal"
"github.com/containers/storage/pkg/fsverity"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/types"
@ -40,12 +41,14 @@ import (
const (
maxNumberMissingChunks = 1024
autoMergePartsThreshold = 128 // if the gap between two ranges is below this threshold, automatically merge them.
newFileFlags = (unix.O_CREAT | unix.O_TRUNC | unix.O_EXCL | unix.O_WRONLY)
containersOverrideXattr = "user.containers.override_stat"
bigDataKey = "zstd-chunked-manifest"
chunkedData = "zstd-chunked-data"
chunkedLayerDataKey = "zstd-chunked-layer-data"
tocKey = "toc"
fsVerityDigestsKey = "fs-verity-digests"
fileTypeZstdChunked = iota
fileTypeEstargz
@ -71,11 +74,9 @@ type chunkedDiffer struct {
zstdReader *zstd.Decoder
rawReader io.Reader
// contentDigest is the digest of the uncompressed content
// (diffID) when the layer is fully retrieved. If the layer
// is not fully retrieved, instead of using the digest of the
// uncompressed content, it refers to the digest of the TOC.
contentDigest digest.Digest
// tocDigest is the digest of the TOC document when the layer
// is partially pulled.
tocDigest digest.Digest
// convertedToZstdChunked is set to true if the layer needs to
// be converted to the zstd:chunked format before it can be
@ -86,9 +87,18 @@ type chunkedDiffer struct {
// the layer are trusted and should not be validated.
skipValidation bool
// blobDigest is the digest of the whole compressed layer. It is used if
// convertToZstdChunked to validate a layer when it is converted since there
// is no TOC referenced by the manifest.
blobDigest digest.Digest
blobSize int64
storeOpts *types.StoreOptions
useFsVerity graphdriver.DifferFsVerity
fsVerityDigests map[string]string
fsVerityMutex sync.Mutex
}
var xattrsToIgnore = map[string]interface{}{
@ -188,33 +198,7 @@ func (f *seekableFile) GetBlobAt(chunks []ImageSourceChunk) (chan io.ReadCloser,
return streams, errs, nil
}
func convertTarToZstdChunked(destDirectory string, blobSize int64, iss ImageSourceSeekable) (*seekableFile, digest.Digest, map[string]string, error) {
var payload io.ReadCloser
var streams chan io.ReadCloser
var errs chan error
var err error
chunksToRequest := []ImageSourceChunk{
{
Offset: 0,
Length: uint64(blobSize),
},
}
streams, errs, err = iss.GetBlobAt(chunksToRequest)
if err != nil {
return nil, "", nil, err
}
select {
case p := <-streams:
payload = p
case err := <-errs:
return nil, "", nil, err
}
if payload == nil {
return nil, "", nil, errors.New("invalid stream returned")
}
func convertTarToZstdChunked(destDirectory string, payload *os.File) (*seekableFile, digest.Digest, map[string]string, error) {
diff, err := archive.DecompressStream(payload)
if err != nil {
return nil, "", nil, err
@ -235,10 +219,8 @@ func convertTarToZstdChunked(destDirectory string, blobSize int64, iss ImageSour
return nil, "", nil, err
}
digester := digest.Canonical.Digester()
hash := digester.Hash()
if _, err := io.Copy(io.MultiWriter(chunked, hash), diff); err != nil {
convertedOutputDigester := digest.Canonical.Digester()
if _, err := io.Copy(io.MultiWriter(chunked, convertedOutputDigester.Hash()), diff); err != nil {
f.Close()
return nil, "", nil, err
}
@ -249,27 +231,39 @@ func convertTarToZstdChunked(destDirectory string, blobSize int64, iss ImageSour
is := seekableFile{
file: f,
}
return &is, digester.Digest(), newAnnotations, nil
return &is, convertedOutputDigester.Digest(), newAnnotations, nil
}
// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
storeOpts, err := types.DefaultStoreOptionsAutoDetectUID()
func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
storeOpts, err := types.DefaultStoreOptions()
if err != nil {
return nil, err
}
if _, ok := annotations[internal.ManifestChecksumKey]; ok {
if !parseBooleanPullOption(&storeOpts, "enable_partial_images", true) {
return nil, errors.New("enable_partial_images not configured")
}
_, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey]
_, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation]
if hasZstdChunkedTOC && hasEstargzTOC {
return nil, errors.New("both zstd:chunked and eStargz TOC found")
}
if hasZstdChunkedTOC {
return makeZstdChunkedDiffer(ctx, store, blobSize, annotations, iss, &storeOpts)
}
if _, ok := annotations[estargz.TOCJSONDigestAnnotation]; ok {
if hasEstargzTOC {
return makeEstargzChunkedDiffer(ctx, store, blobSize, annotations, iss, &storeOpts)
}
return makeConvertFromRawDiffer(ctx, store, blobSize, annotations, iss, &storeOpts)
return makeConvertFromRawDiffer(ctx, store, blobDigest, blobSize, annotations, iss, &storeOpts)
}
func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) {
func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) {
if !parseBooleanPullOption(storeOpts, "convert_images", false) {
return nil, errors.New("convert_images not configured")
}
@ -280,6 +274,8 @@ func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobSize
}
return &chunkedDiffer{
fsVerityDigests: make(map[string]string),
blobDigest: blobDigest,
blobSize: blobSize,
convertToZstdChunked: true,
copyBuffer: makeCopyBuffer(),
@ -299,22 +295,23 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
return nil, err
}
contentDigest, err := digest.Parse(annotations[internal.ManifestChecksumKey])
tocDigest, err := digest.Parse(annotations[internal.ManifestChecksumKey])
if err != nil {
return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[internal.ManifestChecksumKey], err)
}
return &chunkedDiffer{
blobSize: blobSize,
contentDigest: contentDigest,
copyBuffer: makeCopyBuffer(),
fileType: fileTypeZstdChunked,
layersCache: layersCache,
manifest: manifest,
storeOpts: storeOpts,
stream: iss,
tarSplit: tarSplit,
tocOffset: tocOffset,
fsVerityDigests: make(map[string]string),
blobSize: blobSize,
tocDigest: tocDigest,
copyBuffer: makeCopyBuffer(),
fileType: fileTypeZstdChunked,
layersCache: layersCache,
manifest: manifest,
storeOpts: storeOpts,
stream: iss,
tarSplit: tarSplit,
tocOffset: tocOffset,
}, nil
}
@ -328,21 +325,22 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize
return nil, err
}
contentDigest, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation])
tocDigest, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation])
if err != nil {
return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[estargz.TOCJSONDigestAnnotation], err)
}
return &chunkedDiffer{
blobSize: blobSize,
contentDigest: contentDigest,
copyBuffer: makeCopyBuffer(),
fileType: fileTypeEstargz,
layersCache: layersCache,
manifest: manifest,
storeOpts: storeOpts,
stream: iss,
tocOffset: tocOffset,
fsVerityDigests: make(map[string]string),
blobSize: blobSize,
tocDigest: tocDigest,
copyBuffer: makeCopyBuffer(),
fileType: fileTypeEstargz,
layersCache: layersCache,
manifest: manifest,
storeOpts: storeOpts,
stream: iss,
tocOffset: tocOffset,
}, nil
}
@ -939,6 +937,8 @@ func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileT
return nil
}
type recordFsVerityFunc func(string, *os.File) error
type destinationFile struct {
digester digest.Digester
dirfd int
@ -948,9 +948,10 @@ type destinationFile struct {
options *archive.TarOptions
skipValidation bool
to io.Writer
recordFsVerity recordFsVerityFunc
}
func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions, skipValidation bool) (*destinationFile, error) {
func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions, skipValidation bool, recordFsVerity recordFsVerityFunc) (*destinationFile, error) {
file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0)
if err != nil {
return nil, err
@ -977,15 +978,32 @@ func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *ar
options: options,
dirfd: dirfd,
skipValidation: skipValidation,
recordFsVerity: recordFsVerity,
}, nil
}
func (d *destinationFile) Close() (Err error) {
defer func() {
err := d.file.Close()
var roFile *os.File
var err error
if d.recordFsVerity != nil {
roFile, err = reopenFileReadOnly(d.file)
if err == nil {
defer roFile.Close()
} else if Err == nil {
Err = err
}
}
err = d.file.Close()
if Err == nil {
Err = err
}
if Err == nil && roFile != nil {
Err = d.recordFsVerity(d.metadata.Name, roFile)
}
}()
if !d.skipValidation {
@ -1008,6 +1026,35 @@ func closeDestinationFiles(files chan *destinationFile, errors chan error) {
close(errors)
}
func (c *chunkedDiffer) recordFsVerity(path string, roFile *os.File) error {
if c.useFsVerity == graphdriver.DifferFsVerityDisabled {
return nil
}
// fsverity.EnableVerity doesn't return an error if fs-verity was already
// enabled on the file.
err := fsverity.EnableVerity(path, int(roFile.Fd()))
if err != nil {
if c.useFsVerity == graphdriver.DifferFsVerityRequired {
return err
}
// If it is not required, ignore the error if the filesystem does not support it.
if errors.Is(err, unix.ENOTSUP) || errors.Is(err, unix.ENOTTY) {
return nil
}
}
verity, err := fsverity.MeasureVerity(path, int(roFile.Fd()))
if err != nil {
return err
}
c.fsVerityMutex.Lock()
c.fsVerityDigests[path] = verity
c.fsVerityMutex.Unlock()
return nil
}
func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) {
var destFile *destinationFile
@ -1095,7 +1142,11 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan
}
filesToClose <- destFile
}
destFile, err = openDestinationFile(dirfd, mf.File, options, c.skipValidation)
recordFsVerity := c.recordFsVerity
if c.useFsVerity == graphdriver.DifferFsVerityDisabled {
recordFsVerity = nil
}
destFile, err = openDestinationFile(dirfd, mf.File, options, c.skipValidation, recordFsVerity)
if err != nil {
Err = err
goto exit
@ -1130,22 +1181,12 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan
}
func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
getGap := func(missingParts []missingPart, i int) int {
getGap := func(missingParts []missingPart, i int) uint64 {
prev := missingParts[i-1].SourceChunk.Offset + missingParts[i-1].SourceChunk.Length
return int(missingParts[i].SourceChunk.Offset - prev)
}
getCost := func(missingParts []missingPart, i int) int {
cost := getGap(missingParts, i)
if missingParts[i-1].OriginFile != nil {
cost += int(missingParts[i-1].SourceChunk.Length)
}
if missingParts[i].OriginFile != nil {
cost += int(missingParts[i].SourceChunk.Length)
}
return cost
return missingParts[i].SourceChunk.Offset - prev
}
// simple case: merge chunks from the same file.
// simple case: merge chunks from the same file. Useful to reduce the number of parts to work with later.
newMissingParts := missingParts[0:1]
prevIndex := 0
for i := 1; i < len(missingParts); i++ {
@ -1165,28 +1206,50 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
}
missingParts = newMissingParts
if len(missingParts) <= target {
return missingParts
type gap struct {
from int
to int
cost uint64
}
// this implementation doesn't account for duplicates, so it could merge
// more than necessary to reach the specified target. Since target itself
// is a heuristic value, it doesn't matter.
costs := make([]int, len(missingParts)-1)
for i := 1; i < len(missingParts); i++ {
costs[i-1] = getCost(missingParts, i)
var requestGaps []gap
lastOffset := int(-1)
numberSourceChunks := 0
for i, c := range missingParts {
if c.OriginFile != nil || c.Hole {
// it does not require a network request
continue
}
numberSourceChunks++
if lastOffset >= 0 {
prevEnd := missingParts[lastOffset].SourceChunk.Offset + missingParts[lastOffset].SourceChunk.Length
cost := c.SourceChunk.Offset - prevEnd
g := gap{
from: lastOffset,
to: i,
cost: cost,
}
requestGaps = append(requestGaps, g)
}
lastOffset = i
}
sort.Ints(costs)
toShrink := len(missingParts) - target
if toShrink >= len(costs) {
toShrink = len(costs) - 1
sort.Slice(requestGaps, func(i, j int) bool {
return requestGaps[i].cost < requestGaps[j].cost
})
toMergeMap := make([]bool, len(missingParts))
remainingToMerge := numberSourceChunks - target
for _, g := range requestGaps {
if remainingToMerge < 0 && g.cost > autoMergePartsThreshold {
continue
}
for i := g.from + 1; i <= g.to; i++ {
toMergeMap[i] = true
}
remainingToMerge--
}
targetValue := costs[toShrink]
newMissingParts = missingParts[0:1]
for i := 1; i < len(missingParts); i++ {
if getCost(missingParts, i) > targetValue {
if !toMergeMap[i] {
newMissingParts = append(newMissingParts, missingParts[i])
} else {
gap := getGap(missingParts, i)
@ -1218,6 +1281,7 @@ func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest st
}
}
missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks)
calculateChunksToRequest()
// There are some missing files. Prepare a multirange request for the missing chunks.
@ -1231,14 +1295,13 @@ func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest st
}
if _, ok := err.(ErrBadRequest); ok {
requested := len(missingParts)
// If the server cannot handle at least 64 chunks in a single request, just give up.
if requested < 64 {
if len(chunksToRequest) < 64 {
return err
}
// Merge more chunks to request
missingParts = mergeMissingChunks(missingParts, requested/2)
missingParts = mergeMissingChunks(missingParts, len(chunksToRequest)/2)
calculateChunksToRequest()
continue
}
@ -1426,15 +1489,39 @@ type findAndCopyFileOptions struct {
options *archive.TarOptions
}
func reopenFileReadOnly(f *os.File) (*os.File, error) {
path := fmt.Sprintf("/proc/self/fd/%d", f.Fd())
fd, err := unix.Open(path, unix.O_RDONLY|unix.O_CLOEXEC, 0)
if err != nil {
return nil, err
}
return os.NewFile(uintptr(fd), f.Name()), nil
}
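The /proc/self/fd reopen matters because FS_IOC_ENABLE_VERITY refuses files that still have writable descriptors open; the write path therefore derives a read-only descriptor first and drops the writable one. A package-internal sketch of that flow, assuming os and path/filepath are imported, a filesystem with fs-verity support, and an illustrative file name:

func writeThenMeasure(dir string) (string, error) {
	f, err := os.Create(filepath.Join(dir, "payload"))
	if err != nil {
		return "", err
	}
	if _, err := f.WriteString("data"); err != nil {
		f.Close()
		return "", err
	}
	ro, err := reopenFileReadOnly(f)
	if err != nil {
		f.Close()
		return "", err
	}
	defer ro.Close()
	// The writable descriptor must be gone before verity can be enabled.
	if err := f.Close(); err != nil {
		return "", err
	}
	if err := fsverity.EnableVerity(ro.Name(), int(ro.Fd())); err != nil {
		return "", err
	}
	return fsverity.MeasureVerity(ro.Name(), int(ro.Fd()))
}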
func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) {
finalizeFile := func(dstFile *os.File) error {
if dstFile != nil {
defer dstFile.Close()
if err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false); err != nil {
return err
}
if dstFile == nil {
return nil
}
return nil
err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false)
if err != nil {
dstFile.Close()
return err
}
var roFile *os.File
if c.useFsVerity != graphdriver.DifferFsVerityDisabled {
roFile, err = reopenFileReadOnly(dstFile)
}
dstFile.Close()
if err != nil {
return err
}
if roFile == nil {
return nil
}
defer roFile.Close()
return c.recordFsVerity(r.Name, roFile)
}
found, dstFile, _, err := findFileInOtherLayers(c.layersCache, r, dirfd, copyOptions.useHardLinks)
@ -1491,6 +1578,43 @@ func makeEntriesFlat(mergedEntries []internal.FileMetadata) ([]internal.FileMeta
return new, nil
}
func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, error) {
var payload io.ReadCloser
var streams chan io.ReadCloser
var errs chan error
var err error
chunksToRequest := []ImageSourceChunk{
{
Offset: 0,
Length: uint64(c.blobSize),
},
}
streams, errs, err = c.stream.GetBlobAt(chunksToRequest)
if err != nil {
return "", err
}
select {
case p := <-streams:
payload = p
case err := <-errs:
return "", err
}
if payload == nil {
return "", errors.New("invalid stream returned")
}
originalRawDigester := digest.Canonical.Digester()
r := io.TeeReader(payload, originalRawDigester.Hash())
// copy the entire tarball and compute its digest
_, err = io.Copy(destination, r)
return originalRawDigester.Digest(), err
}
func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, differOpts *graphdriver.DifferOptions) (graphdriver.DriverWithDifferOutput, error) {
defer c.layersCache.release()
defer func() {
@ -1499,11 +1623,40 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
}
}()
c.useFsVerity = differOpts.UseFsVerity
// stream to use for reading the zstd:chunked or Estargz file.
stream := c.stream
var uncompressedDigest digest.Digest
if c.convertToZstdChunked {
fileSource, diffID, annotations, err := convertTarToZstdChunked(dest, c.blobSize, c.stream)
fd, err := unix.Open(dest, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600)
if err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
blobFile := os.NewFile(uintptr(fd), "blob-file")
defer func() {
if blobFile != nil {
blobFile.Close()
}
}()
// calculate the checksum before accessing the file.
compressedDigest, err := c.copyAllBlobToFile(blobFile)
if err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
if compressedDigest != c.blobDigest {
return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("invalid digest to convert: expected %q, got %q", c.blobDigest, compressedDigest)
}
if _, err := blobFile.Seek(0, io.SeekStart); err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
fileSource, diffID, annotations, err := convertTarToZstdChunked(dest, blobFile)
if err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
@ -1511,6 +1664,10 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
// need to keep it open until the entire file is processed.
defer fileSource.Close()
// Close the file so that the file descriptor is released and the file is deleted.
blobFile.Close()
blobFile = nil
manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(fileSource, c.blobSize, annotations)
if err != nil {
return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("read zstd:chunked manifest: %w", err)
@ -1523,12 +1680,12 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
c.fileType = fileTypeZstdChunked
c.manifest = manifest
c.tarSplit = tarSplit
// since we retrieved the whole file and it was validated, use the diffID instead of the TOC digest.
c.contentDigest = diffID
c.tocOffset = tocOffset
// the file was generated by us and the digest for each file was already computed, no need to validate it again.
c.skipValidation = true
// since we retrieved the whole file and it was validated, set the uncompressed digest.
uncompressedDigest = diffID
}
lcd := chunkedLayerData{
@ -1557,11 +1714,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
Artifacts: map[string]interface{}{
tocKey: toc,
},
TOCDigest: c.contentDigest,
}
if !parseBooleanPullOption(c.storeOpts, "enable_partial_images", false) {
return output, errors.New("enable_partial_images not configured")
TOCDigest: c.tocDigest,
UncompressedDigest: uncompressedDigest,
}
// When the hard links deduplication is used, file attributes are ignored because setting them
@ -1678,13 +1832,17 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
mode := os.FileMode(r.Mode)
r.Name = filepath.Clean(r.Name)
r.Linkname = filepath.Clean(r.Linkname)
t, err := typeToTarType(r.Type)
if err != nil {
return output, err
}
r.Name = filepath.Clean(r.Name)
// do not modify the value of symlinks
if r.Linkname != "" && t != tar.TypeSymlink {
r.Linkname = filepath.Clean(r.Linkname)
}
if whiteoutConverter != nil {
hdr := archivetar.Header{
Typeflag: t,
@ -1730,6 +1888,9 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
}
case tar.TypeDir:
if r.Name == "" || r.Name == "." {
output.RootDirMode = &mode
}
if err := safeMkdir(dirfd, mode, r.Name, &r, options); err != nil {
return output, err
}
@ -1851,7 +2012,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
}
// There are some missing files. Prepare a multirange request for the missing chunks.
if len(missingParts) > 0 {
missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks)
if err := c.retrieveMissingFiles(stream, dest, dirfd, missingParts, options); err != nil {
return output, err
}
@ -1867,6 +2027,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize))
}
output.Artifacts[fsVerityDigestsKey] = c.fsVerityDigests
return output, nil
}
@ -1926,7 +2088,10 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
e.Chunks = make([]*internal.FileMetadata, nChunks+1)
for j := 0; j <= nChunks; j++ {
e.Chunks[j] = &entries[i+j]
// we need a copy here, otherwise we override the
// .Size later
copy := entries[i+j]
e.Chunks[j] = &copy
e.EndOffset = entries[i+j].EndOffset
}
i += nChunks

View file

@ -9,9 +9,10 @@ import (
storage "github.com/containers/storage"
graphdriver "github.com/containers/storage/drivers"
digest "github.com/opencontainers/go-digest"
)
// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
return nil, errors.New("format not supported on this system")
}

View file

@ -0,0 +1,41 @@
package toc
import (
"errors"
"github.com/containers/storage/pkg/chunked/internal"
digest "github.com/opencontainers/go-digest"
)
// tocJSONDigestAnnotation is the annotation key for the digest of the estargz
// TOC JSON.
// It is defined in github.com/containerd/stargz-snapshotter/estargz as TOCJSONDigestAnnotation
// Duplicate it here to avoid a dependency on the package.
const tocJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
// GetTOCDigest returns the digest of the TOC as recorded in the annotations.
// This function retrieves a digest that represents the content of a
// table of contents (TOC) from the image's annotations.
// This is an experimental feature and may be changed/removed in the future.
func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) {
d1, ok1 := annotations[tocJSONDigestAnnotation]
d2, ok2 := annotations[internal.ManifestChecksumKey]
switch {
case ok1 && ok2:
return nil, errors.New("both zstd:chunked and eStargz TOC found")
case ok1:
d, err := digest.Parse(d1)
if err != nil {
return nil, err
}
return &d, nil
case ok2:
d, err := digest.Parse(d2)
if err != nil {
return nil, err
}
return &d, nil
default:
return nil, nil
}
}
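A caller that only has the layer annotations can resolve the TOC digest without caring which format produced it; a minimal sketch using the estargz annotation key quoted above, with the well-known empty-sha256 value standing in as an illustrative digest:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/chunked/toc"
)

func main() {
	annotations := map[string]string{
		"containerd.io/snapshot/stargz/toc.digest": "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
	}
	d, err := toc.GetTOCDigest(annotations)
	if err != nil {
		panic(err)
	}
	if d != nil {
		fmt.Println("TOC digest:", *d)
	}
}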

View file

@ -97,6 +97,8 @@ type OverlayOptionsConfig struct {
Inodes string `toml:"inodes,omitempty"`
// Do not create a bind mount on the storage home
SkipMountHome string `toml:"skip_mount_home,omitempty"`
// Specify whether composefs must be used to mount the data layers
UseComposefs string `toml:"use_composefs,omitempty"`
// ForceMask indicates the permissions mask (e.g. "0755") to use for new
// files and directories
ForceMask string `toml:"force_mask,omitempty"`
@ -147,6 +149,9 @@ type OptionsConfig struct {
// ignored when building an image.
IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"`
// Specify whether composefs must be used to mount the data layers
UseComposefs string `toml:"use_composefs,omitempty"`
// ForceMask indicates the permissions mask (e.g. "0755") to use for new
// files and directories.
ForceMask os.FileMode `toml:"force_mask,omitempty"`
@ -283,6 +288,7 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string {
}
case "overlay", "overlay2":
// Specify whether composefs must be used to mount the data layers
if options.Overlay.IgnoreChownErrors != "" {
doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Overlay.IgnoreChownErrors))
} else if options.IgnoreChownErrors != "" {
@ -316,6 +322,9 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string {
} else if options.ForceMask != 0 {
doptions = append(doptions, fmt.Sprintf("%s.force_mask=%s", driverName, options.ForceMask))
}
if options.Overlay.UseComposefs != "" {
doptions = append(doptions, fmt.Sprintf("%s.use_composefs=%s", driverName, options.Overlay.UseComposefs))
}
case "vfs":
if options.Vfs.IgnoreChownErrors != "" {
doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Vfs.IgnoreChownErrors))

View file

@ -0,0 +1,45 @@
package fsverity
import (
"errors"
"fmt"
"syscall"
"unsafe"
"golang.org/x/sys/unix"
)
// verityDigest struct represents the digest used for verifying the integrity of a file.
type verityDigest struct {
Fsv unix.FsverityDigest
Buf [64]byte
}
// EnableVerity enables the verity feature on a file represented by the file descriptor 'fd'. The file must be opened
// in read-only mode.
// The 'description' parameter is a human-readable description of the file.
func EnableVerity(description string, fd int) error {
enableArg := unix.FsverityEnableArg{
Version: 1,
Hash_algorithm: unix.FS_VERITY_HASH_ALG_SHA256,
Block_size: 4096,
}
_, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_ENABLE_VERITY), uintptr(unsafe.Pointer(&enableArg)))
if e1 != 0 && !errors.Is(e1, unix.EEXIST) {
return fmt.Errorf("failed to enable verity for %q: %w", description, e1)
}
return nil
}
// MeasureVerity measures and returns the verity digest for the file represented by 'fd'.
// The 'description' parameter is a human-readable description of the file.
func MeasureVerity(description string, fd int) (string, error) {
var digest verityDigest
digest.Fsv.Size = 64
_, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_MEASURE_VERITY), uintptr(unsafe.Pointer(&digest)))
if e1 != 0 {
return "", fmt.Errorf("failed to measure verity for %q: %w", description, e1)
}
return fmt.Sprintf("%x", digest.Buf[:digest.Fsv.Size]), nil
}
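
A hedged usage sketch for the new fs-verity helpers (the package path pkg/fsverity and the file path are assumptions; fs-verity additionally needs kernel and filesystem support, e.g. ext4 created with the verity feature):

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/fsverity"
)

func main() {
	// The descriptor must be read-only for FS_IOC_ENABLE_VERITY.
	f, err := os.Open("/var/lib/containers/example-blob") // illustrative path
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	if err := fsverity.EnableVerity("example blob", int(f.Fd())); err != nil {
		fmt.Println(err)
		return
	}
	sum, err := fsverity.MeasureVerity("example blob", int(f.Fd()))
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("fs-verity digest:", sum)
}
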

View file

@ -0,0 +1,21 @@
//go:build !linux
// +build !linux
package fsverity
import (
"fmt"
)
// EnableVerity enables the verity feature on a file represented by the file descriptor 'fd'. The file must be opened
// in read-only mode.
// The 'description' parameter is a human-readable description of the file.
func EnableVerity(description string, fd int) error {
return fmt.Errorf("fs-verity is not supported on this platform")
}
// MeasureVerity measures and returns the verity digest for the file represented by 'fd'.
// The 'description' parameter is a human-readable description of the file.
func MeasureVerity(description string, fd int) (string, error) {
return "", fmt.Errorf("fs-verity is not supported on this platform")
}

View file

@ -6,21 +6,6 @@ import (
"path/filepath"
)
// GetConfigHome returns XDG_CONFIG_HOME.
// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set.
//
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
func GetConfigHome() (string, error) {
if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" {
return xdgConfigHome, nil
}
home := Get()
if home == "" {
return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
}
return filepath.Join(home, ".config"), nil
}
// GetDataHome returns XDG_DATA_HOME.
// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set.
//

View file

@ -1,5 +1,5 @@
//go:build !linux && !darwin && !freebsd
// +build !linux,!darwin,!freebsd
//go:build !linux && !darwin && !freebsd && !windows
// +build !linux,!darwin,!freebsd,!windows
package homedir
@ -8,6 +8,8 @@ package homedir
import (
"errors"
"os"
"path/filepath"
)
// GetRuntimeDir is unsupported on non-linux system.
@ -19,3 +21,18 @@ func GetRuntimeDir() (string, error) {
func StickRuntimeDirContents(files []string) ([]string, error) {
return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system")
}
// GetConfigHome returns XDG_CONFIG_HOME.
// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set.
//
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
func GetConfigHome() (string, error) {
if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" {
return xdgConfigHome, nil
}
home := Get()
if home == "" {
return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
}
return filepath.Join(home, ".config"), nil
}

View file

@ -7,12 +7,16 @@ package homedir
// NOTE: this package has originally been copied from github.com/docker/docker.
import (
"errors"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"github.com/containers/storage/pkg/unshare"
"github.com/sirupsen/logrus"
)
// Key returns the env var name for the user's home dir based on
@ -40,18 +44,6 @@ func GetShortcutString() string {
return "~"
}
// GetRuntimeDir returns XDG_RUNTIME_DIR.
// XDG_RUNTIME_DIR is typically configured via pam_systemd.
// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set.
//
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
func GetRuntimeDir() (string, error) {
if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" {
return filepath.EvalSymlinks(xdgRuntimeDir)
}
return "", errors.New("could not get XDG_RUNTIME_DIR")
}
// StickRuntimeDirContents sets the sticky bit on files that are under
// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system.
//
@ -94,3 +86,98 @@ func stick(f string) error {
m |= os.ModeSticky
return os.Chmod(f, m)
}
var (
rootlessConfigHomeDirError error
rootlessConfigHomeDirOnce sync.Once
rootlessConfigHomeDir string
rootlessRuntimeDirOnce sync.Once
rootlessRuntimeDir string
)
// isWriteableOnlyByOwner checks that the specified permission mask allows write
// access only to the owner, i.e. the owner has full rwx access while neither
// group nor others have write access.
func isWriteableOnlyByOwner(perm os.FileMode) bool {
return (perm & 0o722) == 0o700
}
// GetConfigHome returns XDG_CONFIG_HOME.
// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set.
//
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
func GetConfigHome() (string, error) {
rootlessConfigHomeDirOnce.Do(func() {
cfgHomeDir := os.Getenv("XDG_CONFIG_HOME")
if cfgHomeDir == "" {
home := Get()
resolvedHome, err := filepath.EvalSymlinks(home)
if err != nil {
rootlessConfigHomeDirError = fmt.Errorf("cannot resolve %s: %w", home, err)
return
}
tmpDir := filepath.Join(resolvedHome, ".config")
_ = os.MkdirAll(tmpDir, 0o700)
st, err := os.Stat(tmpDir)
if err != nil {
rootlessConfigHomeDirError = err
return
} else if int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() {
cfgHomeDir = tmpDir
} else {
rootlessConfigHomeDirError = fmt.Errorf("path %q exists and it is not owned by the current user", tmpDir)
return
}
}
rootlessConfigHomeDir = cfgHomeDir
})
return rootlessConfigHomeDir, rootlessConfigHomeDirError
}
// GetRuntimeDir returns a directory suitable to store runtime files.
// The function will try to use the XDG_RUNTIME_DIR env variable if it is set.
// XDG_RUNTIME_DIR is typically configured via pam_systemd.
// If XDG_RUNTIME_DIR is not set, GetRuntimeDir will try to find a suitable
// directory for the current user.
//
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
func GetRuntimeDir() (string, error) {
var rootlessRuntimeDirError error
rootlessRuntimeDirOnce.Do(func() {
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
if runtimeDir != "" {
rootlessRuntimeDir, rootlessRuntimeDirError = filepath.EvalSymlinks(runtimeDir)
return
}
uid := strconv.Itoa(unshare.GetRootlessUID())
if runtimeDir == "" {
tmpDir := filepath.Join("/run", "user", uid)
if err := os.MkdirAll(tmpDir, 0o700); err != nil {
logrus.Debug(err)
}
st, err := os.Lstat(tmpDir)
if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) {
runtimeDir = tmpDir
}
}
if runtimeDir == "" {
tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("storage-run-%s", uid))
if err := os.MkdirAll(tmpDir, 0o700); err != nil {
logrus.Debug(err)
}
st, err := os.Lstat(tmpDir)
if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) {
runtimeDir = tmpDir
} else {
rootlessRuntimeDirError = fmt.Errorf("path %q exists and it is not writeable only by the current user", tmpDir)
return
}
}
rootlessRuntimeDir = runtimeDir
})
return rootlessRuntimeDir, rootlessRuntimeDirError
}
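
A short consumer sketch of the relocated helpers (nothing beyond the API added in the hunk above is assumed):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/homedir"
)

func main() {
	// Falls back to /run/user/$UID or $TMPDIR/storage-run-$UID when
	// XDG_RUNTIME_DIR is unset, per the logic above.
	runDir, err := homedir.GetRuntimeDir()
	if err != nil {
		fmt.Println("runtime dir:", err)
		return
	}
	cfgDir, err := homedir.GetConfigHome()
	if err != nil {
		fmt.Println("config home:", err)
		return
	}
	fmt.Println(runDir, cfgDir)
}
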

View file

@ -5,6 +5,7 @@ package homedir
import (
"os"
"path/filepath"
)
// Key returns the env var name for the user's home dir based on
@ -25,8 +26,36 @@ func Get() string {
return home
}
// GetConfigHome returns the configuration directory of the current user,
// derived from the user's home directory on this platform.
// Returned path should be used with "path/filepath" to form new paths.
func GetConfigHome() (string, error) {
return filepath.Join(Get(), ".config"), nil
}
// GetShortcutString returns the string that is shortcut to user's home directory
// in the native shell of the platform running on.
func GetShortcutString() string {
return "%USERPROFILE%" // be careful while using in format functions
}
// StickRuntimeDirContents is a no-op on Windows
func StickRuntimeDirContents(files []string) ([]string, error) {
return nil, nil
}
// GetRuntimeDir returns a directory suitable to store runtime files.
// The function will try to use the XDG_RUNTIME_DIR env variable if it is set.
// XDG_RUNTIME_DIR is typically configured via pam_systemd.
// If XDG_RUNTIME_DIR is not set, GetRuntimeDir will try to find a suitable
// directory for the current user.
//
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
func GetRuntimeDir() (string, error) {
data, err := GetDataHome()
if err != nil {
return "", err
}
runtimeDir := filepath.Join(data, "containers", "storage")
return runtimeDir, nil
}

View file

@ -14,7 +14,7 @@ import (
"syscall"
"github.com/containers/storage/pkg/system"
"github.com/opencontainers/runc/libcontainer/user"
"github.com/moby/sys/user"
)
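
The runc libcontainer/user package moved to moby/sys/user with the same API; a hedged drop-in sketch (LookupUid keeps its old signature, as far as this bump relies on it):

package example

import (
	"fmt"

	user "github.com/moby/sys/user"
)

func describeUID(uid int) {
	// Same lookup that previously went through opencontainers/runc/libcontainer/user.
	u, err := user.LookupUid(uid)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(u.Name, u.Home)
}
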
var (

View file

@ -17,7 +17,7 @@ const (
// IsRootless tells us if we are running in rootless mode
func IsRootless() bool {
return false
return os.Getuid() != 0
}
// GetRootlessUID returns the UID of the user in the parent userNS

View file

@ -59,7 +59,7 @@ additionalimagestores = [
# can deduplicate pulling of content, disk storage of content and can allow the
# kernel to use less memory when running containers.
# containers/storage supports three keys
# containers/storage supports four keys
# * enable_partial_images="true" | "false"
# Tells containers/storage to look for files previously pulled in storage
# rather than always pulling them from the container registry.
@ -70,7 +70,12 @@ additionalimagestores = [
# Tells containers/storage where an ostree repository exists that might have
# previously pulled content which can be used when attempting to avoid
# pulling content from the container registry
pull_options = {enable_partial_images = "false", use_hard_links = "false", ostree_repos=""}
# * convert_images = "false" | "true"
# If set to true, containers/storage will convert images to a
# format compatible with partial pulls in order to take advantage
# of local deduplication and hard linking. It is an expensive
# operation so it is not enabled by default.
pull_options = {enable_partial_images = "true", use_hard_links = "false", ostree_repos=""}
# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
# a container, to the UIDs/GIDs as they should appear outside of the container,
@ -130,6 +135,9 @@ mountopt = "nodev"
# Set to skip a PRIVATE bind mount on the storage home directory.
# skip_mount_home = "false"
# Set to use composefs to mount data layers with overlay.
# use_composefs = "false"
# Size is used to set a maximum size of the container image.
# size = ""

View file

@ -96,6 +96,9 @@ mountopt = "nodev"
# Set to skip a PRIVATE bind mount on the storage home directory.
# skip_mount_home = "false"
# Set to use composefs to mount data layers with overlay.
# use_composefs = "false"
# Size is used to set a maximum size of the container image.
# size = ""

View file

@ -1,6 +1,7 @@
package storage
import (
_ "embed"
"encoding/base64"
"errors"
"fmt"
@ -10,6 +11,7 @@ import (
"reflect"
"strings"
"sync"
"syscall"
"time"
// register all of the built-in drivers
@ -69,6 +71,19 @@ type metadataStore interface {
rwMetadataStore
}
// ApplyStagedLayerOptions contains options to pass to ApplyStagedLayer
type ApplyStagedLayerOptions struct {
ID string // Mandatory
ParentLayer string // Optional
Names []string // Optional
MountLabel string // Optional
Writeable bool // Optional
LayerOptions *LayerOptions // Optional
DiffOutput *drivers.DriverWithDifferOutput // Mandatory
DiffOptions *drivers.ApplyDiffWithDifferOpts // Mandatory
}
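
A hedged sketch of the intended staged-layer flow with these options (the differ argument is whatever drivers.Differ implementation the caller already has; nil ApplyDiffWithDiffer options and the empty DiffOptions struct are assumptions kept minimal for illustration):

package example

import (
	storage "github.com/containers/storage"
	drivers "github.com/containers/storage/drivers"
)

// applyStagedLayer stages a diff and then commits it as layer "id".
func applyStagedLayer(s storage.Store, differ drivers.Differ, id, parent string) (*storage.Layer, error) {
	// Stage the diff first; an empty "to" creates a staging directory.
	out, err := s.ApplyDiffWithDiffer("", nil, differ)
	if err != nil {
		return nil, err
	}
	layer, err := s.ApplyStagedLayer(storage.ApplyStagedLayerOptions{
		ID:          id,
		ParentLayer: parent,
		DiffOutput:  out,
		DiffOptions: &drivers.ApplyDiffWithDifferOpts{},
	})
	if err != nil {
		// The staged layer is unusable; clean up the staging directory.
		_ = s.CleanupStagedLayer(out)
		return nil, err
	}
	return layer, nil
}
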
// An roBigDataStore wraps up the read-only big-data related methods of the
// various types of file-based lookaside stores that we implement.
type roBigDataStore interface {
@ -313,14 +328,24 @@ type Store interface {
// ApplyDiffWithDiffer applies a diff to a layer.
// It is the caller's responsibility to clean the staging directory if it is not
// successfully applied with ApplyDiffFromStagingDirectory.
ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
// ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff.
ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error
// Deprecated: it will be removed soon. Use ApplyStagedLayer instead.
ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error
// CleanupStagingDirectory cleans up the staging directory. It can be used to clean up the staging directory on errors
// Deprecated: it will be removed soon. Use CleanupStagedLayer instead.
CleanupStagingDirectory(stagingDirectory string) error
// ApplyStagedLayer combines the functions of CreateLayer and ApplyDiffFromStagingDirectory,
// marking the layer for automatic removal if applying the diff fails
// for any reason.
ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error)
// CleanupStagedLayer cleans up the staging directory. It can be used to clean up the staging directory on errors
CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) error
// DifferTarget gets the path to the differ target.
DifferTarget(id string) (string, error)
@ -332,6 +357,10 @@ type Store interface {
// specified uncompressed digest value recorded for them.
LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)
// LayersByTOCDigest returns a slice of the layers with the
// specified TOC digest value recorded for them.
LayersByTOCDigest(d digest.Digest) ([]Layer, error)
// LayerSize returns a cached approximation of the layer's size, or -1
// if we don't have a value on hand.
LayerSize(id string) (int64, error)
@ -391,6 +420,18 @@ type Store interface {
// allow ImagesByDigest to find images by their correct digests.
SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
// ImageDirectory returns a path of a directory which the caller can
// use to store data, specific to the image, which the library does not
// directly manage. The directory will be deleted when the image is
// deleted.
ImageDirectory(id string) (string, error)
// ImageRunDirectory returns a path of a directory which the caller can
// use to store data, specific to the image, which the library does not
// directly manage. The directory will be deleted when the host system
// is restarted.
ImageRunDirectory(id string) (string, error)
// ListLayerBigData retrieves a list of the (possibly large) chunks of
// named data associated with a layer.
ListLayerBigData(id string) ([]string, error)
@ -562,10 +603,19 @@ type LayerOptions struct {
// initialize this layer. If set, it should be a child of the layer
// which we want to use as the parent of the new layer.
TemplateLayer string
// OriginalDigest specifies a digest of the tarstream (diff), if one is
// OriginalDigest specifies a digest of the (possibly-compressed) tarstream (diff), if one is
// provided along with these LayerOptions, and reliably known by the caller.
// The digest might not be exactly the digest of the provided tarstream
// (e.g. the digest might be of a compressed representation, while providing
// an uncompressed one); in that case the caller is responsible for the two matching.
// Use the default "" if this fields is not applicable or the value is not known.
OriginalDigest digest.Digest
// OriginalSize specifies a size of the (possibly-compressed) tarstream corresponding
// to OriginalDigest.
// If OriginalDigest refers to a different (e.g. compressed) representation than the
// provided tarstream, OriginalSize must be the size of the data matching OriginalDigest,
// not the size of the tarstream.
// Use nil if not applicable or not known.
OriginalSize *int64
// UncompressedDigest specifies a digest of the uncompressed version (“DiffID”)
// of the tarstream (diff), if one is provided along with these LayerOptions,
// and reliably known by the caller.
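
A hedged sketch of populating the new fields when committing a compressed layer (names are illustrative; the caller must already know the compressed digest, its size, and the DiffID):

package example

import (
	"io"

	storage "github.com/containers/storage"
	digest "github.com/opencontainers/go-digest"
)

// putCompressedLayer records both the compressed identity (OriginalDigest/
// OriginalSize) and the uncompressed DiffID for the stream r.
func putCompressedLayer(s storage.Store, id, parent string, compressed, diffID digest.Digest, compressedSize int64, r io.Reader) (*storage.Layer, error) {
	opts := &storage.LayerOptions{
		OriginalDigest:     compressed,
		OriginalSize:       &compressedSize, // size of the data matching OriginalDigest
		UncompressedDigest: diffID,
	}
	layer, _, err := s.PutLayer(id, parent, nil, "", true, opts, r)
	return layer, err
}
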
@ -922,11 +972,13 @@ func (s *store) load() error {
if err := os.MkdirAll(gipath, 0o700); err != nil {
return err
}
ris, err := newImageStore(gipath)
imageStore, err := newImageStore(gipath)
if err != nil {
return err
}
s.imageStore = ris
s.imageStore = imageStore
s.rwImageStores = []rwImageStore{imageStore}
gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers")
if err := os.MkdirAll(gcpath, 0o700); err != nil {
@ -944,13 +996,16 @@ func (s *store) load() error {
s.containerStore = rcs
for _, store := range driver.AdditionalImageStores() {
additionalImageStores := s.graphDriver.AdditionalImageStores()
if s.imageStoreDir != "" {
additionalImageStores = append([]string{s.graphRoot}, additionalImageStores...)
}
for _, store := range additionalImageStores {
gipath := filepath.Join(store, driverPrefix+"images")
var ris roImageStore
if s.imageStoreDir != "" && store == s.graphRoot {
// If --imagestore was set and current store
// is `graphRoot` then mount it as a `rw` additional
// store instead of `readonly` additional store.
// both the graphdriver and the imagestore must be used read-write.
if store == s.imageStoreDir || store == s.graphRoot {
imageStore, err := newImageStore(gipath)
if err != nil {
return err
@ -960,6 +1015,10 @@ func (s *store) load() error {
} else {
ris, err = newROImageStore(gipath)
if err != nil {
if errors.Is(err, syscall.EROFS) {
logrus.Debugf("Ignoring creation of lockfiles on read-only file systems %q, %v", gipath, err)
continue
}
return err
}
}
@ -1031,15 +1090,9 @@ func (s *store) stopUsingGraphDriver() {
// Almost all users should use startUsingGraphDriver instead.
// The caller must hold s.graphLock.
func (s *store) createGraphDriverLocked() (drivers.Driver, error) {
driverRoot := s.imageStoreDir
imageStoreBase := s.graphRoot
if driverRoot == "" {
driverRoot = s.graphRoot
imageStoreBase = ""
}
config := drivers.Options{
Root: driverRoot,
ImageStore: imageStoreBase,
Root: s.graphRoot,
ImageStore: s.imageStoreDir,
RunRoot: s.runRoot,
DriverPriority: s.graphDriverPriority,
DriverOptions: s.graphOptions,
@ -1069,15 +1122,15 @@ func (s *store) getLayerStoreLocked() (rwLayerStore, error) {
if err := os.MkdirAll(rlpath, 0o700); err != nil {
return nil, err
}
imgStoreRoot := s.imageStoreDir
if imgStoreRoot == "" {
imgStoreRoot = s.graphRoot
}
glpath := filepath.Join(imgStoreRoot, driverPrefix+"layers")
glpath := filepath.Join(s.graphRoot, driverPrefix+"layers")
if err := os.MkdirAll(glpath, 0o700); err != nil {
return nil, err
}
rls, err := s.newLayerStore(rlpath, glpath, s.graphDriver, s.transientStore)
ilpath := ""
if s.imageStoreDir != "" {
ilpath = filepath.Join(s.imageStoreDir, driverPrefix+"layers")
}
rls, err := s.newLayerStore(rlpath, glpath, ilpath, s.graphDriver, s.transientStore)
if err != nil {
return nil, err
}
@ -1108,8 +1161,10 @@ func (s *store) getROLayerStoresLocked() ([]roLayerStore, error) {
if err := os.MkdirAll(rlpath, 0o700); err != nil {
return nil, err
}
for _, store := range s.graphDriver.AdditionalImageStores() {
glpath := filepath.Join(store, driverPrefix+"layers")
rls, err := newROLayerStore(rlpath, glpath, s.graphDriver)
if err != nil {
return nil, err
@ -1390,8 +1445,7 @@ func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool {
return true
}
func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) {
var parentLayer *Layer
func (s *store) putLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error) {
rlstore, rlstores, err := s.bothLayerStoreKinds()
if err != nil {
return nil, -1, err
@ -1404,6 +1458,8 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
return nil, -1, err
}
defer s.containerStore.stopWriting()
var parentLayer *Layer
var options LayerOptions
if lOptions != nil {
options = *lOptions
@ -1463,6 +1519,7 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
}
layerOptions := LayerOptions{
OriginalDigest: options.OriginalDigest,
OriginalSize: options.OriginalSize,
UncompressedDigest: options.UncompressedDigest,
Flags: options.Flags,
}
@ -1476,7 +1533,11 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
GIDMap: copyIDMap(gidMap),
}
}
return rlstore.create(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, diff)
return rlstore.create(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, diff, slo)
}
func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) {
return s.putLayer(id, parent, names, mountLabel, writeable, lOptions, diff, nil)
}
func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) {
@ -1686,7 +1747,7 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, rlst
}
}
layerOptions.TemplateLayer = layer.ID
mappedLayer, _, err := rlstore.create("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil)
mappedLayer, _, err := rlstore.create("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil)
if err != nil {
return nil, fmt.Errorf("creating an ID-mapped copy of layer %q: %w", layer.ID, err)
}
@ -1857,7 +1918,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
options.Flags[mountLabelFlag] = mountLabel
}
clayer, _, err := rlstore.create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true, nil)
clayer, _, err := rlstore.create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true, nil, nil)
if err != nil {
return nil, err
}
@ -2530,7 +2591,7 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error)
if err := s.writeToAllStores(func(rlstore rwLayerStore) error {
// Delete image from all available imagestores configured to be used.
imageFound := false
for _, is := range append([]rwImageStore{s.imageStore}, s.rwImageStores...) {
for _, is := range s.rwImageStores {
if is != s.imageStore {
// This is an additional writeable image store
// so we must perform lock
@ -2741,7 +2802,13 @@ func (s *store) Status() ([][2]string, error) {
return rlstore.Status()
}
//go:embed VERSION
var storageVersion string
func (s *store) Version() ([][2]string, error) {
if trimmedVersion := strings.TrimSpace(storageVersion); trimmedVersion != "" {
return [][2]string{{"Version", trimmedVersion}}, nil
}
return [][2]string{}, nil
}
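
The version string now comes from the embedded VERSION file; a trivial consumer sketch:

package example

import (
	"fmt"

	storage "github.com/containers/storage"
)

func printVersion(s storage.Store) {
	pairs, err := s.Version() // e.g. [["Version", "1.53.0"]]
	if err != nil {
		return
	}
	for _, kv := range pairs {
		fmt.Printf("%s: %s\n", kv[0], kv[1])
	}
}
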
@ -2915,16 +2982,29 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro
return nil, ErrLayerUnknown
}
func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error {
func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error {
if stagingDirectory != diffOutput.Target {
return fmt.Errorf("invalid value for staging directory, it must be the same as the differ target directory")
}
_, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) {
if !rlstore.Exists(to) {
return struct{}{}, ErrLayerUnknown
}
return struct{}{}, rlstore.ApplyDiffFromStagingDirectory(to, stagingDirectory, diffOutput, options)
return struct{}{}, rlstore.applyDiffFromStagingDirectory(to, diffOutput, options)
})
return err
}
func (s *store) ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error) {
slo := stagedLayerOptions{
DiffOutput: args.DiffOutput,
DiffOptions: args.DiffOptions,
}
layer, _, err := s.putLayer(args.ID, args.ParentLayer, args.Names, args.MountLabel, args.Writeable, args.LayerOptions, nil, &slo)
return layer, err
}
func (s *store) CleanupStagingDirectory(stagingDirectory string) error {
_, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) {
return struct{}{}, rlstore.CleanupStagingDirectory(stagingDirectory)
@ -2932,7 +3012,14 @@ func (s *store) CleanupStagingDirectory(stagingDirectory string) error {
return err
}
func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
func (s *store) CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) error {
_, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) {
return struct{}{}, rlstore.CleanupStagingDirectory(diffOutput.Target)
})
return err
}
func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
return writeToLayerStore(s, func(rlstore rwLayerStore) (*drivers.DriverWithDifferOutput, error) {
if to != "" && !rlstore.Exists(to) {
return nil, ErrLayerUnknown
@ -2994,6 +3081,13 @@ func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) {
return s.layersByMappedDigest(func(r roLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d)
}
func (s *store) LayersByTOCDigest(d digest.Digest) ([]Layer, error) {
if err := d.Validate(); err != nil {
return nil, fmt.Errorf("looking for TOC matching digest %q: %w", d, err)
}
return s.layersByMappedDigest(func(r roLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByTOCDigest(d) }, d)
}
func (s *store) LayerSize(id string) (int64, error) {
if res, done, err := readAllLayerStores(s, func(store roLayerStore) (int64, bool, error) {
if store.Exists(id) {
@ -3288,6 +3382,27 @@ func (s *store) ContainerByLayer(id string) (*Container, error) {
return nil, ErrContainerUnknown
}
func (s *store) ImageDirectory(id string) (string, error) {
foundImage := false
if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) {
if store.Exists(id) {
foundImage = true
}
middleDir := s.graphDriverName + "-images"
gipath := filepath.Join(s.GraphRoot(), middleDir, id, "userdata")
if err := os.MkdirAll(gipath, 0o700); err != nil {
return "", true, err
}
return gipath, true, nil
}); done {
return res, err
}
if foundImage {
return "", fmt.Errorf("locating image with ID %q (consider removing the image to resolve the issue): %w", id, os.ErrNotExist)
}
return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
func (s *store) ContainerDirectory(id string) (string, error) {
res, _, err := readContainerStore(s, func() (string, bool, error) {
id, err := s.containerStore.Lookup(id)
@ -3305,6 +3420,28 @@ func (s *store) ContainerDirectory(id string) (string, error) {
return res, err
}
func (s *store) ImageRunDirectory(id string) (string, error) {
foundImage := false
if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) {
if store.Exists(id) {
foundImage = true
}
middleDir := s.graphDriverName + "-images"
rcpath := filepath.Join(s.RunRoot(), middleDir, id, "userdata")
if err := os.MkdirAll(rcpath, 0o700); err != nil {
return "", true, err
}
return rcpath, true, nil
}); done {
return res, err
}
if foundImage {
return "", fmt.Errorf("locating image with ID %q (consider removing the image to resolve the issue): %w", id, os.ErrNotExist)
}
return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
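
A hedged sketch of the new per-image scratch directories (the file name and payload are illustrative; data under ImageRunDirectory does not survive a host restart):

package example

import (
	"os"
	"path/filepath"

	storage "github.com/containers/storage"
)

// stashImageData writes caller-managed data next to the image; the directory
// is removed automatically when the image is deleted.
func stashImageData(s storage.Store, imageID, name string, data []byte) error {
	dir, err := s.ImageDirectory(imageID)
	if err != nil {
		return err
	}
	return os.WriteFile(filepath.Join(dir, name), data, 0o600)
}
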
func (s *store) ContainerRunDirectory(id string) (string, error) {
res, _, err := readContainerStore(s, func() (string, bool, error) {
id, err := s.containerStore.Lookup(id)
@ -3545,8 +3682,8 @@ func SetDefaultConfigFilePath(path string) {
}
// DefaultConfigFile returns the path to the storage config file used
func DefaultConfigFile(rootless bool) (string, error) {
return types.DefaultConfigFile(rootless)
func DefaultConfigFile() (string, error) {
return types.DefaultConfigFile()
}
// ReloadConfigurationFile parses the specified configuration file and overrides

View file

@ -11,7 +11,9 @@ import (
"github.com/BurntSushi/toml"
cfg "github.com/containers/storage/pkg/config"
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/unshare"
"github.com/sirupsen/logrus"
)
@ -87,7 +89,7 @@ func loadDefaultStoreOptions() {
_, err := os.Stat(defaultOverrideConfigFile)
if err == nil {
// The DefaultConfigFile(rootless) function returns the path
// The DefaultConfigFile() function returns the path
// of the used storage.conf file, by returning defaultConfigFile
// If override exists containers/storage uses it by default.
defaultConfigFile = defaultOverrideConfigFile
@ -109,21 +111,41 @@ func loadDefaultStoreOptions() {
setDefaults()
}
// defaultStoreOptionsIsolated is an internal implementation detail of DefaultStoreOptions to allow testing.
// Everyone but the tests this is intended for should only call DefaultStoreOptions, never this function.
func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf string) (StoreOptions, error) {
// loadStoreOptions returns the default storage options for containers
func loadStoreOptions() (StoreOptions, error) {
storageConf, err := DefaultConfigFile()
if err != nil {
return defaultStoreOptions, err
}
return loadStoreOptionsFromConfFile(storageConf)
}
// usePerUserStorage returns whether the per-user (rootless) storage must be used.
// We cannot simply use the unshare.IsRootless() condition, because it checks only
// whether the current process needs a user namespace to work; that would break
// cases where the process was already created in a user namespace (e.g. nested
// Podman/Buildah) and the desired behavior is to use system paths instead of
// user-private paths.
func usePerUserStorage() bool {
return unshare.IsRootless() && unshare.GetRootlessUID() != 0
}
// loadStoreOptionsFromConfFile is an internal implementation detail of DefaultStoreOptions to allow testing.
// Everyone but the tests this is intended for should only call loadStoreOptions, never this function.
func loadStoreOptionsFromConfFile(storageConf string) (StoreOptions, error) {
var (
defaultRootlessRunRoot string
defaultRootlessGraphRoot string
err error
)
defaultStoreOptionsOnce.Do(loadDefaultStoreOptions)
if loadDefaultStoreOptionsErr != nil {
return StoreOptions{}, loadDefaultStoreOptionsErr
}
storageOpts := defaultStoreOptions
if rootless && rootlessUID != 0 {
storageOpts, err = getRootlessStorageOpts(rootlessUID, storageOpts)
if usePerUserStorage() {
storageOpts, err = getRootlessStorageOpts(storageOpts)
if err != nil {
return storageOpts, err
}
@ -137,7 +159,7 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str
defaultRootlessGraphRoot = storageOpts.GraphRoot
storageOpts = StoreOptions{}
reloadConfigurationFileIfNeeded(storageConf, &storageOpts)
if rootless && rootlessUID != 0 {
if usePerUserStorage() {
// If the file did not specify a graphroot or runroot,
// set sane defaults so we don't try and use root-owned
// directories
@ -156,6 +178,7 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str
if storageOpts.RunRoot == "" {
return storageOpts, fmt.Errorf("runroot must be set")
}
rootlessUID := unshare.GetRootlessUID()
runRoot, err := expandEnvPath(storageOpts.RunRoot, rootlessUID)
if err != nil {
return storageOpts, err
@ -186,26 +209,17 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str
return storageOpts, nil
}
// loadStoreOptions returns the default storage ops for containers
func loadStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {
storageConf, err := DefaultConfigFile(rootless && rootlessUID != 0)
if err != nil {
return defaultStoreOptions, err
}
return defaultStoreOptionsIsolated(rootless, rootlessUID, storageConf)
}
// UpdateStoreOptions should be called iff the container engine received a SIGHUP;
// otherwise use DefaultStoreOptions.
func UpdateStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {
storeOptions, storeError = loadStoreOptions(rootless, rootlessUID)
func UpdateStoreOptions() (StoreOptions, error) {
storeOptions, storeError = loadStoreOptions()
return storeOptions, storeError
}
// DefaultStoreOptions returns the default storage options for containers
func DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {
func DefaultStoreOptions() (StoreOptions, error) {
once.Do(func() {
storeOptions, storeError = loadStoreOptions(rootless, rootlessUID)
storeOptions, storeError = loadStoreOptions()
})
return storeOptions, storeError
}
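
A migration sketch for the signature change (the library now detects rootless mode itself via usePerUserStorage, so the old rootless/rootlessUID arguments are gone; error handling is trimmed for illustration):

package main

import (
	"fmt"

	storage "github.com/containers/storage"
	"github.com/containers/storage/types"
)

func main() {
	// Before: types.DefaultStoreOptions(unshare.IsRootless(), unshare.GetRootlessUID())
	opts, err := types.DefaultStoreOptions()
	if err != nil {
		fmt.Println(err)
		return
	}
	s, err := storage.GetStore(opts)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer s.Shutdown(false)
	fmt.Println("graph root:", s.GraphRoot())
}
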
@ -270,14 +284,26 @@ func isRootlessDriver(driver string) bool {
}
// getRootlessStorageOpts returns the storage options for containers running as a non-root user
func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOptions, error) {
func getRootlessStorageOpts(systemOpts StoreOptions) (StoreOptions, error) {
var opts StoreOptions
dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUID)
rootlessUID := unshare.GetRootlessUID()
dataDir, err := homedir.GetDataHome()
if err != nil {
return opts, err
}
opts.RunRoot = rootlessRuntime
rootlessRuntime, err := homedir.GetRuntimeDir()
if err != nil {
return opts, err
}
opts.RunRoot = filepath.Join(rootlessRuntime, "containers")
if err := os.MkdirAll(opts.RunRoot, 0o700); err != nil {
return opts, fmt.Errorf("unable to make rootless runtime: %w", err)
}
opts.PullOptions = systemOpts.PullOptions
if systemOpts.RootlessStoragePath != "" {
opts.GraphRoot, err = expandEnvPath(systemOpts.RootlessStoragePath, rootlessUID)
@ -343,12 +369,6 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti
return opts, nil
}
// DefaultStoreOptionsAutoDetectUID returns the default storage ops for containers
func DefaultStoreOptionsAutoDetectUID() (StoreOptions, error) {
uid := getRootlessUID()
return DefaultStoreOptions(uid != 0, uid)
}
var prevReloadConfig = struct {
storeOptions *StoreOptions
mod time.Time
@ -518,8 +538,8 @@ func Options() (StoreOptions, error) {
}
// Save overwrites the tomlConfig in storage.conf with the given conf
func Save(conf TomlConfig, rootless bool) error {
configFile, err := DefaultConfigFile(rootless)
func Save(conf TomlConfig) error {
configFile, err := DefaultConfigFile()
if err != nil {
return err
}
@ -537,10 +557,10 @@ func Save(conf TomlConfig, rootless bool) error {
}
// StorageConfig is used to retrieve the storage.conf toml in order to overwrite it
func StorageConfig(rootless bool) (*TomlConfig, error) {
func StorageConfig() (*TomlConfig, error) {
config := new(TomlConfig)
configFile, err := DefaultConfigFile(rootless)
configFile, err := DefaultConfigFile()
if err != nil {
return nil, err
}

View file

@ -2,162 +2,15 @@ package types
import (
"errors"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/system"
"github.com/sirupsen/logrus"
)
// GetRootlessRuntimeDir returns the runtime directory when running as non root
func GetRootlessRuntimeDir(rootlessUID int) (string, error) {
path, err := getRootlessRuntimeDir(rootlessUID)
if err != nil {
return "", err
}
path = filepath.Join(path, "containers")
if err := os.MkdirAll(path, 0o700); err != nil {
return "", fmt.Errorf("unable to make rootless runtime: %w", err)
}
return path, nil
}
type rootlessRuntimeDirEnvironment interface {
getProcCommandFile() string
getRunUserDir() string
getTmpPerUserDir() string
homeDirGetRuntimeDir() (string, error)
systemLstat(string) (*system.StatT, error)
homedirGet() string
}
type rootlessRuntimeDirEnvironmentImplementation struct {
procCommandFile string
runUserDir string
tmpPerUserDir string
}
func (env rootlessRuntimeDirEnvironmentImplementation) getProcCommandFile() string {
return env.procCommandFile
}
func (env rootlessRuntimeDirEnvironmentImplementation) getRunUserDir() string {
return env.runUserDir
}
func (env rootlessRuntimeDirEnvironmentImplementation) getTmpPerUserDir() string {
return env.tmpPerUserDir
}
func (rootlessRuntimeDirEnvironmentImplementation) homeDirGetRuntimeDir() (string, error) {
return homedir.GetRuntimeDir()
}
func (rootlessRuntimeDirEnvironmentImplementation) systemLstat(path string) (*system.StatT, error) {
return system.Lstat(path)
}
func (rootlessRuntimeDirEnvironmentImplementation) homedirGet() string {
return homedir.Get()
}
func isRootlessRuntimeDirOwner(dir string, env rootlessRuntimeDirEnvironment) bool {
st, err := env.systemLstat(dir)
return err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0o700 == 0o700 && st.Mode()&0o066 == 0o000
}
// getRootlessRuntimeDirIsolated is an internal implementation detail of getRootlessRuntimeDir to allow testing.
// Everyone but the tests this is intended for should only call getRootlessRuntimeDir, never this function.
func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, error) {
runtimeDir, err := env.homeDirGetRuntimeDir()
if err == nil {
return runtimeDir, nil
}
initCommand, err := os.ReadFile(env.getProcCommandFile())
if err != nil || string(initCommand) == "systemd" {
runUserDir := env.getRunUserDir()
if isRootlessRuntimeDirOwner(runUserDir, env) {
return runUserDir, nil
}
}
tmpPerUserDir := env.getTmpPerUserDir()
if tmpPerUserDir != "" {
if _, err := env.systemLstat(tmpPerUserDir); os.IsNotExist(err) {
if err := os.Mkdir(tmpPerUserDir, 0o700); err != nil {
logrus.Errorf("Failed to create temp directory for user: %v", err)
} else {
return tmpPerUserDir, nil
}
} else if isRootlessRuntimeDirOwner(tmpPerUserDir, env) {
return tmpPerUserDir, nil
}
}
homeDir := env.homedirGet()
if homeDir == "" {
return "", errors.New("neither XDG_RUNTIME_DIR nor temp dir nor HOME was set non-empty")
}
resolvedHomeDir, err := filepath.EvalSymlinks(homeDir)
if err != nil {
return "", err
}
return filepath.Join(resolvedHomeDir, "rundir"), nil
}
func getRootlessRuntimeDir(rootlessUID int) (string, error) {
return getRootlessRuntimeDirIsolated(
rootlessRuntimeDirEnvironmentImplementation{
"/proc/1/comm",
fmt.Sprintf("/run/user/%d", rootlessUID),
fmt.Sprintf("%s/containers-user-%d", os.TempDir(), rootlessUID),
},
)
}
// getRootlessDirInfo returns the parent path of where the storage for containers and
// volumes will be in rootless mode
func getRootlessDirInfo(rootlessUID int) (string, string, error) {
rootlessRuntime, err := GetRootlessRuntimeDir(rootlessUID)
if err != nil {
return "", "", err
}
dataDir, err := homedir.GetDataHome()
if err == nil {
return dataDir, rootlessRuntime, nil
}
home := homedir.Get()
if home == "" {
return "", "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty: %w", err)
}
// runc doesn't like symlinks in the rootfs path, and at least
// on CoreOS /home is a symlink to /var/home, so resolve any symlink.
resolvedHome, err := filepath.EvalSymlinks(home)
if err != nil {
return "", "", err
}
dataDir = filepath.Join(resolvedHome, ".local", "share")
return dataDir, rootlessRuntime, nil
}
func getRootlessUID() int {
uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
if uidEnv != "" {
u, _ := strconv.Atoi(uidEnv)
return u
}
return os.Geteuid()
}
func expandEnvPath(path string, rootlessUID int) (string, error) {
var err error
path = strings.Replace(path, "$UID", strconv.Itoa(rootlessUID), -1)
@ -169,7 +22,7 @@ func expandEnvPath(path string, rootlessUID int) (string, error) {
return newpath, nil
}
func DefaultConfigFile(rootless bool) (string, error) {
func DefaultConfigFile() (string, error) {
if defaultConfigFileSet {
return defaultConfigFile, nil
}
@ -177,7 +30,7 @@ func DefaultConfigFile(rootless bool) (string, error) {
if path, ok := os.LookupEnv(storageConfEnv); ok {
return path, nil
}
if !rootless {
if !usePerUserStorage() {
if _, err := os.Stat(defaultOverrideConfigFile); err == nil {
return defaultOverrideConfigFile, nil
}

View file

@ -11,7 +11,7 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/unshare"
"github.com/containers/storage/types"
libcontainerUser "github.com/opencontainers/runc/libcontainer/user"
libcontainerUser "github.com/moby/sys/user"
"github.com/sirupsen/logrus"
)
@ -175,7 +175,7 @@ outer:
// We need to create a temporary layer so we can mount it and lookup the
// maximum IDs used.
clayer, _, err := rlstore.create("", topLayer, nil, "", nil, layerOptions, false, nil)
clayer, _, err := rlstore.create("", topLayer, nil, "", nil, layerOptions, false, nil, nil)
if err != nil {
return 0, err
}

View file

@ -11,19 +11,9 @@ func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap stri
return types.ParseIDMapping(UIDMapSlice, GIDMapSlice, subUIDMap, subGIDMap)
}
// GetRootlessRuntimeDir returns the runtime directory when running as non root
func GetRootlessRuntimeDir(rootlessUID int) (string, error) {
return types.GetRootlessRuntimeDir(rootlessUID)
}
// DefaultStoreOptionsAutoDetectUID returns the default storage options for containers
func DefaultStoreOptionsAutoDetectUID() (types.StoreOptions, error) {
return types.DefaultStoreOptionsAutoDetectUID()
}
// DefaultStoreOptions returns the default storage options for containers
func DefaultStoreOptions(rootless bool, rootlessUID int) (types.StoreOptions, error) {
return types.DefaultStoreOptions(rootless, rootlessUID)
func DefaultStoreOptions() (types.StoreOptions, error) {
return types.DefaultStoreOptions()
}
func validateMountOptions(mountOptions []string) error {