Update 'images' to v0.113.0

Signed-off-by: Tomáš Hozza <thozza@redhat.com>
Tomáš Hozza 2025-02-03 14:26:54 +01:00 committed by Achilleas Koutsou
parent b8c2e4c45c
commit 8514c95837
646 changed files with 36206 additions and 22388 deletions

View file

@ -23,7 +23,7 @@ env:
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
# VM Image built in containers/automation_images
IMAGE_SUFFIX: "c20240529t141726z-f40f39d13"
IMAGE_SUFFIX: "c20241010t105554z-f40f39d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
@ -167,13 +167,26 @@ vendor_task:
build_script: make vendor
test_script: hack/tree_status.sh
cross_task:
alias: cross
container:
image: golang:1.21
image: golang:1.22
build_script: make cross
gofix_task:
alias: gofix
container:
image: golang:1.22
build_script: go fix ./...
test_script: git diff --exit-code
codespell_task:
alias: codespell
container:
image: python
build_script: pip install codespell
test_script: codespell
# Status aggregator for all tests. This task simply ensures a defined
# set of tasks all passed, and allows confirming that based on the status
@ -190,6 +203,8 @@ success_task:
- meta
- vendor
- cross
- gofix
- codespell
container:
image: golang:1.21
clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed

vendor/github.com/containers/storage/.codespellrc generated vendored Normal file
View file

@ -0,0 +1,3 @@
[codespell]
skip = ./.git,./vendor,./tests/tools/vendor,AUTHORS
ignore-words-list = afile,flate,prevend,Plack,worl

View file

@ -32,6 +32,11 @@ BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS)
GO ?= go
TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /dev/null && echo -race)
# N/B: This value is managed by Renovate, manual changes are
# possible, as long as they don't disturb the formatting
# (i.e. DO NOT ADD A 'v' prefix!)
GOLANGCI_LINT_VERSION := 1.61.0
default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs
clean: ## remove all built files
@ -41,7 +46,7 @@ containers-storage: ## build using gc on the host
$(GO) build -compiler gc $(BUILDFLAGS) ./cmd/containers-storage
codespell:
codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L plack,worl,flate,uint,iff,od,ERRO -w
codespell
binary local-binary: containers-storage
@ -74,7 +79,7 @@ local-validate validate: install.tools ## validate DCO on the host
@./hack/git-validation.sh
install.tools:
$(MAKE) -C tests/tools
$(MAKE) -C tests/tools GOLANGCI_LINT_VERSION=$(GOLANGCI_LINT_VERSION)
install.docs: docs
$(MAKE) -C docs install

View file

@ -1,32 +1,14 @@
approvers:
- Luap99
- TomSweeneyRedHat
- cevich
- edsantiago
- flouthoc
- giuseppe
- haircommander
- kolyshkin
- mrunalp
- mtrmac
- nalind
- rhatdan
- saschagrunert
- umohnani8
- vrothberg
reviewers:
- Luap99
- Honny1
- TomSweeneyRedHat
- cevich
- edsantiago
- flouthoc
- giuseppe
- haircommander
- kolyshkin
- mrunalp
- mtrmac
- nalind
- rhatdan
- saschagrunert
- umohnani8
- vrothberg

View file

@ -1 +1 @@
1.55.0
1.56.1

View file

@ -8,6 +8,7 @@ import (
"os"
"path"
"path/filepath"
"slices"
"sort"
"strings"
"sync"
@ -769,12 +770,9 @@ func (s *store) Repair(report CheckReport, options *RepairOptions) []error {
return d
}
isUnaccounted := func(errs []error) bool {
for _, err := range errs {
if errors.Is(err, ErrLayerUnaccounted) {
return true
}
}
return false
return slices.ContainsFunc(errs, func(err error) bool {
return errors.Is(err, ErrLayerUnaccounted)
})
}
sort.Slice(layersToDelete, func(i, j int) bool {
// we've not heard of either of them, so remove them in the order the driver suggested
@ -1005,12 +1003,12 @@ func (c *checkDirectory) remove(path string) {
func (c *checkDirectory) header(hdr *tar.Header) {
name := path.Clean(hdr.Name)
dir, base := path.Split(name)
if strings.HasPrefix(base, archive.WhiteoutPrefix) {
if file, ok := strings.CutPrefix(base, archive.WhiteoutPrefix); ok {
if base == archive.WhiteoutOpaqueDir {
c.remove(path.Clean(dir))
c.add(path.Clean(dir), tar.TypeDir, hdr.Uid, hdr.Gid, hdr.Size, os.FileMode(hdr.Mode), hdr.ModTime.Unix())
} else {
c.remove(path.Join(dir, base[len(archive.WhiteoutPrefix):]))
c.remove(path.Join(dir, file))
}
} else {
if hdr.Typeflag == tar.TypeLink {
@ -1044,7 +1042,7 @@ func (c *checkDirectory) header(hdr *tar.Header) {
// headers updates a checkDirectory using information from the passed-in header slice
func (c *checkDirectory) headers(hdrs []*tar.Header) {
hdrs = append([]*tar.Header{}, hdrs...)
hdrs = slices.Clone(hdrs)
// sort the headers from the diff to ensure that whiteouts appear
// before content when they both appear in the same directory, per
// https://github.com/opencontainers/image-spec/blob/main/layer.md#whiteouts
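The check.go hunks above are part of a commit-wide modernization: manual prefix slicing becomes strings.CutPrefix (Go 1.20+) and defensive copies become slices.Clone. A minimal, runnable sketch of the whiteout-name pattern, using a locally defined constant in place of the real archive.WhiteoutPrefix:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// whiteoutPrefix stands in for archive.WhiteoutPrefix from
// github.com/containers/storage/pkg/archive.
const whiteoutPrefix = ".wh."

func main() {
	for _, name := range []string{"etc/.wh.passwd", "etc/hosts"} {
		dir, base := path.Split(path.Clean(name))
		// CutPrefix replaces the old HasPrefix check plus
		// base[len(whiteoutPrefix):] slicing in one call.
		if file, ok := strings.CutPrefix(base, whiteoutPrefix); ok {
			fmt.Printf("%s whites out %s\n", name, path.Join(dir, file))
		} else {
			fmt.Printf("%s is a regular entry\n", name)
		}
	}
}
```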

View file

@ -5,6 +5,7 @@ import (
"fmt"
"os"
"path/filepath"
"slices"
"sync"
"time"
@ -162,17 +163,17 @@ type containerStore struct {
func copyContainer(c *Container) *Container {
return &Container{
ID: c.ID,
Names: copyStringSlice(c.Names),
Names: copySlicePreferringNil(c.Names),
ImageID: c.ImageID,
LayerID: c.LayerID,
Metadata: c.Metadata,
BigDataNames: copyStringSlice(c.BigDataNames),
BigDataSizes: copyStringInt64Map(c.BigDataSizes),
BigDataDigests: copyStringDigestMap(c.BigDataDigests),
BigDataNames: copySlicePreferringNil(c.BigDataNames),
BigDataSizes: copyMapPreferringNil(c.BigDataSizes),
BigDataDigests: copyMapPreferringNil(c.BigDataDigests),
Created: c.Created,
UIDMap: copyIDMap(c.UIDMap),
GIDMap: copyIDMap(c.GIDMap),
Flags: copyStringInterfaceMap(c.Flags),
UIDMap: copySlicePreferringNil(c.UIDMap),
GIDMap: copySlicePreferringNil(c.GIDMap),
Flags: copyMapPreferringNil(c.Flags),
volatileStore: c.volatileStore,
}
}
@ -690,13 +691,13 @@ func (r *containerStore) create(id string, names []string, image, layer string,
BigDataSizes: make(map[string]int64),
BigDataDigests: make(map[string]digest.Digest),
Created: time.Now().UTC(),
Flags: copyStringInterfaceMap(options.Flags),
UIDMap: copyIDMap(options.UIDMap),
GIDMap: copyIDMap(options.GIDMap),
Flags: newMapFrom(options.Flags),
UIDMap: copySlicePreferringNil(options.UIDMap),
GIDMap: copySlicePreferringNil(options.GIDMap),
volatileStore: options.Volatile,
}
if options.MountOpts != nil {
container.Flags[mountOptsFlag] = append([]string{}, options.MountOpts...)
container.Flags[mountOptsFlag] = slices.Clone(options.MountOpts)
}
if options.Volatile {
container.Flags[volatileFlag] = true
@ -788,13 +789,6 @@ func (r *containerStore) Delete(id string) error {
return ErrContainerUnknown
}
id = container.ID
toDeleteIndex := -1
for i, candidate := range r.containers {
if candidate.ID == id {
toDeleteIndex = i
break
}
}
delete(r.byid, id)
// This can only fail if the ID is already missing, which shouldn't happen — and in that case the index is already in the desired state anyway.
// The store's Delete method is used on various paths to recover from failures, so this should be robust against partially missing data.
@ -803,14 +797,9 @@ func (r *containerStore) Delete(id string) error {
for _, name := range container.Names {
delete(r.byname, name)
}
if toDeleteIndex != -1 {
// delete the container at toDeleteIndex
if toDeleteIndex == len(r.containers)-1 {
r.containers = r.containers[:len(r.containers)-1]
} else {
r.containers = append(r.containers[:toDeleteIndex], r.containers[toDeleteIndex+1:]...)
}
}
r.containers = slices.DeleteFunc(r.containers, func(candidate *Container) bool {
return candidate.ID == id
})
if err := r.saveFor(container); err != nil {
return err
}
@ -916,7 +905,7 @@ func (r *containerStore) BigDataNames(id string) ([]string, error) {
if !ok {
return nil, ErrContainerUnknown
}
return copyStringSlice(c.BigDataNames), nil
return copySlicePreferringNil(c.BigDataNames), nil
}
// Requires startWriting.
@ -948,14 +937,7 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error {
if !sizeOk || oldSize != c.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
save = true
}
addName := true
for _, name := range c.BigDataNames {
if name == key {
addName = false
break
}
}
if addName {
if !slices.Contains(c.BigDataNames, key) {
c.BigDataNames = append(c.BigDataNames, key)
save = true
}
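The Delete rewrite above drops the toDeleteIndex bookkeeping in favor of slices.DeleteFunc (Go 1.21+), which removes every matching element in place and returns the shortened slice. A tiny demonstration with a stand-in Container type:

```go
package main

import (
	"fmt"
	"slices"
)

type container struct{ ID string }

func main() {
	containers := []*container{{ID: "a"}, {ID: "b"}, {ID: "c"}}
	id := "b"

	// One call removes every matching element in place and returns the
	// shortened slice; no index search or manual splice is needed.
	containers = slices.DeleteFunc(containers, func(c *container) bool {
		return c.ID == id
	})

	for _, c := range containers {
		fmt.Println(c.ID) // prints a, then c
	}
}
```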

View file

@ -1,5 +1,4 @@
//go:build linux
// +build linux
/*

View file

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package aufs

View file

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package aufs

View file

@ -1,5 +1,4 @@
//go:build linux && cgo
// +build linux,cgo
package btrfs

View file

@ -1,4 +1,3 @@
//go:build !linux || !cgo
// +build !linux !cgo
package btrfs

View file

@ -1,5 +1,4 @@
//go:build linux && !btrfs_noversion && cgo
// +build linux,!btrfs_noversion,cgo
package btrfs

View file

@ -1,5 +1,4 @@
//go:build linux && btrfs_noversion && cgo
// +build linux,btrfs_noversion,cgo
package btrfs

View file

@ -1,5 +1,4 @@
//go:build darwin
// +build darwin
package graphdriver

View file

@ -1,5 +1,4 @@
//go:build !windows && !darwin
// +build !windows,!darwin
package graphdriver

View file

@ -1,5 +1,4 @@
//go:build windows
// +build windows
package graphdriver

View file

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package graphdriver

View file

@ -1,5 +1,4 @@
//go:build cgo
// +build cgo
package copy
@ -17,7 +16,6 @@ import (
"errors"
"fmt"
"io"
"net"
"os"
"path/filepath"
"strings"
@ -200,11 +198,9 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
}
case mode&os.ModeSocket != 0:
s, err := net.Listen("unix", dstPath)
if err != nil {
if err := unix.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil {
return err
}
s.Close()
case mode&os.ModeDevice != 0:
if unshare.IsRootless() {
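Previously DirCopy recreated sockets by briefly opening a unix-domain listener at the destination; the new code creates the socket inode directly with mknod, just like a device node, since nothing ever connects to the copy. A standalone Linux-only sketch of that approach; the paths are hypothetical:

```go
//go:build linux

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	src := "/run/example.sock" // assumed to be an existing unix socket
	dst := "/tmp/example.sock"

	var st unix.Stat_t
	if err := unix.Lstat(src, &st); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// st.Mode carries S_IFSOCK plus the permission bits, so Mknod
	// recreates the socket as a bare inode; unlike net.Listen there is
	// no listener behind it, which is all a layer copy needs.
	if err := unix.Mknod(dst, st.Mode, int(st.Rdev)); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```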

View file

@ -1,5 +1,4 @@
//go:build !linux || !cgo
// +build !linux !cgo
package copy //nolint: predeclared

View file

@ -189,14 +189,14 @@ type Driver interface {
type DriverWithDifferOutput struct {
Differ Differ
Target string
Size int64
Size int64 // Size of the uncompressed layer, -1 if unknown. Must be known if UncompressedDigest is set.
UIDs []uint32
GIDs []uint32
UncompressedDigest digest.Digest
CompressedDigest digest.Digest
Metadata string
BigData map[string][]byte
TarSplit []byte
TarSplit []byte // nil if not available
TOCDigest digest.Digest
// RootDirMode is the mode of the root directory of the layer, if specified.
RootDirMode *os.FileMode
@ -254,8 +254,8 @@ type Differ interface {
type DriverWithDiffer interface {
Driver
// ApplyDiffWithDiffer applies the changes using the callback function.
// If id is empty, then a staging directory is created. The staging directory is guaranteed to be usable with ApplyDiffFromStagingDirectory.
ApplyDiffWithDiffer(id, parent string, options *ApplyDiffWithDifferOpts, differ Differ) (output DriverWithDifferOutput, err error)
// The staging directory created by this function is guaranteed to be usable with ApplyDiffFromStagingDirectory.
ApplyDiffWithDiffer(options *ApplyDiffWithDifferOpts, differ Differ) (output DriverWithDifferOutput, err error)
// ApplyDiffFromStagingDirectory applies the changes using the diffOutput target directory.
ApplyDiffFromStagingDirectory(id, parent string, diffOutput *DriverWithDifferOutput, options *ApplyDiffWithDifferOpts) error
// CleanupStagingDirectory cleans up the staging directory. It can be used to clean up the staging directory on errors
@ -496,7 +496,7 @@ func driverPut(driver ProtoDriver, id string, mainErr *error) {
if *mainErr == nil {
*mainErr = err
} else {
logrus.Errorf(err.Error())
logrus.Error(err)
}
}
}
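The logrus change in driverPut is a correctness fix, not just style: Errorf treats its first argument as a printf format string, so a stray percent sign inside the error text corrupts the log line. A short demonstration:

```go
package main

import (
	"errors"

	"github.com/sirupsen/logrus"
)

func main() {
	err := errors.New("invalid option %q in config")

	// BAD: the error text is used as a format string, so this logs
	// "invalid option %!q(MISSING) in config".
	logrus.Errorf(err.Error())

	// GOOD: the error is logged verbatim.
	logrus.Error(err)
}
```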

View file

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package graphdriver

View file

@ -1,5 +1,4 @@
//go:build solaris && cgo
// +build solaris,cgo
package graphdriver

View file

@ -1,5 +1,4 @@
//go:build !linux && !windows && !freebsd && !solaris && !darwin
// +build !linux,!windows,!freebsd,!solaris,!darwin
package graphdriver

View file

@ -128,6 +128,7 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p
options := MountOpts{
MountLabel: mountLabel,
Options: []string{"ro"},
}
layerFs, err := driver.Get(id, options)
if err != nil {
@ -138,10 +139,6 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p
parentFs := ""
if parent != "" {
options := MountOpts{
MountLabel: mountLabel,
Options: []string{"ro"},
}
parentFs, err = driver.Get(parent, options)
if err != nil {
return nil, err

View file

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package overlay

View file

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package overlay

View file

@ -1,5 +1,4 @@
//go:build linux && cgo
// +build linux,cgo
package overlay
@ -8,12 +7,12 @@ import (
"encoding/binary"
"errors"
"fmt"
"io/fs"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"github.com/containers/storage/pkg/chunked/dump"
"github.com/containers/storage/pkg/fsverity"
@ -26,6 +25,10 @@ var (
composeFsHelperOnce sync.Once
composeFsHelperPath string
composeFsHelperErr error
// skipMountViaFile is used to avoid trying to mount EROFS directly via the file if we already know the current kernel
// does not support it. Mounting directly via a file will be supported in kernel 6.12.
skipMountViaFile atomic.Bool
)
func getComposeFsHelper() (string, error) {
@ -55,29 +58,26 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, com
return fmt.Errorf("failed to find mkcomposefs: %w", err)
}
fd, err := unix.Openat(unix.AT_FDCWD, destFile, unix.O_WRONLY|unix.O_CREAT|unix.O_TRUNC|unix.O_EXCL|unix.O_CLOEXEC, 0o644)
outFile, err := os.OpenFile(destFile, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o644)
if err != nil {
return &fs.PathError{Op: "openat", Path: destFile, Err: err}
return err
}
outFd := os.NewFile(uintptr(fd), "outFd")
fd, err = unix.Open(fmt.Sprintf("/proc/self/fd/%d", outFd.Fd()), unix.O_RDONLY|unix.O_CLOEXEC, 0)
roFile, err := os.Open(fmt.Sprintf("/proc/self/fd/%d", outFile.Fd()))
if err != nil {
outFd.Close()
return fmt.Errorf("failed to dup output file: %w", err)
outFile.Close()
return fmt.Errorf("failed to reopen %s as read-only: %w", destFile, err)
}
newFd := os.NewFile(uintptr(fd), "newFd")
defer newFd.Close()
err = func() error {
// a scope to close outFd before setting fsverity on the read-only fd.
defer outFd.Close()
// a scope to close outFile before setting fsverity on the read-only fd.
defer outFile.Close()
errBuf := &bytes.Buffer{}
cmd := exec.Command(writerJson, "--from-file", "-", "/proc/self/fd/3")
cmd.ExtraFiles = []*os.File{outFd}
cmd := exec.Command(writerJson, "--from-file", "-", "-")
cmd.Stderr = errBuf
cmd.Stdin = dumpReader
cmd.Stdout = outFile
if err := cmd.Run(); err != nil {
rErr := fmt.Errorf("failed to convert json to erofs: %w", err)
exitErr := &exec.ExitError{}
@ -92,7 +92,7 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, com
return err
}
if err := fsverity.EnableVerity("manifest file", int(newFd.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) {
if err := fsverity.EnableVerity("manifest file", int(roFile.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) {
logrus.Warningf("%s", err)
}
@ -114,45 +114,112 @@ struct lcfs_erofs_header_s {
// hasACL returns true if the erofs blob has ACLs enabled
func hasACL(path string) (bool, error) {
const LCFS_EROFS_FLAGS_HAS_ACL = (1 << 0)
const (
LCFS_EROFS_FLAGS_HAS_ACL = (1 << 0)
versionNumberSize = 4
magicNumberSize = 4
flagsSize = 4
)
fd, err := unix.Openat(unix.AT_FDCWD, path, unix.O_RDONLY|unix.O_CLOEXEC, 0)
file, err := os.Open(path)
if err != nil {
return false, &fs.PathError{Op: "openat", Path: path, Err: err}
return false, err
}
defer unix.Close(fd)
defer file.Close()
// do not worry about checking the magic number, if the file is invalid
// we will fail to mount it anyway
flags := make([]byte, 4)
nread, err := unix.Pread(fd, flags, 8)
buffer := make([]byte, versionNumberSize+magicNumberSize+flagsSize)
nread, err := file.Read(buffer)
if err != nil {
return false, fmt.Errorf("pread %q: %w", path, err)
return false, err
}
if nread != 4 {
if nread != len(buffer) {
return false, fmt.Errorf("failed to read flags from %q", path)
}
flags := buffer[versionNumberSize+magicNumberSize:]
return binary.LittleEndian.Uint32(flags)&LCFS_EROFS_FLAGS_HAS_ACL != 0, nil
}
func mountComposefsBlob(dataDir, mountPoint string) error {
blobFile := getComposefsBlob(dataDir)
loop, err := loopback.AttachLoopDeviceRO(blobFile)
if err != nil {
return err
func openBlobFile(blobFile string, hasACL, useLoopDevice bool) (int, error) {
if useLoopDevice {
loop, err := loopback.AttachLoopDeviceRO(blobFile)
if err != nil {
return -1, err
}
defer loop.Close()
blobFile = loop.Name()
}
defer loop.Close()
fsfd, err := unix.Fsopen("erofs", 0)
if err != nil {
return -1, fmt.Errorf("failed to open erofs filesystem: %w", err)
}
defer unix.Close(fsfd)
if err := unix.FsconfigSetString(fsfd, "source", blobFile); err != nil {
return -1, fmt.Errorf("failed to set source for erofs filesystem: %w", err)
}
if err := unix.FsconfigSetFlag(fsfd, "ro"); err != nil {
return -1, fmt.Errorf("failed to set erofs filesystem read-only: %w", err)
}
if !hasACL {
if err := unix.FsconfigSetFlag(fsfd, "noacl"); err != nil {
return -1, fmt.Errorf("failed to set noacl for erofs filesystem: %w", err)
}
}
if err := unix.FsconfigCreate(fsfd); err != nil {
buffer := make([]byte, 4096)
if n, _ := unix.Read(fsfd, buffer); n > 0 {
return -1, fmt.Errorf("failed to create erofs filesystem: %s: %w", strings.TrimSuffix(string(buffer[:n]), "\n"), err)
}
return -1, fmt.Errorf("failed to create erofs filesystem: %w", err)
}
mfd, err := unix.Fsmount(fsfd, 0, unix.MOUNT_ATTR_RDONLY)
if err != nil {
buffer := make([]byte, 4096)
if n, _ := unix.Read(fsfd, buffer); n > 0 {
return -1, fmt.Errorf("failed to mount erofs filesystem: %s: %w", string(buffer[:n]), err)
}
return -1, fmt.Errorf("failed to mount erofs filesystem: %w", err)
}
return mfd, nil
}
func openComposefsMount(dataDir string) (int, error) {
blobFile := getComposefsBlob(dataDir)
hasACL, err := hasACL(blobFile)
if err != nil {
return err
}
mountOpts := "ro"
if !hasACL {
mountOpts += ",noacl"
return -1, err
}
if err := unix.Mount(loop.Name(), mountPoint, "erofs", unix.MS_RDONLY, mountOpts); err != nil {
return fmt.Errorf("failed to mount erofs image at %q: %w", mountPoint, err)
if !skipMountViaFile.Load() {
fd, err := openBlobFile(blobFile, hasACL, false)
if err == nil || !errors.Is(err, unix.ENOTBLK) {
return fd, err
}
logrus.Debugf("The current kernel doesn't support mounting EROFS directly from a file, fallback to a loopback device")
skipMountViaFile.Store(true)
}
return openBlobFile(blobFile, hasACL, true)
}
func mountComposefsBlob(dataDir, mountPoint string) error {
mfd, err := openComposefsMount(dataDir)
if err != nil {
return err
}
defer unix.Close(mfd)
if err := unix.MoveMount(mfd, "", unix.AT_FDCWD, mountPoint, unix.MOVE_MOUNT_F_EMPTY_PATH); err != nil {
return fmt.Errorf("failed to move mount to %q: %w", mountPoint, err)
}
return nil
}
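The rewritten composefs path above abandons mount(2) plus a mandatory loop device for the new-style mount API: fsopen opens a filesystem context, fsconfig sets the source and flags, fsmount yields a detached mount fd, and move_mount attaches it at the target. A trimmed sketch of the same sequence, assuming a recent kernel; the blob path and mount point are hypothetical:

```go
//go:build linux

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func mountEROFS(blobFile, mountPoint string) error {
	// Open a configuration context for the erofs filesystem driver.
	fsfd, err := unix.Fsopen("erofs", 0)
	if err != nil {
		return fmt.Errorf("fsopen erofs: %w", err)
	}
	defer unix.Close(fsfd)

	// Point the context at the image file; on kernels before 6.12 this
	// fails with ENOTBLK for plain files, which is the condition the
	// driver's loopback fallback catches.
	if err := unix.FsconfigSetString(fsfd, "source", blobFile); err != nil {
		return fmt.Errorf("fsconfig source: %w", err)
	}
	if err := unix.FsconfigSetFlag(fsfd, "ro"); err != nil {
		return fmt.Errorf("fsconfig ro: %w", err)
	}
	if err := unix.FsconfigCreate(fsfd); err != nil {
		return fmt.Errorf("fsconfig create: %w", err)
	}

	// Materialize a detached, read-only mount...
	mfd, err := unix.Fsmount(fsfd, 0, unix.MOUNT_ATTR_RDONLY)
	if err != nil {
		return fmt.Errorf("fsmount: %w", err)
	}
	defer unix.Close(mfd)

	// ...and graft it onto the target directory.
	if err := unix.MoveMount(mfd, "", unix.AT_FDCWD, mountPoint, unix.MOVE_MOUNT_F_EMPTY_PATH); err != nil {
		return fmt.Errorf("move_mount to %q: %w", mountPoint, err)
	}
	return nil
}

func main() {
	if err := mountEROFS("/var/lib/example/blob.erofs", "/mnt/example"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```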

View file

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package overlay

View file

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package overlay
@ -103,20 +102,20 @@ func mountOverlayFromMain() {
// paths, but we don't want to mess with other options.
var upperk, upperv, workk, workv, lowerk, lowerv, labelk, labelv, others string
for _, arg := range strings.Split(options.Label, ",") {
kv := strings.SplitN(arg, "=", 2)
switch kv[0] {
key, val, _ := strings.Cut(arg, "=")
switch key {
case "upperdir":
upperk = "upperdir="
upperv = kv[1]
upperv = val
case "workdir":
workk = "workdir="
workv = kv[1]
workv = val
case "lowerdir":
lowerk = "lowerdir="
lowerv = kv[1]
lowerv = val
case "label":
labelk = "label="
labelv = kv[1]
labelv = val
default:
if others == "" {
others = arg
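strings.Cut (Go 1.18+) replaces the SplitN indexing above; besides being shorter, it cannot panic on an option that carries no '=' at all, where kv[1] would have been an out-of-range index. A quick sketch:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, arg := range []string{"upperdir=/tmp/upper", "volatile"} {
		// For "volatile", Cut returns ("volatile", "", false) instead of
		// forcing an index into a one-element SplitN result.
		key, val, ok := strings.Cut(arg, "=")
		fmt.Printf("key=%q val=%q found=%v\n", key, val, ok)
	}
}
```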

View file

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package overlay
@ -14,6 +13,7 @@ import (
"os/exec"
"path"
"path/filepath"
"slices"
"strconv"
"strings"
"sync"
@ -126,6 +126,7 @@ type Driver struct {
naiveDiff graphdriver.DiffDriver
supportsDType bool
supportsVolatile *bool
supportsDataOnly *bool
usingMetacopy bool
usingComposefs bool
@ -158,30 +159,7 @@ func init() {
}
func hasMetacopyOption(opts []string) bool {
for _, s := range opts {
if s == "metacopy=on" {
return true
}
}
return false
}
func stripOption(opts []string, option string) []string {
for i, s := range opts {
if s == option {
return stripOption(append(opts[:i], opts[i+1:]...), option)
}
}
return opts
}
func hasVolatileOption(opts []string) bool {
for _, s := range opts {
if s == "volatile" {
return true
}
}
return false
return slices.Contains(opts, "metacopy=on")
}
func getMountProgramFlagFile(path string) string {
@ -294,6 +272,18 @@ func (d *Driver) getSupportsVolatile() (bool, error) {
return supportsVolatile, nil
}
func (d *Driver) getSupportsDataOnly() (bool, error) {
if d.supportsDataOnly != nil {
return *d.supportsDataOnly, nil
}
supportsDataOnly, err := supportsDataOnlyLayersCached(d.home, d.runhome)
if err != nil {
return false, err
}
d.supportsDataOnly = &supportsDataOnly
return supportsDataOnly, nil
}
// isNetworkFileSystem checks if the specified file system is supported by native overlay
// as backing store when running in a user namespace.
func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool {
@ -382,13 +372,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
if unshare.IsRootless() {
return nil, fmt.Errorf("composefs is not supported in user namespaces")
}
supportsDataOnly, err := supportsDataOnlyLayersCached(home, runhome)
if err != nil {
return nil, err
}
if !supportsDataOnly {
return nil, fmt.Errorf("composefs is not supported on this kernel: %w", graphdriver.ErrIncompatibleFS)
}
if _, err := getComposeFsHelper(); err != nil {
return nil, fmt.Errorf("composefs helper program not found: %w", err)
}
@ -606,7 +589,7 @@ func parseOptions(options []string) (*overlayOptions, error) {
m := os.FileMode(mask)
o.forceMask = &m
default:
return nil, fmt.Errorf("overlay: Unknown option %s", key)
return nil, fmt.Errorf("overlay: unknown option %s", key)
}
}
return o, nil
@ -891,11 +874,11 @@ func (d *Driver) pruneStagingDirectories() bool {
anyPresent := false
homeStagingDir := filepath.Join(d.home, stagingDir)
dirs, err := os.ReadDir(homeStagingDir)
stagingDirBase := filepath.Join(d.homeDirForImageStore(), stagingDir)
dirs, err := os.ReadDir(stagingDirBase)
if err == nil {
for _, dir := range dirs {
stagingDirToRemove := filepath.Join(homeStagingDir, dir.Name())
stagingDirToRemove := filepath.Join(stagingDirBase, dir.Name())
lock, err := lockfile.GetLockFile(filepath.Join(stagingDirToRemove, stagingLockFile))
if err != nil {
anyPresent = true
@ -1227,17 +1210,22 @@ func (d *Driver) getAllImageStores() []string {
return additionalImageStores
}
func (d *Driver) dir2(id string, useImageStore bool) (string, string, bool) {
var homedir string
if useImageStore && d.imageStore != "" {
homedir = path.Join(d.imageStore, d.name)
} else {
homedir = d.home
// homeDirForImageStore returns the home directory to use when an image store is configured
func (d *Driver) homeDirForImageStore() string {
if d.imageStore != "" {
return path.Join(d.imageStore, d.name)
}
// If there is not an image store configured, use the same
// store
return d.home
}
func (d *Driver) dir2(id string, useImageStore bool) (string, string, bool) {
homedir := d.home
if useImageStore {
homedir = d.homeDirForImageStore()
}
newpath := path.Join(homedir, id)
if err := fileutils.Exists(newpath); err != nil {
for _, p := range d.getAllImageStores() {
l := path.Join(p, d.name, id)
@ -1455,6 +1443,38 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
if err := fileutils.Exists(dir); err != nil {
return "", err
}
if _, err := redirectDiffIfAdditionalLayer(path.Join(dir, "diff"), true); err != nil {
return "", err
}
// user namespace requires this to move a directory from lower to upper.
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UidMaps, options.GidMaps)
if err != nil {
return "", err
}
mergedDir := d.getMergedDir(id, dir, inAdditionalStore)
// Attempt to create the merged dir if it doesn't exist, but don't chown an already existing directory (it might be in an additional store)
if err := idtools.MkdirAllAndChownNew(mergedDir, 0o700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil && !os.IsExist(err) {
return "", err
}
if count := d.ctr.Increment(mergedDir); count > 1 {
return mergedDir, nil
}
defer func() {
if retErr != nil {
if c := d.ctr.Decrement(mergedDir); c <= 0 {
if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil {
// Ignore EINVAL, it means the directory is not a mount point and it can happen
// if the current function fails before the mount point is created.
if !errors.Is(mntErr, unix.EINVAL) {
logrus.Errorf("Unmounting %v: %v", mergedDir, mntErr)
}
}
}
}
}()
readWrite := !inAdditionalStore
@ -1492,19 +1512,18 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
if err := unix.Uname(&uts); err == nil {
release = " " + string(uts.Release[:]) + " " + string(uts.Version[:])
}
logrus.StandardLogger().Logf(logLevel, "Ignoring global metacopy option, not supported with booted kernel"+release)
logrus.StandardLogger().Logf(logLevel, "Ignoring global metacopy option, not supported with booted kernel %s", release)
} else {
logrus.Debugf("Ignoring global metacopy option, the mount program doesn't support it")
}
}
optsList = stripOption(optsList, "metacopy=on")
optsList = slices.DeleteFunc(optsList, func(opt string) bool {
return opt == "metacopy=on"
})
}
for _, o := range optsList {
if o == "ro" {
readWrite = false
break
}
if slices.Contains(optsList, "ro") {
readWrite = false
}
lowers, err := os.ReadFile(path.Join(dir, lowerFile))
@ -1560,7 +1579,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}()
composeFsLayers := []string{}
composeFsLayersDir := filepath.Join(dir, "composefs-layers")
maybeAddComposefsMount := func(lowerID string, i int, readWrite bool) (string, error) {
composefsBlob := d.getComposefsData(lowerID)
if err := fileutils.Exists(composefsBlob); err != nil {
@ -1575,7 +1593,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
return "", fmt.Errorf("cannot mount a composefs layer as writeable")
}
dest := filepath.Join(composeFsLayersDir, fmt.Sprintf("%d", i))
dest := d.getStorePrivateDirectory(id, dir, fmt.Sprintf("composefs-layers/%d", i), inAdditionalStore)
if err := os.MkdirAll(dest, 0o700); err != nil {
return "", err
}
@ -1683,12 +1701,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
optsList = append(optsList, "metacopy=on", "redirect_dir=on")
}
// user namespace requires this to move a directory from lower to upper.
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UidMaps, options.GidMaps)
if err != nil {
return "", err
}
if len(absLowers) == 0 {
absLowers = append(absLowers, path.Join(dir, "empty"))
}
@ -1703,33 +1715,13 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
}
mergedDir := d.getMergedDir(id, dir, inAdditionalStore)
// Attempt to create the merged dir only if it doesn't exist.
if err := fileutils.Exists(mergedDir); err != nil && os.IsNotExist(err) {
if err := idtools.MkdirAllAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) {
return "", err
}
}
if count := d.ctr.Increment(mergedDir); count > 1 {
return mergedDir, nil
}
defer func() {
if retErr != nil {
if c := d.ctr.Decrement(mergedDir); c <= 0 {
if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil {
logrus.Errorf("Unmounting %v: %v", mergedDir, mntErr)
}
}
}
}()
workdir := path.Join(dir, "work")
if d.options.mountProgram == "" && unshare.IsRootless() {
optsList = append(optsList, "userxattr")
}
if options.Volatile && !hasVolatileOption(optsList) {
if options.Volatile && !slices.Contains(optsList, "volatile") {
supported, err := d.getSupportsVolatile()
if err != nil {
return "", err
@ -1790,8 +1782,16 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
lowerDirs := strings.Join(absLowers, ":")
if len(composeFsLayers) > 0 {
composeFsLayersLowerDirs := strings.Join(composeFsLayers, "::")
lowerDirs = lowerDirs + "::" + composeFsLayersLowerDirs
sep := "::"
supportsDataOnly, err := d.getSupportsDataOnly()
if err != nil {
return "", err
}
if !supportsDataOnly {
sep = ":"
}
composeFsLayersLowerDirs := strings.Join(composeFsLayers, sep)
lowerDirs = lowerDirs + sep + composeFsLayersLowerDirs
}
// absLowers is not valid anymore now as we have added composeFsLayers to it, so prevent
// its usage.
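The sep logic above encodes an overlayfs convention: a doubled colon in lowerdir marks a data-only lower layer, which contributes file data but no metadata (composefs supplies the metadata); on kernels without that feature the commit now degrades to ordinary ':' separators instead of failing at Init time. A hedged sketch of assembling such an option string, with made-up paths:

```go
package main

import (
	"fmt"
	"strings"
)

// buildLowerDirs joins regular lower layers with ":" and, when the kernel
// supports data-only layers, appends the composefs layers after "::".
func buildLowerDirs(lowers, dataOnly []string, supportsDataOnly bool) string {
	opt := strings.Join(lowers, ":")
	if len(dataOnly) == 0 {
		return opt
	}
	sep := "::"
	if !supportsDataOnly {
		sep = ":" // degrade to ordinary lower layers
	}
	return opt + sep + strings.Join(dataOnly, sep)
}

func main() {
	lowers := []string{"/store/l1", "/store/l2"}
	data := []string{"/store/cfs1", "/store/cfs2"}
	fmt.Println("lowerdir=" + buildLowerDirs(lowers, data, true))
	// lowerdir=/store/l1:/store/l2::/store/cfs1::/store/cfs2
}
```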
@ -1877,16 +1877,36 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
return mergedDir, nil
}
// getStorePrivateDirectory returns a directory path for storing data that requires exclusive access.
// If 'inAdditionalStore' is true, the path will be under the rundir, otherwise it will be placed in
// the primary store.
func (d *Driver) getStorePrivateDirectory(id, layerDir, subdir string, inAdditionalStore bool) string {
if inAdditionalStore {
return path.Join(d.runhome, id, subdir)
}
return path.Join(layerDir, subdir)
}
// getMergedDir returns the directory path that should be used as the mount point for the overlayfs.
func (d *Driver) getMergedDir(id, dir string, inAdditionalStore bool) string {
// If the layer is in an additional store, the lock we might hold only a reading lock. To prevent
// races with other processes, use a private directory under the main store rundir. At this point, the
// current process is holding an exclusive lock on the store, and since the rundir cannot be shared for
// different stores, it is safe to assume the current process has exclusive access to it.
if inAdditionalStore {
return path.Join(d.runhome, id, "merged")
}
return path.Join(dir, "merged")
// Ordinarily, .Get() (layer mounting) callers are supposed to guarantee exclusion.
//
// But additional stores are initialized with RO locks and don't support a write
// lock operation at all; and naiveDiff operations cause mounts/unmounts, so they might
// happen on code paths where we might only hold a RO lock for the additional store.
// To prevent races with other processes mounting or unmounting the layer,
// use a private directory under the main store rundir, not the "merged" directory inside the
// original layer store holding the layer data.
//
// To support this, contrary to the _general_ locking rules for .Diff / .Changes (which allow a RO lock),
// the top-level Store implementation uses an exclusive lock for the primary layer store;
// and since the rundir cannot be shared for different stores, it is safe to assume the
// current process has exclusive access to it.
//
// TO DO: LOCKING BUG: the .DiffSize operation does not currently hold an exclusive lock on the primary store.
// (_Some_ of the callers might be better ported to use a metadata-only size computation instead of DiffSize,
// but DiffSize probably needs to remain for computing sizes of containers' RW layers.)
return d.getStorePrivateDirectory(id, dir, "merged", inAdditionalStore)
}
// Put unmounts the mount path created for the give id.
@ -1934,7 +1954,7 @@ func (d *Driver) Put(id string) error {
// If fusermount|fusermount3 failed to unmount the FUSE file system, make sure all
// pending changes are propagated to the file system
if !unmounted {
fd, err := unix.Open(mountpoint, unix.O_DIRECTORY, 0)
fd, err := unix.Open(mountpoint, unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
if err == nil {
if err := unix.Syncfs(fd); err != nil {
logrus.Debugf("Error Syncfs(%s) - %v", mountpoint, err)
@ -2094,9 +2114,14 @@ func (g *overlayFileGetter) Close() error {
return errs.ErrorOrNil()
}
func (d *Driver) getStagingDir(id string) string {
_, homedir, _ := d.dir2(id, d.imageStore != "")
return filepath.Join(homedir, stagingDir)
// newStagingDir creates a new staging directory and returns the path to it.
func (d *Driver) newStagingDir() (string, error) {
stagingDirBase := filepath.Join(d.homeDirForImageStore(), stagingDir)
err := os.MkdirAll(stagingDirBase, 0o700)
if err != nil && !os.IsExist(err) {
return "", err
}
return os.MkdirTemp(stagingDirBase, "")
}
// DiffGetter returns a FileGetCloser that can read files from the directory that
@ -2128,24 +2153,16 @@ func (d *Driver) DiffGetter(id string) (_ graphdriver.FileGetCloser, Err error)
for _, diffDir := range diffDirs {
// diffDir has the form $GRAPH_ROOT/overlay/$ID/diff, so grab the $ID from the parent directory
id := path.Base(path.Dir(diffDir))
composefsBlob := d.getComposefsData(id)
if fileutils.Exists(composefsBlob) != nil {
composefsData := d.getComposefsData(id)
if fileutils.Exists(composefsData) != nil {
// not a composefs layer, ignore it
continue
}
dir, err := os.MkdirTemp(d.runhome, "composefs-mnt")
fd, err := openComposefsMount(composefsData)
if err != nil {
return nil, err
}
if err := mountComposefsBlob(composefsBlob, dir); err != nil {
return nil, err
}
fd, err := os.Open(dir)
if err != nil {
return nil, err
}
composefsMounts[diffDir] = fd
_ = unix.Unmount(dir, unix.MNT_DETACH)
composefsMounts[diffDir] = os.NewFile(uintptr(fd), composefsData)
}
return &overlayFileGetter{diffDirs: diffDirs, composefsMounts: composefsMounts}, nil
}
@ -2164,14 +2181,14 @@ func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error {
func supportsDataOnlyLayersCached(home, runhome string) (bool, error) {
feature := "dataonly-layers"
overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature)
overlayCacheResult, _, err := cachedFeatureCheck(runhome, feature)
if err == nil {
if overlayCacheResult {
logrus.Debugf("Cached value indicated that data-only layers for overlay are supported")
return true, nil
}
logrus.Debugf("Cached value indicated that data-only layers for overlay are not supported")
return false, errors.New(overlayCacheText)
return false, nil
}
supportsDataOnly, err := supportsDataOnlyLayers(home)
if err2 := cachedFeatureRecord(runhome, feature, supportsDataOnly, ""); err2 != nil {
@ -2181,7 +2198,7 @@ func supportsDataOnlyLayersCached(home, runhome string) (bool, error) {
}
// ApplyDiffWithDiffer applies the changes in the new layer using the specified function
func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, errRet error) {
func (d *Driver) ApplyDiffWithDiffer(options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, errRet error) {
var idMappings *idtools.IDMappings
var forceMask *os.FileMode
@ -2197,46 +2214,31 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
idMappings = &idtools.IDMappings{}
}
var applyDir string
if id == "" {
stagingDir := d.getStagingDir(id)
err := os.MkdirAll(stagingDir, 0o700)
if err != nil && !os.IsExist(err) {
return graphdriver.DriverWithDifferOutput{}, err
}
layerDir, err := os.MkdirTemp(stagingDir, "")
if err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
perms := defaultPerms
if forceMask != nil {
perms = *forceMask
}
applyDir = filepath.Join(layerDir, "dir")
if err := os.Mkdir(applyDir, perms); err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
lock, err := lockfile.GetLockFile(filepath.Join(layerDir, stagingLockFile))
if err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
defer func() {
if errRet != nil {
delete(d.stagingDirsLocks, layerDir)
lock.Unlock()
}
}()
d.stagingDirsLocks[layerDir] = lock
lock.Lock()
} else {
var err error
applyDir, err = d.getDiffPath(id)
if err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
layerDir, err := d.newStagingDir()
if err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
perms := defaultPerms
if forceMask != nil {
perms = *forceMask
}
applyDir := filepath.Join(layerDir, "dir")
if err := os.Mkdir(applyDir, perms); err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
lock, err := lockfile.GetLockFile(filepath.Join(layerDir, stagingLockFile))
if err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
defer func() {
if errRet != nil {
delete(d.stagingDirsLocks, layerDir)
lock.Unlock()
}
}()
d.stagingDirsLocks[layerDir] = lock
lock.Lock()
logrus.Debugf("Applying differ in %s", applyDir)
@ -2273,10 +2275,6 @@ func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *gr
}
}()
if filepath.Dir(parentStagingDir) != d.getStagingDir(id) {
return fmt.Errorf("%q is not a staging directory", stagingDirectory)
}
diffPath, err := d.getDiffPath(id)
if err != nil {
return err
@ -2361,7 +2359,7 @@ func (d *Driver) getComposefsData(id string) string {
func (d *Driver) getDiffPath(id string) (string, error) {
dir := d.dir(id)
return redirectDiffIfAdditionalLayer(path.Join(dir, "diff"))
return redirectDiffIfAdditionalLayer(path.Join(dir, "diff"), false)
}
func (d *Driver) getLowerDiffPaths(id string) ([]string, error) {
@ -2370,7 +2368,7 @@ func (d *Driver) getLowerDiffPaths(id string) ([]string, error) {
return nil, err
}
for i, l := range layers {
layers[i], err = redirectDiffIfAdditionalLayer(l)
layers[i], err = redirectDiffIfAdditionalLayer(l, false)
if err != nil {
return nil, err
}
@ -2713,12 +2711,17 @@ func notifyReleaseAdditionalLayer(al string) {
// redirectDiffIfAdditionalLayer checks if the passed diff path is Additional Layer and
// returns the redirected path. If the passed diff is not the one in Additional Layer
// Store, it returns the original path without changes.
func redirectDiffIfAdditionalLayer(diffPath string) (string, error) {
func redirectDiffIfAdditionalLayer(diffPath string, checkExistence bool) (string, error) {
if ld, err := os.Readlink(diffPath); err == nil {
// diff is the link to Additional Layer Store
if !path.IsAbs(ld) {
return "", fmt.Errorf("linkpath must be absolute (got: %q)", ld)
}
if checkExistence {
if err := fileutils.Exists(ld); err != nil {
return "", fmt.Errorf("failed to access to the linked additional layer: %w", err)
}
}
diffPath = ld
} else if err.(*os.PathError).Err != syscall.EINVAL {
return "", err

View file

@ -1,5 +1,4 @@
//go:build linux && cgo && !exclude_disk_quota
// +build linux,cgo,!exclude_disk_quota
package overlay

View file

@ -1,6 +1,4 @@
//go:build linux && (!cgo || exclude_disk_quota)
// +build linux
// +build !cgo exclude_disk_quota
package overlay

View file

@ -1,5 +1,4 @@
//go:build linux && !cgo
// +build linux,!cgo
package overlay
@ -7,6 +6,10 @@ import (
"fmt"
)
func openComposefsMount(dataDir string) (int, error) {
return 0, fmt.Errorf("composefs not supported on this build")
}
func getComposeFsHelper() (string, error) {
return "", fmt.Errorf("composefs not supported on this build")
}

View file

@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
package overlay

View file

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package overlay

View file

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package overlayutils

View file

@ -1,5 +1,4 @@
//go:build linux && !exclude_disk_quota && cgo
// +build linux,!exclude_disk_quota,cgo
//
// projectquota.go - implements XFS project quota controls
@ -19,6 +18,16 @@ package quota
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#ifndef FS_XFLAG_PROJINHERIT
struct fsxattr {
__u32 fsx_xflags;
__u32 fsx_extsize;
__u32 fsx_nextents;
__u32 fsx_projid;
unsigned char fsx_pad[12];
};
#define FS_XFLAG_PROJINHERIT 0x00000200
#endif
#ifndef FS_IOC_FSGETXATTR
#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr)
#endif
@ -163,6 +172,11 @@ func NewControl(basePath string) (*Control, error) {
return nil, err
}
// Clear inherit flag from top-level directory if necessary.
if err := stripProjectInherit(basePath); err != nil {
return nil, err
}
//
// get first project id to be used for next container
//
@ -340,6 +354,8 @@ func setProjectID(targetPath string, projectID uint32) error {
}
defer closeDir(dir)
logrus.Debugf("Setting quota project ID %d on %s", projectID, targetPath)
var fsx C.struct_fsxattr
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
uintptr(unsafe.Pointer(&fsx)))
@ -347,6 +363,7 @@ func setProjectID(targetPath string, projectID uint32) error {
return fmt.Errorf("failed to get projid for %s: %w", targetPath, errno)
}
fsx.fsx_projid = C.__u32(projectID)
fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT
_, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
@ -356,6 +373,36 @@ func setProjectID(targetPath string, projectID uint32) error {
return nil
}
// stripProjectInherit strips the project inherit flag from a directory.
// Used on the top-level directory to ensure project IDs are only inherited for
// files in directories we set quotas on - not the directories we want to set
// the quotas on, as that would make everything use the same project ID.
func stripProjectInherit(targetPath string) error {
dir, err := openDir(targetPath)
if err != nil {
return err
}
defer closeDir(dir)
var fsx C.struct_fsxattr
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
return fmt.Errorf("failed to get xfs attrs for %s: %w", targetPath, errno)
}
if fsx.fsx_xflags&C.FS_XFLAG_PROJINHERIT != 0 {
// Flag is set, need to clear it.
logrus.Debugf("Clearing PROJINHERIT flag from directory %s", targetPath)
fsx.fsx_xflags = fsx.fsx_xflags &^ C.FS_XFLAG_PROJINHERIT
_, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
return fmt.Errorf("failed to clear PROJINHERIT for %s: %w", targetPath, errno)
}
}
return nil
}
// findNextProjectID - find the next project id to be used for containers
// by scanning driver home directory to find used project ids
func (q *Control) findNextProjectID() error {
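stripProjectInherit above clears a single xflag bit via the FS_IOC_FSSETXATTR ioctl; the bit twiddling itself uses Go's AND NOT operator. A tiny self-contained illustration, reusing the FS_XFLAG_PROJINHERIT value from the header excerpt above:

```go
package main

import "fmt"

// Quoted from the cgo header excerpt above.
const FS_XFLAG_PROJINHERIT = 0x00000200

func main() {
	var xflags uint32 = 0x00000a00 // PROJINHERIT plus one other flag

	if xflags&FS_XFLAG_PROJINHERIT != 0 {
		// &^ is Go's bit-clear operator: keep every bit of xflags except
		// those set in the mask, i.e. the C idiom flags &= ~mask.
		xflags = xflags &^ FS_XFLAG_PROJINHERIT
	}
	fmt.Printf("%#x\n", xflags) // prints 0x800
}
```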

View file

@ -1,5 +1,4 @@
//go:build !linux || exclude_disk_quota || !cgo
// +build !linux exclude_disk_quota !cgo
package quota

View file

@ -1,5 +1,4 @@
//go:build !exclude_graphdriver_aufs && linux
// +build !exclude_graphdriver_aufs,linux
package register

View file

@ -1,5 +1,4 @@
//go:build !exclude_graphdriver_btrfs && linux
// +build !exclude_graphdriver_btrfs,linux
package register

View file

@ -1,5 +1,4 @@
//go:build !exclude_graphdriver_overlay && linux && cgo
// +build !exclude_graphdriver_overlay,linux,cgo
package register

View file

@ -1,5 +1,4 @@
//go:build (!exclude_graphdriver_zfs && linux) || (!exclude_graphdriver_zfs && freebsd) || solaris
// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd solaris
package register

View file

@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
package vfs // import "github.com/containers/storage/drivers/vfs"

View file

@ -764,8 +764,8 @@ func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64,
buf := bufio.NewWriter(nil)
for err == nil {
base := path.Base(hdr.Name)
if strings.HasPrefix(base, archive.WhiteoutPrefix) {
name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):])
if rm, ok := strings.CutPrefix(base, archive.WhiteoutPrefix); ok {
name := path.Join(path.Dir(hdr.Name), rm)
err = w.Remove(filepath.FromSlash(name))
if err != nil {
return 0, err

View file

@ -1,5 +1,4 @@
//go:build linux || freebsd
// +build linux freebsd
package zfs
@ -393,12 +392,18 @@ func (d *Driver) Remove(id string) error {
name := d.zfsPath(id)
dataset := zfs.Dataset{Name: name}
err := dataset.Destroy(zfs.DestroyRecursive)
if err == nil {
d.Lock()
delete(d.filesystemsCache, name)
d.Unlock()
if err != nil {
// We must be tolerant in case the image has already been removed,
// for example, accidentally by hand.
if _, err1 := zfs.GetDataset(name); err1 == nil {
return err
}
logrus.WithField("storage-driver", "zfs").Debugf("Layer %s has already been removed; ignore it and continue to delete the cache", id)
}
return err
d.Lock()
delete(d.filesystemsCache, name)
d.Unlock()
return nil
}
// Get returns the mountpoint for the given id after creating the target directories if necessary.
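The zfs Remove change makes deletion idempotent: if Destroy fails but the dataset is already gone (for example, removed by hand), the error is ignored and the cache entry is still dropped. A sketch of that pattern, with stand-ins for zfs.Dataset.Destroy and zfs.GetDataset:

```go
package main

import (
	"errors"
	"fmt"
)

var errBackend = errors.New("destroy failed")

// destroy and exists are stand-ins for zfs.Dataset.Destroy and zfs.GetDataset.
func destroy(name string) error { return errBackend }
func exists(name string) bool   { return false } // dataset already removed by hand

func removeLayer(name string, cache map[string]bool) error {
	if err := destroy(name); err != nil {
		// Propagate the failure only if the dataset still exists;
		// otherwise treat the removal as already done and tidy the cache.
		if exists(name) {
			return err
		}
		fmt.Printf("layer %s already gone; dropping cache entry\n", name)
	}
	delete(cache, name)
	return nil
}

func main() {
	cache := map[string]bool{"pool/layer1": true}
	if err := removeLayer("pool/layer1", cache); err != nil {
		fmt.Println("error:", err)
	}
	fmt.Println("cache:", cache)
}
```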

View file

@ -1,4 +1,3 @@
//go:build !linux && !freebsd
// +build !linux,!freebsd
package zfs

View file

@ -4,6 +4,7 @@ import (
"fmt"
"os"
"path/filepath"
"slices"
"strings"
"sync"
"time"
@ -181,18 +182,18 @@ func copyImage(i *Image) *Image {
return &Image{
ID: i.ID,
Digest: i.Digest,
Digests: copyDigestSlice(i.Digests),
Names: copyStringSlice(i.Names),
NamesHistory: copyStringSlice(i.NamesHistory),
Digests: copySlicePreferringNil(i.Digests),
Names: copySlicePreferringNil(i.Names),
NamesHistory: copySlicePreferringNil(i.NamesHistory),
TopLayer: i.TopLayer,
MappedTopLayers: copyStringSlice(i.MappedTopLayers),
MappedTopLayers: copySlicePreferringNil(i.MappedTopLayers),
Metadata: i.Metadata,
BigDataNames: copyStringSlice(i.BigDataNames),
BigDataSizes: copyStringInt64Map(i.BigDataSizes),
BigDataDigests: copyStringDigestMap(i.BigDataDigests),
BigDataNames: copySlicePreferringNil(i.BigDataNames),
BigDataSizes: copyMapPreferringNil(i.BigDataSizes),
BigDataDigests: copyMapPreferringNil(i.BigDataDigests),
Created: i.Created,
ReadOnly: i.ReadOnly,
Flags: copyStringInterfaceMap(i.Flags),
Flags: copyMapPreferringNil(i.Flags),
}
}
@ -716,14 +717,14 @@ func (r *imageStore) create(id string, names []string, layer string, options Ima
Digest: options.Digest,
Digests: dedupeDigests(options.Digests),
Names: names,
NamesHistory: copyStringSlice(options.NamesHistory),
NamesHistory: copySlicePreferringNil(options.NamesHistory),
TopLayer: layer,
Metadata: options.Metadata,
BigDataNames: []string{},
BigDataSizes: make(map[string]int64),
BigDataDigests: make(map[string]digest.Digest),
Created: options.CreationDate,
Flags: copyStringInterfaceMap(options.Flags),
Flags: newMapFrom(options.Flags),
}
if image.Created.IsZero() {
image.Created = time.Now().UTC()
@ -863,12 +864,6 @@ func (r *imageStore) Delete(id string) error {
return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
id = image.ID
toDeleteIndex := -1
for i, candidate := range r.images {
if candidate.ID == id {
toDeleteIndex = i
}
}
delete(r.byid, id)
// This can only fail if the ID is already missing, which shouldn't happen — and in that case the index is already in the desired state anyway.
// The store's Delete method is used on various paths to recover from failures, so this should be robust against partially missing data.
@ -877,21 +872,18 @@ func (r *imageStore) Delete(id string) error {
delete(r.byname, name)
}
for _, digest := range image.Digests {
prunedList := imageSliceWithoutValue(r.bydigest[digest], image)
prunedList := slices.DeleteFunc(r.bydigest[digest], func(i *Image) bool {
return i == image
})
if len(prunedList) == 0 {
delete(r.bydigest, digest)
} else {
r.bydigest[digest] = prunedList
}
}
if toDeleteIndex != -1 {
// delete the image at toDeleteIndex
if toDeleteIndex == len(r.images)-1 {
r.images = r.images[:len(r.images)-1]
} else {
r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...)
}
}
r.images = slices.DeleteFunc(r.images, func(candidate *Image) bool {
return candidate.ID == id
})
if err := r.Save(); err != nil {
return err
}
@ -974,18 +966,7 @@ func (r *imageStore) BigDataNames(id string) ([]string, error) {
if !ok {
return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
return copyStringSlice(image.BigDataNames), nil
}
func imageSliceWithoutValue(slice []*Image, value *Image) []*Image {
modified := make([]*Image, 0, len(slice))
for _, v := range slice {
if v == value {
continue
}
modified = append(modified, v)
}
return modified
return copySlicePreferringNil(image.BigDataNames), nil
}
// Requires startWriting.
@ -1037,21 +1018,16 @@ func (r *imageStore) setBigData(image *Image, key string, data []byte, newDigest
if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
save = true
}
addName := true
for _, name := range image.BigDataNames {
if name == key {
addName = false
break
}
}
if addName {
if !slices.Contains(image.BigDataNames, key) {
image.BigDataNames = append(image.BigDataNames, key)
save = true
}
for _, oldDigest := range image.Digests {
// remove the image from the list of images in the digest-based index
if list, ok := r.bydigest[oldDigest]; ok {
prunedList := imageSliceWithoutValue(list, image)
prunedList := slices.DeleteFunc(list, func(i *Image) bool {
return i == image
})
if len(prunedList) == 0 {
delete(r.bydigest, oldDigest)
} else {
@ -1066,9 +1042,7 @@ func (r *imageStore) setBigData(image *Image, key string, data []byte, newDigest
// add the image to the list of images in the digest-based index which
// corresponds to the new digest for this item, unless it's already there
list := r.bydigest[newDigest]
if len(list) == len(imageSliceWithoutValue(list, image)) {
// the list isn't shortened by trying to prune this image from it,
// so it's not in there yet
if !slices.Contains(list, image) {
r.bydigest[newDigest] = append(list, image)
}
}

View file

@ -5,10 +5,12 @@ import (
"errors"
"fmt"
"io"
"maps"
"os"
"path"
"path/filepath"
"reflect"
"slices"
"sort"
"strings"
"sync"
@ -134,9 +136,12 @@ type Layer struct {
TOCDigest digest.Digest `json:"toc-digest,omitempty"`
// UncompressedSize is the length of the blob that was last passed to
// ApplyDiff() or create(), after we decompressed it. If
// UncompressedDigest is not set, this should be treated as if it were
// an uninitialized value.
// ApplyDiff() or create(), after we decompressed it.
//
// - If UncompressedDigest is set, this must be set to a valid value.
// - Otherwise, if TOCDigest is set, this is either valid or -1.
// - If neither of these digests is set, this should be treated as if it were
// an uninitialized value.
UncompressedSize int64 `json:"diff-size,omitempty"`
// CompressionType is the type of compression which we detected on the blob
@ -312,9 +317,8 @@ type rwLayerStore interface {
// applies its changes to a specified layer.
ApplyDiff(to string, diff io.Reader) (int64, error)
// ApplyDiffWithDiffer applies the changes through the differ callback function.
// If to is the empty string, then a staging directory is created by the driver.
ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
// applyDiffWithDifferNoLock applies the changes through the differ callback function.
applyDiffWithDifferNoLock(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
// CleanupStagingDirectory cleans up the staging directory. It can be used to clean up the staging directory on errors
CleanupStagingDirectory(stagingDirectory string) error
@ -435,7 +439,7 @@ func layerLocation(l *Layer) layerLocations {
func copyLayer(l *Layer) *Layer {
return &Layer{
ID: l.ID,
Names: copyStringSlice(l.Names),
Names: copySlicePreferringNil(l.Names),
Parent: l.Parent,
Metadata: l.Metadata,
MountLabel: l.MountLabel,
@ -450,12 +454,12 @@ func copyLayer(l *Layer) *Layer {
CompressionType: l.CompressionType,
ReadOnly: l.ReadOnly,
volatileStore: l.volatileStore,
BigDataNames: copyStringSlice(l.BigDataNames),
Flags: copyStringInterfaceMap(l.Flags),
UIDMap: copyIDMap(l.UIDMap),
GIDMap: copyIDMap(l.GIDMap),
UIDs: copyUint32Slice(l.UIDs),
GIDs: copyUint32Slice(l.GIDs),
BigDataNames: copySlicePreferringNil(l.BigDataNames),
Flags: copyMapPreferringNil(l.Flags),
UIDMap: copySlicePreferringNil(l.UIDMap),
GIDMap: copySlicePreferringNil(l.GIDMap),
UIDs: copySlicePreferringNil(l.UIDs),
GIDs: copySlicePreferringNil(l.GIDs),
}
}
@ -909,23 +913,32 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
// user of this storage area marked for deletion but didn't manage to
// actually delete.
var incompleteDeletionErrors error // = nil
var layersToDelete []*Layer
for _, layer := range r.layers {
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
}
if layerHasIncompleteFlag(layer) {
logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID)
err := r.deleteInternal(layer.ID)
if err != nil {
// Don't return the error immediately, because deleteInternal does not saveLayers();
// Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully
// deleted incomplete layers have their metadata correctly removed.
incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors,
fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
}
modifiedLocations |= layerLocation(layer)
// Important: Do not call r.deleteInternal() here. It modifies r.layers
// which causes unexpected side effects while iterating over r.layers here.
// The range loop has no idea that the underlying elements were shifted
// around.
layersToDelete = append(layersToDelete, layer)
}
}
// Now actually delete the layers
for _, layer := range layersToDelete {
logrus.Warnf("Found incomplete layer %q, deleting it", layer.ID)
err := r.deleteInternal(layer.ID)
if err != nil {
// Don't return the error immediately, because deleteInternal does not saveLayers();
// Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully
// deleted incomplete layers have their metadata correctly removed.
incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors,
fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
}
modifiedLocations |= layerLocation(layer)
}
if err := r.saveLayers(modifiedLocations); err != nil {
return false, err
}
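The load() fix above addresses the classic bug of mutating a slice while ranging over it: deleteInternal compacts r.layers mid-iteration, so the element shifted into the current slot is never visited. A minimal reproduction of the skip, followed by the collect-first shape the commit adopts:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	// Delete every "bad" element while ranging: the second "bad" is skipped,
	// because deleting index 1 shifts it into a slot the loop already passed.
	layers := []string{"good", "bad", "bad", "good"}
	for i, l := range layers {
		if l == "bad" {
			layers = slices.Delete(layers, i, i+1)
		}
	}
	fmt.Println(layers) // [good bad good]; one "bad" survived

	// Collect first, delete afterwards, as the commit does with layersToDelete.
	layers = []string{"good", "bad", "bad", "good"}
	var toDelete []string
	for _, l := range layers {
		if l == "bad" {
			toDelete = append(toDelete, l)
		}
	}
	for _, l := range toDelete {
		layers = slices.DeleteFunc(layers, func(s string) bool { return s == l })
	}
	fmt.Println(layers) // [good good]
}
```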
@ -1213,8 +1226,8 @@ func (r *layerStore) Size(name string) (int64, error) {
// We use the presence of a non-empty digest as an indicator that the size value was intentionally set, and that
// a zero value is not just present because it was never set to anything else (which can happen if the layer was
// created by a version of this library that didn't keep track of digest and size information).
if layer.TOCDigest != "" || layer.UncompressedDigest != "" {
return layer.UncompressedSize, nil
if layer.UncompressedDigest != "" || layer.TOCDigest != "" {
return layer.UncompressedSize, nil // This may return -1 if only TOCDigest is set
}
return -1, nil
}
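With the clarified contract, Size is effectively a three-way signal: a real value when UncompressedDigest is set, possibly -1 when only TOCDigest is set, and -1 meaning never recorded when neither is. A hedged sketch of a consumer honoring that, with a cut-down stand-in for storage.Layer:

```go
package main

import "fmt"

// layer is a simplified stand-in for storage.Layer.
type layer struct {
	UncompressedDigest string
	TOCDigest          string
	UncompressedSize   int64
}

// knownSize mirrors the Size() logic: only trust UncompressedSize when one
// of the digests shows it was set on purpose, and accept that a TOC-only
// layer may still report -1.
func knownSize(l layer) (int64, bool) {
	if l.UncompressedDigest != "" || l.TOCDigest != "" {
		if l.UncompressedSize >= 0 {
			return l.UncompressedSize, true
		}
	}
	return -1, false
}

func main() {
	for _, l := range []layer{
		{UncompressedDigest: "sha256:aaa", UncompressedSize: 1024},
		{TOCDigest: "sha256:bbb", UncompressedSize: -1},
		{}, // neither digest: the size field is meaningless
	} {
		if n, ok := knownSize(l); ok {
			fmt.Println("size:", n)
		} else {
			fmt.Println("size unknown")
		}
	}
}
```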
@ -1372,7 +1385,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
templateCompressedDigest, templateCompressedSize = templateLayer.CompressedDigest, templateLayer.CompressedSize
templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize
templateCompressionType = templateLayer.CompressionType
templateUIDs, templateGIDs = append([]uint32{}, templateLayer.UIDs...), append([]uint32{}, templateLayer.GIDs...)
templateUIDs, templateGIDs = slices.Clone(templateLayer.UIDs), slices.Clone(templateLayer.GIDs)
templateTSdata, err = os.ReadFile(r.tspath(templateLayer.ID))
if err != nil && !errors.Is(err, os.ErrNotExist) {
return nil, -1, err
@ -1402,9 +1415,9 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
CompressionType: templateCompressionType,
UIDs: templateUIDs,
GIDs: templateGIDs,
Flags: copyStringInterfaceMap(moreOptions.Flags),
UIDMap: copyIDMap(moreOptions.UIDMap),
GIDMap: copyIDMap(moreOptions.GIDMap),
Flags: newMapFrom(moreOptions.Flags),
UIDMap: copySlicePreferringNil(moreOptions.UIDMap),
GIDMap: copySlicePreferringNil(moreOptions.GIDMap),
BigDataNames: []string{},
volatileStore: moreOptions.Volatile,
}
@ -1564,19 +1577,9 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
// - r.layers[].MountPoint (directly and via loadMounts / saveMounts)
// - r.bymount (via loadMounts / saveMounts)
// check whether options include ro option
hasReadOnlyOpt := func(opts []string) bool {
for _, item := range opts {
if item == "ro" {
return true
}
}
return false
}
// You are not allowed to mount layers from readonly stores if they
// are not mounted read/only.
if !r.lockfile.IsReadWrite() && !hasReadOnlyOpt(options.Options) {
if !r.lockfile.IsReadWrite() && !slices.Contains(options.Options, "ro") {
return "", fmt.Errorf("not allowed to update mount locations for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
}
r.mountsLockfile.Lock()
@ -1836,14 +1839,7 @@ func (r *layerStore) setBigData(layer *Layer, key string, data io.Reader) error
return fmt.Errorf("closing bigdata file for the layer: %w", err)
}
addName := true
for _, name := range layer.BigDataNames {
if name == key {
addName = false
break
}
}
if addName {
if !slices.Contains(layer.BigDataNames, key) {
layer.BigDataNames = append(layer.BigDataNames, key)
return r.saveFor(layer)
}
@ -1856,7 +1852,7 @@ func (r *layerStore) BigDataNames(id string) ([]string, error) {
if !ok {
return nil, fmt.Errorf("locating layer with ID %q to retrieve bigdata names: %w", id, ErrImageUnknown)
}
return copyStringSlice(layer.BigDataNames), nil
return copySlicePreferringNil(layer.BigDataNames), nil
}
// Requires startReading or startWriting.
@ -1938,32 +1934,13 @@ func (r *layerStore) deleteInternal(id string) error {
delete(r.bymount, layer.MountPoint)
}
r.deleteInDigestMap(id)
toDeleteIndex := -1
for i, candidate := range r.layers {
if candidate.ID == id {
toDeleteIndex = i
break
}
}
if toDeleteIndex != -1 {
// delete the layer at toDeleteIndex
if toDeleteIndex == len(r.layers)-1 {
r.layers = r.layers[:len(r.layers)-1]
} else {
r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...)
}
}
if mountLabel != "" {
var found bool
for _, candidate := range r.layers {
if candidate.MountLabel == mountLabel {
found = true
break
}
}
if !found {
selinux.ReleaseLabel(mountLabel)
}
r.layers = slices.DeleteFunc(r.layers, func(candidate *Layer) bool {
return candidate.ID == id
})
if mountLabel != "" && !slices.ContainsFunc(r.layers, func(candidate *Layer) bool {
return candidate.MountLabel == mountLabel
}) {
selinux.ReleaseLabel(mountLabel)
}
return nil
}
@ -1971,21 +1948,15 @@ func (r *layerStore) deleteInternal(id string) error {
// Requires startWriting.
func (r *layerStore) deleteInDigestMap(id string) {
for digest, layers := range r.bycompressedsum {
for i, layerID := range layers {
if layerID == id {
layers = append(layers[:i], layers[i+1:]...)
r.bycompressedsum[digest] = layers
break
}
if i := slices.Index(layers, id); i != -1 {
layers = slices.Delete(layers, i, i+1)
r.bycompressedsum[digest] = layers
}
}
for digest, layers := range r.byuncompressedsum {
for i, layerID := range layers {
if layerID == id {
layers = append(layers[:i], layers[i+1:]...)
r.byuncompressedsum[digest] = layers
break
}
if i := slices.Index(layers, id); i != -1 {
layers = slices.Delete(layers, i, i+1)
r.byuncompressedsum[digest] = layers
}
}
}
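These refactors lean on the Go 1.21+ standard-library slices package instead of hand-rolled scans. A tiny, self-contained sketch (layer IDs invented, not from this repository) of the helpers used above:

package main

import (
	"fmt"
	"slices"
)

func main() {
	layers := []string{"a", "b", "c"}
	// slices.Index + slices.Delete remove one matching element in place,
	// mirroring the deleteInDigestMap rewrite above.
	if i := slices.Index(layers, "b"); i != -1 {
		layers = slices.Delete(layers, i, i+1)
	}
	fmt.Println(layers) // [a c]
	// slices.Contains replaces the hasReadOnlyOpt-style loop in Mount.
	fmt.Println(slices.Contains([]string{"rw", "ro"}, "ro")) // true
	// slices.DeleteFunc drops every element matching a predicate,
	// as in the layer-deletion rewrite.
	layers = slices.DeleteFunc(layers, func(id string) bool { return id == "a" })
	fmt.Println(layers) // [c]
}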
@ -2095,6 +2066,9 @@ func (r *layerStore) layerMappings(layer *Layer) *idtools.IDMappings {
}
// Requires startReading or startWriting.
//
// NOTE: Overlays implementation assumes use of an exclusive lock over the primary layer store,
// see drivers/overlay.Driver.getMergedDir.
func (r *layerStore) Changes(from, to string) ([]archive.Change, error) {
from, to, fromLayer, toLayer, err := r.findParentAndLayer(from, to)
if err != nil {
@ -2161,6 +2135,9 @@ func writeCompressedDataGoroutine(pwriter *io.PipeWriter, compressor io.WriteClo
}
// Requires startReading or startWriting.
//
// NOTE: Overlays implementation assumes use of an exclusive lock over the primary layer store,
// see drivers/overlay.Driver.getMergedDir.
func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) {
var metadata storage.Unpacker
@ -2539,15 +2516,13 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
}
for k, v := range options.Flags {
layer.Flags[k] = v
}
maps.Copy(layer.Flags, options.Flags)
}
if err = r.saveFor(layer); err != nil {
return err
}
if len(diffOutput.TarSplit) != 0 {
if diffOutput.TarSplit != nil {
tsdata := bytes.Buffer{}
compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
if err != nil {
@ -2579,37 +2554,14 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver
return err
}
// Requires startWriting.
func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
// It must be called without any c/storage locks held to allow differ to make c/storage calls.
func (r *layerStore) applyDiffWithDifferNoLock(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
ddriver, ok := r.driver.(drivers.DriverWithDiffer)
if !ok {
return nil, ErrNotSupported
}
if to == "" {
output, err := ddriver.ApplyDiffWithDiffer("", "", options, differ)
return &output, err
}
layer, ok := r.lookup(to)
if !ok {
return nil, ErrLayerUnknown
}
if options == nil {
options = &drivers.ApplyDiffWithDifferOpts{
ApplyDiffOpts: drivers.ApplyDiffOpts{
Mappings: r.layerMappings(layer),
MountLabel: layer.MountLabel,
},
}
}
output, err := ddriver.ApplyDiffWithDiffer(layer.ID, layer.Parent, options, differ)
if err != nil {
return nil, err
}
layer.UIDs = output.UIDs
layer.GIDs = output.GIDs
err = r.saveFor(layer)
output, err := ddriver.ApplyDiffWithDiffer(options, differ)
return &output, err
}

View file

@ -1,5 +1,4 @@
//go:build go1.10
// +build go1.10
package archive

View file

@ -1,5 +1,4 @@
//go:build !go1.10
// +build !go1.10
package archive

View file

@ -1,5 +1,4 @@
//go:build netbsd || freebsd || darwin
// +build netbsd freebsd darwin
package archive

View file

@ -124,8 +124,7 @@ func (overlayWhiteoutConverter) ConvertReadWithHandler(hdr *tar.Header, path str
}
// if a file was deleted and we are using overlay, we need to create a character device
if strings.HasPrefix(base, WhiteoutPrefix) {
originalBase := base[len(WhiteoutPrefix):]
if originalBase, ok := strings.CutPrefix(base, WhiteoutPrefix); ok {
originalPath := filepath.Join(dir, originalBase)
if err := handler.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {

View file

@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
package archive

View file

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package archive

View file

@ -1,5 +1,4 @@
//go:build windows
// +build windows
package archive

View file

@ -5,6 +5,7 @@ import (
"bytes"
"fmt"
"io"
"maps"
"os"
"path/filepath"
"reflect"
@ -97,8 +98,7 @@ func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
f := filepath.Base(path)
// If there is a whiteout, then the file was removed
if strings.HasPrefix(f, WhiteoutPrefix) {
originalFile := f[len(WhiteoutPrefix):]
if originalFile, ok := strings.CutPrefix(f, WhiteoutPrefix); ok {
return filepath.Join(filepath.Dir(path), originalFile), nil
}
@ -319,9 +319,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
// otherwise any previous delete/change is considered recursive
oldChildren := make(map[string]*FileInfo)
if oldInfo != nil && info.isDir() {
for k, v := range oldInfo.children {
oldChildren[k] = v
}
maps.Copy(oldChildren, oldInfo.children)
}
for name, newChild := range info.children {

View file

@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
package archive
@ -31,7 +30,7 @@ func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtool
}()
// block until both routines have returned
for i := 0; i < 2; i++ {
for range 2 {
if err := <-errs; err != nil {
return nil, nil, err
}
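The "for range 2" form relies on Go 1.22's support for ranging over an integer (consistent with the golang:1.22 images used in CI). A minimal runnable sketch of the equivalence:

package main

import "fmt"

func main() {
	// Since Go 1.22, ranging over an integer n iterates n times,
	// replacing the classic for i := 0; i < n; i++ counter loop.
	for i := range 3 {
		fmt.Println(i) // 0, 1, 2
	}
	for range 2 {
		fmt.Println("tick") // runs twice; no index needed
	}
}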

View file

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package archive

View file

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package archive

View file

@ -1,5 +1,4 @@
//go:build freebsd
// +build freebsd
package archive
@ -80,9 +79,9 @@ func parseFileFlags(fflags string) (uint32, uint32, error) {
var set, clear uint32 = 0, 0
for _, fflag := range strings.Split(fflags, ",") {
isClear := false
if strings.HasPrefix(fflag, "no") {
if clean, ok := strings.CutPrefix(fflag, "no"); ok {
isClear = true
fflag = strings.TrimPrefix(fflag, "no")
fflag = clean
}
if value, ok := flagNameToValue[fflag]; ok {
if isClear {

View file

@ -1,5 +1,4 @@
//go:build !freebsd
// +build !freebsd
package archive

View file

@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
package archive

View file

@ -83,6 +83,12 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
}
}
destVal, err := newUnpackDestination(root, dest)
if err != nil {
return err
}
defer destVal.Close()
r := tarArchive
if decompress {
decompressedArchive, err := archive.DecompressStream(tarArchive)
@ -93,7 +99,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
r = decompressedArchive
}
return invokeUnpack(r, dest, options, root)
return invokeUnpack(r, destVal, options)
}
// Tar tars the requested path while chrooted to the specified root.

View file

@ -6,12 +6,26 @@ import (
"github.com/containers/storage/pkg/archive"
)
type unpackDestination struct {
dest string
}
func (dst *unpackDestination) Close() error {
return nil
}
// newUnpackDestination is a no-op on this platform
func newUnpackDestination(root, dest string) (*unpackDestination, error) {
return &unpackDestination{
dest: dest,
}, nil
}
func invokeUnpack(decompressedArchive io.Reader,
dest string,
options *archive.TarOptions, root string,
dest *unpackDestination,
options *archive.TarOptions,
) error {
_ = root // Restricting the operation to this root is not implemented on macOS
return archive.Unpack(decompressedArchive, dest, options)
return archive.Unpack(decompressedArchive, dest.dest, options)
}
func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {

View file

@ -1,5 +1,4 @@
//go:build !windows && !darwin
// +build !windows,!darwin
package chrootarchive
@ -9,15 +8,41 @@ import (
"flag"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
"runtime"
"strings"
"golang.org/x/sys/unix"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/reexec"
)
type unpackDestination struct {
root *os.File
dest string
}
func (dst *unpackDestination) Close() error {
return dst.root.Close()
}
// tarOptionsDescriptor is passed as an extra file
const tarOptionsDescriptor = 3
// rootFileDescriptor is passed as an extra file
const rootFileDescriptor = 4
// procPathForFd gives us a string for a descriptor.
// Note that while Linux supports actually *reading* this
// path, FreeBSD and other platforms don't; but in this codebase
// we only compare strings.
func procPathForFd(fd int) string {
return fmt.Sprintf("/proc/self/fd/%d", fd)
}
// untar is the entry-point for storage-untar on re-exec. This is not used on
// Windows as it does not support chroot, hence no point sandboxing through
chroot and reexec.
@ -28,7 +53,7 @@ func untar() {
var options archive.TarOptions
// read the options from the pipe "ExtraFiles"
if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
if err := json.NewDecoder(os.NewFile(tarOptionsDescriptor, "options")).Decode(&options); err != nil {
fatal(err)
}
@ -38,7 +63,17 @@ func untar() {
root = flag.Arg(1)
}
if root == "" {
// FreeBSD doesn't have proc/self, but we can handle it here
if root == procPathForFd(rootFileDescriptor) {
// Take ownership to ensure it's closed; no need to leak
// this afterwards.
rootFd := os.NewFile(rootFileDescriptor, "tar-root")
defer rootFd.Close()
if err := unix.Fchdir(int(rootFd.Fd())); err != nil {
fatal(err)
}
root = "."
} else if root == "" {
root = dst
}
@ -57,11 +92,35 @@ func untar() {
os.Exit(0)
}
func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
// newUnpackDestination takes a root directory and a destination which
// must be underneath it, and returns an object that can unpack
// in the target root using a file descriptor.
func newUnpackDestination(root, dest string) (*unpackDestination, error) {
if root == "" {
return errors.New("must specify a root to chroot to")
return nil, errors.New("must specify a root to chroot to")
}
relDest, err := filepath.Rel(root, dest)
if err != nil {
return nil, err
}
if relDest == "." {
relDest = "/"
}
if relDest[0] != '/' {
relDest = "/" + relDest
}
rootfdRaw, err := unix.Open(root, unix.O_RDONLY|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
if err != nil {
return nil, &fs.PathError{Op: "open", Path: root, Err: err}
}
return &unpackDestination{
root: os.NewFile(uintptr(rootfdRaw), "rootfs"),
dest: relDest,
}, nil
}
func invokeUnpack(decompressedArchive io.Reader, dest *unpackDestination, options *archive.TarOptions) error {
// We can't pass a potentially large exclude list directly via cmd line
// because we easily overrun the kernel's max argument/environment size
// when the full image list is passed (e.g. when this is used by
@ -72,24 +131,13 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
return fmt.Errorf("untar pipe failure: %w", err)
}
if root != "" {
relDest, err := filepath.Rel(root, dest)
if err != nil {
return err
}
if relDest == "." {
relDest = "/"
}
if relDest[0] != '/' {
relDest = "/" + relDest
}
dest = relDest
}
cmd := reexec.Command("storage-untar", dest, root)
cmd := reexec.Command("storage-untar", dest.dest, procPathForFd(rootFileDescriptor))
cmd.Stdin = decompressedArchive
cmd.ExtraFiles = append(cmd.ExtraFiles, r)
// If you change this, change tarOptionsDescriptor above
cmd.ExtraFiles = append(cmd.ExtraFiles, r) // fd 3
// If you change this, change rootFileDescriptor above too
cmd.ExtraFiles = append(cmd.ExtraFiles, dest.root) // fd 4
output := bytes.NewBuffer(nil)
cmd.Stdout = output
cmd.Stderr = output
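The reworked unpack path hands the chroot target to the re-exec'd child as an inherited directory file descriptor rather than a path string: cmd.ExtraFiles numbering starts at fd 3 in the child, so the options pipe arrives on fd 3 and the root directory on fd 4, and the child recognizes the /proc/self/fd/4 sentinel and Fchdir()s into it. A minimal, runnable sketch of the same parent/child fd-passing pattern (simplified to a single extra file on fd 3; the /tmp path is only for demonstration):

package main

import (
	"fmt"
	"os"
	"os/exec"

	"golang.org/x/sys/unix"
)

func main() {
	if len(os.Args) > 1 && os.Args[1] == "child" {
		// ExtraFiles are numbered from 3 in the child; adopt the
		// inherited directory fd and make it the working directory.
		root := os.NewFile(3, "root")
		defer root.Close()
		if err := unix.Fchdir(int(root.Fd())); err != nil {
			panic(err)
		}
		wd, _ := os.Getwd()
		fmt.Println("child is working in:", wd)
		return
	}
	// Parent: open the directory once and pass the descriptor down,
	// so the child never has to trust or re-resolve a path string.
	dir, err := os.Open("/tmp")
	if err != nil {
		panic(err)
	}
	defer dir.Close()
	cmd := exec.Command(os.Args[0], "child")
	cmd.ExtraFiles = []*os.File{dir} // becomes fd 3 in the child
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}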

View file

@ -7,19 +7,34 @@ import (
"github.com/containers/storage/pkg/longpath"
)
type unpackDestination struct {
dest string
}
func (dst *unpackDestination) Close() error {
return nil
}
// newUnpackDestination is a no-op on this platform
func newUnpackDestination(root, dest string) (*unpackDestination, error) {
return &unpackDestination{
dest: dest,
}, nil
}
// chroot is not supported by Windows
func chroot(path string) error {
return nil
}
func invokeUnpack(decompressedArchive io.Reader,
dest string,
options *archive.TarOptions, root string,
dest *unpackDestination,
options *archive.TarOptions,
) error {
// Windows is different to Linux here because Windows does not support
// chroot. Hence there is no point sandboxing a chrooted process to
// do the unpack. We call inline instead within the daemon process.
return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options)
return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest.dest), options)
}
func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {

View file

@ -8,7 +8,7 @@ import (
"path/filepath"
"github.com/containers/storage/pkg/mount"
"github.com/syndtr/gocapability/capability"
"github.com/moby/sys/capability"
"golang.org/x/sys/unix"
)

View file

@ -1,5 +1,4 @@
//go:build !windows && !linux && !darwin
// +build !windows,!linux,!darwin
package chrootarchive

View file

@ -1,5 +1,4 @@
//go:build !windows && !darwin
// +build !windows,!darwin
package chrootarchive

View file

@ -1,5 +1,4 @@
//go:build !windows && !darwin
// +build !windows,!darwin
package chrootarchive

View file

@ -1,5 +1,4 @@
//go:build !windows && !darwin
// +build !windows,!darwin
package chrootarchive

View file

@ -65,11 +65,10 @@ type layer struct {
}
type layersCache struct {
layers []*layer
refs int
store storage.Store
mutex sync.RWMutex
created time.Time
layers []*layer
refs int
store storage.Store
mutex sync.RWMutex
}
var (
@ -83,6 +82,7 @@ func (c *layer) release() {
if err := unix.Munmap(c.mmapBuffer); err != nil {
logrus.Warnf("Error Munmap: layer %q: %v", c.id, err)
}
c.mmapBuffer = nil
}
}
@ -107,14 +107,13 @@ func (c *layersCache) release() {
func getLayersCacheRef(store storage.Store) *layersCache {
cacheMutex.Lock()
defer cacheMutex.Unlock()
if cache != nil && cache.store == store && time.Since(cache.created).Minutes() < 10 {
if cache != nil && cache.store == store {
cache.refs++
return cache
}
cache := &layersCache{
store: store,
refs: 1,
created: time.Now(),
cache = &layersCache{
store: store,
refs: 1,
}
return cache
}
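The 10-minute expiry on the shared cache is gone: the singleton is now reused for as long as the store matches, and the change from "cache :=" to "cache =" means the new instance is actually stored in the package-level variable rather than a shadowing local. A minimal sketch (invented names, not this package's API) of the acquire/release reference-counting pattern this relies on:

package main

import (
	"fmt"
	"sync"
)

type cacheObj struct{ refs int }

var (
	mu        sync.Mutex
	singleton *cacheObj
)

func acquire() *cacheObj {
	mu.Lock()
	defer mu.Unlock()
	if singleton != nil {
		singleton.refs++ // reuse the existing instance
		return singleton
	}
	singleton = &cacheObj{refs: 1} // assign the package-level variable
	return singleton
}

func release(c *cacheObj) {
	mu.Lock()
	defer mu.Unlock()
	c.refs--
	if c.refs == 0 {
		// Last user gone: in the real code this is where mmap'ed
		// buffers are unmapped; here we just drop the singleton.
		singleton = nil
	}
}

func main() {
	a := acquire()
	b := acquire()
	fmt.Println(a == b) // true: one shared instance, refs == 2
	release(a)
	release(b) // refs reaches 0, resources released
}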
@ -183,6 +182,9 @@ func makeBinaryDigest(stringDigest string) ([]byte, error) {
return buf, nil
}
// loadLayerCache attempts to load the cache file for the specified layer.
// If the cache file is not present or it is using a different cache file version, then
// the function returns (nil, nil).
func (c *layersCache) loadLayerCache(layerID string) (_ *layer, errRet error) {
buffer, mmapBuffer, err := c.loadLayerBigData(layerID, cacheKey)
if err != nil && !errors.Is(err, os.ErrNotExist) {
@ -203,6 +205,9 @@ func (c *layersCache) loadLayerCache(layerID string) (_ *layer, errRet error) {
if err != nil {
return nil, err
}
if cacheFile == nil {
return nil, nil
}
return c.createLayer(layerID, cacheFile, mmapBuffer)
}
@ -269,7 +274,7 @@ func (c *layersCache) load() error {
var newLayers []*layer
for _, r := range allLayers {
// The layer is present in the store and it is already loaded. Attempt to
// re-use it if mmap'ed.
// reuse it if mmap'ed.
if l, found := loadedLayers[r.ID]; found {
// If the layer is not marked for re-load, move it to newLayers.
if !l.reloadWithMmap {
@ -289,14 +294,16 @@ func (c *layersCache) load() error {
}
if r.ReadOnly {
// if the layer is coming from a read-only store, do not attempt
// If the layer is coming from a read-only store, do not attempt
// to write to it.
// Therefore, we won't find any matches in read-only-store layers,
// unless the read-only store layer comes prepopulated with cacheKey data.
continue
}
// the cache file is either not present or broken. Try to generate it from the TOC.
l, err = c.createCacheFileFromTOC(r.ID)
if err != nil {
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
logrus.Warningf("Error creating cache file for layer %q: %v", r.ID, err)
}
if l != nil {
@ -617,6 +624,8 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin
}, nil
}
// readCacheFileFromMemory reads a cache file from a buffer.
// It can return (nil, nil) if the cache file uses a different file version than the one currently supported.
func readCacheFileFromMemory(bigDataBuffer []byte) (*cacheFile, error) {
bigData := bytes.NewReader(bigDataBuffer)
@ -779,14 +788,14 @@ func (c *layersCache) findDigestInternal(digest string) (string, string, int64,
return "", "", -1, nil
}
c.mutex.RLock()
defer c.mutex.RUnlock()
binaryDigest, err := makeBinaryDigest(digest)
if err != nil {
return "", "", 0, err
}
c.mutex.RLock()
defer c.mutex.RUnlock()
for _, layer := range c.layers {
if !layer.cacheFile.bloomFilter.maybeContains(binaryDigest) {
continue

View file

@ -2,6 +2,7 @@ package chunked
import (
archivetar "archive/tar"
"bytes"
"errors"
"fmt"
"io"
@ -14,6 +15,8 @@ import (
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
"github.com/vbatts/tar-split/archive/tar"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
expMaps "golang.org/x/exp/maps"
)
@ -136,7 +139,7 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
}
// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream.
// Returns (manifest blob, parsed manifest, tar-split blob, manifest offset).
// Returns (manifest blob, parsed manifest, tar-split blob or nil, manifest offset).
func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) ([]byte, *internal.TOC, []byte, int64, error) {
offsetMetadata := annotations[internal.ManifestInfoKey]
if offsetMetadata == "" {
@ -211,7 +214,7 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
return nil, nil, nil, 0, fmt.Errorf("unmarshaling TOC: %w", err)
}
decodedTarSplit := []byte{}
var decodedTarSplit []byte = nil
if toc.TarSplitDigest != "" {
if tarSplitChunk.Offset <= 0 {
return nil, nil, nil, 0, fmt.Errorf("TOC requires a tar-split, but the %s annotation does not describe a position", internal.TarSplitInfoKey)
@ -256,7 +259,8 @@ func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error {
}
}
if err := iterateTarSplit(tarSplit, func(hdr *tar.Header) error {
unpacker := storage.NewJSONUnpacker(bytes.NewReader(tarSplit))
if err := asm.IterateHeaders(unpacker, func(hdr *tar.Header) error {
e, ok := pendingFiles[hdr.Name]
if !ok {
return fmt.Errorf("tar-split contains an entry for %q missing in TOC", hdr.Name)
@ -284,6 +288,36 @@ func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error {
return nil
}
// tarSizeFromTarSplit computes the total tarball size, using only the tarSplit metadata
func tarSizeFromTarSplit(tarSplit []byte) (int64, error) {
var res int64 = 0
unpacker := storage.NewJSONUnpacker(bytes.NewReader(tarSplit))
for {
entry, err := unpacker.Next()
if err != nil {
if err == io.EOF {
break
}
return -1, fmt.Errorf("reading tar-split entries: %w", err)
}
switch entry.Type {
case storage.SegmentType:
res += int64(len(entry.Payload))
case storage.FileType:
// entry.Size is the “logical size”, which might not be the physical size for sparse entries;
// but the way tar-split/tar/asm.WriteOutputTarStream combines FileType entries and returned files contents,
// sparse files are not supported.
// Also https://github.com/opencontainers/image-spec/blob/main/layer.md says
// > Sparse files SHOULD NOT be used because they lack consistent support across tar implementations.
res += entry.Size
default:
return -1, fmt.Errorf("unexpected tar-split entry type %q", entry.Type)
}
}
return res, nil
}
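A runnable sketch of the same accounting (error handling trimmed): it round-trips a tiny in-memory tar through tar-split and confirms that segment payloads plus FileType sizes reproduce the original stream length, which is exactly what tarSizeFromTarSplit computes.

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"

	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

func main() {
	// Build a tiny tar stream in memory.
	var tarBuf bytes.Buffer
	tw := tar.NewWriter(&tarBuf)
	_ = tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0o644, Size: 5})
	_, _ = tw.Write([]byte("hello"))
	_ = tw.Close()
	rawSize := int64(tarBuf.Len())

	// Record tar-split metadata while draining the stream.
	var tsBuf bytes.Buffer
	r, _ := asm.NewInputTarStream(bytes.NewReader(tarBuf.Bytes()),
		storage.NewJSONPacker(&tsBuf), nil)
	_, _ = io.Copy(io.Discard, r)

	// Re-add segment payloads and file sizes, as tarSizeFromTarSplit does.
	var total int64
	up := storage.NewJSONUnpacker(bytes.NewReader(tsBuf.Bytes()))
	for {
		e, err := up.Next()
		if err == io.EOF {
			break
		}
		switch e.Type {
		case storage.SegmentType:
			total += int64(len(e.Payload))
		case storage.FileType:
			total += e.Size
		}
	}
	fmt.Println(total == rawSize) // true
}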
// ensureTimePointersMatch ensures that a and b are equal
func ensureTimePointersMatch(a, b *time.Time) error {
// We didn't always use “timeIfNotZero” when creating the TOC, so treat time.IsZero the same as nil.

View file

@ -174,7 +174,7 @@ func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *fileMeta
if usePath {
dirName := filepath.Dir(metadata.Name)
if dirName != "" {
parentFd, err := openFileUnderRoot(dirfd, dirName, unix.O_PATH|unix.O_DIRECTORY, 0)
parentFd, err := openFileUnderRoot(dirfd, dirName, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
if err != nil {
return err
}
@ -402,7 +402,7 @@ func openFileUnderRoot(dirfd int, name string, flags uint64, mode os.FileMode) (
// name is the path to open relative to dirfd.
// mode specifies the mode to use for newly created files.
func openOrCreateDirUnderRoot(dirfd int, name string, mode os.FileMode) (*os.File, error) {
fd, err := openFileUnderRootRaw(dirfd, name, unix.O_DIRECTORY|unix.O_RDONLY, 0)
fd, err := openFileUnderRootRaw(dirfd, name, unix.O_DIRECTORY|unix.O_RDONLY|unix.O_CLOEXEC, 0)
if err == nil {
return os.NewFile(uintptr(fd), name), nil
}
@ -422,7 +422,7 @@ func openOrCreateDirUnderRoot(dirfd int, name string, mode os.FileMode) (*os.Fil
return nil, &fs.PathError{Op: "mkdirat", Path: name, Err: err2}
}
fd, err = openFileUnderRootRaw(int(pDir.Fd()), baseName, unix.O_DIRECTORY|unix.O_RDONLY, 0)
fd, err = openFileUnderRootRaw(int(pDir.Fd()), baseName, unix.O_DIRECTORY|unix.O_RDONLY|unix.O_CLOEXEC, 0)
if err == nil {
return os.NewFile(uintptr(fd), name), nil
}
@ -465,7 +465,7 @@ func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *fileMetadata,
}
}
file, err := openFileUnderRoot(parentFd, base, unix.O_DIRECTORY|unix.O_RDONLY, 0)
file, err := openFileUnderRoot(parentFd, base, unix.O_DIRECTORY|unix.O_RDONLY|unix.O_CLOEXEC, 0)
if err != nil {
return err
}
@ -475,7 +475,7 @@ func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *fileMetadata,
}
func safeLink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions) error {
sourceFile, err := openFileUnderRoot(dirfd, metadata.Linkname, unix.O_PATH|unix.O_RDONLY|unix.O_NOFOLLOW, 0)
sourceFile, err := openFileUnderRoot(dirfd, metadata.Linkname, unix.O_PATH|unix.O_RDONLY|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
if err != nil {
return err
}

View file

@ -23,3 +23,22 @@ type ErrBadRequest struct { //nolint: errname
func (e ErrBadRequest) Error() string {
return "bad request"
}
// ErrFallbackToOrdinaryLayerDownload is a custom error type that
// suggests to the caller that a fallback mechanism can be used
// instead of a hard failure.
type ErrFallbackToOrdinaryLayerDownload struct {
Err error
}
func (c ErrFallbackToOrdinaryLayerDownload) Error() string {
return c.Err.Error()
}
func (c ErrFallbackToOrdinaryLayerDownload) Unwrap() error {
return c.Err
}
func newErrFallbackToOrdinaryLayerDownload(err error) error {
return ErrFallbackToOrdinaryLayerDownload{Err: err}
}
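Callers are expected to detect this type with errors.As and retry with an ordinary full-layer pull. A runnable sketch using a locally defined stand-in for the error type (names invented; the real type is the ErrFallbackToOrdinaryLayerDownload defined above):

package main

import (
	"errors"
	"fmt"
)

// fallbackError mirrors ErrFallbackToOrdinaryLayerDownload: it wraps a
// cause while signaling that retrying by other means is permissible.
type fallbackError struct{ Err error }

func (e fallbackError) Error() string { return e.Err.Error() }
func (e fallbackError) Unwrap() error { return e.Err }

func getDiffer() error {
	return fallbackError{Err: errors.New("partial images are disabled")}
}

func main() {
	err := getDiffer()
	var fb fallbackError
	if errors.As(err, &fb) {
		// Not fatal: fall back to a regular layer download.
		fmt.Println("falling back to ordinary download:", fb.Err)
		return
	}
	fmt.Println("hard failure:", err) // do not retry
}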

View file

@ -89,7 +89,8 @@ type chunkedDiffer struct {
// is no TOC referenced by the manifest.
blobDigest digest.Digest
blobSize int64
blobSize int64
uncompressedTarSize int64 // -1 if unknown
pullOptions map[string]string
@ -143,43 +144,98 @@ func (c *chunkedDiffer) convertTarToZstdChunked(destDirectory string, payload *o
}
// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
// If it returns an error that implements IsErrFallbackToOrdinaryLayerDownload, the caller can
// retry the operation with a different method.
func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
pullOptions := store.PullOptions()
if !parseBooleanPullOption(pullOptions, "enable_partial_images", true) {
return nil, errors.New("enable_partial_images not configured")
if !parseBooleanPullOption(pullOptions, "enable_partial_images", false) {
// If convertImages is set, the two options disagree whether fallback is permissible.
// Right now, we enable it, but that's not a promise; rather, such a configuration should ideally be rejected.
return nil, newErrFallbackToOrdinaryLayerDownload(errors.New("partial images are disabled"))
}
// convertImages also serves as a “must not fallback to non-partial pull” option (?!)
convertImages := parseBooleanPullOption(pullOptions, "convert_images", false)
graphDriver, err := store.GraphDriver()
if err != nil {
return nil, err
}
if _, partialSupported := graphDriver.(graphdriver.DriverWithDiffer); !partialSupported {
if convertImages {
return nil, fmt.Errorf("graph driver %s does not support partial pull but convert_images requires that", graphDriver.String())
}
return nil, newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("graph driver %s does not support partial pull", graphDriver.String()))
}
differ, canFallback, err := getProperDiffer(store, blobDigest, blobSize, annotations, iss, pullOptions)
if err != nil {
if !canFallback {
return nil, err
}
// If convert_images is enabled, always attempt to convert it instead of returning an error or falling back to a different method.
if convertImages {
logrus.Debugf("Created differ to convert blob %q", blobDigest)
return makeConvertFromRawDiffer(store, blobDigest, blobSize, iss, pullOptions)
}
return nil, newErrFallbackToOrdinaryLayerDownload(err)
}
return differ, nil
}
// getProperDiffer is an implementation detail of GetDiffer.
// It returns a “proper” differ (not a convert_images one) if possible.
// On error, the second parameter is true if a fallback to an alternative (either the makeConvertFromRawDiffer differ, or a non-partial pull)
// is permissible.
func getProperDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, pullOptions map[string]string) (graphdriver.Differ, bool, error) {
zstdChunkedTOCDigestString, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey]
estargzTOCDigestString, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation]
if hasZstdChunkedTOC && hasEstargzTOC {
return nil, errors.New("both zstd:chunked and eStargz TOC found")
}
switch {
case hasZstdChunkedTOC && hasEstargzTOC:
return nil, false, errors.New("both zstd:chunked and eStargz TOC found")
if hasZstdChunkedTOC {
case hasZstdChunkedTOC:
zstdChunkedTOCDigest, err := digest.Parse(zstdChunkedTOCDigestString)
if err != nil {
return nil, fmt.Errorf("parsing zstd:chunked TOC digest %q: %w", zstdChunkedTOCDigestString, err)
return nil, false, err
}
return makeZstdChunkedDiffer(store, blobSize, zstdChunkedTOCDigest, annotations, iss, pullOptions)
}
if hasEstargzTOC {
differ, err := makeZstdChunkedDiffer(store, blobSize, zstdChunkedTOCDigest, annotations, iss, pullOptions)
if err != nil {
logrus.Debugf("Could not create zstd:chunked differ for blob %q: %v", blobDigest, err)
// If the error is a bad request to the server, then signal to the caller that it can try a different method.
var badRequestErr ErrBadRequest
return nil, errors.As(err, &badRequestErr), err
}
logrus.Debugf("Created zstd:chunked differ for blob %q", blobDigest)
return differ, false, nil
case hasEstargzTOC:
estargzTOCDigest, err := digest.Parse(estargzTOCDigestString)
if err != nil {
return nil, fmt.Errorf("parsing estargz TOC digest %q: %w", estargzTOCDigestString, err)
return nil, false, err
}
return makeEstargzChunkedDiffer(store, blobSize, estargzTOCDigest, iss, pullOptions)
}
differ, err := makeEstargzChunkedDiffer(store, blobSize, estargzTOCDigest, iss, pullOptions)
if err != nil {
logrus.Debugf("Could not create estargz differ for blob %q: %v", blobDigest, err)
// If the error is a bad request to the server, then signal to the caller that it can try a different method.
var badRequestErr ErrBadRequest
return nil, errors.As(err, &badRequestErr), err
}
logrus.Debugf("Created eStargz differ for blob %q", blobDigest)
return differ, false, nil
return makeConvertFromRawDiffer(store, blobDigest, blobSize, iss, pullOptions)
default: // no TOC
convertImages := parseBooleanPullOption(pullOptions, "convert_images", false)
if !convertImages {
return nil, true, errors.New("no TOC found and convert_images is not configured")
}
return nil, true, errors.New("no TOC found")
}
}
func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
if !parseBooleanPullOption(pullOptions, "convert_images", false) {
return nil, errors.New("convert_images not configured")
}
layersCache, err := getLayersCache(store)
if err != nil {
return nil, err
@ -189,6 +245,7 @@ func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blo
fsVerityDigests: make(map[string]string),
blobDigest: blobDigest,
blobSize: blobSize,
uncompressedTarSize: -1, // Will be computed later
convertToZstdChunked: true,
copyBuffer: makeCopyBuffer(),
layersCache: layersCache,
@ -202,24 +259,33 @@ func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest
if err != nil {
return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
}
var uncompressedTarSize int64 = -1
if tarSplit != nil {
uncompressedTarSize, err = tarSizeFromTarSplit(tarSplit)
if err != nil {
return nil, fmt.Errorf("computing size from tar-split: %w", err)
}
}
layersCache, err := getLayersCache(store)
if err != nil {
return nil, err
}
return &chunkedDiffer{
fsVerityDigests: make(map[string]string),
blobSize: blobSize,
tocDigest: tocDigest,
copyBuffer: makeCopyBuffer(),
fileType: fileTypeZstdChunked,
layersCache: layersCache,
manifest: manifest,
toc: toc,
pullOptions: pullOptions,
stream: iss,
tarSplit: tarSplit,
tocOffset: tocOffset,
fsVerityDigests: make(map[string]string),
blobSize: blobSize,
uncompressedTarSize: uncompressedTarSize,
tocDigest: tocDigest,
copyBuffer: makeCopyBuffer(),
fileType: fileTypeZstdChunked,
layersCache: layersCache,
manifest: manifest,
toc: toc,
pullOptions: pullOptions,
stream: iss,
tarSplit: tarSplit,
tocOffset: tocOffset,
}, nil
}
@ -234,16 +300,17 @@ func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest dig
}
return &chunkedDiffer{
fsVerityDigests: make(map[string]string),
blobSize: blobSize,
tocDigest: tocDigest,
copyBuffer: makeCopyBuffer(),
fileType: fileTypeEstargz,
layersCache: layersCache,
manifest: manifest,
pullOptions: pullOptions,
stream: iss,
tocOffset: tocOffset,
fsVerityDigests: make(map[string]string),
blobSize: blobSize,
uncompressedTarSize: -1, // We would have to read and decompress the whole layer
tocDigest: tocDigest,
copyBuffer: makeCopyBuffer(),
fileType: fileTypeEstargz,
layersCache: layersCache,
manifest: manifest,
pullOptions: pullOptions,
stream: iss,
tocOffset: tocOffset,
}, nil
}
@ -947,11 +1014,9 @@ func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dirfd i
}
if _, ok := err.(ErrBadRequest); ok {
// If the server cannot handle at least 64 chunks in a single request, just give up.
if len(chunksToRequest) < 64 {
if len(chunksToRequest) == 1 {
return err
}
// Merge more chunks to request
missingParts = mergeMissingChunks(missingParts, len(chunksToRequest)/2)
calculateChunksToRequest()
@ -1102,6 +1167,7 @@ func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest,
if payload == nil {
return "", errors.New("invalid stream returned")
}
defer payload.Close()
originalRawDigester := digest.Canonical.Digester()
@ -1128,7 +1194,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
var compressedDigest digest.Digest
var uncompressedDigest digest.Digest
var convertedBlobSize int64
if c.convertToZstdChunked {
fd, err := unix.Open(dest, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600)
@ -1160,7 +1225,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
if err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
convertedBlobSize = tarSize
c.uncompressedTarSize = tarSize
// fileSource is a O_TMPFILE file descriptor, so we
// need to keep it open until the entire file is processed.
defer fileSource.Close()
@ -1230,6 +1295,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
TOCDigest: c.tocDigest,
UncompressedDigest: uncompressedDigest,
CompressedDigest: compressedDigest,
Size: c.uncompressedTarSize,
}
// When the hard links deduplication is used, file attributes are ignored because setting them
@ -1243,19 +1309,12 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
var missingParts []missingPart
mergedEntries, totalSizeFromTOC, err := c.mergeTocEntries(c.fileType, toc.Entries)
mergedEntries, err := c.mergeTocEntries(c.fileType, toc.Entries)
if err != nil {
return output, err
}
output.UIDs, output.GIDs = collectIDs(mergedEntries)
if convertedBlobSize > 0 {
// if the image was converted, store the original tar size, so that
// it can be recreated correctly.
output.Size = convertedBlobSize
} else {
output.Size = totalSizeFromTOC
}
if err := maybeDoIDRemap(mergedEntries, options); err != nil {
return output, err
@ -1331,7 +1390,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
wg.Wait()
}()
for i := 0; i < copyGoRoutines; i++ {
for range copyGoRoutines {
wg.Add(1)
jobs := copyFileJobs
@ -1572,9 +1631,7 @@ func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool {
return false
}
func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]fileMetadata, int64, error) {
var totalFilesSize int64
func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]fileMetadata, error) {
countNextChunks := func(start int) int {
count := 0
for _, e := range entries[start:] {
@ -1604,10 +1661,8 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
continue
}
totalFilesSize += e.Size
if e.Type == TypeChunk {
return nil, -1, fmt.Errorf("chunk type without a regular file")
return nil, fmt.Errorf("chunk type without a regular file")
}
if e.Type == TypeReg {
@ -1643,7 +1698,7 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
lastChunkOffset = mergedEntries[i].chunks[j].Offset
}
}
return mergedEntries, totalFilesSize, nil
return mergedEntries, nil
}
// validateChunkChecksum checks if the file at $root/$path[offset:chunk.ChunkSize] has the

View file

@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
package chunked

View file

@ -1,68 +0,0 @@
package chunked
import (
"bytes"
"fmt"
"io"
"github.com/vbatts/tar-split/archive/tar"
"github.com/vbatts/tar-split/tar/storage"
)
// iterateTarSplit calls handler for each tar header in tarSplit
func iterateTarSplit(tarSplit []byte, handler func(hdr *tar.Header) error) error {
// This, strictly speaking, hard-codes undocumented assumptions about how github.com/vbatts/tar-split/tar/asm.NewInputTarStream
// forms the tar-split contents. Pragmatically, NewInputTarStream should always produce storage.FileType entries at least
// for every non-empty file, which constraints it basically to the output we expect.
//
// Specifically, we assume:
// - There is a separate SegmentType entry for every tar header, but only one SegmentType entry for the full header incl. any extensions
// - (There is a FileType entry for every tar header, we ignore it)
// - Trailing padding of a file, if any, is included in the next SegmentType entry
// - At the end, there may be SegmentType entries just for the terminating zero blocks.
unpacker := storage.NewJSONUnpacker(bytes.NewReader(tarSplit))
for {
tsEntry, err := unpacker.Next()
if err != nil {
if err == io.EOF {
return nil
}
return fmt.Errorf("reading tar-split entries: %w", err)
}
switch tsEntry.Type {
case storage.SegmentType:
payload := tsEntry.Payload
// This is horrible, but we don't know how much padding to skip. (It can be computed from the previous hdr.Size for non-sparse
// files, but for sparse files that is set to the logical size.)
//
// First, assume that all padding is zero bytes.
// A tar header starts with a file name, which might in principle be empty, but
// at least https://github.com/opencontainers/image-spec/blob/main/layer.md#populate-initial-filesystem suggests that
// the tar name should never be empty (it should be ".", or maybe "./").
//
// This will cause us to skip all zero bytes in the trailing blocks, but that's fine.
i := 0
for i < len(payload) && payload[i] == 0 {
i++
}
payload = payload[i:]
tr := tar.NewReader(bytes.NewReader(payload))
hdr, err := tr.Next()
if err != nil {
if err == io.EOF { // Probably the last entry, but let's let the unpacker drive that.
break
}
return fmt.Errorf("decoding a tar header from a tar-split entry: %w", err)
}
if err := handler(hdr); err != nil {
return err
}
case storage.FileType:
// Nothing
default:
return fmt.Errorf("unexpected tar-split entry type %q", tsEntry.Type)
}
}
}

View file

@ -1,11 +1,10 @@
//go:build !windows
// +build !windows
package directory
import (
"errors"
"io/fs"
"os"
"path/filepath"
"syscall"
)
@ -27,7 +26,7 @@ func Usage(dir string) (usage *DiskUsage, err error) {
if err != nil {
// if dir does not exist, Usage() returns the error.
// if dir/x disappeared while walking, Usage() ignores dir/x.
if os.IsNotExist(err) && d != dir {
if errors.Is(err, fs.ErrNotExist) && d != dir {
return nil
}
return err
@ -35,6 +34,9 @@ func Usage(dir string) (usage *DiskUsage, err error) {
fileInfo, err := entry.Info()
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
return nil
}
return err
}

View file

@ -1,11 +1,10 @@
//go:build windows
// +build windows
package directory
import (
"errors"
"io/fs"
"os"
"path/filepath"
)
@ -25,7 +24,7 @@ func Usage(dir string) (usage *DiskUsage, err error) {
if err != nil {
// if dir does not exist, Size() returns the error.
// if dir/x disappeared while walking, Size() ignores dir/x.
if os.IsNotExist(err) && path != dir {
if errors.Is(err, fs.ErrNotExist) && path != dir {
return nil
}
return err
@ -40,6 +39,9 @@ func Usage(dir string) (usage *DiskUsage, err error) {
fileInfo, err := d.Info()
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
return nil
}
return err
}
usage.Size += fileInfo.Size()

View file

@ -0,0 +1,38 @@
package fileutils
import (
"errors"
"os"
"syscall"
"golang.org/x/sys/unix"
)
// Exists checks whether a file or directory exists at the given path.
// If the path is a symlink, the symlink is followed.
func Exists(path string) error {
// It uses unix.Faccessat which is a faster operation compared to os.Stat for
// simply checking the existence of a file.
err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, 0)
if err != nil {
return &os.PathError{Op: "faccessat", Path: path, Err: err}
}
return nil
}
// Lexists checks whether a file or directory exists at the given path.
// If the path is a symlink, the symlink itself is checked.
func Lexists(path string) error {
// FreeBSD before 15.0 does not support the AT_SYMLINK_NOFOLLOW flag for
// faccessat. In this case, the call to faccessat will return EINVAL and
// we fall back to using Lstat.
err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW)
if err != nil {
if errors.Is(err, syscall.EINVAL) {
_, err = os.Lstat(path)
return err
}
return &os.PathError{Op: "faccessat", Path: path, Err: err}
}
return nil
}
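A runnable illustration (Unix-like systems; stand-in implementations built on os.Stat/os.Lstat so the sketch does not need this package) of the difference between the two checks: Exists follows symlinks and therefore fails on a dangling link, while Lexists inspects the link itself.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// exists/lexists stand in for fileutils.Exists/Lexists above.
func exists(p string) error  { _, err := os.Stat(p); return err }
func lexists(p string) error { _, err := os.Lstat(p); return err }

func main() {
	dir, _ := os.MkdirTemp("", "exists-demo")
	defer os.RemoveAll(dir)

	// Create a symlink whose target does not exist.
	link := filepath.Join(dir, "dangling")
	_ = os.Symlink(filepath.Join(dir, "no-such-target"), link)

	fmt.Println("Exists :", exists(link))  // error: the target is missing
	fmt.Println("Lexists:", lexists(link)) // nil: the link itself exists
}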

View file

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
//go:build !windows && !freebsd
package fileutils

View file

@ -1,5 +1,4 @@
//go:build linux || freebsd
// +build linux freebsd
package fileutils

View file

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package fsutils

View file

@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
package fsverity

View file

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package homedir

View file

@ -1,10 +1,11 @@
//go:build linux
// +build linux
package idmap
import (
"errors"
"fmt"
"io/fs"
"os"
"runtime"
"syscall"
@ -26,7 +27,7 @@ func CreateIDMappedMount(source, target string, pid int) error {
targetDirFd, err := unix.OpenTree(0, source, unix.OPEN_TREE_CLONE)
if err != nil {
return err
return &os.PathError{Op: "open_tree", Path: source, Err: err}
}
defer unix.Close(targetDirFd)
@ -35,13 +36,16 @@ func CreateIDMappedMount(source, target string, pid int) error {
Attr_set: unix.MOUNT_ATTR_IDMAP,
Userns_fd: uint64(userNsFile.Fd()),
}); err != nil {
return err
return &os.PathError{Op: "mount_setattr", Path: source, Err: err}
}
if err := os.Mkdir(target, 0o700); err != nil && !os.IsExist(err) {
if err := os.Mkdir(target, 0o700); err != nil && !errors.Is(err, fs.ErrExist) {
return err
}
return unix.MoveMount(targetDirFd, "", 0, target, unix.MOVE_MOUNT_F_EMPTY_PATH)
if err := unix.MoveMount(targetDirFd, "", 0, target, unix.MOVE_MOUNT_F_EMPTY_PATH); err != nil {
return &os.PathError{Op: "move_mount", Path: target, Err: err}
}
return nil
}
// CreateUsernsProcess forks the current process and creates a user namespace using the specified

View file

@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
package idmap

View file

@ -367,7 +367,7 @@ func checkChownErr(err error, name string, uid, gid int) error {
return err
}
// Stat contains file states that can be overriden with ContainersOverrideXattr.
// Stat contains file states that can be overridden with ContainersOverrideXattr.
type Stat struct {
IDs IDPair
Mode os.FileMode

View file

@ -1,5 +1,4 @@
//go:build linux && cgo && libsubid
// +build linux,cgo,libsubid
package idtools

View file

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package idtools

View file

@ -1,5 +1,4 @@
//go:build !linux || !libsubid || !cgo
// +build !linux !libsubid !cgo
package idtools

View file

@ -1,5 +1,4 @@
//go:build windows
// +build windows
package idtools

View file

@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
package idtools

View file

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package idtools

View file

@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
package ioutils

View file

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package ioutils

View file

@ -1,5 +1,4 @@
//go:build windows
// +build windows
package ioutils

View file

@ -128,9 +128,8 @@ func GetROLockfile(path string) (Locker, error) {
func (l *LockFile) Lock() {
if l.ro {
panic("can't take write lock on read-only lock file")
} else {
l.lock(writeLock)
}
l.lock(writeLock)
}
// RLock locks the lockfile as a reader.
@ -142,9 +141,8 @@ func (l *LockFile) RLock() {
func (l *LockFile) TryLock() error {
if l.ro {
panic("can't take write lock on read-only lock file")
} else {
return l.tryLock(writeLock)
}
return l.tryLock(writeLock)
}
// TryRLock attempts to lock the lockfile as a reader.

Some files were not shown because too many files have changed in this diff