Port osbuild/images v0.33.0 with dot-notation to composer
Update osbuild/images to the version that introduces "dot notation" for distro release versions.

- Replace all uses of distroregistry with distrofactory.
- Delete the local version of reporegistry and use the one from osbuild/images.
- Weldr: unify `createWeldrAPI()` and `createWeldrAPI2()` into a single `createTestWeldrAPI()` function.
- store/fixture: rework fixtures to allow overriding the host distro name and host architecture name. A cleanup function to restore the host distro and arch names is always part of the fixture struct.
- Delete the `distro_mock` package, since it is no longer used.
- Bump the required version of osbuild to 98, because the OSCAP customization uses the 'compress_results' stage option, which is not available in older versions of osbuild.

Signed-off-by: Tomáš Hozza <thozza@redhat.com>
parent f6ff8c40dd
commit 625b1578fa
1166 changed files with 154457 additions and 5508 deletions
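The move from distroregistry to distrofactory changes how callers resolve a distribution by name, and release versions now keep their dot in the distro name. The sketch below is illustrative only and is not part of this commit's diff; it assumes the osbuild/images distrofactory package exposes NewDefault() and GetDistro(), and that GetDistro() returns nil for unknown names.

// Hypothetical usage sketch (not from this commit): resolving a distro by its
// dot-notation name via distrofactory instead of the removed distroregistry.
package main

import (
	"fmt"

	// Assumed import path for the osbuild/images distro factory.
	"github.com/osbuild/images/pkg/distrofactory"
)

func main() {
	df := distrofactory.NewDefault()

	// With dot notation the release version keeps its dot, e.g. "rhel-9.3"
	// rather than a collapsed form such as "rhel-93".
	d := df.GetDistro("rhel-9.3")
	if d == nil {
		fmt.Println("unknown distro name")
		return
	}
	fmt.Println("resolved distro:", d.Name())
}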
vendor/github.com/containers/storage/pkg/chrootarchive/archive.go (generated, vendored, new file)
@@ -0,0 +1,176 @@
package chrootarchive
|
||||
|
||||
import (
|
||||
stdtar "archive/tar"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/idtools"
|
||||
"github.com/containers/storage/pkg/unshare"
|
||||
)
|
||||
|
||||
// NewArchiver returns a new Archiver which uses chrootarchive.Untar
|
||||
func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver {
|
||||
archiver := archive.NewArchiver(idMappings)
|
||||
archiver.Untar = Untar
|
||||
return archiver
|
||||
}
|
||||
|
||||
// NewArchiverWithChown returns a new Archiver which uses chrootarchive.Untar and the provided ID mapping configuration on both ends
|
||||
func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, untarIDMappings *idtools.IDMappings) *archive.Archiver {
|
||||
archiver := archive.NewArchiverWithChown(tarIDMappings, chownOpts, untarIDMappings)
|
||||
archiver.Untar = Untar
|
||||
return archiver
|
||||
}
|
||||
|
||||
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
|
||||
// and unpacks it into the directory at `dest`.
|
||||
// The archive may be compressed with one of the following algorithms:
|
||||
//
|
||||
// identity (uncompressed), gzip, bzip2, xz.
|
||||
func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
|
||||
return untarHandler(tarArchive, dest, options, true, dest)
|
||||
}
|
||||
|
||||
// UntarWithRoot is the same as `Untar`, but allows you to pass in a root directory
|
||||
// The root directory is the directory that will be chrooted to.
|
||||
// `dest` must be a path within `root`, if it is not an error will be returned.
|
||||
//
|
||||
// `root` should set to a directory which is not controlled by any potentially
|
||||
// malicious process.
|
||||
//
|
||||
// This should be used to prevent a potential attacker from manipulating `dest`
|
||||
// such that it would provide access to files outside of `dest` through things
|
||||
// like symlinks. Normally `ResolveSymlinksInScope` would handle this, however
|
||||
// sanitizing symlinks in this manner is inherrently racey:
|
||||
// ref: CVE-2018-15664
|
||||
func UntarWithRoot(tarArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
|
||||
return untarHandler(tarArchive, dest, options, true, root)
|
||||
}
|
||||
|
||||
// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
|
||||
// and unpacks it into the directory at `dest`.
|
||||
// The archive must be an uncompressed stream.
|
||||
func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
|
||||
return untarHandler(tarArchive, dest, options, false, dest)
|
||||
}
|
||||
|
||||
// Handler for teasing out the automatic decompression
|
||||
func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool, root string) error {
|
||||
if tarArchive == nil {
|
||||
return fmt.Errorf("empty archive")
|
||||
}
|
||||
if options == nil {
|
||||
options = &archive.TarOptions{}
|
||||
options.InUserNS = unshare.IsRootless()
|
||||
}
|
||||
if options.ExcludePatterns == nil {
|
||||
options.ExcludePatterns = []string{}
|
||||
}
|
||||
|
||||
idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
|
||||
rootIDs := idMappings.RootPair()
|
||||
|
||||
dest = filepath.Clean(dest)
|
||||
if _, err := os.Stat(dest); os.IsNotExist(err) {
|
||||
if err := idtools.MkdirAllAndChownNew(dest, 0o755, rootIDs); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
r := tarArchive
|
||||
if decompress {
|
||||
decompressedArchive, err := archive.DecompressStream(tarArchive)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer decompressedArchive.Close()
|
||||
r = decompressedArchive
|
||||
}
|
||||
|
||||
return invokeUnpack(r, dest, options, root)
|
||||
}
|
||||
|
||||
// Tar tars the requested path while chrooted to the specified root.
|
||||
func Tar(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
|
||||
if options == nil {
|
||||
options = &archive.TarOptions{}
|
||||
}
|
||||
return invokePack(srcPath, options, root)
|
||||
}
|
||||
|
||||
// CopyFileWithTarAndChown returns a function which copies a single file from outside
|
||||
// of any container into our working container, mapping permissions using the
|
||||
// container's ID maps, possibly overridden using the passed-in chownOpts
|
||||
func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
|
||||
untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
|
||||
archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
|
||||
if hasher != nil {
|
||||
originalUntar := archiver.Untar
|
||||
archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
|
||||
contentReader, contentWriter, err := os.Pipe()
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating pipe extract data to %q: %w", dest, err)
|
||||
}
|
||||
defer contentReader.Close()
|
||||
defer contentWriter.Close()
|
||||
var hashError error
|
||||
var hashWorker sync.WaitGroup
|
||||
hashWorker.Add(1)
|
||||
go func() {
|
||||
t := stdtar.NewReader(contentReader)
|
||||
_, err := t.Next()
|
||||
if err != nil {
|
||||
hashError = err
|
||||
}
|
||||
if _, err = io.Copy(hasher, t); err != nil && err != io.EOF {
|
||||
hashError = err
|
||||
}
|
||||
hashWorker.Done()
|
||||
}()
|
||||
if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil {
|
||||
err = fmt.Errorf("extracting data to %q while copying: %w", dest, err)
|
||||
}
|
||||
hashWorker.Wait()
|
||||
if err == nil && hashError != nil {
|
||||
err = fmt.Errorf("calculating digest of data for %q while copying: %w", dest, hashError)
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
return archiver.CopyFileWithTar
|
||||
}
|
||||
|
||||
// CopyWithTarAndChown returns a function which copies a directory tree from outside of
|
||||
// any container into our working container, mapping permissions using the
|
||||
// container's ID maps, possibly overridden using the passed-in chownOpts
|
||||
func CopyWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
|
||||
untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
|
||||
archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
|
||||
if hasher != nil {
|
||||
originalUntar := archiver.Untar
|
||||
archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
|
||||
return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
|
||||
}
|
||||
}
|
||||
return archiver.CopyWithTar
|
||||
}
|
||||
|
||||
// UntarPathAndChown returns a function which extracts an archive in a specified
|
||||
// location into our working container, mapping permissions using the
|
||||
// container's ID maps, possibly overridden using the passed-in chownOpts
|
||||
func UntarPathAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
|
||||
untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
|
||||
archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
|
||||
if hasher != nil {
|
||||
originalUntar := archiver.Untar
|
||||
archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
|
||||
return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
|
||||
}
|
||||
}
|
||||
return archiver.UntarPath
|
||||
}
|
||||
vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go (generated, vendored, new file)
@@ -0,0 +1,18 @@
package chrootarchive

import (
	"io"

	"github.com/containers/storage/pkg/archive"
)

func invokeUnpack(decompressedArchive io.Reader,
	dest string,
	options *archive.TarOptions, root string,
) error {
	return archive.Unpack(decompressedArchive, dest, options)
}

func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
	return archive.TarWithOptions(srcPath, options)
}
vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go (generated, vendored, new file)
@@ -0,0 +1,209 @@
//go:build !windows && !darwin
|
||||
// +build !windows,!darwin
|
||||
|
||||
package chrootarchive
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/reexec"
|
||||
)
|
||||
|
||||
// untar is the entry-point for storage-untar on re-exec. This is not used on
|
||||
// Windows as it does not support chroot, hence no point sandboxing through
|
||||
// chroot and rexec.
|
||||
func untar() {
|
||||
runtime.LockOSThread()
|
||||
flag.Parse()
|
||||
|
||||
var options archive.TarOptions
|
||||
|
||||
// read the options from the pipe "ExtraFiles"
|
||||
if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
|
||||
fatal(err)
|
||||
}
|
||||
|
||||
dst := flag.Arg(0)
|
||||
var root string
|
||||
if len(flag.Args()) > 1 {
|
||||
root = flag.Arg(1)
|
||||
}
|
||||
|
||||
if root == "" {
|
||||
root = dst
|
||||
}
|
||||
|
||||
if err := chroot(root); err != nil {
|
||||
fatal(err)
|
||||
}
|
||||
|
||||
if err := archive.Unpack(os.Stdin, dst, &options); err != nil {
|
||||
fatal(err)
|
||||
}
|
||||
// fully consume stdin in case it is zero padded
|
||||
if _, err := flush(os.Stdin); err != nil {
|
||||
fatal(err)
|
||||
}
|
||||
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
|
||||
if root == "" {
|
||||
return errors.New("must specify a root to chroot to")
|
||||
}
|
||||
|
||||
// We can't pass a potentially large exclude list directly via cmd line
|
||||
// because we easily overrun the kernel's max argument/environment size
|
||||
// when the full image list is passed (e.g. when this is used by
|
||||
// `docker load`). We will marshall the options via a pipe to the
|
||||
// child
|
||||
r, w, err := os.Pipe()
|
||||
if err != nil {
|
||||
return fmt.Errorf("untar pipe failure: %w", err)
|
||||
}
|
||||
|
||||
if root != "" {
|
||||
relDest, err := filepath.Rel(root, dest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if relDest == "." {
|
||||
relDest = "/"
|
||||
}
|
||||
if relDest[0] != '/' {
|
||||
relDest = "/" + relDest
|
||||
}
|
||||
dest = relDest
|
||||
}
|
||||
|
||||
cmd := reexec.Command("storage-untar", dest, root)
|
||||
cmd.Stdin = decompressedArchive
|
||||
|
||||
cmd.ExtraFiles = append(cmd.ExtraFiles, r)
|
||||
output := bytes.NewBuffer(nil)
|
||||
cmd.Stdout = output
|
||||
cmd.Stderr = output
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
w.Close()
|
||||
return fmt.Errorf("untar error on re-exec cmd: %w", err)
|
||||
}
|
||||
|
||||
// write the options to the pipe for the untar exec to read
|
||||
if err := json.NewEncoder(w).Encode(options); err != nil {
|
||||
w.Close()
|
||||
return fmt.Errorf("untar json encode to pipe failed: %w", err)
|
||||
}
|
||||
w.Close()
|
||||
|
||||
if err := cmd.Wait(); err != nil {
|
||||
// when `xz -d -c -q | storage-untar ...` failed on storage-untar side,
|
||||
// we need to exhaust `xz`'s output, otherwise the `xz` side will be
|
||||
// pending on write pipe forever
|
||||
io.Copy(io.Discard, decompressedArchive)
|
||||
|
||||
return fmt.Errorf("processing tar file(%s): %w", output, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func tar() {
|
||||
runtime.LockOSThread()
|
||||
flag.Parse()
|
||||
|
||||
src := flag.Arg(0)
|
||||
var root string
|
||||
if len(flag.Args()) > 1 {
|
||||
root = flag.Arg(1)
|
||||
}
|
||||
|
||||
if root == "" {
|
||||
root = src
|
||||
}
|
||||
|
||||
if err := realChroot(root); err != nil {
|
||||
fatal(err)
|
||||
}
|
||||
|
||||
var options archive.TarOptions
|
||||
if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil {
|
||||
fatal(err)
|
||||
}
|
||||
|
||||
rdr, err := archive.TarWithOptions(src, &options)
|
||||
if err != nil {
|
||||
fatal(err)
|
||||
}
|
||||
defer rdr.Close()
|
||||
|
||||
if _, err := io.Copy(os.Stdout, rdr); err != nil {
|
||||
fatal(err)
|
||||
}
|
||||
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
|
||||
if root == "" {
|
||||
return nil, errors.New("root path must not be empty")
|
||||
}
|
||||
|
||||
relSrc, err := filepath.Rel(root, srcPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if relSrc == "." {
|
||||
relSrc = "/"
|
||||
}
|
||||
if relSrc[0] != '/' {
|
||||
relSrc = "/" + relSrc
|
||||
}
|
||||
|
||||
// make sure we didn't trim a trailing slash with the call to `Rel`
|
||||
if strings.HasSuffix(srcPath, "/") && !strings.HasSuffix(relSrc, "/") {
|
||||
relSrc += "/"
|
||||
}
|
||||
|
||||
cmd := reexec.Command("storage-tar", relSrc, root)
|
||||
|
||||
errBuff := bytes.NewBuffer(nil)
|
||||
cmd.Stderr = errBuff
|
||||
|
||||
tarR, tarW := io.Pipe()
|
||||
cmd.Stdout = tarW
|
||||
|
||||
stdin, err := cmd.StdinPipe()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting options pipe for tar process: %w", err)
|
||||
}
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
return nil, fmt.Errorf("tar error on re-exec cmd: %w", err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
err := cmd.Wait()
|
||||
if err != nil {
|
||||
err = fmt.Errorf("processing tar file(%s): %w", errBuff, err)
|
||||
}
|
||||
tarW.CloseWithError(err)
|
||||
}()
|
||||
|
||||
if err := json.NewEncoder(stdin).Encode(options); err != nil {
|
||||
stdin.Close()
|
||||
return nil, fmt.Errorf("tar json encode to pipe failed: %w", err)
|
||||
}
|
||||
stdin.Close()
|
||||
|
||||
return tarR, nil
|
||||
}
|
||||
vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go (generated, vendored, new file)
@@ -0,0 +1,30 @@
package chrootarchive

import (
	"io"

	"github.com/containers/storage/pkg/archive"
	"github.com/containers/storage/pkg/longpath"
)

// chroot is not supported by Windows
func chroot(path string) error {
	return nil
}

func invokeUnpack(decompressedArchive io.Reader,
	dest string,
	options *archive.TarOptions, root string,
) error {
	// Windows is different to Linux here because Windows does not support
	// chroot. Hence there is no point sandboxing a chrooted process to
	// do the unpack. We call inline instead within the daemon process.
	return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options)
}

func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
	// Windows is different to Linux here because Windows does not support
	// chroot. Hence there is no point sandboxing a chrooted process to
	// do the pack. We call inline instead within the daemon process.
	return archive.TarWithOptions(srcPath, options)
}
vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go (generated, vendored, new file)
@@ -0,0 +1,120 @@
package chrootarchive
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/containers/storage/pkg/mount"
|
||||
"github.com/syndtr/gocapability/capability"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// chroot on linux uses pivot_root instead of chroot
|
||||
// pivot_root takes a new root and an old root.
|
||||
// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root.
|
||||
// New root is where the new rootfs is set to.
|
||||
// Old root is removed after the call to pivot_root so it is no longer available under the new root.
|
||||
// This is similar to how libcontainer sets up a container's rootfs
|
||||
func chroot(path string) (err error) {
|
||||
caps, err := capability.NewPid(0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host
|
||||
// environment not in the chroot from untrusted files.
|
||||
_, _ = user.Lookup("storage")
|
||||
_, _ = net.LookupHost("localhost")
|
||||
|
||||
// if the process doesn't have CAP_SYS_ADMIN, but does have CAP_SYS_CHROOT, we need to use the actual chroot
|
||||
if !caps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) && caps.Get(capability.EFFECTIVE, capability.CAP_SYS_CHROOT) {
|
||||
return realChroot(path)
|
||||
}
|
||||
|
||||
if err := unix.Unshare(unix.CLONE_NEWNS); err != nil {
|
||||
return fmt.Errorf("creating mount namespace before pivot: %w", err)
|
||||
}
|
||||
|
||||
// make everything in new ns private
|
||||
if err := mount.MakeRPrivate("/"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if mounted, _ := mount.Mounted(path); !mounted {
|
||||
if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil {
|
||||
return realChroot(path)
|
||||
}
|
||||
}
|
||||
|
||||
// setup oldRoot for pivot_root
|
||||
pivotDir, err := os.MkdirTemp(path, ".pivot_root")
|
||||
if err != nil {
|
||||
return fmt.Errorf("setting up pivot dir: %w", err)
|
||||
}
|
||||
|
||||
var mounted bool
|
||||
defer func() {
|
||||
if mounted {
|
||||
// make sure pivotDir is not mounted before we try to remove it
|
||||
if errCleanup := unix.Unmount(pivotDir, unix.MNT_DETACH); errCleanup != nil {
|
||||
if err == nil {
|
||||
err = errCleanup
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
errCleanup := os.Remove(pivotDir)
|
||||
// pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful
|
||||
// because we already cleaned it up on failed pivot_root
|
||||
if errCleanup != nil && !os.IsNotExist(errCleanup) {
|
||||
errCleanup = fmt.Errorf("cleaning up after pivot: %w", errCleanup)
|
||||
if err == nil {
|
||||
err = errCleanup
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if err := unix.PivotRoot(path, pivotDir); err != nil {
|
||||
// If pivot fails, fall back to the normal chroot after cleaning up temp dir
|
||||
if err := os.Remove(pivotDir); err != nil {
|
||||
return fmt.Errorf("cleaning up after failed pivot: %w", err)
|
||||
}
|
||||
return realChroot(path)
|
||||
}
|
||||
mounted = true
|
||||
|
||||
// This is the new path for where the old root (prior to the pivot) has been moved to
|
||||
// This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction
|
||||
pivotDir = filepath.Join("/", filepath.Base(pivotDir))
|
||||
|
||||
if err := unix.Chdir("/"); err != nil {
|
||||
return fmt.Errorf("changing to new root: %w", err)
|
||||
}
|
||||
|
||||
// Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host
|
||||
if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil {
|
||||
return fmt.Errorf("making old root private after pivot: %w", err)
|
||||
}
|
||||
|
||||
// Now unmount the old root so it's no longer visible from the new root
|
||||
if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil {
|
||||
return fmt.Errorf("while unmounting old root after pivot: %w", err)
|
||||
}
|
||||
mounted = false
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func realChroot(path string) error {
|
||||
if err := unix.Chroot(path); err != nil {
|
||||
return fmt.Errorf("after fallback to chroot: %w", err)
|
||||
}
|
||||
if err := unix.Chdir("/"); err != nil {
|
||||
return fmt.Errorf("changing to new root after chroot: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go (generated, vendored, new file)
@@ -0,0 +1,17 @@
//go:build !windows && !linux && !darwin
// +build !windows,!linux,!darwin

package chrootarchive

import "golang.org/x/sys/unix"

func realChroot(path string) error {
	if err := unix.Chroot(path); err != nil {
		return err
	}
	return unix.Chdir("/")
}

func chroot(path string) error {
	return realChroot(path)
}
vendor/github.com/containers/storage/pkg/chrootarchive/diff.go (generated, vendored, new file)
@@ -0,0 +1,23 @@
package chrootarchive

import (
	"io"

	"github.com/containers/storage/pkg/archive"
)

// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`. The stream `layer` can only be
// uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer io.Reader) (size int64, err error) {
	return applyLayerHandler(dest, layer, &archive.TarOptions{}, true)
}

// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) {
	return applyLayerHandler(dest, layer, options, false)
}
vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go (generated, vendored, new file)
@@ -0,0 +1,40 @@
package chrootarchive
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
)
|
||||
|
||||
// applyLayerHandler parses a diff in the standard layer format from `layer`, and
|
||||
// applies it to the directory `dest`. Returns the size in bytes of the
|
||||
// contents of the layer.
|
||||
func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
|
||||
dest = filepath.Clean(dest)
|
||||
|
||||
if decompress {
|
||||
decompressed, err := archive.DecompressStream(layer)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer decompressed.Close()
|
||||
|
||||
layer = decompressed
|
||||
}
|
||||
|
||||
tmpDir, err := os.MkdirTemp(os.Getenv("temp"), "temp-storage-extract")
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s: %w", dest, err)
|
||||
}
|
||||
|
||||
s, err := archive.UnpackLayer(dest, layer, options)
|
||||
os.RemoveAll(tmpDir)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %w", layer, dest, err)
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go (generated, vendored, new file)
@@ -0,0 +1,128 @@
//go:build !windows && !darwin
|
||||
// +build !windows,!darwin
|
||||
|
||||
package chrootarchive
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/reexec"
|
||||
"github.com/containers/storage/pkg/system"
|
||||
"github.com/containers/storage/pkg/unshare"
|
||||
)
|
||||
|
||||
type applyLayerResponse struct {
|
||||
LayerSize int64 `json:"layerSize"`
|
||||
}
|
||||
|
||||
// applyLayer is the entry-point for storage-applylayer on re-exec. This is not
|
||||
// used on Windows as it does not support chroot, hence no point sandboxing
|
||||
// through chroot and rexec.
|
||||
func applyLayer() {
|
||||
var (
|
||||
tmpDir string
|
||||
err error
|
||||
options *archive.TarOptions
|
||||
)
|
||||
runtime.LockOSThread()
|
||||
flag.Parse()
|
||||
|
||||
inUserns := unshare.IsRootless()
|
||||
if err := chroot(flag.Arg(0)); err != nil {
|
||||
fatal(err)
|
||||
}
|
||||
|
||||
// We need to be able to set any perms
|
||||
oldmask, err := system.Umask(0)
|
||||
defer system.Umask(oldmask)
|
||||
if err != nil {
|
||||
fatal(err)
|
||||
}
|
||||
|
||||
if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil {
|
||||
fatal(err)
|
||||
}
|
||||
|
||||
if inUserns {
|
||||
options.InUserNS = true
|
||||
}
|
||||
|
||||
if tmpDir, err = os.MkdirTemp("/", "temp-storage-extract"); err != nil {
|
||||
fatal(err)
|
||||
}
|
||||
|
||||
os.Setenv("TMPDIR", tmpDir)
|
||||
size, err := archive.UnpackLayer("/", os.Stdin, options)
|
||||
os.RemoveAll(tmpDir)
|
||||
if err != nil {
|
||||
fatal(err)
|
||||
}
|
||||
|
||||
encoder := json.NewEncoder(os.Stdout)
|
||||
if err := encoder.Encode(applyLayerResponse{size}); err != nil {
|
||||
fatal(fmt.Errorf("unable to encode layerSize JSON: %w", err))
|
||||
}
|
||||
|
||||
if _, err := flush(os.Stdin); err != nil {
|
||||
fatal(err)
|
||||
}
|
||||
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
// applyLayerHandler parses a diff in the standard layer format from `layer`, and
|
||||
// applies it to the directory `dest`. Returns the size in bytes of the
|
||||
// contents of the layer.
|
||||
func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
|
||||
dest = filepath.Clean(dest)
|
||||
if decompress {
|
||||
decompressed, err := archive.DecompressStream(layer)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer decompressed.Close()
|
||||
|
||||
layer = decompressed
|
||||
}
|
||||
if options == nil {
|
||||
options = &archive.TarOptions{}
|
||||
if unshare.IsRootless() {
|
||||
options.InUserNS = true
|
||||
}
|
||||
}
|
||||
if options.ExcludePatterns == nil {
|
||||
options.ExcludePatterns = []string{}
|
||||
}
|
||||
|
||||
data, err := json.Marshal(options)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("ApplyLayer json encode: %w", err)
|
||||
}
|
||||
|
||||
cmd := reexec.Command("storage-applyLayer", dest)
|
||||
cmd.Stdin = layer
|
||||
cmd.Env = append(os.Environ(), fmt.Sprintf("OPT=%s", data))
|
||||
|
||||
outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer)
|
||||
cmd.Stdout, cmd.Stderr = outBuf, errBuf
|
||||
|
||||
if err = cmd.Run(); err != nil {
|
||||
return 0, fmt.Errorf("ApplyLayer stdout: %s stderr: %s %w", outBuf, errBuf, err)
|
||||
}
|
||||
|
||||
// Stdout should be a valid JSON struct representing an applyLayerResponse.
|
||||
response := applyLayerResponse{}
|
||||
decoder := json.NewDecoder(outBuf)
|
||||
if err = decoder.Decode(&response); err != nil {
|
||||
return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %w", err)
|
||||
}
|
||||
|
||||
return response.LayerSize, nil
|
||||
}
|
||||
vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go (generated, vendored, new file)
@@ -0,0 +1,44 @@
package chrootarchive
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/longpath"
|
||||
)
|
||||
|
||||
// applyLayerHandler parses a diff in the standard layer format from `layer`, and
|
||||
// applies it to the directory `dest`. Returns the size in bytes of the
|
||||
// contents of the layer.
|
||||
func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
|
||||
dest = filepath.Clean(dest)
|
||||
|
||||
// Ensure it is a Windows-style volume path
|
||||
dest = longpath.AddPrefix(dest)
|
||||
|
||||
if decompress {
|
||||
decompressed, err := archive.DecompressStream(layer)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer decompressed.Close()
|
||||
|
||||
layer = decompressed
|
||||
}
|
||||
|
||||
tmpDir, err := os.MkdirTemp(os.Getenv("temp"), "temp-storage-extract")
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s. %s", dest, err)
|
||||
}
|
||||
|
||||
s, err := archive.UnpackLayer(dest, layer, nil)
|
||||
os.RemoveAll(tmpDir)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err)
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go (generated, vendored, new file)
@@ -0,0 +1,4 @@
package chrootarchive

func init() {
}
vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go (generated, vendored, new file)
@@ -0,0 +1,29 @@
//go:build !windows && !darwin
// +build !windows,!darwin

package chrootarchive

import (
	"fmt"
	"io"
	"os"

	"github.com/containers/storage/pkg/reexec"
)

func init() {
	reexec.Register("storage-applyLayer", applyLayer)
	reexec.Register("storage-untar", untar)
	reexec.Register("storage-tar", tar)
}

func fatal(err error) {
	fmt.Fprint(os.Stderr, err)
	os.Exit(1)
}

// flush consumes all the bytes from the reader discarding
// any errors
func flush(r io.Reader) (bytes int64, err error) {
	return io.Copy(io.Discard, r)
}
vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go (generated, vendored, new file)
@@ -0,0 +1,4 @@
package chrootarchive

func init() {
}
vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go (generated, vendored, new file)
@@ -0,0 +1,8 @@
//go:build !windows && !darwin
// +build !windows,!darwin

package chrootarchive

import jsoniter "github.com/json-iterator/go"

var json = jsoniter.ConfigCompatibleWithStandardLibrary
vendor/github.com/containers/storage/pkg/chunked/cache_linux.go (generated, vendored, new file)
@@ -0,0 +1,664 @@
package chunked
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
storage "github.com/containers/storage"
|
||||
graphdriver "github.com/containers/storage/drivers"
|
||||
"github.com/containers/storage/pkg/chunked/internal"
|
||||
"github.com/containers/storage/pkg/ioutils"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
cacheKey = "chunked-manifest-cache"
|
||||
cacheVersion = 1
|
||||
|
||||
digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
|
||||
)
|
||||
|
||||
type metadata struct {
|
||||
tagLen int
|
||||
digestLen int
|
||||
tags []byte
|
||||
vdata []byte
|
||||
}
|
||||
|
||||
type layer struct {
|
||||
id string
|
||||
metadata *metadata
|
||||
target string
|
||||
}
|
||||
|
||||
type layersCache struct {
|
||||
layers []layer
|
||||
refs int
|
||||
store storage.Store
|
||||
mutex sync.RWMutex
|
||||
created time.Time
|
||||
}
|
||||
|
||||
var (
|
||||
cacheMutex sync.Mutex
|
||||
cache *layersCache
|
||||
)
|
||||
|
||||
func (c *layersCache) release() {
|
||||
cacheMutex.Lock()
|
||||
defer cacheMutex.Unlock()
|
||||
|
||||
c.refs--
|
||||
if c.refs == 0 {
|
||||
cache = nil
|
||||
}
|
||||
}
|
||||
|
||||
func getLayersCacheRef(store storage.Store) *layersCache {
|
||||
cacheMutex.Lock()
|
||||
defer cacheMutex.Unlock()
|
||||
if cache != nil && cache.store == store && time.Since(cache.created).Minutes() < 10 {
|
||||
cache.refs++
|
||||
return cache
|
||||
}
|
||||
cache := &layersCache{
|
||||
store: store,
|
||||
refs: 1,
|
||||
created: time.Now(),
|
||||
}
|
||||
return cache
|
||||
}
|
||||
|
||||
func getLayersCache(store storage.Store) (*layersCache, error) {
|
||||
c := getLayersCacheRef(store)
|
||||
|
||||
if err := c.load(); err != nil {
|
||||
c.release()
|
||||
return nil, err
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *layersCache) load() error {
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
|
||||
allLayers, err := c.store.Layers()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
existingLayers := make(map[string]string)
|
||||
for _, r := range c.layers {
|
||||
existingLayers[r.id] = r.target
|
||||
}
|
||||
|
||||
currentLayers := make(map[string]string)
|
||||
for _, r := range allLayers {
|
||||
currentLayers[r.ID] = r.ID
|
||||
if _, found := existingLayers[r.ID]; found {
|
||||
continue
|
||||
}
|
||||
|
||||
bigData, err := c.store.LayerBigData(r.ID, cacheKey)
|
||||
// if the cache already exists, read and use it
|
||||
if err == nil {
|
||||
defer bigData.Close()
|
||||
metadata, err := readMetadataFromCache(bigData)
|
||||
if err == nil {
|
||||
c.addLayer(r.ID, metadata)
|
||||
continue
|
||||
}
|
||||
logrus.Warningf("Error reading cache file for layer %q: %v", r.ID, err)
|
||||
} else if !errors.Is(err, os.ErrNotExist) {
|
||||
return err
|
||||
}
|
||||
|
||||
var lcd chunkedLayerData
|
||||
|
||||
clFile, err := c.store.LayerBigData(r.ID, chunkedLayerDataKey)
|
||||
if err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||
return err
|
||||
}
|
||||
if clFile != nil {
|
||||
cl, err := io.ReadAll(clFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err)
|
||||
}
|
||||
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
if err := json.Unmarshal(cl, &lcd); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// otherwise create it from the layer TOC.
|
||||
manifestReader, err := c.store.LayerBigData(r.ID, bigDataKey)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
defer manifestReader.Close()
|
||||
|
||||
manifest, err := io.ReadAll(manifestReader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err)
|
||||
}
|
||||
|
||||
metadata, err := writeCache(manifest, lcd.Format, r.ID, c.store)
|
||||
if err == nil {
|
||||
c.addLayer(r.ID, metadata)
|
||||
}
|
||||
}
|
||||
|
||||
var newLayers []layer
|
||||
for _, l := range c.layers {
|
||||
if _, found := currentLayers[l.id]; found {
|
||||
newLayers = append(newLayers, l)
|
||||
}
|
||||
}
|
||||
c.layers = newLayers
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// calculateHardLinkFingerprint calculates a hash that can be used to verify if a file
|
||||
// is usable for deduplication with hardlinks.
|
||||
// To calculate the digest, it uses the file payload digest, UID, GID, mode and xattrs.
|
||||
func calculateHardLinkFingerprint(f *internal.FileMetadata) (string, error) {
|
||||
digester := digest.Canonical.Digester()
|
||||
|
||||
modeString := fmt.Sprintf("%d:%d:%o", f.UID, f.GID, f.Mode)
|
||||
hash := digester.Hash()
|
||||
|
||||
if _, err := hash.Write([]byte(f.Digest)); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if _, err := hash.Write([]byte(modeString)); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if len(f.Xattrs) > 0 {
|
||||
keys := make([]string, 0, len(f.Xattrs))
|
||||
for k := range f.Xattrs {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
for _, k := range keys {
|
||||
if _, err := hash.Write([]byte(k)); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if _, err := hash.Write([]byte(f.Xattrs[k])); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
}
|
||||
return string(digester.Digest()), nil
|
||||
}
|
||||
|
||||
// generateFileLocation generates a file location in the form $OFFSET@$PATH
|
||||
func generateFileLocation(path string, offset uint64) []byte {
|
||||
return []byte(fmt.Sprintf("%d@%s", offset, path))
|
||||
}
|
||||
|
||||
// generateTag generates a tag in the form $DIGEST$OFFSET@LEN.
|
||||
// the [OFFSET; LEN] points to the variable length data where the file locations
|
||||
// are stored. $DIGEST has length digestLen stored in the metadata file header.
|
||||
func generateTag(digest string, offset, len uint64) string {
|
||||
return fmt.Sprintf("%s%.20d@%.20d", digest, offset, len)
|
||||
}
|
||||
|
||||
type setBigData interface {
|
||||
// SetLayerBigData stores a (possibly large) chunk of named data
|
||||
SetLayerBigData(id, key string, data io.Reader) error
|
||||
}
|
||||
|
||||
// writeCache write a cache for the layer ID.
|
||||
// It generates a sorted list of digests with their offset to the path location and offset.
|
||||
// The same cache is used to lookup files, chunks and candidates for deduplication with hard links.
|
||||
// There are 3 kind of digests stored:
|
||||
// - digest(file.payload))
|
||||
// - digest(digest(file.payload) + file.UID + file.GID + file.mode + file.xattrs)
|
||||
// - digest(i) for each i in chunks(file payload)
|
||||
func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id string, dest setBigData) (*metadata, error) {
|
||||
var vdata bytes.Buffer
|
||||
tagLen := 0
|
||||
digestLen := 0
|
||||
var tagsBuffer bytes.Buffer
|
||||
|
||||
toc, err := prepareMetadata(manifest, format)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var tags []string
|
||||
for _, k := range toc {
|
||||
if k.Digest != "" {
|
||||
location := generateFileLocation(k.Name, 0)
|
||||
|
||||
off := uint64(vdata.Len())
|
||||
l := uint64(len(location))
|
||||
|
||||
d := generateTag(k.Digest, off, l)
|
||||
if tagLen == 0 {
|
||||
tagLen = len(d)
|
||||
}
|
||||
if tagLen != len(d) {
|
||||
return nil, errors.New("digest with different length found")
|
||||
}
|
||||
tags = append(tags, d)
|
||||
|
||||
fp, err := calculateHardLinkFingerprint(k)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
d = generateTag(fp, off, l)
|
||||
if tagLen != len(d) {
|
||||
return nil, errors.New("digest with different length found")
|
||||
}
|
||||
tags = append(tags, d)
|
||||
|
||||
if _, err := vdata.Write(location); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
digestLen = len(k.Digest)
|
||||
}
|
||||
if k.ChunkDigest != "" {
|
||||
location := generateFileLocation(k.Name, uint64(k.ChunkOffset))
|
||||
off := uint64(vdata.Len())
|
||||
l := uint64(len(location))
|
||||
d := generateTag(k.ChunkDigest, off, l)
|
||||
if tagLen == 0 {
|
||||
tagLen = len(d)
|
||||
}
|
||||
if tagLen != len(d) {
|
||||
return nil, errors.New("digest with different length found")
|
||||
}
|
||||
tags = append(tags, d)
|
||||
|
||||
if _, err := vdata.Write(location); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
digestLen = len(k.ChunkDigest)
|
||||
}
|
||||
}
|
||||
|
||||
sort.Strings(tags)
|
||||
|
||||
for _, t := range tags {
|
||||
if _, err := tagsBuffer.Write([]byte(t)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
errChan := make(chan error, 1)
|
||||
go func() {
|
||||
defer pipeWriter.Close()
|
||||
defer close(errChan)
|
||||
|
||||
// version
|
||||
if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(cacheVersion)); err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
|
||||
// len of a tag
|
||||
if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagLen)); err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
|
||||
// len of a digest
|
||||
if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(digestLen)); err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
|
||||
// tags length
|
||||
if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagsBuffer.Len())); err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
|
||||
// vdata length
|
||||
if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(vdata.Len())); err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
|
||||
// tags
|
||||
if _, err := pipeWriter.Write(tagsBuffer.Bytes()); err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
|
||||
// variable length data
|
||||
if _, err := pipeWriter.Write(vdata.Bytes()); err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
|
||||
errChan <- nil
|
||||
}()
|
||||
defer pipeReader.Close()
|
||||
|
||||
counter := ioutils.NewWriteCounter(io.Discard)
|
||||
|
||||
r := io.TeeReader(pipeReader, counter)
|
||||
|
||||
if err := dest.SetLayerBigData(id, cacheKey, r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := <-errChan; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logrus.Debugf("Written lookaside cache for layer %q with length %v", id, counter.Count)
|
||||
|
||||
return &metadata{
|
||||
digestLen: digestLen,
|
||||
tagLen: tagLen,
|
||||
tags: tagsBuffer.Bytes(),
|
||||
vdata: vdata.Bytes(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func readMetadataFromCache(bigData io.Reader) (*metadata, error) {
|
||||
var version, tagLen, digestLen, tagsLen, vdataLen uint64
|
||||
if err := binary.Read(bigData, binary.LittleEndian, &version); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if version != cacheVersion {
|
||||
return nil, nil //nolint: nilnil
|
||||
}
|
||||
if err := binary.Read(bigData, binary.LittleEndian, &tagLen); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Read(bigData, binary.LittleEndian, &digestLen); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Read(bigData, binary.LittleEndian, &tagsLen); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Read(bigData, binary.LittleEndian, &vdataLen); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tags := make([]byte, tagsLen)
|
||||
if _, err := bigData.Read(tags); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
vdata := make([]byte, vdataLen)
|
||||
if _, err := bigData.Read(vdata); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &metadata{
|
||||
tagLen: int(tagLen),
|
||||
digestLen: int(digestLen),
|
||||
tags: tags,
|
||||
vdata: vdata,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func prepareMetadata(manifest []byte, format graphdriver.DifferOutputFormat) ([]*internal.FileMetadata, error) {
|
||||
toc, err := unmarshalToc(manifest)
|
||||
if err != nil {
|
||||
// ignore errors here. They might be caused by a different manifest format.
|
||||
logrus.Debugf("could not unmarshal manifest: %v", err)
|
||||
return nil, nil //nolint: nilnil
|
||||
}
|
||||
|
||||
switch format {
|
||||
case graphdriver.DifferOutputFormatDir:
|
||||
case graphdriver.DifferOutputFormatFlat:
|
||||
toc.Entries, err = makeEntriesFlat(toc.Entries)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown format %q", format)
|
||||
}
|
||||
|
||||
var r []*internal.FileMetadata
|
||||
chunkSeen := make(map[string]bool)
|
||||
for i := range toc.Entries {
|
||||
d := toc.Entries[i].Digest
|
||||
if d != "" {
|
||||
r = append(r, &toc.Entries[i])
|
||||
continue
|
||||
}
|
||||
|
||||
// chunks do not use hard link dedup so keeping just one candidate is enough
|
||||
cd := toc.Entries[i].ChunkDigest
|
||||
if cd != "" && !chunkSeen[cd] {
|
||||
r = append(r, &toc.Entries[i])
|
||||
chunkSeen[cd] = true
|
||||
}
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (c *layersCache) addLayer(id string, metadata *metadata) error {
|
||||
target, err := c.store.DifferTarget(id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get checkout directory layer %q: %w", id, err)
|
||||
}
|
||||
|
||||
l := layer{
|
||||
id: id,
|
||||
metadata: metadata,
|
||||
target: target,
|
||||
}
|
||||
c.layers = append(c.layers, l)
|
||||
return nil
|
||||
}
|
||||
|
||||
func byteSliceAsString(b []byte) string {
|
||||
return *(*string)(unsafe.Pointer(&b))
|
||||
}
|
||||
|
||||
func findTag(digest string, metadata *metadata) (string, uint64, uint64) {
|
||||
if len(digest) != metadata.digestLen {
|
||||
return "", 0, 0
|
||||
}
|
||||
|
||||
nElements := len(metadata.tags) / metadata.tagLen
|
||||
|
||||
i := sort.Search(nElements, func(i int) bool {
|
||||
d := byteSliceAsString(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+metadata.digestLen])
|
||||
return strings.Compare(d, digest) >= 0
|
||||
})
|
||||
if i < nElements {
|
||||
d := string(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+len(digest)])
|
||||
if digest == d {
|
||||
startOff := i*metadata.tagLen + metadata.digestLen
|
||||
parts := strings.Split(string(metadata.tags[startOff:(i+1)*metadata.tagLen]), "@")
|
||||
off, _ := strconv.ParseInt(parts[0], 10, 64)
|
||||
len, _ := strconv.ParseInt(parts[1], 10, 64)
|
||||
return digest, uint64(off), uint64(len)
|
||||
}
|
||||
}
|
||||
return "", 0, 0
|
||||
}
|
||||
|
||||
func (c *layersCache) findDigestInternal(digest string) (string, string, int64, error) {
|
||||
if digest == "" {
|
||||
return "", "", -1, nil
|
||||
}
|
||||
|
||||
c.mutex.RLock()
|
||||
defer c.mutex.RUnlock()
|
||||
|
||||
for _, layer := range c.layers {
|
||||
digest, off, len := findTag(digest, layer.metadata)
|
||||
if digest != "" {
|
||||
position := string(layer.metadata.vdata[off : off+len])
|
||||
parts := strings.SplitN(position, "@", 2)
|
||||
offFile, _ := strconv.ParseInt(parts[0], 10, 64)
|
||||
return layer.target, parts[1], offFile, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", "", -1, nil
|
||||
}
|
||||
|
||||
// findFileInOtherLayers finds the specified file in other layers.
|
||||
// file is the file to look for.
|
||||
func (c *layersCache) findFileInOtherLayers(file *internal.FileMetadata, useHardLinks bool) (string, string, error) {
|
||||
digest := file.Digest
|
||||
if useHardLinks {
|
||||
var err error
|
||||
digest, err = calculateHardLinkFingerprint(file)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
}
|
||||
target, name, off, err := c.findDigestInternal(digest)
|
||||
if off == 0 {
|
||||
return target, name, err
|
||||
}
|
||||
return "", "", nil
|
||||
}
|
||||
|
||||
func (c *layersCache) findChunkInOtherLayers(chunk *internal.FileMetadata) (string, string, int64, error) {
|
||||
return c.findDigestInternal(chunk.ChunkDigest)
|
||||
}
|
||||
|
||||
func unmarshalToc(manifest []byte) (*internal.TOC, error) {
|
||||
var buf bytes.Buffer
|
||||
count := 0
|
||||
var toc internal.TOC
|
||||
|
||||
iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
|
||||
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
|
||||
if strings.ToLower(field) != "entries" {
|
||||
iter.Skip()
|
||||
continue
|
||||
}
|
||||
for iter.ReadArray() {
|
||||
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
|
||||
switch strings.ToLower(field) {
|
||||
case "type", "name", "linkname", "digest", "chunkdigest", "chunktype", "modtime", "accesstime", "changetime":
|
||||
count += len(iter.ReadStringAsSlice())
|
||||
case "xattrs":
|
||||
for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
|
||||
count += len(iter.ReadStringAsSlice())
|
||||
}
|
||||
default:
|
||||
iter.Skip()
|
||||
}
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
buf.Grow(count)
|
||||
|
||||
getString := func(b []byte) string {
|
||||
from := buf.Len()
|
||||
buf.Write(b)
|
||||
to := buf.Len()
|
||||
return byteSliceAsString(buf.Bytes()[from:to])
|
||||
}
|
||||
|
||||
iter = jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
|
||||
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
|
||||
if strings.ToLower(field) == "version" {
|
||||
toc.Version = iter.ReadInt()
|
||||
continue
|
||||
}
|
||||
if strings.ToLower(field) != "entries" {
|
||||
iter.Skip()
|
||||
continue
|
||||
}
|
||||
for iter.ReadArray() {
|
||||
var m internal.FileMetadata
|
||||
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
|
||||
switch strings.ToLower(field) {
|
||||
case "type":
|
||||
m.Type = getString(iter.ReadStringAsSlice())
|
||||
case "name":
|
||||
m.Name = getString(iter.ReadStringAsSlice())
|
||||
case "linkname":
|
||||
m.Linkname = getString(iter.ReadStringAsSlice())
|
||||
case "mode":
|
||||
m.Mode = iter.ReadInt64()
|
||||
case "size":
|
||||
m.Size = iter.ReadInt64()
|
||||
case "uid":
|
||||
m.UID = iter.ReadInt()
|
||||
case "gid":
|
||||
m.GID = iter.ReadInt()
|
||||
case "modtime":
|
||||
time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.ModTime = &time
|
||||
case "accesstime":
|
||||
time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.AccessTime = &time
|
||||
case "changetime":
|
||||
time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.ChangeTime = &time
|
||||
case "devmajor":
|
||||
m.Devmajor = iter.ReadInt64()
|
||||
case "devminor":
|
||||
m.Devminor = iter.ReadInt64()
|
||||
case "digest":
|
||||
m.Digest = getString(iter.ReadStringAsSlice())
|
||||
case "offset":
|
||||
m.Offset = iter.ReadInt64()
|
||||
case "endoffset":
|
||||
m.EndOffset = iter.ReadInt64()
|
||||
case "chunksize":
|
||||
m.ChunkSize = iter.ReadInt64()
|
||||
case "chunkoffset":
|
||||
m.ChunkOffset = iter.ReadInt64()
|
||||
case "chunkdigest":
|
||||
m.ChunkDigest = getString(iter.ReadStringAsSlice())
|
||||
case "chunktype":
|
||||
m.ChunkType = getString(iter.ReadStringAsSlice())
|
||||
case "xattrs":
|
||||
m.Xattrs = make(map[string]string)
|
||||
for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
|
||||
value := iter.ReadStringAsSlice()
|
||||
m.Xattrs[key] = getString(value)
|
||||
}
|
||||
default:
|
||||
iter.Skip()
|
||||
}
|
||||
}
|
||||
if m.Type == TypeReg && m.Size == 0 && m.Digest == "" {
|
||||
m.Digest = digestSha256Empty
|
||||
}
|
||||
toc.Entries = append(toc.Entries, m)
|
||||
}
|
||||
break
|
||||
}
|
||||
toc.StringsBuf = buf
|
||||
return &toc, nil
|
||||
}
|
||||
vendor/github.com/containers/storage/pkg/chunked/compression.go (generated, vendored, new file)
@@ -0,0 +1,25 @@
package chunked

import (
	"io"

	"github.com/containers/storage/pkg/chunked/compressor"
	"github.com/containers/storage/pkg/chunked/internal"
)

const (
	TypeReg     = internal.TypeReg
	TypeChunk   = internal.TypeChunk
	TypeLink    = internal.TypeLink
	TypeChar    = internal.TypeChar
	TypeBlock   = internal.TypeBlock
	TypeDir     = internal.TypeDir
	TypeFifo    = internal.TypeFifo
	TypeSymlink = internal.TypeSymlink
)

// ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
// Deprecated: Use pkg/chunked/compressor.ZstdCompressor.
func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
	return compressor.ZstdCompressor(r, metadata, level)
}
vendor/github.com/containers/storage/pkg/chunked/compression_linux.go (generated, vendored, new file)
@@ -0,0 +1,283 @@
package chunked
|
||||
|
||||
import (
|
||||
archivetar "archive/tar"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
|
||||
"github.com/containerd/stargz-snapshotter/estargz"
|
||||
"github.com/containers/storage/pkg/chunked/internal"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/klauspost/pgzip"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/vbatts/tar-split/archive/tar"
|
||||
)
|
||||
|
||||
var typesToTar = map[string]byte{
|
||||
TypeReg: tar.TypeReg,
|
||||
TypeLink: tar.TypeLink,
|
||||
TypeChar: tar.TypeChar,
|
||||
TypeBlock: tar.TypeBlock,
|
||||
TypeDir: tar.TypeDir,
|
||||
TypeFifo: tar.TypeFifo,
|
||||
TypeSymlink: tar.TypeSymlink,
|
||||
}
|
||||
|
||||
func typeToTarType(t string) (byte, error) {
|
||||
r, found := typesToTar[t]
|
||||
if !found {
|
||||
return 0, fmt.Errorf("unknown type: %v", t)
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) {
|
||||
// information on the format here https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md
|
||||
footerSize := int64(51)
|
||||
if blobSize <= footerSize {
|
||||
return nil, 0, errors.New("blob too small")
|
||||
}
|
||||
chunk := ImageSourceChunk{
|
||||
Offset: uint64(blobSize - footerSize),
|
||||
Length: uint64(footerSize),
|
||||
}
|
||||
parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
var reader io.ReadCloser
|
||||
select {
|
||||
case r := <-parts:
|
||||
reader = r
|
||||
case err := <-errs:
|
||||
return nil, 0, err
|
||||
}
|
||||
defer reader.Close()
|
||||
footer := make([]byte, footerSize)
|
||||
if _, err := io.ReadFull(reader, footer); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
/* Read the ToC offset:
|
||||
- 10 bytes gzip header
|
||||
- 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ"))
|
||||
- 2 bytes Extra: SI1 = 'S', SI2 = 'G'
|
||||
- 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ"))
|
||||
- 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC)
|
||||
- 5 bytes flate header: BFINAL = 1(last block), BTYPE = 0(non-compressed block), LEN = 0
|
||||
- 8 bytes gzip footer
|
||||
*/
|
||||
tocOffset, err := strconv.ParseInt(string(footer[16:16+22-6]), 16, 64)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("parse ToC offset: %w", err)
|
||||
}
|
||||
|
||||
size := int64(blobSize - footerSize - tocOffset)
|
||||
// set a reasonable limit
|
||||
if size > (1<<20)*50 {
|
||||
return nil, 0, errors.New("manifest too big")
|
||||
}
|
||||
|
||||
chunk = ImageSourceChunk{
|
||||
Offset: uint64(tocOffset),
|
||||
Length: uint64(size),
|
||||
}
|
||||
parts, errs, err = blobStream.GetBlobAt([]ImageSourceChunk{chunk})
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
var tocReader io.ReadCloser
|
||||
select {
|
||||
case r := <-parts:
|
||||
tocReader = r
|
||||
case err := <-errs:
|
||||
return nil, 0, err
|
||||
}
|
||||
defer tocReader.Close()
|
||||
|
||||
r, err := pgzip.NewReader(tocReader)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
aTar := archivetar.NewReader(r)
|
||||
|
||||
header, err := aTar.Next()
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
// set a reasonable limit
|
||||
if header.Size > (1<<20)*50 {
|
||||
return nil, 0, errors.New("manifest too big")
|
||||
}
|
||||
|
||||
manifestUncompressed := make([]byte, header.Size)
|
||||
if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
manifestDigester := digest.Canonical.Digester()
|
||||
manifestChecksum := manifestDigester.Hash()
|
||||
if _, err := manifestChecksum.Write(manifestUncompressed); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
d, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation])
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
if manifestDigester.Digest() != d {
|
||||
return nil, 0, errors.New("invalid manifest checksum")
|
||||
}
|
||||
|
||||
return manifestUncompressed, tocOffset, nil
|
||||
}
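The comment above pins down where the ToC offset lives inside the 51-byte estargz footer: 16 bytes of gzip and extra-field framing, then 16 hex digits, then the literal "STARGZ". A self-contained sketch of just that parsing step, derived from the comment (the explicit marker check is an addition for illustration, not something the vendored code does):

package chunkedexample

import (
	"errors"
	"strconv"
)

const estargzFooterSize = 51

// parseEstargzTOCOffset extracts the ToC offset from an estargz footer:
// bytes 16..31 hold fmt.Sprintf("%016x", offset), bytes 32..37 hold "STARGZ".
func parseEstargzTOCOffset(footer []byte) (int64, error) {
	if len(footer) != estargzFooterSize {
		return 0, errors.New("unexpected footer size")
	}
	if string(footer[32:38]) != "STARGZ" {
		return 0, errors.New("STARGZ marker not found")
	}
	return strconv.ParseInt(string(footer[16:32]), 16, 64)
}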
|
||||
|
||||
// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must
|
||||
// be specified.
|
||||
// This function uses the io.github.containers.zstd-chunked. annotations when specified.
|
||||
func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, []byte, int64, error) {
|
||||
footerSize := int64(internal.FooterSizeSupported)
|
||||
if blobSize <= footerSize {
|
||||
return nil, nil, 0, errors.New("blob too small")
|
||||
}
|
||||
|
||||
var footerData internal.ZstdChunkedFooterData
|
||||
|
||||
if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" {
|
||||
var err error
|
||||
footerData, err = internal.ReadFooterDataFromAnnotations(annotations)
|
||||
if err != nil {
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
} else {
|
||||
chunk := ImageSourceChunk{
|
||||
Offset: uint64(blobSize - footerSize),
|
||||
Length: uint64(footerSize),
|
||||
}
|
||||
parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
|
||||
if err != nil {
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
var reader io.ReadCloser
|
||||
select {
|
||||
case r := <-parts:
|
||||
reader = r
|
||||
case err := <-errs:
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
footer := make([]byte, footerSize)
|
||||
if _, err := io.ReadFull(reader, footer); err != nil {
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
|
||||
footerData, err = internal.ReadFooterDataFromBlob(footer)
|
||||
if err != nil {
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
}
|
||||
|
||||
if footerData.ManifestType != internal.ManifestTypeCRFS {
|
||||
return nil, nil, 0, errors.New("invalid manifest type")
|
||||
}
|
||||
|
||||
// set a reasonable limit
|
||||
if footerData.LengthCompressed > (1<<20)*50 {
|
||||
return nil, nil, 0, errors.New("manifest too big")
|
||||
}
|
||||
if footerData.LengthUncompressed > (1<<20)*50 {
|
||||
return nil, nil, 0, errors.New("manifest too big")
|
||||
}
|
||||
|
||||
chunk := ImageSourceChunk{
|
||||
Offset: footerData.Offset,
|
||||
Length: footerData.LengthCompressed,
|
||||
}
|
||||
|
||||
chunks := []ImageSourceChunk{chunk}
|
||||
|
||||
if footerData.OffsetTarSplit > 0 {
|
||||
chunkTarSplit := ImageSourceChunk{
|
||||
Offset: footerData.OffsetTarSplit,
|
||||
Length: footerData.LengthCompressedTarSplit,
|
||||
}
|
||||
chunks = append(chunks, chunkTarSplit)
|
||||
}
|
||||
|
||||
parts, errs, err := blobStream.GetBlobAt(chunks)
|
||||
if err != nil {
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
|
||||
readBlob := func(len uint64) ([]byte, error) {
|
||||
var reader io.ReadCloser
|
||||
select {
|
||||
case r := <-parts:
|
||||
reader = r
|
||||
case err := <-errs:
|
||||
return nil, err
|
||||
}
|
||||
|
||||
blob := make([]byte, len)
|
||||
if _, err := io.ReadFull(reader, blob); err != nil {
|
||||
reader.Close()
|
||||
return nil, err
|
||||
}
|
||||
if err := reader.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return blob, nil
|
||||
}
|
||||
|
||||
manifest, err := readBlob(footerData.LengthCompressed)
|
||||
if err != nil {
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
|
||||
decodedBlob, err := decodeAndValidateBlob(manifest, footerData.LengthUncompressed, footerData.ChecksumAnnotation)
|
||||
if err != nil {
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
decodedTarSplit := []byte{}
|
||||
if footerData.OffsetTarSplit > 0 {
|
||||
tarSplit, err := readBlob(footerData.LengthCompressedTarSplit)
|
||||
if err != nil {
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
|
||||
decodedTarSplit, err = decodeAndValidateBlob(tarSplit, footerData.LengthUncompressedTarSplit, footerData.ChecksumAnnotationTarSplit)
|
||||
if err != nil {
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
}
|
||||
return decodedBlob, decodedTarSplit, int64(footerData.Offset), err
|
||||
}
|
||||
|
||||
func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedUncompressedChecksum string) ([]byte, error) {
|
||||
d, err := digest.Parse(expectedUncompressedChecksum)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
blobDigester := d.Algorithm().Digester()
|
||||
blobChecksum := blobDigester.Hash()
|
||||
if _, err := blobChecksum.Write(blob); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if blobDigester.Digest() != d {
|
||||
return nil, fmt.Errorf("invalid blob checksum, expected checksum %s, got %s", d, blobDigester.Digest())
|
||||
}
|
||||
|
||||
decoder, err := zstd.NewReader(nil) //nolint:contextcheck
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer decoder.Close()
|
||||
|
||||
b := make([]byte, 0, lengthUncompressed)
|
||||
return decoder.DecodeAll(blob, b)
|
||||
}
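decodeAndValidateBlob pairs a checksum of the still-compressed blob with a pre-sized zstd DecodeAll. The same pattern can be written a little more compactly with Algorithm.FromBytes from go-digest; a hedged equivalent sketch, not the vendored implementation itself:

package chunkedexample

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
	digest "github.com/opencontainers/go-digest"
)

// decompressChecked verifies the compressed blob against expectedChecksum,
// then decompresses it into a buffer pre-sized to the known uncompressed length.
func decompressChecked(blob []byte, expectedChecksum string, lengthUncompressed uint64) ([]byte, error) {
	d, err := digest.Parse(expectedChecksum)
	if err != nil {
		return nil, err
	}
	if got := d.Algorithm().FromBytes(blob); got != d {
		return nil, fmt.Errorf("invalid blob checksum, expected %s, got %s", d, got)
	}
	decoder, err := zstd.NewReader(nil)
	if err != nil {
		return nil, err
	}
	defer decoder.Close()
	return decoder.DecodeAll(blob, make([]byte, 0, lengthUncompressed))
}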
|
||||
230
vendor/github.com/containers/storage/pkg/chunked/dump/dump.go
generated
vendored
Normal file
@@ -0,0 +1,230 @@
package dump
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
|
||||
"github.com/containers/storage/pkg/chunked/internal"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const (
|
||||
ESCAPE_STANDARD = 0
|
||||
NOESCAPE_SPACE = 1 << iota
|
||||
ESCAPE_EQUAL
|
||||
ESCAPE_LONE_DASH
|
||||
)
|
||||
|
||||
func escaped(val string, escape int) string {
|
||||
noescapeSpace := escape&NOESCAPE_SPACE != 0
|
||||
escapeEqual := escape&ESCAPE_EQUAL != 0
|
||||
escapeLoneDash := escape&ESCAPE_LONE_DASH != 0
|
||||
|
||||
length := len(val)
|
||||
|
||||
if escapeLoneDash && val == "-" {
|
||||
return fmt.Sprintf("\\x%.2x", val[0])
|
||||
}
|
||||
|
||||
var result string
|
||||
for i := 0; i < length; i++ {
|
||||
c := val[i]
|
||||
hexEscape := false
|
||||
var special string
|
||||
|
||||
switch c {
|
||||
case '\\':
|
||||
special = "\\\\"
|
||||
case '\n':
|
||||
special = "\\n"
|
||||
case '\r':
|
||||
special = "\\r"
|
||||
case '\t':
|
||||
special = "\\t"
|
||||
case '=':
|
||||
hexEscape = escapeEqual
|
||||
default:
|
||||
if noescapeSpace {
|
||||
hexEscape = !unicode.IsPrint(rune(c))
|
||||
} else {
|
||||
hexEscape = !unicode.IsGraphic(rune(c))
|
||||
}
|
||||
}
|
||||
|
||||
if special != "" {
|
||||
result += special
|
||||
} else if hexEscape {
|
||||
result += fmt.Sprintf("\\x%.2x", c)
|
||||
} else {
|
||||
result += string(c)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func escapedOptional(val string, escape int) string {
|
||||
if val == "" {
|
||||
return "-"
|
||||
}
|
||||
return escaped(val, escape)
|
||||
}
|
||||
|
||||
func getStMode(mode uint32, typ string) (uint32, error) {
|
||||
switch typ {
|
||||
case internal.TypeReg, internal.TypeLink:
|
||||
mode |= unix.S_IFREG
|
||||
case internal.TypeChar:
|
||||
mode |= unix.S_IFCHR
|
||||
case internal.TypeBlock:
|
||||
mode |= unix.S_IFBLK
|
||||
case internal.TypeDir:
|
||||
mode |= unix.S_IFDIR
|
||||
case internal.TypeFifo:
|
||||
mode |= unix.S_IFIFO
|
||||
case internal.TypeSymlink:
|
||||
mode |= unix.S_IFLNK
|
||||
default:
|
||||
return 0, fmt.Errorf("unknown type %s", typ)
|
||||
}
|
||||
return mode, nil
|
||||
}
|
||||
|
||||
func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error {
|
||||
path := entry.Name
|
||||
if path == "" {
|
||||
path = "/"
|
||||
} else if path[0] != '/' {
|
||||
path = "/" + path
|
||||
}
|
||||
|
||||
if _, err := fmt.Fprint(out, escaped(path, ESCAPE_STANDARD)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nlinks := links[entry.Name] + links[entry.Linkname] + 1
|
||||
link := ""
|
||||
if entry.Type == internal.TypeLink {
|
||||
link = "@"
|
||||
}
|
||||
|
||||
rdev := unix.Mkdev(uint32(entry.Devmajor), uint32(entry.Devminor))
|
||||
|
||||
entryTime := entry.ModTime
|
||||
if entryTime == nil {
|
||||
t := time.Unix(0, 0)
|
||||
entryTime = &t
|
||||
}
|
||||
|
||||
mode, err := getStMode(uint32(entry.Mode), entry.Type)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := fmt.Fprintf(out, " %d %s%o %d %d %d %d %d.%d ", entry.Size,
|
||||
link, mode,
|
||||
nlinks, entry.UID, entry.GID, rdev,
|
||||
entryTime.Unix(), entryTime.Nanosecond()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var payload string
|
||||
if entry.Linkname != "" {
|
||||
payload = entry.Linkname
|
||||
if entry.Type == internal.TypeLink && payload[0] != '/' {
|
||||
payload = "/" + payload
|
||||
}
|
||||
} else {
|
||||
if len(entry.Digest) > 10 {
|
||||
d := strings.Replace(entry.Digest, "sha256:", "", 1)
|
||||
payload = d[:2] + "/" + d[2:]
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := fmt.Fprintf(out, escapedOptional(payload, ESCAPE_LONE_DASH)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
/* inline content. */
|
||||
if _, err := fmt.Fprint(out, " -"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
/* store the digest. */
|
||||
if _, err := fmt.Fprint(out, " "); err != nil {
|
||||
return err
|
||||
}
|
||||
digest := verityDigests[payload]
|
||||
if _, err := fmt.Fprintf(out, escapedOptional(digest, ESCAPE_LONE_DASH)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for k, v := range entry.Xattrs {
|
||||
name := escaped(k, ESCAPE_EQUAL)
|
||||
value := escaped(v, ESCAPE_EQUAL)
|
||||
|
||||
if _, err := fmt.Fprintf(out, " %s=%s", name, value); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if _, err := fmt.Fprint(out, "\n"); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GenerateDump generates a dump of the TOC in the same format as `composefs-info dump`
|
||||
func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, error) {
|
||||
toc, ok := tocI.(*internal.TOC)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid TOC type")
|
||||
}
|
||||
pipeR, pipeW := io.Pipe()
|
||||
go func() {
|
||||
closed := false
|
||||
w := bufio.NewWriter(pipeW)
|
||||
defer func() {
|
||||
if !closed {
|
||||
w.Flush()
|
||||
pipeW.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
links := make(map[string]int)
|
||||
for _, e := range toc.Entries {
|
||||
if e.Linkname == "" {
|
||||
continue
|
||||
}
|
||||
links[e.Linkname] = links[e.Linkname] + 1
|
||||
}
|
||||
|
||||
if len(toc.Entries) == 0 || (toc.Entries[0].Name != "" && toc.Entries[0].Name != "/") {
|
||||
root := &internal.FileMetadata{
|
||||
Name: "/",
|
||||
Type: internal.TypeDir,
|
||||
Mode: 0o755,
|
||||
}
|
||||
|
||||
if err := dumpNode(w, links, verityDigests, root); err != nil {
|
||||
pipeW.CloseWithError(err)
|
||||
closed = true
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
for _, e := range toc.Entries {
|
||||
if e.Type == internal.TypeChunk {
|
||||
continue
|
||||
}
|
||||
if err := dumpNode(w, links, verityDigests, &e); err != nil {
|
||||
pipeW.CloseWithError(err)
|
||||
closed = true
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return pipeR, nil
|
||||
}
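GenerateDump writes one line per TOC entry in the composefs-info dump layout built by dumpNode: path, size, mode, nlink, uid, gid, rdev, mtime, payload, an inline-content placeholder, the verity digest, and xattrs. A hedged usage sketch with a hand-built TOC; the field values are invented, and only the FileMetadata field names already used by dumpNode above are assumed:

package main

import (
	"io"
	"log"
	"os"

	"github.com/containers/storage/pkg/chunked/dump"
	"github.com/containers/storage/pkg/chunked/internal"
)

func main() {
	toc := &internal.TOC{
		Entries: []internal.FileMetadata{
			{Name: "etc", Type: internal.TypeDir, Mode: 0o755},
			{
				Name: "etc/hostname", Type: internal.TypeReg, Mode: 0o644, Size: 6,
				Digest: "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
			},
		},
	}
	r, err := dump.GenerateDump(toc, map[string]string{})
	if err != nil {
		log.Fatal(err)
	}
	// The reader is fed by a goroutine inside GenerateDump; copying it drains the dump.
	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}
}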
|
||||
25
vendor/github.com/containers/storage/pkg/chunked/storage.go
generated
vendored
Normal file
@@ -0,0 +1,25 @@
package chunked

import (
	"io"
)

// ImageSourceChunk is a portion of a blob.
type ImageSourceChunk struct {
	Offset uint64
	Length uint64
}

// ImageSourceSeekable is an image source that permits to fetch chunks of the entire blob.
type ImageSourceSeekable interface {
	// GetBlobAt returns a stream for the specified blob.
	GetBlobAt([]ImageSourceChunk) (chan io.ReadCloser, chan error, error)
}

// ErrBadRequest is returned when the request is not valid
type ErrBadRequest struct { //nolint: errname
}

func (e ErrBadRequest) Error() string {
	return "bad request"
}
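GetBlobAt hands back one ReadCloser per requested chunk on the first channel and reports failures on the second, which is why the manifest readers earlier in this diff drain both with a select. A minimal file-backed implementation sketch, assuming the blob is already available as a local file (everything here is illustrative, not part of the vendored code):

package chunkedexample

import (
	"io"
	"os"

	"github.com/containers/storage/pkg/chunked"
)

type fileSource struct{ f *os.File }

// GetBlobAt serves each requested chunk from a section of the underlying file.
// The error channel is left open and empty because nothing can fail here.
func (s fileSource) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
	parts := make(chan io.ReadCloser, len(chunks))
	errs := make(chan error)
	for _, c := range chunks {
		parts <- io.NopCloser(io.NewSectionReader(s.f, int64(c.Offset), int64(c.Length)))
	}
	close(parts)
	return parts, errs, nil
}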
1989
vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
generated
vendored
Normal file
File diff suppressed because it is too large
17
vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
generated
vendored
Normal file
@@ -0,0 +1,17 @@
//go:build !linux
// +build !linux

package chunked

import (
	"context"
	"errors"

	storage "github.com/containers/storage"
	graphdriver "github.com/containers/storage/drivers"
)

// GetDiffer returns a differ than can be used with ApplyDiffWithDiffer.
func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
	return nil, errors.New("format not supported on this system")
}
342
vendor/github.com/containers/storage/pkg/config/config.go
generated
vendored
Normal file
@@ -0,0 +1,342 @@
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
// ThinpoolOptionsConfig represents the "storage.options.thinpool"
|
||||
// TOML config table.
|
||||
type ThinpoolOptionsConfig struct {
|
||||
// AutoExtendPercent determines the amount by which pool needs to be
|
||||
// grown. This is specified in terms of % of pool size. So a value of
|
||||
// 20 means that when threshold is hit, pool will be grown by 20% of
|
||||
// existing pool size.
|
||||
AutoExtendPercent string `toml:"autoextend_percent,omitempty"`
|
||||
|
||||
// AutoExtendThreshold determines the pool extension threshold in terms
|
||||
// of percentage of pool size. For example, if threshold is 60, that
|
||||
// means when pool is 60% full, threshold has been hit.
|
||||
AutoExtendThreshold string `toml:"autoextend_threshold,omitempty"`
|
||||
|
||||
// BaseSize specifies the size to use when creating the base device,
|
||||
// which limits the size of images and containers.
|
||||
BaseSize string `toml:"basesize,omitempty"`
|
||||
|
||||
// BlockSize specifies a custom blocksize to use for the thin pool.
|
||||
BlockSize string `toml:"blocksize,omitempty"`
|
||||
|
||||
// DirectLvmDevice specifies a custom block storage device to use for
|
||||
// the thin pool.
|
||||
DirectLvmDevice string `toml:"directlvm_device,omitempty"`
|
||||
|
||||
// DirectLvmDeviceForcewipes device even if device already has a
|
||||
// filesystem
|
||||
DirectLvmDeviceForce string `toml:"directlvm_device_force,omitempty"`
|
||||
|
||||
// Fs specifies the filesystem type to use for the base device.
|
||||
Fs string `toml:"fs,omitempty"`
|
||||
|
||||
// log_level sets the log level of devicemapper.
|
||||
LogLevel string `toml:"log_level,omitempty"`
|
||||
|
||||
// MetadataSize specifies the size of the metadata for the thinpool
|
||||
// It will be used with the `pvcreate --metadata` option.
|
||||
MetadataSize string `toml:"metadatasize,omitempty"`
|
||||
|
||||
// MinFreeSpace specifies the min free space percent in a thin pool
|
||||
// require for new device creation to
|
||||
MinFreeSpace string `toml:"min_free_space,omitempty"`
|
||||
|
||||
// MkfsArg specifies extra mkfs arguments to be used when creating the
|
||||
// basedevice.
|
||||
MkfsArg string `toml:"mkfsarg,omitempty"`
|
||||
|
||||
// MountOpt specifies extra mount options used when mounting the thin
|
||||
// devices.
|
||||
MountOpt string `toml:"mountopt,omitempty"`
|
||||
|
||||
// Size
|
||||
Size string `toml:"size,omitempty"`
|
||||
|
||||
// UseDeferredDeletion marks device for deferred deletion
|
||||
UseDeferredDeletion string `toml:"use_deferred_deletion,omitempty"`
|
||||
|
||||
// UseDeferredRemoval marks device for deferred removal
|
||||
UseDeferredRemoval string `toml:"use_deferred_removal,omitempty"`
|
||||
|
||||
// XfsNoSpaceMaxRetriesFreeSpace specifies the maximum number of
|
||||
// retries XFS should attempt to complete IO when ENOSPC (no space)
|
||||
// error is returned by underlying storage device.
|
||||
XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries,omitempty"`
|
||||
}
|
||||
|
||||
type AufsOptionsConfig struct {
|
||||
// MountOpt specifies extra mount options used when mounting
|
||||
MountOpt string `toml:"mountopt,omitempty"`
|
||||
}
|
||||
|
||||
type BtrfsOptionsConfig struct {
|
||||
// MinSpace is the minimal spaces allocated to the device
|
||||
MinSpace string `toml:"min_space,omitempty"`
|
||||
// Size
|
||||
Size string `toml:"size,omitempty"`
|
||||
}
|
||||
|
||||
type OverlayOptionsConfig struct {
|
||||
// IgnoreChownErrors is a flag for whether chown errors should be
|
||||
// ignored when building an image.
|
||||
IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"`
|
||||
// MountOpt specifies extra mount options used when mounting
|
||||
MountOpt string `toml:"mountopt,omitempty"`
|
||||
// Alternative program to use for the mount of the file system
|
||||
MountProgram string `toml:"mount_program,omitempty"`
|
||||
// Size
|
||||
Size string `toml:"size,omitempty"`
|
||||
// Inodes is used to set a maximum inodes of the container image.
|
||||
Inodes string `toml:"inodes,omitempty"`
|
||||
// Do not create a bind mount on the storage home
|
||||
SkipMountHome string `toml:"skip_mount_home,omitempty"`
|
||||
// ForceMask indicates the permissions mask (e.g. "0755") to use for new
|
||||
// files and directories
|
||||
ForceMask string `toml:"force_mask,omitempty"`
|
||||
}
|
||||
|
||||
type VfsOptionsConfig struct {
|
||||
// IgnoreChownErrors is a flag for whether chown errors should be
|
||||
// ignored when building an image.
|
||||
IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"`
|
||||
}
|
||||
|
||||
type ZfsOptionsConfig struct {
|
||||
// MountOpt specifies extra mount options used when mounting
|
||||
MountOpt string `toml:"mountopt,omitempty"`
|
||||
// Name is the File System name of the ZFS File system
|
||||
Name string `toml:"fsname,omitempty"`
|
||||
// Size
|
||||
Size string `toml:"size,omitempty"`
|
||||
}
|
||||
|
||||
// OptionsConfig represents the "storage.options" TOML config table.
|
||||
type OptionsConfig struct {
|
||||
// AdditionalImagesStores is the location of additional read/only
|
||||
// Image stores. Usually used to access Networked File System
|
||||
// for shared image content
|
||||
AdditionalImageStores []string `toml:"additionalimagestores,omitempty"`
|
||||
|
||||
// ImageStore is the location of image store which is separated from the
|
||||
// container store. Usually this is not recommended unless users wants
|
||||
// separate store for image and containers.
|
||||
ImageStore string `toml:"imagestore,omitempty"`
|
||||
|
||||
// AdditionalLayerStores is the location of additional read/only
|
||||
// Layer stores. Usually used to access Networked File System
|
||||
// for shared image content
|
||||
// This API is experimental and can be changed without bumping the
|
||||
// major version number.
|
||||
AdditionalLayerStores []string `toml:"additionallayerstores,omitempty"`
|
||||
|
||||
// Size
|
||||
Size string `toml:"size,omitempty"`
|
||||
|
||||
// RemapUIDs is a list of default UID mappings to use for layers.
|
||||
RemapUIDs string `toml:"remap-uids,omitempty"`
|
||||
// RemapGIDs is a list of default GID mappings to use for layers.
|
||||
RemapGIDs string `toml:"remap-gids,omitempty"`
|
||||
// IgnoreChownErrors is a flag for whether chown errors should be
|
||||
// ignored when building an image.
|
||||
IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"`
|
||||
|
||||
// ForceMask indicates the permissions mask (e.g. "0755") to use for new
|
||||
// files and directories.
|
||||
ForceMask os.FileMode `toml:"force_mask,omitempty"`
|
||||
|
||||
// RemapUser is the name of one or more entries in /etc/subuid which
|
||||
// should be used to set up default UID mappings.
|
||||
RemapUser string `toml:"remap-user,omitempty"`
|
||||
// RemapGroup is the name of one or more entries in /etc/subgid which
|
||||
// should be used to set up default GID mappings.
|
||||
RemapGroup string `toml:"remap-group,omitempty"`
|
||||
|
||||
// RootAutoUsernsUser is the name of one or more entries in /etc/subuid and
|
||||
// /etc/subgid which should be used to set up automatically a userns.
|
||||
RootAutoUsernsUser string `toml:"root-auto-userns-user,omitempty"`
|
||||
|
||||
// AutoUsernsMinSize is the minimum size for a user namespace that is
|
||||
// created automatically.
|
||||
AutoUsernsMinSize uint32 `toml:"auto-userns-min-size,omitempty"`
|
||||
|
||||
// AutoUsernsMaxSize is the maximum size for a user namespace that is
|
||||
// created automatically.
|
||||
AutoUsernsMaxSize uint32 `toml:"auto-userns-max-size,omitempty"`
|
||||
|
||||
// Aufs container options to be handed to aufs drivers
|
||||
Aufs struct{ AufsOptionsConfig } `toml:"aufs,omitempty"`
|
||||
|
||||
// Btrfs container options to be handed to btrfs drivers
|
||||
Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs,omitempty"`
|
||||
|
||||
// Thinpool container options to be handed to thinpool drivers
|
||||
Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool,omitempty"`
|
||||
|
||||
// Overlay container options to be handed to overlay drivers
|
||||
Overlay struct{ OverlayOptionsConfig } `toml:"overlay,omitempty"`
|
||||
|
||||
// Vfs container options to be handed to VFS drivers
|
||||
Vfs struct{ VfsOptionsConfig } `toml:"vfs,omitempty"`
|
||||
|
||||
// Zfs container options to be handed to ZFS drivers
|
||||
Zfs struct{ ZfsOptionsConfig } `toml:"zfs,omitempty"`
|
||||
|
||||
// Do not create a bind mount on the storage home
|
||||
SkipMountHome string `toml:"skip_mount_home,omitempty"`
|
||||
|
||||
// Alternative program to use for the mount of the file system
|
||||
MountProgram string `toml:"mount_program,omitempty"`
|
||||
|
||||
// MountOpt specifies extra mount options used when mounting
|
||||
MountOpt string `toml:"mountopt,omitempty"`
|
||||
|
||||
// PullOptions specifies options to be handed to pull managers
|
||||
// This API is experimental and can be changed without bumping the major version number.
|
||||
PullOptions map[string]string `toml:"pull_options,omitempty"`
|
||||
|
||||
// DisableVolatile doesn't allow volatile mounts when it is set.
|
||||
DisableVolatile bool `toml:"disable-volatile,omitempty"`
|
||||
}
|
||||
|
||||
// GetGraphDriverOptions returns the driver specific options
|
||||
func GetGraphDriverOptions(driverName string, options OptionsConfig) []string {
|
||||
var doptions []string
|
||||
switch driverName {
|
||||
case "aufs":
|
||||
if options.Aufs.MountOpt != "" {
|
||||
return append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Aufs.MountOpt))
|
||||
} else if options.MountOpt != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt))
|
||||
}
|
||||
|
||||
case "btrfs":
|
||||
if options.Btrfs.MinSpace != "" {
|
||||
return append(doptions, fmt.Sprintf("%s.min_space=%s", driverName, options.Btrfs.MinSpace))
|
||||
}
|
||||
if options.Btrfs.Size != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Btrfs.Size))
|
||||
} else if options.Size != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size))
|
||||
}
|
||||
|
||||
case "devicemapper":
|
||||
if options.Thinpool.AutoExtendPercent != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.thinp_autoextend_percent=%s", options.Thinpool.AutoExtendPercent))
|
||||
}
|
||||
if options.Thinpool.AutoExtendThreshold != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.thinp_autoextend_threshold=%s", options.Thinpool.AutoExtendThreshold))
|
||||
}
|
||||
if options.Thinpool.BaseSize != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.basesize=%s", options.Thinpool.BaseSize))
|
||||
}
|
||||
if options.Thinpool.BlockSize != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.blocksize=%s", options.Thinpool.BlockSize))
|
||||
}
|
||||
if options.Thinpool.DirectLvmDevice != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.directlvm_device=%s", options.Thinpool.DirectLvmDevice))
|
||||
}
|
||||
if options.Thinpool.DirectLvmDeviceForce != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.directlvm_device_force=%s", options.Thinpool.DirectLvmDeviceForce))
|
||||
}
|
||||
if options.Thinpool.Fs != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.fs=%s", options.Thinpool.Fs))
|
||||
}
|
||||
if options.Thinpool.LogLevel != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.libdm_log_level=%s", options.Thinpool.LogLevel))
|
||||
}
|
||||
if options.Thinpool.MetadataSize != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.metadata_size=%s", options.Thinpool.MetadataSize))
|
||||
}
|
||||
if options.Thinpool.MinFreeSpace != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.min_free_space=%s", options.Thinpool.MinFreeSpace))
|
||||
}
|
||||
if options.Thinpool.MkfsArg != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.mkfsarg=%s", options.Thinpool.MkfsArg))
|
||||
}
|
||||
if options.Thinpool.MountOpt != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Thinpool.MountOpt))
|
||||
} else if options.MountOpt != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt))
|
||||
}
|
||||
|
||||
if options.Thinpool.Size != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Thinpool.Size))
|
||||
} else if options.Size != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size))
|
||||
}
|
||||
|
||||
if options.Thinpool.UseDeferredDeletion != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.use_deferred_deletion=%s", options.Thinpool.UseDeferredDeletion))
|
||||
}
|
||||
if options.Thinpool.UseDeferredRemoval != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.use_deferred_removal=%s", options.Thinpool.UseDeferredRemoval))
|
||||
}
|
||||
if options.Thinpool.XfsNoSpaceMaxRetries != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.xfs_nospace_max_retries=%s", options.Thinpool.XfsNoSpaceMaxRetries))
|
||||
}
|
||||
|
||||
case "overlay", "overlay2":
|
||||
if options.Overlay.IgnoreChownErrors != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Overlay.IgnoreChownErrors))
|
||||
} else if options.IgnoreChownErrors != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.IgnoreChownErrors))
|
||||
}
|
||||
if options.Overlay.MountProgram != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.mount_program=%s", driverName, options.Overlay.MountProgram))
|
||||
} else if options.MountProgram != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.mount_program=%s", driverName, options.MountProgram))
|
||||
}
|
||||
if options.Overlay.MountOpt != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Overlay.MountOpt))
|
||||
} else if options.MountOpt != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt))
|
||||
}
|
||||
if options.Overlay.Size != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Overlay.Size))
|
||||
} else if options.Size != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size))
|
||||
}
|
||||
if options.Overlay.Inodes != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.inodes=%s", driverName, options.Overlay.Inodes))
|
||||
}
|
||||
if options.Overlay.SkipMountHome != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.skip_mount_home=%s", driverName, options.Overlay.SkipMountHome))
|
||||
} else if options.SkipMountHome != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.skip_mount_home=%s", driverName, options.SkipMountHome))
|
||||
}
|
||||
if options.Overlay.ForceMask != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.force_mask=%s", driverName, options.Overlay.ForceMask))
|
||||
} else if options.ForceMask != 0 {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.force_mask=%s", driverName, options.ForceMask))
|
||||
}
|
||||
case "vfs":
|
||||
if options.Vfs.IgnoreChownErrors != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Vfs.IgnoreChownErrors))
|
||||
} else if options.IgnoreChownErrors != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.IgnoreChownErrors))
|
||||
}
|
||||
|
||||
case "zfs":
|
||||
if options.Zfs.Name != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.fsname=%s", driverName, options.Zfs.Name))
|
||||
}
|
||||
if options.Zfs.MountOpt != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Zfs.MountOpt))
|
||||
} else if options.MountOpt != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt))
|
||||
}
|
||||
if options.Zfs.Size != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Zfs.Size))
|
||||
} else if options.Size != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size))
|
||||
}
|
||||
}
|
||||
return doptions
|
||||
}
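GetGraphDriverOptions flattens the per-driver TOML tables into the flag-style strings the graph drivers consume, preferring the driver-specific value over the top-level one. A short hedged example for the overlay driver (the option values are made up):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/config"
)

func main() {
	opts := config.OptionsConfig{MountOpt: "nodev"}
	opts.Overlay.MountOpt = "nodev,metacopy=on" // the driver-specific value wins over opts.MountOpt
	opts.Overlay.Size = "20G"

	// Prints: [overlay.mountopt=nodev,metacopy=on overlay.size=20G]
	fmt.Println(config.GetGraphDriverOptions("overlay", opts))
}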
|
||||
813
vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go
generated
vendored
Normal file
@@ -0,0 +1,813 @@
//go:build linux && cgo
|
||||
// +build linux,cgo
|
||||
|
||||
package devicemapper
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"unsafe"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Same as DM_DEVICE_* enum values from libdevmapper.h
|
||||
// nolint: unused
|
||||
const (
|
||||
deviceCreate TaskType = iota
|
||||
deviceReload
|
||||
deviceRemove
|
||||
deviceRemoveAll
|
||||
deviceSuspend
|
||||
deviceResume
|
||||
deviceInfo
|
||||
deviceDeps
|
||||
deviceRename
|
||||
deviceVersion
|
||||
deviceStatus
|
||||
deviceTable
|
||||
deviceWaitevent
|
||||
deviceList
|
||||
deviceClear
|
||||
deviceMknodes
|
||||
deviceListVersions
|
||||
deviceTargetMsg
|
||||
deviceSetGeometry
|
||||
)
|
||||
|
||||
const (
|
||||
addNodeOnResume AddNodeType = iota
|
||||
addNodeOnCreate
|
||||
)
|
||||
|
||||
// List of errors returned when using devicemapper.
|
||||
var (
|
||||
ErrTaskRun = errors.New("dm_task_run failed")
|
||||
ErrTaskSetName = errors.New("dm_task_set_name failed")
|
||||
ErrTaskSetMessage = errors.New("dm_task_set_message failed")
|
||||
ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed")
|
||||
ErrTaskSetRo = errors.New("dm_task_set_ro failed")
|
||||
ErrTaskAddTarget = errors.New("dm_task_add_target failed")
|
||||
ErrTaskSetSector = errors.New("dm_task_set_sector failed")
|
||||
ErrTaskGetDeps = errors.New("dm_task_get_deps failed")
|
||||
ErrTaskGetInfo = errors.New("dm_task_get_info failed")
|
||||
ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed")
|
||||
ErrTaskDeferredRemove = errors.New("dm_task_deferred_remove failed")
|
||||
ErrTaskSetCookie = errors.New("dm_task_set_cookie failed")
|
||||
ErrNilCookie = errors.New("cookie ptr can't be nil")
|
||||
ErrGetBlockSize = errors.New("Can't get block size")
|
||||
ErrUdevWait = errors.New("wait on udev cookie failed")
|
||||
ErrSetDevDir = errors.New("dm_set_dev_dir failed")
|
||||
ErrGetLibraryVersion = errors.New("dm_get_library_version failed")
|
||||
ErrCreateRemoveTask = errors.New("Can't create task of type deviceRemove")
|
||||
ErrRunRemoveDevice = errors.New("running RemoveDevice failed")
|
||||
ErrInvalidAddNode = errors.New("Invalid AddNode type")
|
||||
ErrBusy = errors.New("Device is Busy")
|
||||
ErrDeviceIDExists = errors.New("Device Id Exists")
|
||||
ErrEnxio = errors.New("No such device or address")
|
||||
)
|
||||
|
||||
var (
|
||||
dmSawBusy bool
|
||||
dmSawExist bool
|
||||
dmSawEnxio bool // No Such Device or Address
|
||||
)
|
||||
|
||||
type (
|
||||
// Task represents a devicemapper task (like lvcreate, etc.) ; a task is needed for each ioctl
|
||||
// command to execute.
|
||||
Task struct {
|
||||
unmanaged *cdmTask
|
||||
}
|
||||
// Deps represents dependents (layer) of a device.
|
||||
Deps struct {
|
||||
Count uint32
|
||||
Filler uint32
|
||||
Device []uint64
|
||||
}
|
||||
// Info represents information about a device.
|
||||
Info struct {
|
||||
Exists int
|
||||
Suspended int
|
||||
LiveTable int
|
||||
InactiveTable int
|
||||
OpenCount int32
|
||||
EventNr uint32
|
||||
Major uint32
|
||||
Minor uint32
|
||||
ReadOnly int
|
||||
TargetCount int32
|
||||
DeferredRemove int
|
||||
}
|
||||
// TaskType represents a type of task
|
||||
TaskType int
|
||||
// AddNodeType represents a type of node to be added
|
||||
AddNodeType int
|
||||
)
|
||||
|
||||
// DeviceIDExists returns whether error conveys the information about device Id already
|
||||
// exist or not. This will be true if device creation or snap creation
|
||||
// operation fails if device or snap device already exists in pool.
|
||||
// Current implementation is little crude as it scans the error string
|
||||
// for exact pattern match. Replacing it with more robust implementation
|
||||
// is desirable.
|
||||
func DeviceIDExists(err error) bool {
|
||||
return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIDExists)
|
||||
}
|
||||
|
||||
func (t *Task) destroy() {
|
||||
if t != nil {
|
||||
DmTaskDestroy(t.unmanaged)
|
||||
runtime.SetFinalizer(t, nil)
|
||||
}
|
||||
}
|
||||
|
||||
// TaskCreateNamed is a convenience function for TaskCreate when a name
|
||||
// will be set on the task as well
|
||||
func TaskCreateNamed(t TaskType, name string) (*Task, error) {
|
||||
task := TaskCreate(t)
|
||||
if task == nil {
|
||||
return nil, fmt.Errorf("devicemapper: Can't create task of type %d", int(t))
|
||||
}
|
||||
if err := task.setName(name); err != nil {
|
||||
return nil, fmt.Errorf("devicemapper: Can't set task name %s", name)
|
||||
}
|
||||
return task, nil
|
||||
}
|
||||
|
||||
// TaskCreate initializes a devicemapper task of tasktype
|
||||
func TaskCreate(tasktype TaskType) *Task {
|
||||
Ctask := DmTaskCreate(int(tasktype))
|
||||
if Ctask == nil {
|
||||
return nil
|
||||
}
|
||||
task := &Task{unmanaged: Ctask}
|
||||
runtime.SetFinalizer(task, (*Task).destroy)
|
||||
return task
|
||||
}
|
||||
|
||||
func (t *Task) run() error {
|
||||
if res := DmTaskRun(t.unmanaged); res != 1 {
|
||||
return ErrTaskRun
|
||||
}
|
||||
runtime.KeepAlive(t)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Task) setName(name string) error {
|
||||
if res := DmTaskSetName(t.unmanaged, name); res != 1 {
|
||||
return ErrTaskSetName
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Task) setMessage(message string) error {
|
||||
if res := DmTaskSetMessage(t.unmanaged, message); res != 1 {
|
||||
return ErrTaskSetMessage
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Task) setSector(sector uint64) error {
|
||||
if res := DmTaskSetSector(t.unmanaged, sector); res != 1 {
|
||||
return ErrTaskSetSector
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Task) setCookie(cookie *uint, flags uint16) error {
|
||||
if cookie == nil {
|
||||
return ErrNilCookie
|
||||
}
|
||||
if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 {
|
||||
return ErrTaskSetCookie
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Task) setAddNode(addNode AddNodeType) error {
|
||||
if addNode != addNodeOnResume && addNode != addNodeOnCreate {
|
||||
return ErrInvalidAddNode
|
||||
}
|
||||
if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 {
|
||||
return ErrTaskSetAddNode
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Task) addTarget(start, size uint64, ttype, params string) error {
|
||||
if res := DmTaskAddTarget(t.unmanaged, start, size,
|
||||
ttype, params); res != 1 {
|
||||
return ErrTaskAddTarget
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Task) getDeps() (*Deps, error) { //nolint:unused
|
||||
var deps *Deps
|
||||
if deps = DmTaskGetDeps(t.unmanaged); deps == nil {
|
||||
return nil, ErrTaskGetDeps
|
||||
}
|
||||
return deps, nil
|
||||
}
|
||||
|
||||
func (t *Task) getInfo() (*Info, error) {
|
||||
info := &Info{}
|
||||
if res := DmTaskGetInfo(t.unmanaged, info); res != 1 {
|
||||
return nil, ErrTaskGetInfo
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
|
||||
func (t *Task) getInfoWithDeferred() (*Info, error) {
|
||||
info := &Info{}
|
||||
if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 {
|
||||
return nil, ErrTaskGetInfo
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
|
||||
func (t *Task) getDriverVersion() (string, error) {
|
||||
res := DmTaskGetDriverVersion(t.unmanaged)
|
||||
if res == "" {
|
||||
return "", ErrTaskGetDriverVersion
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64,
|
||||
length uint64, targetType string, params string,
|
||||
) {
|
||||
return DmGetNextTarget(t.unmanaged, next, &start, &length,
|
||||
&targetType, ¶ms),
|
||||
start, length, targetType, params
|
||||
}
|
||||
|
||||
// UdevWait waits for any processes that are waiting for udev to complete the specified cookie.
|
||||
func UdevWait(cookie *uint) error {
|
||||
if res := DmUdevWait(*cookie); res != 1 {
|
||||
logrus.Debugf("devicemapper: Failed to wait on udev cookie %d, %d", *cookie, res)
|
||||
return ErrUdevWait
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetDevDir sets the dev folder for the device mapper library (usually /dev).
|
||||
func SetDevDir(dir string) error {
|
||||
if res := DmSetDevDir(dir); res != 1 {
|
||||
logrus.Debug("devicemapper: Error dm_set_dev_dir")
|
||||
return ErrSetDevDir
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetLibraryVersion returns the device mapper library version.
|
||||
func GetLibraryVersion() (string, error) {
|
||||
var version string
|
||||
if res := DmGetLibraryVersion(&version); res != 1 {
|
||||
return "", ErrGetLibraryVersion
|
||||
}
|
||||
return version, nil
|
||||
}
|
||||
|
||||
// UdevSyncSupported returns whether device-mapper is able to sync with udev
|
||||
//
|
||||
// This is essential otherwise race conditions can arise where both udev and
|
||||
// device-mapper attempt to create and destroy devices.
|
||||
func UdevSyncSupported() bool {
|
||||
return DmUdevGetSyncSupport() != 0
|
||||
}
|
||||
|
||||
// UdevSetSyncSupport allows setting whether the udev sync should be enabled.
|
||||
// The return bool indicates the state of whether the sync is enabled.
|
||||
func UdevSetSyncSupport(enable bool) bool {
|
||||
if enable {
|
||||
DmUdevSetSyncSupport(1)
|
||||
} else {
|
||||
DmUdevSetSyncSupport(0)
|
||||
}
|
||||
|
||||
return UdevSyncSupported()
|
||||
}
|
||||
|
||||
// CookieSupported returns whether the version of device-mapper supports the
|
||||
// use of cookie's in the tasks.
|
||||
// This is largely a lower level call that other functions use.
|
||||
func CookieSupported() bool {
|
||||
return DmCookieSupported() != 0
|
||||
}
|
||||
|
||||
// RemoveDevice is a useful helper for cleaning up a device.
|
||||
func RemoveDevice(name string) error {
|
||||
task, err := TaskCreateNamed(deviceRemove, name)
|
||||
if task == nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cookie := new(uint)
|
||||
if err := task.setCookie(cookie, 0); err != nil {
|
||||
return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
|
||||
}
|
||||
defer UdevWait(cookie)
|
||||
|
||||
dmSawBusy = false // reset before the task is run
|
||||
dmSawEnxio = false
|
||||
if err = task.run(); err != nil {
|
||||
if dmSawBusy {
|
||||
return ErrBusy
|
||||
}
|
||||
if dmSawEnxio {
|
||||
return ErrEnxio
|
||||
}
|
||||
return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveDeviceDeferred is a useful helper for cleaning up a device, but deferred.
|
||||
func RemoveDeviceDeferred(name string) error {
|
||||
logrus.Debugf("devicemapper: RemoveDeviceDeferred START(%s)", name)
|
||||
defer logrus.Debugf("devicemapper: RemoveDeviceDeferred END(%s)", name)
|
||||
task, err := TaskCreateNamed(deviceRemove, name)
|
||||
if task == nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := DmTaskDeferredRemove(task.unmanaged); err != 1 {
|
||||
return ErrTaskDeferredRemove
|
||||
}
|
||||
|
||||
// set a task cookie and disable library fallback, or else libdevmapper will
|
||||
// disable udev dm rules and delete the symlink under /dev/mapper by itself,
|
||||
// even if the removal is deferred by the kernel.
|
||||
cookie := new(uint)
|
||||
flags := uint16(DmUdevDisableLibraryFallback)
|
||||
if err := task.setCookie(cookie, flags); err != nil {
|
||||
return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
|
||||
}
|
||||
|
||||
// libdevmapper and udev relies on System V semaphore for synchronization,
|
||||
// semaphores created in `task.setCookie` will be cleaned up in `UdevWait`.
|
||||
// So these two function call must come in pairs, otherwise semaphores will
|
||||
// be leaked, and the limit of number of semaphores defined in `/proc/sys/kernel/sem`
|
||||
// will be reached, which will eventually make all following calls to 'task.SetCookie'
|
||||
// fail.
|
||||
// this call will not wait for the deferred removal's final executing, since no
|
||||
// udev event will be generated, and the semaphore's value will not be incremented
|
||||
// by udev, what UdevWait is just cleaning up the semaphore.
|
||||
defer UdevWait(cookie)
|
||||
|
||||
dmSawEnxio = false
|
||||
if err = task.run(); err != nil {
|
||||
if dmSawEnxio {
|
||||
return ErrEnxio
|
||||
}
|
||||
return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CancelDeferredRemove cancels a deferred remove for a device.
|
||||
func CancelDeferredRemove(deviceName string) error {
|
||||
task, err := TaskCreateNamed(deviceTargetMsg, deviceName)
|
||||
if task == nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := task.setSector(0); err != nil {
|
||||
return fmt.Errorf("devicemapper: Can't set sector %s", err)
|
||||
}
|
||||
|
||||
if err := task.setMessage("@cancel_deferred_remove"); err != nil {
|
||||
return fmt.Errorf("devicemapper: Can't set message %s", err)
|
||||
}
|
||||
|
||||
dmSawBusy = false
|
||||
dmSawEnxio = false
|
||||
if err := task.run(); err != nil {
|
||||
// A device might be being deleted already
|
||||
if dmSawBusy {
|
||||
return ErrBusy
|
||||
} else if dmSawEnxio {
|
||||
return ErrEnxio
|
||||
}
|
||||
return fmt.Errorf("devicemapper: Error running CancelDeferredRemove %s", err)
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetBlockDeviceSize returns the size of a block device identified by the specified file.
|
||||
func GetBlockDeviceSize(file *os.File) (uint64, error) {
|
||||
size, err := ioctlBlkGetSize64(file.Fd())
|
||||
if err != nil {
|
||||
logrus.Errorf("devicemapper: Error getblockdevicesize: %s", err)
|
||||
return 0, ErrGetBlockSize
|
||||
}
|
||||
return uint64(size), nil
|
||||
}
|
||||
|
||||
// BlockDeviceDiscard runs discard for the given path.
|
||||
// This is used as a workaround for the kernel not discarding block so
|
||||
// on the thin pool when we remove a thinp device, so we do it
|
||||
// manually
|
||||
func BlockDeviceDiscard(path string) error {
|
||||
file, err := os.OpenFile(path, os.O_RDWR, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
size, err := GetBlockDeviceSize(file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Without this sometimes the remove of the device that happens after
|
||||
// discard fails with EBUSY.
|
||||
unix.Sync()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreatePool is the programmatic example of "dmsetup create".
|
||||
// It creates a device with the specified poolName, data and metadata file and block size.
|
||||
func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
|
||||
task, err := TaskCreateNamed(deviceCreate, poolName)
|
||||
if task == nil {
|
||||
return err
|
||||
}
|
||||
|
||||
size, err := GetBlockDeviceSize(dataFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("devicemapper: Can't get data size %s", err)
|
||||
}
|
||||
|
||||
params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
|
||||
if err := task.addTarget(0, size/512, "thin-pool", params); err != nil {
|
||||
return fmt.Errorf("devicemapper: Can't add target %s", err)
|
||||
}
|
||||
|
||||
cookie := new(uint)
|
||||
flags := uint16(DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag)
|
||||
if err := task.setCookie(cookie, flags); err != nil {
|
||||
return fmt.Errorf("devicemapper: Can't set cookie %s", err)
|
||||
}
|
||||
defer UdevWait(cookie)
|
||||
|
||||
if err := task.run(); err != nil {
|
||||
return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReloadPool is the programmatic example of "dmsetup reload".
|
||||
// It reloads the table with the specified poolName, data and metadata file and block size.
|
||||
func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
|
||||
task, err := TaskCreateNamed(deviceReload, poolName)
|
||||
if task == nil {
|
||||
return err
|
||||
}
|
||||
|
||||
size, err := GetBlockDeviceSize(dataFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("devicemapper: Can't get data size %s", err)
|
||||
}
|
||||
|
||||
params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
|
||||
if err := task.addTarget(0, size/512, "thin-pool", params); err != nil {
|
||||
return fmt.Errorf("devicemapper: Can't add target %s", err)
|
||||
}
|
||||
|
||||
if err := task.run(); err != nil {
|
||||
return fmt.Errorf("devicemapper: Error running ReloadPool %s", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetDeps is the programmatic example of "dmsetup deps".
|
||||
// It outputs a list of devices referenced by the live table for the specified device.
|
||||
func GetDeps(name string) (*Deps, error) {
|
||||
task, err := TaskCreateNamed(deviceDeps, name)
|
||||
if task == nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := task.run(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return task.getDeps()
|
||||
}
|
||||
|
||||
// GetInfo is the programmatic example of "dmsetup info".
|
||||
// It outputs some brief information about the device.
|
||||
func GetInfo(name string) (*Info, error) {
|
||||
task, err := TaskCreateNamed(deviceInfo, name)
|
||||
if task == nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := task.run(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return task.getInfo()
|
||||
}
|
||||
|
||||
// GetInfoWithDeferred is the programmatic example of "dmsetup info", but deferred.
|
||||
// It outputs some brief information about the device.
|
||||
func GetInfoWithDeferred(name string) (*Info, error) {
|
||||
task, err := TaskCreateNamed(deviceInfo, name)
|
||||
if task == nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := task.run(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return task.getInfoWithDeferred()
|
||||
}
|
||||
|
||||
// GetDriverVersion is the programmatic example of "dmsetup version".
|
||||
// It outputs version information of the driver.
|
||||
func GetDriverVersion() (string, error) {
|
||||
task := TaskCreate(deviceVersion)
|
||||
if task == nil {
|
||||
return "", fmt.Errorf("devicemapper: Can't create deviceVersion task")
|
||||
}
|
||||
if err := task.run(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return task.getDriverVersion()
|
||||
}
|
||||
|
||||
// GetStatus is the programmatic example of "dmsetup status".
|
||||
// It outputs status information for the specified device name.
|
||||
func GetStatus(name string) (uint64, uint64, string, string, error) {
|
||||
task, err := TaskCreateNamed(deviceStatus, name)
|
||||
if task == nil {
|
||||
logrus.Debugf("devicemapper: GetStatus() Error TaskCreateNamed: %s", err)
|
||||
return 0, 0, "", "", err
|
||||
}
|
||||
if err := task.run(); err != nil {
|
||||
logrus.Debugf("devicemapper: GetStatus() Error Run: %s", err)
|
||||
return 0, 0, "", "", err
|
||||
}
|
||||
|
||||
devinfo, err := task.getInfo()
|
||||
if err != nil {
|
||||
logrus.Debugf("devicemapper: GetStatus() Error GetInfo: %s", err)
|
||||
return 0, 0, "", "", err
|
||||
}
|
||||
if devinfo.Exists == 0 {
|
||||
logrus.Debugf("devicemapper: GetStatus() Non existing device %s", name)
|
||||
return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name)
|
||||
}
|
||||
|
||||
_, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil))
|
||||
return start, length, targetType, params, nil
|
||||
}
|
||||
|
||||
// GetTable is the programmatic example for "dmsetup table".
|
||||
// It outputs the current table for the specified device name.
|
||||
func GetTable(name string) (uint64, uint64, string, string, error) {
|
||||
task, err := TaskCreateNamed(deviceTable, name)
|
||||
if task == nil {
|
||||
logrus.Debugf("devicemapper: GetTable() Error TaskCreateNamed: %s", err)
|
||||
return 0, 0, "", "", err
|
||||
}
|
||||
if err := task.run(); err != nil {
|
||||
logrus.Debugf("devicemapper: GetTable() Error Run: %s", err)
|
||||
return 0, 0, "", "", err
|
||||
}
|
||||
|
||||
devinfo, err := task.getInfo()
|
||||
if err != nil {
|
||||
logrus.Debugf("devicemapper: GetTable() Error GetInfo: %s", err)
|
||||
return 0, 0, "", "", err
|
||||
}
|
||||
if devinfo.Exists == 0 {
|
||||
logrus.Debugf("devicemapper: GetTable() Non existing device %s", name)
|
||||
return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name)
|
||||
}
|
||||
|
||||
_, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil))
|
||||
return start, length, targetType, params, nil
|
||||
}
|
||||
|
||||
// SetTransactionID sets a transaction id for the specified device name.
|
||||
func SetTransactionID(poolName string, oldID uint64, newID uint64) error {
|
||||
task, err := TaskCreateNamed(deviceTargetMsg, poolName)
|
||||
if task == nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := task.setSector(0); err != nil {
|
||||
return fmt.Errorf("devicemapper: Can't set sector %s", err)
|
||||
}
|
||||
|
||||
if err := task.setMessage(fmt.Sprintf("set_transaction_id %d %d", oldID, newID)); err != nil {
|
||||
return fmt.Errorf("devicemapper: Can't set message %s", err)
|
||||
}
|
||||
|
||||
if err := task.run(); err != nil {
|
||||
return fmt.Errorf("devicemapper: Error running SetTransactionID %s", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SuspendDevice is the programmatic example of "dmsetup suspend".
|
||||
// It suspends the specified device.
|
||||
func SuspendDevice(name string) error {
|
||||
task, err := TaskCreateNamed(deviceSuspend, name)
|
||||
if task == nil {
|
||||
return err
|
||||
}
|
||||
if err := task.run(); err != nil {
|
||||
return fmt.Errorf("devicemapper: Error running deviceSuspend %s", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ResumeDevice is the programmatic example of "dmsetup resume".
|
||||
// It un-suspends the specified device.
|
||||
func ResumeDevice(name string) error {
|
||||
task, err := TaskCreateNamed(deviceResume, name)
|
||||
if task == nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cookie := new(uint)
|
||||
if err := task.setCookie(cookie, 0); err != nil {
|
||||
return fmt.Errorf("devicemapper: Can't set cookie %s", err)
|
||||
}
|
||||
defer UdevWait(cookie)
|
||||
|
||||
if err := task.run(); err != nil {
|
||||
return fmt.Errorf("devicemapper: Error running deviceResume %s", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateDevice creates a device with the specified poolName with the specified device id.
|
||||
func CreateDevice(poolName string, deviceID int) error {
|
||||
logrus.Debugf("devicemapper: CreateDevice(poolName=%v, deviceID=%v)", poolName, deviceID)
|
||||
task, err := TaskCreateNamed(deviceTargetMsg, poolName)
|
||||
if task == nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := task.setSector(0); err != nil {
|
||||
return fmt.Errorf("devicemapper: Can't set sector %s", err)
|
||||
}
|
||||
|
||||
if err := task.setMessage(fmt.Sprintf("create_thin %d", deviceID)); err != nil {
|
||||
return fmt.Errorf("devicemapper: Can't set message %s", err)
|
||||
}
|
||||
|
||||
dmSawExist = false // reset before the task is run
|
||||
if err := task.run(); err != nil {
|
||||
// Caller wants to know about ErrDeviceIDExists so that it can try with a different device id.
|
||||
if dmSawExist {
|
||||
return ErrDeviceIDExists
|
||||
}
|
||||
|
||||
return fmt.Errorf("devicemapper: Error running CreateDevice %s", err)
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteDevice deletes a device with the specified poolName with the specified device id.
|
||||
func DeleteDevice(poolName string, deviceID int) error {
|
||||
task, err := TaskCreateNamed(deviceTargetMsg, poolName)
|
||||
if task == nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := task.setSector(0); err != nil {
|
||||
return fmt.Errorf("devicemapper: Can't set sector %s", err)
|
||||
}
|
||||
|
||||
if err := task.setMessage(fmt.Sprintf("delete %d", deviceID)); err != nil {
|
||||
return fmt.Errorf("devicemapper: Can't set message %s", err)
|
||||
}
|
||||
|
||||
dmSawBusy = false
|
||||
if err := task.run(); err != nil {
|
||||
if dmSawBusy {
|
||||
return ErrBusy
|
||||
}
|
||||
return fmt.Errorf("devicemapper: Error running DeleteDevice %s", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ActivateDevice activates the device identified by the specified
|
||||
// poolName, name and deviceID with the specified size.
|
||||
func ActivateDevice(poolName string, name string, deviceID int, size uint64) error {
|
||||
return activateDevice(poolName, name, deviceID, size, "")
|
||||
}
|
||||
|
||||
// ActivateDeviceWithExternal activates the device identified by the specified
|
||||
// poolName, name and deviceID with the specified size, using the given external origin device.
|
||||
func ActivateDeviceWithExternal(poolName string, name string, deviceID int, size uint64, external string) error {
|
||||
return activateDevice(poolName, name, deviceID, size, external)
|
||||
}
|
||||
|
||||
func activateDevice(poolName string, name string, deviceID int, size uint64, external string) error {
|
||||
task, err := TaskCreateNamed(deviceCreate, name)
|
||||
if task == nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var params string
|
||||
if len(external) > 0 {
|
||||
params = fmt.Sprintf("%s %d %s", poolName, deviceID, external)
|
||||
} else {
|
||||
params = fmt.Sprintf("%s %d", poolName, deviceID)
|
||||
}
|
||||
if err := task.addTarget(0, size/512, "thin", params); err != nil {
|
||||
return fmt.Errorf("devicemapper: Can't add target %s", err)
|
||||
}
|
||||
if err := task.setAddNode(addNodeOnCreate); err != nil {
|
||||
return fmt.Errorf("devicemapper: Can't add node %s", err)
|
||||
}
|
||||
|
||||
cookie := new(uint)
|
||||
if err := task.setCookie(cookie, 0); err != nil {
|
||||
return fmt.Errorf("devicemapper: Can't set cookie %s", err)
|
||||
}
|
||||
|
||||
defer UdevWait(cookie)
|
||||
|
||||
if err := task.run(); err != nil {
|
||||
return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateSnapDeviceRaw creates a snapshot device. Caller needs to suspend and resume the origin device if it is active.
|
||||
func CreateSnapDeviceRaw(poolName string, deviceID int, baseDeviceID int) error {
|
||||
task, err := TaskCreateNamed(deviceTargetMsg, poolName)
|
||||
if task == nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := task.setSector(0); err != nil {
|
||||
return fmt.Errorf("devicemapper: Can't set sector %s", err)
|
||||
}
|
||||
|
||||
if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil {
|
||||
return fmt.Errorf("devicemapper: Can't set message %s", err)
|
||||
}
|
||||
|
||||
dmSawExist = false // reset before the task is run
|
||||
if err := task.run(); err != nil {
|
||||
// Caller wants to know about ErrDeviceIDExists so that it can try with a different device id.
|
||||
if dmSawExist {
|
||||
return ErrDeviceIDExists
|
||||
}
|
||||
return fmt.Errorf("devicemapper: Error running deviceCreate (CreateSnapDeviceRaw) %s", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateSnapDevice creates a snapshot based on the device identified by the baseName and baseDeviceID.
|
||||
func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error {
|
||||
devinfo, _ := GetInfo(baseName)
|
||||
doSuspend := devinfo != nil && devinfo.Exists != 0
|
||||
|
||||
if doSuspend {
|
||||
if err := SuspendDevice(baseName); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := CreateSnapDeviceRaw(poolName, deviceID, baseDeviceID); err != nil {
|
||||
if doSuspend {
|
||||
if err2 := ResumeDevice(baseName); err2 != nil {
|
||||
return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: %w", err, err2)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if doSuspend {
|
||||
if err := ResumeDevice(baseName); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
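Hedged usage sketch for the thin-provisioning helpers above; the pool path, device IDs and size are hypothetical and error handling is abbreviated:

```go
// Sketch only: not part of the vendored package. Assumes a thin pool is
// already set up at /dev/mapper/pool and that device IDs 1 and 2 are unused.
package main

import (
	"log"

	"github.com/containers/storage/pkg/devicemapper"
)

func main() {
	const pool = "/dev/mapper/pool"

	// Create thin device 1 inside the pool, then activate it as "base"
	// with a 1 GiB virtual size.
	if err := devicemapper.CreateDevice(pool, 1); err != nil {
		log.Fatal(err)
	}
	if err := devicemapper.ActivateDevice(pool, "base", 1, 1<<30); err != nil {
		log.Fatal(err)
	}

	// Snapshot the active base device into device ID 2. CreateSnapDevice
	// suspends and resumes "base" around the create_snap message itself.
	if err := devicemapper.CreateSnapDevice(pool, 2, "base", 1); err != nil {
		log.Fatal(err)
	}
}
```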
|
||||
123
vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go
generated
vendored
Normal file
|
|
@ -0,0 +1,123 @@
|
|||
//go:build linux && cgo
|
||||
// +build linux,cgo
|
||||
|
||||
package devicemapper
|
||||
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// DevmapperLogger defines methods required to register as a callback for
|
||||
// logging events received from devicemapper. Note that devicemapper will send
|
||||
// *all* logs regardless to callbacks (including debug logs) so it's
|
||||
// recommended to not spam the console with the outputs.
|
||||
type DevmapperLogger interface {
|
||||
// DMLog is the logging callback containing all of the information from
|
||||
// devicemapper. The interface is identical to the C libdm counterpart.
|
||||
DMLog(level int, file string, line int, dmError int, message string)
|
||||
}
|
||||
|
||||
// dmLogger is the current logger in use that is being forwarded our messages.
|
||||
var dmLogger DevmapperLogger
|
||||
|
||||
// LogInit changes the logging callback called after processing libdm logs for
|
||||
// error message information. The default logger simply forwards all logs to
|
||||
// logrus. Calling LogInit(nil) disables the calling of callbacks.
|
||||
func LogInit(logger DevmapperLogger) {
|
||||
dmLogger = logger
|
||||
}
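A caller-side sketch of the logging hook above; the logger type is invented for illustration:

```go
// Sketch only: forwards libdm messages at warning level or above to logrus.
package main

import (
	"github.com/containers/storage/pkg/devicemapper"
	"github.com/sirupsen/logrus"
)

type verboseLogger struct{}

// DMLog implements devicemapper.DevmapperLogger.
func (verboseLogger) DMLog(level int, file string, line int, dmError int, message string) {
	if level <= devicemapper.LogLevelWarn {
		logrus.Warnf("libdm %s:%d (%d): %s", file, line, dmError, message)
	}
}

func main() {
	devicemapper.LogInit(verboseLogger{})
}
```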
|
||||
|
||||
// Due to the way cgo works this has to be in a separate file, as devmapper.go has
|
||||
// definitions in the cgo block, which is incompatible with using "//export"
|
||||
|
||||
// StorageDevmapperLogCallback exports the devmapper log callback for cgo. Note that
|
||||
// because we are using callbacks, this function will be called for *every* log
|
||||
// in libdm (even debug ones because there's no way of setting the verbosity
|
||||
// level for an external logging callback).
|
||||
//
|
||||
//export StorageDevmapperLogCallback
|
||||
func StorageDevmapperLogCallback(level C.int, file *C.char, line, dmErrnoOrClass C.int, message *C.char) {
|
||||
msg := C.GoString(message)
|
||||
|
||||
// Track what errno libdm saw, because the library only gives us 0 or 1.
|
||||
if level < LogLevelDebug {
|
||||
if strings.Contains(msg, "busy") {
|
||||
dmSawBusy = true
|
||||
}
|
||||
|
||||
if strings.Contains(msg, "File exists") {
|
||||
dmSawExist = true
|
||||
}
|
||||
|
||||
if strings.Contains(msg, "No such device or address") {
|
||||
dmSawEnxio = true
|
||||
}
|
||||
}
|
||||
|
||||
if dmLogger != nil {
|
||||
dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg)
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultLogger is the default logger used by pkg/devicemapper. It forwards
|
||||
// all logs that are of higher or equal priority to the given level to the
|
||||
// corresponding logrus level.
|
||||
type DefaultLogger struct {
|
||||
// Level corresponds to the highest libdm level that will be forwarded to
|
||||
// logrus. In order to change this, register a new DefaultLogger.
|
||||
Level int
|
||||
}
|
||||
|
||||
// DMLog is the logging callback containing all of the information from
|
||||
// devicemapper. The interface is identical to the C libdm counterpart.
|
||||
func (l DefaultLogger) DMLog(level int, file string, line, dmError int, message string) {
|
||||
if level <= l.Level {
|
||||
// Forward the log to the corresponding logrus level, if allowed by l.Level.
|
||||
logMsg := fmt.Sprintf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
|
||||
switch level {
|
||||
case LogLevelFatal, LogLevelErr:
|
||||
logrus.Error(logMsg)
|
||||
case LogLevelWarn:
|
||||
logrus.Warn(logMsg)
|
||||
case LogLevelNotice, LogLevelInfo:
|
||||
logrus.Info(logMsg)
|
||||
case LogLevelDebug:
|
||||
logrus.Debug(logMsg)
|
||||
default:
|
||||
// Don't drop any "unknown" levels.
|
||||
logrus.Info(logMsg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// registerLogCallback registers our own logging callback function for libdm
|
||||
// (which is StorageDevmapperLogCallback).
|
||||
//
|
||||
// Because libdm only gives us {0,1} error codes we need to parse the logs
|
||||
// produced by libdm (to set dmSawBusy and so on). Note that by registering a
|
||||
// callback using StorageDevmapperLogCallback, libdm will no longer output logs to
|
||||
// stderr so we have to log everything ourselves. None of this handling is
|
||||
// optional because we depend on log callbacks to parse the logs, and if we
|
||||
// don't forward the log information we'll be in a lot of trouble when
|
||||
// debugging things.
|
||||
func registerLogCallback() {
|
||||
LogWithErrnoInit()
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Use the default logger by default. We only allow LogLevelFatal by
|
||||
// default, because internally we mask a lot of libdm errors by retrying
|
||||
// and similar tricks. Also, libdm is very chatty and we don't want to
|
||||
// worry users for no reason.
|
||||
dmLogger = DefaultLogger{
|
||||
Level: LogLevelFatal,
|
||||
}
|
||||
|
||||
// Register as early as possible so we don't miss anything.
|
||||
registerLogCallback()
|
||||
}
|
||||
252
vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go
generated
vendored
Normal file
|
|
@ -0,0 +1,252 @@
|
|||
//go:build linux && cgo
|
||||
// +build linux,cgo
|
||||
|
||||
package devicemapper
|
||||
|
||||
/*
|
||||
#define _GNU_SOURCE
|
||||
#include <libdevmapper.h>
|
||||
#include <linux/fs.h> // FIXME: present only for BLKGETSIZE64, maybe we can remove it?
|
||||
|
||||
// FIXME: Can't we find a way to do the logging in pure Go?
|
||||
extern void StorageDevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str);
|
||||
|
||||
static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...)
|
||||
{
|
||||
char *buffer = NULL;
|
||||
va_list ap;
|
||||
int ret;
|
||||
|
||||
va_start(ap, f);
|
||||
ret = vasprintf(&buffer, f, ap);
|
||||
va_end(ap);
|
||||
if (ret < 0) {
|
||||
// memory allocation failed -- should never happen?
|
||||
return;
|
||||
}
|
||||
|
||||
StorageDevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer);
|
||||
free(buffer);
|
||||
}
|
||||
|
||||
static void log_with_errno_init()
|
||||
{
|
||||
dm_log_with_errno_init(log_cb);
|
||||
}
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type (
|
||||
cdmTask C.struct_dm_task
|
||||
)
|
||||
|
||||
// IOCTL consts
|
||||
const (
|
||||
BlkGetSize64 = C.BLKGETSIZE64
|
||||
BlkDiscard = C.BLKDISCARD
|
||||
)
|
||||
|
||||
// Devicemapper cookie flags.
|
||||
const (
|
||||
DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG
|
||||
DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG
|
||||
DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG
|
||||
DmUdevDisableLibraryFallback = C.DM_UDEV_DISABLE_LIBRARY_FALLBACK
|
||||
)
|
||||
|
||||
// DeviceMapper mapped functions.
|
||||
var (
|
||||
DmGetLibraryVersion = dmGetLibraryVersionFct
|
||||
DmGetNextTarget = dmGetNextTargetFct
|
||||
DmSetDevDir = dmSetDevDirFct
|
||||
DmTaskAddTarget = dmTaskAddTargetFct
|
||||
DmTaskCreate = dmTaskCreateFct
|
||||
DmTaskDestroy = dmTaskDestroyFct
|
||||
DmTaskGetDeps = dmTaskGetDepsFct
|
||||
DmTaskGetInfo = dmTaskGetInfoFct
|
||||
DmTaskGetDriverVersion = dmTaskGetDriverVersionFct
|
||||
DmTaskRun = dmTaskRunFct
|
||||
DmTaskSetAddNode = dmTaskSetAddNodeFct
|
||||
DmTaskSetCookie = dmTaskSetCookieFct
|
||||
DmTaskSetMessage = dmTaskSetMessageFct
|
||||
DmTaskSetName = dmTaskSetNameFct
|
||||
DmTaskSetRo = dmTaskSetRoFct
|
||||
DmTaskSetSector = dmTaskSetSectorFct
|
||||
DmUdevWait = dmUdevWaitFct
|
||||
DmUdevSetSyncSupport = dmUdevSetSyncSupportFct
|
||||
DmUdevGetSyncSupport = dmUdevGetSyncSupportFct
|
||||
DmCookieSupported = dmCookieSupportedFct
|
||||
LogWithErrnoInit = logWithErrnoInitFct
|
||||
DmTaskDeferredRemove = dmTaskDeferredRemoveFct
|
||||
DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct
|
||||
)
|
||||
|
||||
func free(p *C.char) {
|
||||
C.free(unsafe.Pointer(p))
|
||||
}
|
||||
|
||||
func dmTaskDestroyFct(task *cdmTask) {
|
||||
C.dm_task_destroy((*C.struct_dm_task)(task))
|
||||
}
|
||||
|
||||
func dmTaskCreateFct(taskType int) *cdmTask {
|
||||
return (*cdmTask)(C.dm_task_create(C.int(taskType)))
|
||||
}
|
||||
|
||||
func dmTaskRunFct(task *cdmTask) int {
|
||||
ret, _ := C.dm_task_run((*C.struct_dm_task)(task))
|
||||
return int(ret)
|
||||
}
|
||||
|
||||
func dmTaskSetNameFct(task *cdmTask, name string) int {
|
||||
Cname := C.CString(name)
|
||||
defer free(Cname)
|
||||
|
||||
return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname))
|
||||
}
|
||||
|
||||
func dmTaskSetMessageFct(task *cdmTask, message string) int {
|
||||
Cmessage := C.CString(message)
|
||||
defer free(Cmessage)
|
||||
|
||||
return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage))
|
||||
}
|
||||
|
||||
func dmTaskSetSectorFct(task *cdmTask, sector uint64) int {
|
||||
return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector)))
|
||||
}
|
||||
|
||||
func dmTaskSetCookieFct(task *cdmTask, cookie *uint, flags uint16) int {
|
||||
cCookie := C.uint32_t(*cookie)
|
||||
defer func() {
|
||||
*cookie = uint(cCookie)
|
||||
}()
|
||||
return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags)))
|
||||
}
|
||||
|
||||
func dmTaskSetAddNodeFct(task *cdmTask, addNode AddNodeType) int {
|
||||
return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode)))
|
||||
}
|
||||
|
||||
func dmTaskSetRoFct(task *cdmTask) int {
|
||||
return int(C.dm_task_set_ro((*C.struct_dm_task)(task)))
|
||||
}
|
||||
|
||||
func dmTaskAddTargetFct(task *cdmTask,
|
||||
start, size uint64, ttype, params string,
|
||||
) int {
|
||||
Cttype := C.CString(ttype)
|
||||
defer free(Cttype)
|
||||
|
||||
Cparams := C.CString(params)
|
||||
defer free(Cparams)
|
||||
|
||||
return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams))
|
||||
}
|
||||
|
||||
func dmTaskGetDepsFct(task *cdmTask) *Deps {
|
||||
Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task))
|
||||
if Cdeps == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// golang issue: https://github.com/golang/go/issues/11925
|
||||
var devices []C.uint64_t
|
||||
devicesHdr := (*reflect.SliceHeader)(unsafe.Pointer(&devices))
|
||||
devicesHdr.Data = uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps)))
|
||||
devicesHdr.Len = int(Cdeps.count)
|
||||
devicesHdr.Cap = int(Cdeps.count)
|
||||
|
||||
deps := &Deps{
|
||||
Count: uint32(Cdeps.count),
|
||||
Filler: uint32(Cdeps.filler),
|
||||
}
|
||||
for _, device := range devices {
|
||||
deps.Device = append(deps.Device, uint64(device))
|
||||
}
|
||||
return deps
|
||||
}
|
||||
|
||||
func dmTaskGetInfoFct(task *cdmTask, info *Info) int {
|
||||
Cinfo := C.struct_dm_info{}
|
||||
defer func() {
|
||||
info.Exists = int(Cinfo.exists)
|
||||
info.Suspended = int(Cinfo.suspended)
|
||||
info.LiveTable = int(Cinfo.live_table)
|
||||
info.InactiveTable = int(Cinfo.inactive_table)
|
||||
info.OpenCount = int32(Cinfo.open_count)
|
||||
info.EventNr = uint32(Cinfo.event_nr)
|
||||
info.Major = uint32(Cinfo.major)
|
||||
info.Minor = uint32(Cinfo.minor)
|
||||
info.ReadOnly = int(Cinfo.read_only)
|
||||
info.TargetCount = int32(Cinfo.target_count)
|
||||
}()
|
||||
return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo))
|
||||
}
|
||||
|
||||
func dmTaskGetDriverVersionFct(task *cdmTask) string {
|
||||
buffer := C.malloc(128)
|
||||
defer C.free(buffer)
|
||||
res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128)
|
||||
if res == 0 {
|
||||
return ""
|
||||
}
|
||||
return C.GoString((*C.char)(buffer))
|
||||
}
|
||||
|
||||
func dmGetNextTargetFct(task *cdmTask, next unsafe.Pointer, start, length *uint64, target, params *string) unsafe.Pointer {
|
||||
var (
|
||||
Cstart, Clength C.uint64_t
|
||||
CtargetType, Cparams *C.char
|
||||
)
|
||||
defer func() {
|
||||
*start = uint64(Cstart)
|
||||
*length = uint64(Clength)
|
||||
*target = C.GoString(CtargetType)
|
||||
*params = C.GoString(Cparams)
|
||||
}()
|
||||
|
||||
nextp := C.dm_get_next_target((*C.struct_dm_task)(task), next, &Cstart, &Clength, &CtargetType, &Cparams)
|
||||
return nextp
|
||||
}
|
||||
|
||||
func dmUdevSetSyncSupportFct(syncWithUdev int) {
|
||||
(C.dm_udev_set_sync_support(C.int(syncWithUdev)))
|
||||
}
|
||||
|
||||
func dmUdevGetSyncSupportFct() int {
|
||||
return int(C.dm_udev_get_sync_support())
|
||||
}
|
||||
|
||||
func dmUdevWaitFct(cookie uint) int {
|
||||
return int(C.dm_udev_wait(C.uint32_t(cookie)))
|
||||
}
|
||||
|
||||
func dmCookieSupportedFct() int {
|
||||
return int(C.dm_cookie_supported())
|
||||
}
|
||||
|
||||
func logWithErrnoInitFct() {
|
||||
C.log_with_errno_init()
|
||||
}
|
||||
|
||||
func dmSetDevDirFct(dir string) int {
|
||||
Cdir := C.CString(dir)
|
||||
defer free(Cdir)
|
||||
|
||||
return int(C.dm_set_dev_dir(Cdir))
|
||||
}
|
||||
|
||||
func dmGetLibraryVersionFct(version *string) int {
|
||||
buffer := C.CString(string(make([]byte, 128)))
|
||||
defer free(buffer)
|
||||
defer func() {
|
||||
*version = C.GoString(buffer)
|
||||
}()
|
||||
return int(C.dm_get_library_version(buffer, 128))
|
||||
}
|
||||
32
vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go
generated
vendored
Normal file
|
|
@ -0,0 +1,32 @@
|
|||
//go:build linux && cgo && !libdm_no_deferred_remove
|
||||
// +build linux,cgo,!libdm_no_deferred_remove
|
||||
|
||||
package devicemapper
|
||||
|
||||
// #include <libdevmapper.h>
|
||||
import "C"
|
||||
|
||||
// LibraryDeferredRemovalSupport tells if the feature is enabled in the build
|
||||
const LibraryDeferredRemovalSupport = true
|
||||
|
||||
func dmTaskDeferredRemoveFct(task *cdmTask) int {
|
||||
return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task)))
|
||||
}
|
||||
|
||||
func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int {
|
||||
Cinfo := C.struct_dm_info{}
|
||||
defer func() {
|
||||
info.Exists = int(Cinfo.exists)
|
||||
info.Suspended = int(Cinfo.suspended)
|
||||
info.LiveTable = int(Cinfo.live_table)
|
||||
info.InactiveTable = int(Cinfo.inactive_table)
|
||||
info.OpenCount = int32(Cinfo.open_count)
|
||||
info.EventNr = uint32(Cinfo.event_nr)
|
||||
info.Major = uint32(Cinfo.major)
|
||||
info.Minor = uint32(Cinfo.minor)
|
||||
info.ReadOnly = int(Cinfo.read_only)
|
||||
info.TargetCount = int32(Cinfo.target_count)
|
||||
info.DeferredRemove = int(Cinfo.deferred_remove)
|
||||
}()
|
||||
return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo))
|
||||
}
|
||||
7
vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go
generated
vendored
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
//go:build linux && cgo && !static_build
|
||||
// +build linux,cgo,!static_build
|
||||
|
||||
package devicemapper
|
||||
|
||||
// #cgo pkg-config: devmapper
|
||||
import "C"
|
||||
16
vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go
generated
vendored
Normal file
|
|
@ -0,0 +1,16 @@
|
|||
//go:build linux && cgo && libdm_no_deferred_remove
|
||||
// +build linux,cgo,libdm_no_deferred_remove
|
||||
|
||||
package devicemapper
|
||||
|
||||
// LibraryDeferredRemovalSupport tells if the feature is enabled in the build
|
||||
const LibraryDeferredRemovalSupport = false
|
||||
|
||||
func dmTaskDeferredRemoveFct(task *cdmTask) int {
|
||||
// Error. Nobody should be calling it.
|
||||
return -1
|
||||
}
|
||||
|
||||
func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int {
|
||||
return -1
|
||||
}
|
||||
7
vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go
generated
vendored
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
//go:build linux && cgo && static_build
|
||||
// +build linux,cgo,static_build
|
||||
|
||||
package devicemapper
|
||||
|
||||
// #cgo pkg-config: --static devmapper
|
||||
import "C"
|
||||
29
vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go
generated
vendored
Normal file
|
|
@ -0,0 +1,29 @@
|
|||
//go:build linux && cgo
|
||||
// +build linux,cgo
|
||||
|
||||
package devicemapper
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func ioctlBlkGetSize64(fd uintptr) (int64, error) {
|
||||
var size int64
|
||||
if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 {
|
||||
return 0, err
|
||||
}
|
||||
return size, nil
|
||||
}
|
||||
|
||||
func ioctlBlkDiscard(fd uintptr, offset, length uint64) error {
|
||||
var r [2]uint64
|
||||
r[0] = offset
|
||||
r[1] = length
|
||||
|
||||
if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
11
vendor/github.com/containers/storage/pkg/devicemapper/log.go
generated
vendored
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
package devicemapper
|
||||
|
||||
// definitions from lvm2 lib/log/log.h
|
||||
const (
|
||||
LogLevelFatal = 2 + iota // _LOG_FATAL
|
||||
LogLevelErr // _LOG_ERR
|
||||
LogLevelWarn // _LOG_WARN
|
||||
LogLevelNotice // _LOG_NOTICE
|
||||
LogLevelInfo // _LOG_INFO
|
||||
LogLevelDebug // _LOG_DEBUG
|
||||
)
|
||||
31
vendor/github.com/containers/storage/pkg/directory/directory.go
generated
vendored
Normal file
|
|
@ -0,0 +1,31 @@
|
|||
package directory
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// DiskUsage is a structure that describes the disk usage (size and inode count)
|
||||
// of a particular directory.
|
||||
type DiskUsage struct {
|
||||
Size int64
|
||||
InodeCount int64
|
||||
}
|
||||
|
||||
// MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path
|
||||
func MoveToSubdir(oldpath, subdir string) error {
|
||||
infos, err := os.ReadDir(oldpath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, info := range infos {
|
||||
if info.Name() != subdir {
|
||||
oldName := filepath.Join(oldpath, info.Name())
|
||||
newName := filepath.Join(oldpath, subdir, info.Name())
|
||||
if err := os.Rename(oldName, newName); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
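A hedged example of MoveToSubdir; the paths are invented and the target subdirectory is assumed to exist already, since the function only renames entries:

```go
// Sketch only: move everything under /var/lib/example into
// /var/lib/example/backup.
package main

import (
	"log"

	"github.com/containers/storage/pkg/directory"
)

func main() {
	if err := directory.MoveToSubdir("/var/lib/example", "backup"); err != nil {
		log.Fatal(err)
	}
}
```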
|
||||
62
vendor/github.com/containers/storage/pkg/directory/directory_unix.go
generated
vendored
Normal file
|
|
@ -0,0 +1,62 @@
|
|||
//go:build linux || darwin || freebsd || solaris
|
||||
// +build linux darwin freebsd solaris
|
||||
|
||||
package directory
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// Size walks a directory tree and returns its total size in bytes.
|
||||
func Size(dir string) (size int64, err error) {
|
||||
usage, err := Usage(dir)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return usage.Size, nil
|
||||
}
|
||||
|
||||
// Usage walks a directory tree and returns its total size in bytes and the number of inodes.
|
||||
func Usage(dir string) (usage *DiskUsage, err error) {
|
||||
usage = &DiskUsage{}
|
||||
data := make(map[uint64]struct{})
|
||||
err = filepath.WalkDir(dir, func(d string, entry fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
// if dir does not exist, Usage() returns the error.
|
||||
// if dir/x disappeared while walking, Usage() ignores dir/x.
|
||||
if os.IsNotExist(err) && d != dir {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
fileInfo, err := entry.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check inode to only count the sizes of files with multiple hard links once.
|
||||
inode := fileInfo.Sys().(*syscall.Stat_t).Ino
|
||||
// inode is not a uint64 on all platforms. Cast it to avoid issues.
|
||||
if _, exists := data[uint64(inode)]; exists {
|
||||
return nil
|
||||
}
|
||||
|
||||
// inode is not a uint64 on all platforms. Cast it to avoid issues.
|
||||
data[uint64(inode)] = struct{}{}
|
||||
// Ignore directory sizes
|
||||
if entry.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
usage.Size += fileInfo.Size()
|
||||
|
||||
return nil
|
||||
})
|
||||
// inode count is the number of unique inode numbers we saw
|
||||
usage.InodeCount = int64(len(data))
|
||||
return
|
||||
}
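A short usage sketch for Usage; the path is hypothetical:

```go
// Sketch only: report the size and inode count of a directory tree.
package main

import (
	"fmt"
	"log"

	"github.com/containers/storage/pkg/directory"
)

func main() {
	usage, err := directory.Usage("/var/lib/containers/storage")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d bytes across %d inodes\n", usage.Size, usage.InodeCount)
}
```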
|
||||
50
vendor/github.com/containers/storage/pkg/directory/directory_windows.go
generated
vendored
Normal file
|
|
@ -0,0 +1,50 @@
|
|||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package directory
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// Size walks a directory tree and returns its total size in bytes
|
||||
func Size(dir string) (size int64, err error) {
|
||||
usage, err := Usage(dir)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return usage.Size, nil
|
||||
}
|
||||
|
||||
// Usage walks a directory tree and returns its total size in bytes and the number of inodes.
|
||||
func Usage(dir string) (usage *DiskUsage, err error) {
|
||||
usage = &DiskUsage{}
|
||||
err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
// if dir does not exist, Size() returns the error.
|
||||
// if dir/x disappeared while walking, Size() ignores dir/x.
|
||||
if os.IsNotExist(err) && path != dir {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
usage.InodeCount++
|
||||
|
||||
// Ignore directory sizes
|
||||
if d.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
fileInfo, err := d.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
usage.Size += fileInfo.Size()
|
||||
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}
|
||||
21
vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go
generated
vendored
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package dmesg
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Dmesg returns last messages from the kernel log, up to size bytes
|
||||
func Dmesg(size int) []byte {
|
||||
t := uintptr(3) // SYSLOG_ACTION_READ_ALL
|
||||
b := make([]byte, size)
|
||||
amt, _, err := unix.Syscall(unix.SYS_SYSLOG, t, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)))
|
||||
if err != 0 {
|
||||
return []byte{}
|
||||
}
|
||||
return b[:amt]
|
||||
}
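An illustrative call, Linux only; the 64 KiB buffer size is an arbitrary choice:

```go
// Sketch only: dump up to 64 KiB of the kernel ring buffer.
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/dmesg"
)

func main() {
	fmt.Printf("%s\n", dmesg.Dmesg(64*1024))
}
```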
|
||||
88
vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go
generated
vendored
Normal file
|
|
@ -0,0 +1,88 @@
|
|||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package fsutils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func locateDummyIfEmpty(path string) (string, error) {
|
||||
children, err := os.ReadDir(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(children) != 0 {
|
||||
return "", nil
|
||||
}
|
||||
dummyFile, err := os.CreateTemp(path, "fsutils-dummy")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
name := dummyFile.Name()
|
||||
err = dummyFile.Close()
|
||||
return name, err
|
||||
}
|
||||
|
||||
// SupportsDType returns whether the filesystem mounted on path supports d_type
|
||||
func SupportsDType(path string) (bool, error) {
|
||||
// locate dummy so that we have at least one dirent
|
||||
dummy, err := locateDummyIfEmpty(path)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if dummy != "" {
|
||||
defer os.Remove(dummy)
|
||||
}
|
||||
|
||||
visited := 0
|
||||
supportsDType := true
|
||||
fn := func(ent *unix.Dirent) bool {
|
||||
visited++
|
||||
if ent.Type == unix.DT_UNKNOWN {
|
||||
supportsDType = false
|
||||
// stop iteration
|
||||
return true
|
||||
}
|
||||
// continue iteration
|
||||
return false
|
||||
}
|
||||
if err = iterateReadDir(path, fn); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if visited == 0 {
|
||||
return false, fmt.Errorf("did not hit any dirent during iteration %s", path)
|
||||
}
|
||||
return supportsDType, nil
|
||||
}
|
||||
|
||||
func iterateReadDir(path string, fn func(*unix.Dirent) bool) error {
|
||||
d, err := os.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer d.Close()
|
||||
fd := int(d.Fd())
|
||||
buf := make([]byte, 4096)
|
||||
for {
|
||||
nbytes, err := unix.ReadDirent(fd, buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if nbytes == 0 {
|
||||
break
|
||||
}
|
||||
for off := 0; off < nbytes; {
|
||||
ent := (*unix.Dirent)(unsafe.Pointer(&buf[off]))
|
||||
if stop := fn(ent); stop {
|
||||
return nil
|
||||
}
|
||||
off += int(ent.Reclen)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
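A hedged usage sketch for SupportsDType; the mount point is hypothetical:

```go
// Sketch only: check whether the filesystem backing a storage root reports
// d_type in its directory entries.
package main

import (
	"fmt"
	"log"

	"github.com/containers/storage/pkg/fsutils"
)

func main() {
	ok, err := fsutils.SupportsDType("/var/lib/containers/storage")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("d_type supported:", ok)
}
```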
|
||||
88
vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go
generated
vendored
Normal file
|
|
@ -0,0 +1,88 @@
|
|||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package idmap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"syscall"
|
||||
|
||||
"github.com/containers/storage/pkg/idtools"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// CreateIDMappedMount creates an IDMapped bind mount from SOURCE to TARGET using the user namespace
|
||||
// for the PID process.
|
||||
func CreateIDMappedMount(source, target string, pid int) error {
|
||||
path := fmt.Sprintf("/proc/%d/ns/user", pid)
|
||||
userNsFile, err := os.Open(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to get user ns file descriptor for %q: %w", path, err)
|
||||
}
|
||||
defer userNsFile.Close()
|
||||
|
||||
targetDirFd, err := unix.OpenTree(0, source, unix.OPEN_TREE_CLONE)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer unix.Close(targetDirFd)
|
||||
|
||||
if err := unix.MountSetattr(targetDirFd, "", unix.AT_EMPTY_PATH|unix.AT_RECURSIVE,
|
||||
&unix.MountAttr{
|
||||
Attr_set: unix.MOUNT_ATTR_IDMAP,
|
||||
Userns_fd: uint64(userNsFile.Fd()),
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Mkdir(target, 0o700); err != nil && !os.IsExist(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
return unix.MoveMount(targetDirFd, "", 0, target, unix.MOVE_MOUNT_F_EMPTY_PATH)
|
||||
}
|
||||
|
||||
// CreateUsernsProcess forks the current process and creates a user namespace using the specified
|
||||
// mappings. It returns the pid of the new process.
|
||||
func CreateUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int, func(), error) {
|
||||
var pid uintptr
|
||||
var err syscall.Errno
|
||||
|
||||
if runtime.GOARCH == "s390x" {
|
||||
pid, _, err = syscall.Syscall6(uintptr(unix.SYS_CLONE), 0, unix.CLONE_NEWUSER|uintptr(unix.SIGCHLD), 0, 0, 0, 0)
|
||||
} else {
|
||||
pid, _, err = syscall.Syscall6(uintptr(unix.SYS_CLONE), unix.CLONE_NEWUSER|uintptr(unix.SIGCHLD), 0, 0, 0, 0, 0)
|
||||
}
|
||||
if err != 0 {
|
||||
return -1, nil, err
|
||||
}
|
||||
if pid == 0 {
|
||||
_ = unix.Prctl(unix.PR_SET_PDEATHSIG, uintptr(unix.SIGKILL), 0, 0, 0)
|
||||
// just wait for the SIGKILL
|
||||
for {
|
||||
syscall.Pause()
|
||||
}
|
||||
}
|
||||
cleanupFunc := func() {
|
||||
unix.Kill(int(pid), unix.SIGKILL)
|
||||
_, _ = unix.Wait4(int(pid), nil, 0, nil)
|
||||
}
|
||||
writeMappings := func(fname string, idmap []idtools.IDMap) error {
|
||||
mappings := ""
|
||||
for _, m := range idmap {
|
||||
mappings = mappings + fmt.Sprintf("%d %d %d\n", m.ContainerID, m.HostID, m.Size)
|
||||
}
|
||||
return os.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0o600)
|
||||
}
|
||||
if err := writeMappings("uid_map", uidMaps); err != nil {
|
||||
cleanupFunc()
|
||||
return -1, nil, err
|
||||
}
|
||||
if err := writeMappings("gid_map", gidMaps); err != nil {
|
||||
cleanupFunc()
|
||||
return -1, nil, err
|
||||
}
|
||||
|
||||
return int(pid), cleanupFunc, nil
|
||||
}
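A heavily hedged sketch tying the two helpers together; the ID mapping values and paths are hypothetical, and the calls need a kernel with MOUNT_ATTR_IDMAP support plus sufficient privileges:

```go
// Sketch only: build a throwaway user namespace that maps container ID 0 to
// host ID 100000, then create an id-mapped bind mount with it.
package main

import (
	"log"

	"github.com/containers/storage/pkg/idmap"
	"github.com/containers/storage/pkg/idtools"
)

func main() {
	maps := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	pid, cleanup, err := idmap.CreateUsernsProcess(maps, maps)
	if err != nil {
		log.Fatal(err)
	}
	defer cleanup()

	if err := idmap.CreateIDMappedMount("/srv/lower", "/srv/idmapped", pid); err != nil {
		log.Fatal(err)
	}
}
```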
|
||||
22
vendor/github.com/containers/storage/pkg/idmap/idmapped_utils_unsupported.go
generated
vendored
Normal file
|
|
@ -0,0 +1,22 @@
|
|||
//go:build !linux
|
||||
// +build !linux
|
||||
|
||||
package idmap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/containers/storage/pkg/idtools"
|
||||
)
|
||||
|
||||
// CreateIDMappedMount creates an IDMapped bind mount from SOURCE to TARGET using the user namespace
|
||||
// for the PID process.
|
||||
func CreateIDMappedMount(source, target string, pid int) error {
|
||||
return fmt.Errorf("IDMapped mounts are not supported")
|
||||
}
|
||||
|
||||
// CreateUsernsProcess forks the current process and creates a user namespace using the specified
|
||||
// mappings. It returns the pid of the new process.
|
||||
func CreateUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int, func(), error) {
|
||||
return -1, nil, fmt.Errorf("IDMapped mounts are not supported")
|
||||
}
|
||||
65
vendor/github.com/containers/storage/pkg/locker/README.md
generated
vendored
Normal file
|
|
@ -0,0 +1,65 @@
|
|||
Locker
|
||||
=====
|
||||
|
||||
locker provides a mechanism for creating finer-grained locking to help
|
||||
free up more global locks to handle other tasks.
|
||||
|
||||
The implementation looks close to a sync.Mutex, however, the user must provide a
|
||||
reference to use to refer to the underlying lock when locking and unlocking,
|
||||
and unlock may generate an error.
|
||||
|
||||
If a lock with a given name does not exist when `Lock` is called, one is
|
||||
created.
|
||||
Lock references are automatically cleaned up on `Unlock` if nothing else is
|
||||
waiting for the lock.
|
||||
|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
package important
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containers/storage/pkg/locker"
|
||||
)
|
||||
|
||||
type important struct {
|
||||
locks *locker.Locker
|
||||
data map[string]interface{}
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func (i *important) Get(name string) interface{} {
|
||||
i.locks.Lock(name)
|
||||
defer i.locks.Unlock(name)
|
||||
return i.data[name]
|
||||
}
|
||||
|
||||
func (i *important) Create(name string, data interface{}) {
|
||||
i.locks.Lock(name)
|
||||
defer i.locks.Unlock(name)
|
||||
|
||||
i.createImportant(data)
|
||||
|
||||
i.mu.Lock()
|
||||
i.data[name] = data
|
||||
i.mu.Unlock()
|
||||
}
|
||||
|
||||
func (i *important) createImportant(data interface{}) {
|
||||
time.Sleep(10 * time.Second)
|
||||
}
|
||||
```
|
||||
|
||||
For functions dealing with a given name, always lock at the beginning of the
|
||||
function (or before doing anything with the underlying state), this ensures any
|
||||
other function that is dealing with the same name will block.
|
||||
|
||||
When needing to modify the underlying data, use the global lock to ensure nothing
|
||||
else is modifying it at the same time.
|
||||
Since name lock is already in place, no reads will occur while the modification
|
||||
is being performed.
|
||||
|
||||
112
vendor/github.com/containers/storage/pkg/locker/locker.go
generated
vendored
Normal file
|
|
@ -0,0 +1,112 @@
|
|||
/*
|
||||
Package locker provides a mechanism for creating finer-grained locking to help
|
||||
free up more global locks to handle other tasks.
|
||||
|
||||
The implementation looks close to a sync.Mutex, however the user must provide a
|
||||
reference to use to refer to the underlying lock when locking and unlocking,
|
||||
and unlock may generate an error.
|
||||
|
||||
If a lock with a given name does not exist when `Lock` is called, one is
|
||||
created.
|
||||
Lock references are automatically cleaned up on `Unlock` if nothing else is
|
||||
waiting for the lock.
|
||||
*/
|
||||
package locker
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// ErrNoSuchLock is returned when the requested lock does not exist
|
||||
var ErrNoSuchLock = errors.New("no such lock")
|
||||
|
||||
// Locker provides a locking mechanism based on the passed in reference name
|
||||
type Locker struct {
|
||||
mu sync.Mutex
|
||||
locks map[string]*lockCtr
|
||||
}
|
||||
|
||||
// lockCtr is used by Locker to represent a lock with a given name.
|
||||
type lockCtr struct {
|
||||
mu sync.Mutex
|
||||
// waiters is the number of waiters waiting to acquire the lock
|
||||
// this is int32 instead of uint32 so we can add `-1` in `dec()`
|
||||
waiters int32
|
||||
}
|
||||
|
||||
// inc increments the number of waiters waiting for the lock
|
||||
func (l *lockCtr) inc() {
|
||||
atomic.AddInt32(&l.waiters, 1)
|
||||
}
|
||||
|
||||
// dec decrements the number of waiters waiting on the lock
|
||||
func (l *lockCtr) dec() {
|
||||
atomic.AddInt32(&l.waiters, -1)
|
||||
}
|
||||
|
||||
// count gets the current number of waiters
|
||||
func (l *lockCtr) count() int32 {
|
||||
return atomic.LoadInt32(&l.waiters)
|
||||
}
|
||||
|
||||
// Lock locks the mutex
|
||||
func (l *lockCtr) Lock() {
|
||||
l.mu.Lock()
|
||||
}
|
||||
|
||||
// Unlock unlocks the mutex
|
||||
func (l *lockCtr) Unlock() {
|
||||
l.mu.Unlock()
|
||||
}
|
||||
|
||||
// New creates a new Locker
|
||||
func New() *Locker {
|
||||
return &Locker{
|
||||
locks: make(map[string]*lockCtr),
|
||||
}
|
||||
}
|
||||
|
||||
// Lock locks a mutex with the given name. If it doesn't exist, one is created
|
||||
func (l *Locker) Lock(name string) {
|
||||
l.mu.Lock()
|
||||
if l.locks == nil {
|
||||
l.locks = make(map[string]*lockCtr)
|
||||
}
|
||||
|
||||
nameLock, exists := l.locks[name]
|
||||
if !exists {
|
||||
nameLock = &lockCtr{}
|
||||
l.locks[name] = nameLock
|
||||
}
|
||||
|
||||
// increment the nameLock waiters while inside the main mutex
|
||||
// this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently
|
||||
nameLock.inc()
|
||||
l.mu.Unlock()
|
||||
|
||||
// Lock the nameLock outside the main mutex so we don't block other operations
|
||||
// once locked then we can decrement the number of waiters for this lock
|
||||
nameLock.Lock()
|
||||
nameLock.dec()
|
||||
}
|
||||
|
||||
// Unlock unlocks the mutex with the given name
|
||||
// If the given lock is not being waited on by any other callers, it is deleted
|
||||
func (l *Locker) Unlock(name string) error {
|
||||
l.mu.Lock()
|
||||
nameLock, exists := l.locks[name]
|
||||
if !exists {
|
||||
l.mu.Unlock()
|
||||
return ErrNoSuchLock
|
||||
}
|
||||
|
||||
if nameLock.count() == 0 {
|
||||
delete(l.locks, name)
|
||||
}
|
||||
nameLock.Unlock()
|
||||
|
||||
l.mu.Unlock()
|
||||
return nil
|
||||
}
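A small sketch that, unlike the README example, checks the Unlock error; the lock name is hypothetical:

```go
// Sketch only: per-name locking with the Unlock error handled.
package main

import (
	"log"

	"github.com/containers/storage/pkg/locker"
)

func main() {
	l := locker.New()
	l.Lock("layer-abc")
	// ... critical section for "layer-abc" ...
	if err := l.Unlock("layer-abc"); err != nil {
		// ErrNoSuchLock indicates an Unlock without a matching Lock.
		log.Fatal(err)
	}
}
```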
|
||||
173
vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
generated
vendored
Normal file
|
|
@ -0,0 +1,173 @@
|
|||
//go:build linux && cgo
|
||||
// +build linux,cgo
|
||||
|
||||
package loopback
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Loopback related errors
|
||||
var (
|
||||
ErrAttachLoopbackDevice = errors.New("loopback attach failed")
|
||||
ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file")
|
||||
ErrSetCapacity            = errors.New("Unable to set loopback capacity")
|
||||
)
|
||||
|
||||
func stringToLoopName(src string) [LoNameSize]uint8 {
|
||||
var dst [LoNameSize]uint8
|
||||
copy(dst[:], src[:])
|
||||
return dst
|
||||
}
|
||||
|
||||
func getNextFreeLoopbackIndex() (int, error) {
|
||||
f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0o644)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
index, err := ioctlLoopCtlGetFree(f.Fd())
|
||||
if index < 0 {
|
||||
index = 0
|
||||
}
|
||||
return index, err
|
||||
}
|
||||
|
||||
func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File) (loopFile *os.File, err error) {
|
||||
// Read information about the loopback file.
|
||||
var st syscall.Stat_t
|
||||
err = syscall.Fstat(int(sparseFile.Fd()), &st)
|
||||
if err != nil {
|
||||
logrus.Errorf("Reading information about loopback file %s: %v", sparseName, err)
|
||||
return nil, ErrAttachLoopbackDevice
|
||||
}
|
||||
|
||||
// Start looking for a free /dev/loop
|
||||
for {
|
||||
target := fmt.Sprintf("/dev/loop%d", index)
|
||||
index++
|
||||
|
||||
fi, err := os.Stat(target)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
logrus.Error("There are no more loopback devices available.")
|
||||
}
|
||||
return nil, ErrAttachLoopbackDevice
|
||||
}
|
||||
|
||||
if fi.Mode()&os.ModeDevice != os.ModeDevice {
|
||||
logrus.Errorf("Loopback device %s is not a block device.", target)
|
||||
continue
|
||||
}
|
||||
|
||||
// OpenFile adds O_CLOEXEC
|
||||
loopFile, err = os.OpenFile(target, os.O_RDWR, 0o644)
|
||||
if err != nil {
|
||||
logrus.Errorf("Opening loopback device: %s", err)
|
||||
return nil, ErrAttachLoopbackDevice
|
||||
}
|
||||
|
||||
// Try to attach to the loop file
|
||||
if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil {
|
||||
loopFile.Close()
|
||||
|
||||
// If the error is EBUSY, then try the next loopback
|
||||
if err != syscall.EBUSY {
|
||||
logrus.Errorf("Cannot set up loopback device %s: %s", target, err)
|
||||
return nil, ErrAttachLoopbackDevice
|
||||
}
|
||||
|
||||
// Otherwise, we keep going with the loop
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if the loopback driver and underlying filesystem agree on the loopback file's
|
||||
// device and inode numbers.
|
||||
dev, ino, err := getLoopbackBackingFile(loopFile)
|
||||
if err != nil {
|
||||
logrus.Errorf("Getting loopback backing file: %s", err)
|
||||
return nil, ErrGetLoopbackBackingFile
|
||||
}
|
||||
if dev != uint64(st.Dev) || ino != st.Ino {
|
||||
logrus.Errorf("Loopback device and filesystem disagree on device/inode for %q: %#x(%d):%#x(%d) vs %#x(%d):%#x(%d)", sparseName, dev, dev, ino, ino, st.Dev, st.Dev, st.Ino, st.Ino)
|
||||
}
|
||||
|
||||
// In case of success, we finished. Break the loop.
|
||||
break
|
||||
}
|
||||
|
||||
// This can't happen, but let's be sure
|
||||
if loopFile == nil {
|
||||
logrus.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name())
|
||||
return nil, ErrAttachLoopbackDevice
|
||||
}
|
||||
|
||||
return loopFile, nil
|
||||
}
|
||||
|
||||
// AttachLoopDevice attaches the given sparse file to the next
|
||||
// available loopback device. It returns an opened *os.File.
|
||||
func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
|
||||
return attachLoopDevice(sparseName, false)
|
||||
}
|
||||
|
||||
// AttachLoopDeviceRO attaches the given sparse file opened read-only to
|
||||
// the next available loopback device. It returns an opened *os.File.
|
||||
func AttachLoopDeviceRO(sparseName string) (loop *os.File, err error) {
|
||||
return attachLoopDevice(sparseName, true)
|
||||
}
|
||||
|
||||
func attachLoopDevice(sparseName string, readonly bool) (loop *os.File, err error) {
|
||||
// Try to retrieve the next available loopback device via syscall.
|
||||
// If it fails, we discard the error and start looking for a
|
||||
// loopback from index 0.
|
||||
startIndex, err := getNextFreeLoopbackIndex()
|
||||
if err != nil {
|
||||
logrus.Debugf("Error retrieving the next available loopback: %s", err)
|
||||
}
|
||||
|
||||
var sparseFile *os.File
|
||||
|
||||
// OpenFile adds O_CLOEXEC
|
||||
if readonly {
|
||||
sparseFile, err = os.OpenFile(sparseName, os.O_RDONLY, 0o644)
|
||||
} else {
|
||||
sparseFile, err = os.OpenFile(sparseName, os.O_RDWR, 0o644)
|
||||
}
|
||||
if err != nil {
|
||||
logrus.Errorf("Opening sparse file: %v", err)
|
||||
return nil, ErrAttachLoopbackDevice
|
||||
}
|
||||
defer sparseFile.Close()
|
||||
|
||||
loopFile, err := openNextAvailableLoopback(startIndex, sparseName, sparseFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set the status of the loopback device
|
||||
loopInfo := &loopInfo64{
|
||||
loFileName: stringToLoopName(loopFile.Name()),
|
||||
loOffset: 0,
|
||||
loFlags: LoFlagsAutoClear,
|
||||
}
|
||||
|
||||
if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil {
|
||||
logrus.Errorf("Cannot set up loopback device info: %s", err)
|
||||
|
||||
// If the call failed, then free the loopback device
|
||||
if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
|
||||
logrus.Error("While cleaning up the loopback device")
|
||||
}
|
||||
loopFile.Close()
|
||||
return nil, ErrAttachLoopbackDevice
|
||||
}
|
||||
|
||||
return loopFile, nil
|
||||
}
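A hedged usage sketch; the sparse file path is hypothetical and root privileges are assumed:

```go
// Sketch only: attach a sparse file to the next free loop device, then ask
// the driver to re-read its capacity after the backing file was resized.
package main

import (
	"log"

	"github.com/containers/storage/pkg/loopback"
)

func main() {
	loopFile, err := loopback.AttachLoopDevice("/var/lib/example/sparse.img")
	if err != nil {
		log.Fatal(err)
	}
	defer loopFile.Close()

	// ... grow the backing file here ...

	if err := loopback.SetCapacity(loopFile); err != nil {
		log.Fatal(err)
	}
}
```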
|
||||
54
vendor/github.com/containers/storage/pkg/loopback/ioctl.go
generated
vendored
Normal file
|
|
@ -0,0 +1,54 @@
|
|||
//go:build linux && cgo
|
||||
// +build linux,cgo
|
||||
|
||||
package loopback
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func ioctlLoopCtlGetFree(fd uintptr) (int, error) {
|
||||
index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0)
|
||||
if err != 0 {
|
||||
return 0, err
|
||||
}
|
||||
return int(index), nil
|
||||
}
|
||||
|
||||
func ioctlLoopSetFd(loopFd, sparseFd uintptr) error {
|
||||
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *loopInfo64) error {
|
||||
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ioctlLoopClrFd(loopFd uintptr) error {
|
||||
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ioctlLoopGetStatus64(loopFd uintptr) (*loopInfo64, error) {
|
||||
loopInfo := &loopInfo64{}
|
||||
|
||||
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
|
||||
return nil, err
|
||||
}
|
||||
return loopInfo, nil
|
||||
}
|
||||
|
||||
func ioctlLoopSetCapacity(loopFd uintptr, value int) error {
|
||||
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
53
vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go
generated
vendored
Normal file
|
|
@ -0,0 +1,53 @@
|
|||
//go:build linux && cgo
|
||||
// +build linux,cgo
|
||||
|
||||
package loopback
|
||||
|
||||
/*
|
||||
#include <linux/loop.h> // FIXME: present only for defines, maybe we can remove it?
|
||||
|
||||
#ifndef LOOP_CTL_GET_FREE
|
||||
#define LOOP_CTL_GET_FREE 0x4C82
|
||||
#endif
|
||||
|
||||
#ifndef LO_FLAGS_PARTSCAN
|
||||
#define LO_FLAGS_PARTSCAN 8
|
||||
#endif
|
||||
|
||||
*/
|
||||
import "C"
|
||||
|
||||
type loopInfo64 struct {
|
||||
loDevice uint64 /* ioctl r/o */
|
||||
loInode uint64 /* ioctl r/o */
|
||||
loRdevice uint64 /* ioctl r/o */
|
||||
loOffset uint64
|
||||
loSizelimit uint64 /* bytes, 0 == max available */
|
||||
loNumber uint32 /* ioctl r/o */
|
||||
loEncryptType uint32
|
||||
loEncryptKeySize uint32 /* ioctl w/o */
|
||||
loFlags uint32 /* ioctl r/o */
|
||||
loFileName [LoNameSize]uint8
|
||||
loCryptName [LoNameSize]uint8
|
||||
loEncryptKey [LoKeySize]uint8 /* ioctl w/o */
|
||||
loInit [2]uint64
|
||||
}
|
||||
|
||||
// IOCTL consts
|
||||
const (
|
||||
LoopSetFd = C.LOOP_SET_FD
|
||||
LoopCtlGetFree = C.LOOP_CTL_GET_FREE
|
||||
LoopGetStatus64 = C.LOOP_GET_STATUS64
|
||||
LoopSetStatus64 = C.LOOP_SET_STATUS64
|
||||
LoopClrFd = C.LOOP_CLR_FD
|
||||
LoopSetCapacity = C.LOOP_SET_CAPACITY
|
||||
)
|
||||
|
||||
// LOOP consts.
|
||||
const (
|
||||
LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR
|
||||
LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY
|
||||
LoFlagsPartScan = C.LO_FLAGS_PARTSCAN
|
||||
LoKeySize = C.LO_KEY_SIZE
|
||||
LoNameSize = C.LO_NAME_SIZE
|
||||
)
|
||||
64
vendor/github.com/containers/storage/pkg/loopback/loopback.go
generated
vendored
Normal file
|
|
@ -0,0 +1,64 @@
|
|||
//go:build linux && cgo
|
||||
// +build linux,cgo
|
||||
|
||||
package loopback
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
|
||||
loopInfo, err := ioctlLoopGetStatus64(file.Fd())
|
||||
if err != nil {
|
||||
logrus.Errorf("Get loopback backing file: %v", err)
|
||||
return 0, 0, ErrGetLoopbackBackingFile
|
||||
}
|
||||
return loopInfo.loDevice, loopInfo.loInode, nil
|
||||
}
|
||||
|
||||
// SetCapacity reloads the size for the loopback device.
|
||||
func SetCapacity(file *os.File) error {
|
||||
if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil {
|
||||
logrus.Errorf("loopbackSetCapacity: %s", err)
|
||||
return ErrSetCapacity
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// FindLoopDeviceFor returns a loopback device file for the specified file which
|
||||
// is the backing file of a loopback device.
|
||||
func FindLoopDeviceFor(file *os.File) *os.File {
|
||||
stat, err := file.Stat()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
targetInode := stat.Sys().(*syscall.Stat_t).Ino
|
||||
targetDevice := stat.Sys().(*syscall.Stat_t).Dev
|
||||
|
||||
for i := 0; true; i++ {
|
||||
path := fmt.Sprintf("/dev/loop%d", i)
|
||||
|
||||
file, err := os.OpenFile(path, os.O_RDWR, 0)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ignore all errors until the first not-exist
|
||||
// we want to continue looking for the file
|
||||
continue
|
||||
}
|
||||
|
||||
dev, inode, err := getLoopbackBackingFile(file)
|
||||
if err == nil && dev == uint64(targetDevice) && inode == targetInode {
|
||||
return file
|
||||
}
|
||||
file.Close()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
1
vendor/github.com/containers/storage/pkg/loopback/loopback_unsupported.go
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
package loopback
|
||||
75
vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go
generated
vendored
Normal file
|
|
@ -0,0 +1,75 @@
|
|||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
// Package kernel provides helper functions to get, parse and compare kernel
|
||||
// versions for different platforms.
|
||||
package kernel
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// VersionInfo holds information about the kernel.
|
||||
type VersionInfo struct {
|
||||
Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4)
|
||||
Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1)
|
||||
Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2)
|
||||
Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic)
|
||||
}
|
||||
|
||||
func (k *VersionInfo) String() string {
|
||||
return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor)
|
||||
}
|
||||
|
||||
// CompareKernelVersion compares two kernel.VersionInfo structs.
|
||||
// Returns -1 if a < b, 0 if a == b, 1 if a > b
|
||||
func CompareKernelVersion(a, b VersionInfo) int {
|
||||
if a.Kernel < b.Kernel {
|
||||
return -1
|
||||
} else if a.Kernel > b.Kernel {
|
||||
return 1
|
||||
}
|
||||
|
||||
if a.Major < b.Major {
|
||||
return -1
|
||||
} else if a.Major > b.Major {
|
||||
return 1
|
||||
}
|
||||
|
||||
if a.Minor < b.Minor {
|
||||
return -1
|
||||
} else if a.Minor > b.Minor {
|
||||
return 1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
// ParseRelease parses a string and creates a VersionInfo based on it.
|
||||
func ParseRelease(release string) (*VersionInfo, error) {
|
||||
var (
|
||||
kernel, major, minor, parsed int
|
||||
flavor, partial string
|
||||
)
|
||||
|
||||
// Ignore error from Sscanf to allow an empty flavor. Instead, just
|
||||
// make sure we got all the version numbers.
|
||||
parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial)
|
||||
if parsed < 2 {
|
||||
return nil, errors.New("Can't parse kernel version " + release)
|
||||
}
|
||||
|
||||
// sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64
|
||||
parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor)
|
||||
if parsed < 1 {
|
||||
flavor = partial
|
||||
}
|
||||
|
||||
return &VersionInfo{
|
||||
Kernel: kernel,
|
||||
Major: major,
|
||||
Minor: minor,
|
||||
Flavor: flavor,
|
||||
}, nil
|
||||
}
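A short sketch of parsing and comparing a release string; the version values are examples:

```go
// Sketch only: parse a release string and compare it against a minimum.
package main

import (
	"fmt"
	"log"

	"github.com/containers/storage/pkg/parsers/kernel"
)

func main() {
	v, err := kernel.ParseRelease("5.15.0-91-generic")
	if err != nil {
		log.Fatal(err)
	}
	minimum := kernel.VersionInfo{Kernel: 5, Major: 10, Minor: 0}
	if kernel.CompareKernelVersion(*v, minimum) >= 0 {
		fmt.Println(v, "is at least 5.10.0")
	}
}
```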
|
||||
57
vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go
generated
vendored
Normal file
|
|
@ -0,0 +1,57 @@
|
|||
//go:build darwin
|
||||
// +build darwin
|
||||
|
||||
// Package kernel provides helper functions to get, parse and compare kernel
|
||||
// versions for different platforms.
|
||||
package kernel
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/mattn/go-shellwords"
|
||||
)
|
||||
|
||||
// GetKernelVersion gets the current kernel version.
|
||||
func GetKernelVersion() (*VersionInfo, error) {
|
||||
release, err := getRelease()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ParseRelease(release)
|
||||
}
|
||||
|
||||
// getRelease uses `system_profiler SPSoftwareDataType` to get OSX kernel version
|
||||
func getRelease() (string, error) {
|
||||
cmd := exec.Command("system_profiler", "SPSoftwareDataType")
|
||||
osName, err := cmd.Output()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var release string
|
||||
data := strings.Split(string(osName), "\n")
|
||||
for _, line := range data {
|
||||
if strings.Contains(line, "Kernel Version") {
|
||||
// It has the format like ' Kernel Version: Darwin 14.5.0'
|
||||
content := strings.SplitN(line, ":", 2)
|
||||
if len(content) != 2 {
|
||||
return "", fmt.Errorf("kernel version is invalid")
|
||||
}
|
||||
|
||||
prettyNames, err := shellwords.Parse(content[1])
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("kernel version is invalid: %w", err)
|
||||
}
|
||||
|
||||
if len(prettyNames) != 2 {
|
||||
return "", fmt.Errorf("kernel version needs to be 'Darwin x.x.x' ")
|
||||
}
|
||||
release = prettyNames[1]
|
||||
}
|
||||
}
|
||||
|
||||
return release, nil
|
||||
}
|
||||
46
vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
generated
vendored
Normal file
|
|
@ -0,0 +1,46 @@
|
|||
//go:build linux || freebsd || solaris || openbsd
|
||||
// +build linux freebsd solaris openbsd
|
||||
|
||||
// Package kernel provides helper functions to get, parse and compare kernel
|
||||
// versions for different platforms.
|
||||
package kernel
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// GetKernelVersion gets the current kernel version.
|
||||
func GetKernelVersion() (*VersionInfo, error) {
|
||||
uts, err := uname()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
release := make([]byte, len(uts.Release))
|
||||
|
||||
i := 0
|
||||
for _, c := range uts.Release {
|
||||
release[i] = byte(c)
|
||||
i++
|
||||
}
|
||||
|
||||
// Remove the \x00 from the release for Atoi to parse correctly
|
||||
release = release[:bytes.IndexByte(release, 0)]
|
||||
|
||||
return ParseRelease(string(release))
|
||||
}
|
||||
|
||||
// CheckKernelVersion checks if current kernel is newer than (or equal to)
|
||||
// the given version.
|
||||
func CheckKernelVersion(k, major, minor int) bool {
|
||||
if v, err := GetKernelVersion(); err != nil {
|
||||
logrus.Warnf("Error getting kernel version: %s", err)
|
||||
} else {
|
||||
if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
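Not part of the vendored diff: a sketch of the typical feature-gate call for CheckKernelVersion on a Linux host; the version numbers are illustrative, and note that the function returns true when the version cannot be determined.

package main

import (
    "fmt"

    "github.com/containers/storage/pkg/parsers/kernel"
)

func main() {
    // CheckKernelVersion logs a warning and returns true if the version
    // cannot be read, so it acts as a soft gate rather than a hard check.
    if kernel.CheckKernelVersion(5, 10, 0) {
        fmt.Println("running on kernel 5.10.0 or newer (or version unknown)")
    }
}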
70
vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go
generated
vendored
Normal file
@@ -0,0 +1,70 @@
//go:build windows
// +build windows

package kernel

import (
    "fmt"
    "unsafe"

    "golang.org/x/sys/windows"
)

// VersionInfo holds information about the kernel.
type VersionInfo struct {
    kvi   string // Version of the kernel (e.g. 6.1.7601.17592 -> 6)
    major int    // Major part of the kernel version (e.g. 6.1.7601.17592 -> 1)
    minor int    // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601)
    build int    // Build number of the kernel version (e.g. 6.1.7601.17592 -> 17592)
}

func (k *VersionInfo) String() string {
    return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi)
}

// GetKernelVersion gets the current kernel version.
func GetKernelVersion() (*VersionInfo, error) {
    var (
        h         windows.Handle
        dwVersion uint32
        err       error
    )

    KVI := &VersionInfo{"Unknown", 0, 0, 0}

    if err = windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE,
        windows.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`),
        0,
        windows.KEY_READ,
        &h); err != nil {
        return KVI, err
    }
    defer windows.RegCloseKey(h)

    var buf [1 << 10]uint16
    var typ uint32
    n := uint32(len(buf) * 2) // api expects array of bytes, not uint16

    if err = windows.RegQueryValueEx(h,
        windows.StringToUTF16Ptr("BuildLabEx"),
        nil,
        &typ,
        (*byte)(unsafe.Pointer(&buf[0])),
        &n); err != nil {
        return KVI, err
    }

    KVI.kvi = windows.UTF16ToString(buf[:])

    // Important - docker.exe MUST be manifested for this API to return
    // the correct information.
    if dwVersion, err = windows.GetVersion(); err != nil {
        return KVI, err
    }

    KVI.major = int(dwVersion & 0xFF)
    KVI.minor = int((dwVersion & 0xFF00) >> 8)
    KVI.build = int((dwVersion & 0xFFFF0000) >> 16)

    return KVI, nil
}
17
vendor/github.com/containers/storage/pkg/parsers/kernel/uname_freebsd.go
generated
vendored
Normal file
@@ -0,0 +1,17 @@
package kernel

import "golang.org/x/sys/unix"

// Utsname represents the system name structure.
// It is passthrough for unix.Utsname in order to make it portable with
// other platforms where it is not available.
type Utsname unix.Utsname

func uname() (*unix.Utsname, error) {
    uts := &unix.Utsname{}

    if err := unix.Uname(uts); err != nil {
        return nil, err
    }
    return uts, nil
}
17
vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go
generated
vendored
Normal file
@@ -0,0 +1,17 @@
package kernel

import "golang.org/x/sys/unix"

// Utsname represents the system name structure.
// It is passthrough for unix.Utsname in order to make it portable with
// other platforms where it is not available.
type Utsname unix.Utsname

func uname() (*unix.Utsname, error) {
    uts := &unix.Utsname{}

    if err := unix.Uname(uts); err != nil {
        return nil, err
    }
    return uts, nil
}
14
vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
package kernel

import (
    "golang.org/x/sys/unix"
)

func uname() (*unix.Utsname, error) {
    uts := &unix.Utsname{}

    if err := unix.Uname(uts); err != nil {
        return nil, err
    }
    return uts, nil
}
14
vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
//go:build openbsd
// +build openbsd

package kernel

import (
    "fmt"
    "runtime"
)

// A stub called by kernel_unix.go .
func uname() (*Utsname, error) {
    return nil, fmt.Errorf("Kernel version detection is not available on %s", runtime.GOOS)
}
11
vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported_type.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
//go:build !linux && !solaris && !freebsd
// +build !linux,!solaris,!freebsd

package kernel

// Utsname represents the system name structure.
// It is defined here to make it portable as it is available on linux but not
// on windows.
type Utsname struct {
    Release [65]byte
}
70
vendor/github.com/containers/storage/pkg/parsers/parsers.go
generated
vendored
Normal file
@@ -0,0 +1,70 @@
// Package parsers provides helper functions to parse and validate different type
// of string. It can be hosts, unix addresses, tcp addresses, filters, kernel
// operating system versions.
package parsers

import (
    "fmt"
    "strconv"
    "strings"
)

// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value)
func ParseKeyValueOpt(opt string) (string, string, error) {
    parts := strings.SplitN(opt, "=", 2)
    if len(parts) != 2 {
        return "", "", fmt.Errorf("unable to parse key/value option: %s", opt)
    }
    return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
}

// ParseUintList parses and validates the specified string as the value
// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be
// one of the formats below. Note that duplicates are actually allowed in the
// input string. It returns a `map[int]bool` with available elements from `val`
// set to `true`.
// Supported formats:
//
//	7
//	1-6
//	0,3-4,7,8-10
//	0-0,0,1-7
//	03,1-3 <- this is gonna get parsed as [1,2,3]
//	3,2,1
//	0-2,3,1
func ParseUintList(val string) (map[int]bool, error) {
    if val == "" {
        return map[int]bool{}, nil
    }

    availableInts := make(map[int]bool)
    split := strings.Split(val, ",")
    errInvalidFormat := fmt.Errorf("invalid format: %s", val)

    for _, r := range split {
        if !strings.Contains(r, "-") {
            v, err := strconv.Atoi(r)
            if err != nil {
                return nil, errInvalidFormat
            }
            availableInts[v] = true
        } else {
            split := strings.SplitN(r, "-", 2)
            min, err := strconv.Atoi(split[0])
            if err != nil {
                return nil, errInvalidFormat
            }
            max, err := strconv.Atoi(split[1])
            if err != nil {
                return nil, errInvalidFormat
            }
            if max < min {
                return nil, errInvalidFormat
            }
            for i := min; i <= max; i++ {
                availableInts[i] = true
            }
        }
    }
    return availableInts, nil
}
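Not part of the vendored diff: a minimal sketch of how the two parsers above might be called; the option string and the CPU list are illustrative values only.

package main

import (
    "fmt"

    "github.com/containers/storage/pkg/parsers"
)

func main() {
    // "0,3-4" expands to the set {0, 3, 4}.
    cpus, err := parsers.ParseUintList("0,3-4")
    if err != nil {
        panic(err)
    }
    fmt.Println(cpus[3], cpus[5]) // true false

    // Split a key=value option into its trimmed parts.
    k, v, err := parsers.ParseKeyValueOpt("driver = overlay")
    if err != nil {
        panic(err)
    }
    fmt.Printf("%q %q\n", k, v) // "driver" "overlay"
}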
1
vendor/github.com/containers/storage/pkg/stringid/README.md
generated
vendored
Normal file
@@ -0,0 +1 @@
This package provides helper functions for dealing with string identifiers
106
vendor/github.com/containers/storage/pkg/stringid/stringid.go
generated
vendored
Normal file
@@ -0,0 +1,106 @@
// Package stringid provides helper functions for dealing with string identifiers
package stringid

import (
    cryptorand "crypto/rand"
    "encoding/hex"
    "fmt"
    "io"
    "math"
    "math/big"
    "math/rand"
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/containers/storage/pkg/regexp"
)

const shortLen = 12

var (
    validShortID = regexp.Delayed("^[a-f0-9]{12}$")
    validHex     = regexp.Delayed(`^[a-f0-9]{64}$`)

    rngLock sync.Mutex
    rng     *rand.Rand // A RNG with seeding properties we control. It can only be accessed with randLock held.
)

// IsShortID determines if an arbitrary string *looks like* a short ID.
func IsShortID(id string) bool {
    return validShortID.MatchString(id)
}

// TruncateID returns a shorthand version of a string identifier for convenience.
// A collision with other shorthands is very unlikely, but possible.
// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
// will need to use a longer prefix, or the full-length Id.
func TruncateID(id string) string {
    if i := strings.IndexRune(id, ':'); i >= 0 {
        id = id[i+1:]
    }
    if len(id) > shortLen {
        id = id[:shortLen]
    }
    return id
}

func generateID(r io.Reader) string {
    b := make([]byte, 32)
    for {
        if _, err := io.ReadFull(r, b); err != nil {
            panic(err) // This shouldn't happen
        }
        id := hex.EncodeToString(b)
        // if we try to parse the truncated for as an int and we don't have
        // an error then the value is all numeric and causes issues when
        // used as a hostname. ref #3869
        if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil {
            continue
        }
        return id
    }
}

// GenerateRandomID returns a pseudorandom 64-character hex string.
func GenerateRandomID() string {
    return generateID(cryptorand.Reader)
}

// GenerateNonCryptoID generates unique id without using cryptographically
// secure sources of random.
// It helps you to save entropy.
func GenerateNonCryptoID() string {
    rngLock.Lock()
    defer rngLock.Unlock()
    return generateID(readerFunc(rng.Read))
}

// ValidateID checks whether an ID string is a valid image ID.
func ValidateID(id string) error {
    if ok := validHex.MatchString(id); !ok {
        return fmt.Errorf("image ID %q is invalid", id)
    }
    return nil
}

func init() {
    // Initialize a private RNG so we generate random ids. Tries to use a
    // crypto seed before falling back to time.
    var seed int64
    if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil {
        // This should not happen, but worst-case fallback to time-based seed.
        seed = time.Now().UnixNano()
    } else {
        seed = cryptoseed.Int64()
    }

    rng = rand.New(rand.NewSource(seed))
}

type readerFunc func(p []byte) (int, error)

func (fn readerFunc) Read(p []byte) (int, error) {
    return fn(p)
}
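Not part of the vendored diff: a short sketch of the usual ID round trip through the helpers above; the printed values follow from the documented behaviour of each function.

package main

import (
    "fmt"

    "github.com/containers/storage/pkg/stringid"
)

func main() {
    id := stringid.GenerateRandomID() // 64-character hex string
    short := stringid.TruncateID(id)  // first 12 characters

    fmt.Println(stringid.IsShortID(short)) // true
    fmt.Println(stringid.ValidateID(id))   // <nil>
}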
1
vendor/github.com/containers/storage/pkg/stringutils/README.md
generated
vendored
Normal file
@@ -0,0 +1 @@
This package provides helper functions for dealing with strings
110
vendor/github.com/containers/storage/pkg/stringutils/stringutils.go
generated
vendored
Normal file
@@ -0,0 +1,110 @@
// Package stringutils provides helper functions for dealing with strings.
package stringutils

import (
    "bytes"
    "math/rand"
    "strings"
)

// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n.
func GenerateRandomAlphaOnlyString(n int) string {
    // make a really long string
    letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
    b := make([]byte, n)
    for i := range b {
        b[i] = letters[rand.Intn(len(letters))]
    }
    return string(b)
}

// GenerateRandomASCIIString generates an ASCII random string with length n.
func GenerateRandomASCIIString(n int) string {
    chars := "abcdefghijklmnopqrstuvwxyz" +
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
        "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "
    res := make([]byte, n)
    for i := 0; i < n; i++ {
        res[i] = chars[rand.Intn(len(chars))]
    }
    return string(res)
}

// Ellipsis truncates a string to fit within maxlen, and appends ellipsis (...).
// For maxlen of 3 and lower, no ellipsis is appended.
func Ellipsis(s string, maxlen int) string {
    r := []rune(s)
    if len(r) <= maxlen {
        return s
    }
    if maxlen <= 3 {
        return string(r[:maxlen])
    }
    return string(r[:maxlen-3]) + "..."
}

// Truncate truncates a string to maxlen.
func Truncate(s string, maxlen int) string {
    r := []rune(s)
    if len(r) <= maxlen {
        return s
    }
    return string(r[:maxlen])
}

// InSlice tests whether a string is contained in a slice of strings or not.
// Comparison is case insensitive
func InSlice(slice []string, s string) bool {
    for _, ss := range slice {
        if strings.EqualFold(s, ss) {
            return true
        }
    }
    return false
}

// RemoveFromSlice removes a string from a slice. The string can be present
// multiple times. The entire slice is iterated.
func RemoveFromSlice(slice []string, s string) (ret []string) {
    for _, ss := range slice {
        if !strings.EqualFold(s, ss) {
            ret = append(ret, ss)
        }
    }
    return ret
}

func quote(word string, buf *bytes.Buffer) {
    // Bail out early for "simple" strings
    if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") {
        buf.WriteString(word)
        return
    }

    buf.WriteString("'")

    for i := 0; i < len(word); i++ {
        b := word[i]
        if b == '\'' {
            // Replace literal ' with a close ', a \', and an open '
            buf.WriteString("'\\''")
        } else {
            buf.WriteByte(b)
        }
    }

    buf.WriteString("'")
}

// ShellQuoteArguments takes a list of strings and escapes them so they will be
// handled right when passed as arguments to a program via a shell
func ShellQuoteArguments(args []string) string {
    var buf bytes.Buffer
    for i, arg := range args {
        if i != 0 {
            buf.WriteByte(' ')
        }
        quote(arg, &buf)
    }
    return buf.String()
}
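Not part of the vendored diff: a small sketch exercising the truncation and shell-quoting helpers above; the sample strings are illustrative only.

package main

import (
    "fmt"

    "github.com/containers/storage/pkg/stringutils"
)

func main() {
    fmt.Println(stringutils.Ellipsis("a rather long label", 10))            // a rathe...
    fmt.Println(stringutils.InSlice([]string{"Overlay", "vfs"}, "overlay")) // true

    // Quote arguments so they survive being re-parsed by a shell.
    fmt.Println(stringutils.ShellQuoteArguments([]string{"echo", "hello world"})) // echo 'hello world'
}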
66
vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go
generated
vendored
Normal file
@@ -0,0 +1,66 @@
package tarlog

import (
    "io"
    "sync"

    "github.com/sirupsen/logrus"
    "github.com/vbatts/tar-split/archive/tar"
)

type tarLogger struct {
    writer     *io.PipeWriter
    closeMutex *sync.Mutex
    closed     bool
}

// NewLogger returns a writer that, when a tar archive is written to it, calls
// `logger` for each file header it encounters in the archive.
func NewLogger(logger func(*tar.Header)) (io.WriteCloser, error) {
    reader, writer := io.Pipe()
    t := &tarLogger{
        writer:     writer,
        closeMutex: new(sync.Mutex),
        closed:     false,
    }
    tr := tar.NewReader(reader)
    t.closeMutex.Lock()
    go func() {
        hdr, err := tr.Next()
        for err == nil {
            logger(hdr)
            hdr, err = tr.Next()

        }
        // Make sure to avoid writes after the reader has been closed.
        if err := reader.Close(); err != nil {
            logrus.Errorf("Closing tarlogger reader: %v", err)
        }
        // Unblock the Close().
        t.closeMutex.Unlock()
    }()
    return t, nil
}

func (t *tarLogger) Write(b []byte) (int, error) {
    if t.closed {
        // We cannot use os.Pipe() as this alters the tar's digest. Using
        // io.Pipe() requires this workaround as it does not allow for writes
        // after close.
        return len(b), nil
    }
    n, err := t.writer.Write(b)
    if err == io.ErrClosedPipe {
        // The pipe got closed. Track it and avoid to call Write in future.
        t.closed = true
        return len(b), nil
    }
    return n, err
}

func (t *tarLogger) Close() error {
    err := t.writer.Close()
    // Wait for the reader to finish.
    t.closeMutex.Lock()
    return err
}
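Not part of the vendored diff: a sketch of wiring NewLogger into a copy pipeline to count tar entries; countEntries is a hypothetical helper name, and the use of stdin and io.Discard in main is illustrative only.

package main

import (
    "fmt"
    "io"
    "os"

    "github.com/containers/storage/pkg/tarlog"
    "github.com/vbatts/tar-split/archive/tar"
)

// countEntries copies a tar stream from in to out while counting its headers.
func countEntries(in io.Reader, out io.Writer) (int, error) {
    n := 0
    logger, err := tarlog.NewLogger(func(h *tar.Header) { n++ })
    if err != nil {
        return 0, err
    }
    // Tee the archive through the logging writer while also writing it to out.
    if _, err := io.Copy(io.MultiWriter(out, logger), in); err != nil {
        return 0, err
    }
    // Close waits for the internal reader goroutine, so n is stable afterwards.
    if err := logger.Close(); err != nil {
        return 0, err
    }
    return n, nil
}

func main() {
    n, err := countEntries(os.Stdin, io.Discard)
    if err != nil {
        panic(err)
    }
    fmt.Println("entries:", n)
}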
139
vendor/github.com/containers/storage/pkg/truncindex/truncindex.go
generated
vendored
Normal file
@@ -0,0 +1,139 @@
// Package truncindex provides a general 'index tree', used by Docker
// in order to be able to reference containers by only a few unambiguous
// characters of their id.
package truncindex

import (
    "errors"
    "fmt"
    "strings"
    "sync"

    "github.com/tchap/go-patricia/v2/patricia"
)

var (
    // ErrEmptyPrefix is an error returned if the prefix was empty.
    ErrEmptyPrefix = errors.New("prefix can't be empty")

    // ErrIllegalChar is returned when a space is in the ID
    ErrIllegalChar = errors.New("illegal character: ' '")

    // ErrNotExist is returned when ID or its prefix not found in index.
    ErrNotExist = errors.New("ID does not exist")
)

// ErrAmbiguousPrefix is returned if the prefix was ambiguous
// (multiple ids for the prefix).
type ErrAmbiguousPrefix struct { //nolint: errname
    prefix string
}

func (e ErrAmbiguousPrefix) Error() string {
    return fmt.Sprintf("Multiple IDs found with provided prefix: %s", e.prefix)
}

// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes.
// This is used to retrieve image and container IDs by more convenient shorthand prefixes.
type TruncIndex struct {
    sync.RWMutex
    trie *patricia.Trie
    ids  map[string]struct{}
}

// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs.
// Invalid IDs are _silently_ ignored.
func NewTruncIndex(ids []string) (idx *TruncIndex) {
    idx = &TruncIndex{
        ids: make(map[string]struct{}),

        // Change patricia max prefix per node length,
        // because our len(ID) always 64
        trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)),
    }
    for _, id := range ids {
        _ = idx.addID(id) // Ignore invalid IDs. Duplicate IDs are not a problem.
    }
    return
}

func (idx *TruncIndex) addID(id string) error {
    if strings.Contains(id, " ") {
        return ErrIllegalChar
    }
    if id == "" {
        return ErrEmptyPrefix
    }
    if _, exists := idx.ids[id]; exists {
        return fmt.Errorf("id already exists: '%s'", id)
    }
    idx.ids[id] = struct{}{}
    if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted {
        return fmt.Errorf("failed to insert id: %s", id)
    }
    return nil
}

// Add adds a new ID to the TruncIndex.
func (idx *TruncIndex) Add(id string) error {
    idx.Lock()
    defer idx.Unlock()
    return idx.addID(id)
}

// Delete removes an ID from the TruncIndex. If there are multiple IDs
// with the given prefix, an error is thrown.
func (idx *TruncIndex) Delete(id string) error {
    idx.Lock()
    defer idx.Unlock()
    if _, exists := idx.ids[id]; !exists || id == "" {
        return fmt.Errorf("no such id: '%s'", id)
    }
    delete(idx.ids, id)
    if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted {
        return fmt.Errorf("no such id: '%s'", id)
    }
    return nil
}

// Get retrieves an ID from the TruncIndex. If there are multiple IDs
// with the given prefix, an error is thrown.
func (idx *TruncIndex) Get(s string) (string, error) {
    if s == "" {
        return "", ErrEmptyPrefix
    }
    var id string
    subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error {
        if id != "" {
            // we haven't found the ID if there are two or more IDs
            id = ""
            return ErrAmbiguousPrefix{prefix: string(prefix)}
        }
        id = string(prefix)
        return nil
    }

    idx.RLock()
    defer idx.RUnlock()
    if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil {
        return "", err
    }
    if id != "" {
        return id, nil
    }
    return "", ErrNotExist
}

// Iterate iterates over all stored IDs and passes each of them to the given
// handler. Take care that the handler method does not call any public
// method on truncindex as the internal locking is not reentrant/recursive
// and will result in deadlock.
func (idx *TruncIndex) Iterate(handler func(id string)) {
    idx.Lock()
    defer idx.Unlock()
    // Ignore the error from Visit: it can only fail if the provided visitor fails, and ours never does.
    _ = idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error {
        handler(string(prefix))
        return nil
    })
}
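Not part of the vendored diff: a minimal sketch of resolving a short prefix back to a full ID with TruncIndex, reusing the stringid helpers from earlier in this diff; the 12-character prefix comes from TruncateID and is assumed not to collide between the two generated IDs.

package main

import (
    "fmt"

    "github.com/containers/storage/pkg/stringid"
    "github.com/containers/storage/pkg/truncindex"
)

func main() {
    a := stringid.GenerateRandomID()
    b := stringid.GenerateRandomID()

    idx := truncindex.NewTruncIndex([]string{a, b})

    // Any unambiguous prefix resolves back to the full ID.
    full, err := idx.Get(stringid.TruncateID(a))
    fmt.Println(full == a, err) // true <nil> (barring an improbable prefix collision)
}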