go.mod: update osbuild/images to v0.69.0
This commit is contained in:
parent
1cc90c6a0b
commit
8ac80e8abc
611 changed files with 28281 additions and 32629 deletions
2
vendor/github.com/containers/storage/pkg/archive/archive.go
generated
vendored
2
vendor/github.com/containers/storage/pkg/archive/archive.go
generated
vendored
|
|
@ -1023,7 +1023,7 @@ loop:
|
|||
// Not the root directory, ensure that the parent directory exists
|
||||
parent := filepath.Dir(hdr.Name)
|
||||
parentPath := filepath.Join(dest, parent)
|
||||
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
|
||||
if err := fileutils.Lexists(parentPath); err != nil && os.IsNotExist(err) {
|
||||
err = idtools.MkdirAllAndChownNew(parentPath, 0o777, rootIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
|
|||
3
vendor/github.com/containers/storage/pkg/archive/changes.go
generated
vendored
3
vendor/github.com/containers/storage/pkg/archive/changes.go
generated
vendored
|
|
@ -13,6 +13,7 @@ import (
|
|||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/containers/storage/pkg/fileutils"
|
||||
"github.com/containers/storage/pkg/idtools"
|
||||
"github.com/containers/storage/pkg/pools"
|
||||
"github.com/containers/storage/pkg/system"
|
||||
|
|
@ -106,7 +107,7 @@ func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
|
|||
|
||||
func aufsWhiteoutPresent(root, path string) (bool, error) {
|
||||
f := filepath.Join(filepath.Dir(path), WhiteoutPrefix+filepath.Base(path))
|
||||
_, err := os.Stat(filepath.Join(root, f))
|
||||
err := fileutils.Exists(filepath.Join(root, f))
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
|
|
|
|||
3
vendor/github.com/containers/storage/pkg/archive/copy.go
generated
vendored
3
vendor/github.com/containers/storage/pkg/archive/copy.go
generated
vendored
|
|
@ -8,6 +8,7 @@ import (
|
|||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/storage/pkg/fileutils"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
|
@ -94,7 +95,7 @@ func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
|
|||
// items in the resulting tar archive to match the given rebaseName if not "".
|
||||
func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
|
||||
sourcePath = normalizePath(sourcePath)
|
||||
if _, err = os.Lstat(sourcePath); err != nil {
|
||||
if err = fileutils.Lexists(sourcePath); err != nil {
|
||||
// Catches the case where the source does not exist or is not a
|
||||
// directory if asserted to be a directory, as this also causes an
|
||||
// error.
|
||||
|
|
|
|||
5
vendor/github.com/containers/storage/pkg/archive/diff.go
generated
vendored
5
vendor/github.com/containers/storage/pkg/archive/diff.go
generated
vendored
|
|
@ -10,6 +10,7 @@ import (
|
|||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/storage/pkg/fileutils"
|
||||
"github.com/containers/storage/pkg/idtools"
|
||||
"github.com/containers/storage/pkg/pools"
|
||||
"github.com/containers/storage/pkg/system"
|
||||
|
|
@ -84,7 +85,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
|
|||
parent := filepath.Dir(hdr.Name)
|
||||
parentPath := filepath.Join(dest, parent)
|
||||
|
||||
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
|
||||
if err := fileutils.Lexists(parentPath); err != nil && os.IsNotExist(err) {
|
||||
err = os.MkdirAll(parentPath, 0o755)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
|
|
@ -130,7 +131,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
|
|||
if strings.HasPrefix(base, WhiteoutPrefix) {
|
||||
dir := filepath.Dir(path)
|
||||
if base == WhiteoutOpaqueDir {
|
||||
_, err := os.Lstat(dir)
|
||||
err := fileutils.Lexists(dir)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
|
|
|||
3
vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
generated
vendored
3
vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
generated
vendored
|
|
@ -9,6 +9,7 @@ import (
|
|||
"sync"
|
||||
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/fileutils"
|
||||
"github.com/containers/storage/pkg/idtools"
|
||||
"github.com/containers/storage/pkg/unshare"
|
||||
)
|
||||
|
|
@ -76,7 +77,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
|
|||
rootIDs := idMappings.RootPair()
|
||||
|
||||
dest = filepath.Clean(dest)
|
||||
if _, err := os.Stat(dest); os.IsNotExist(err) {
|
||||
if err := fileutils.Exists(dest); os.IsNotExist(err) {
|
||||
if err := idtools.MkdirAllAndChownNew(dest, 0o755, rootIDs); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
87
vendor/github.com/containers/storage/pkg/chunked/bloom_filter.go
generated
vendored
Normal file
87
vendor/github.com/containers/storage/pkg/chunked/bloom_filter.go
generated
vendored
Normal file
|
|
@ -0,0 +1,87 @@
|
|||
package chunked
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
)
|
||||
|
||||
type bloomFilter struct {
|
||||
bitArray []uint64
|
||||
k uint32
|
||||
}
|
||||
|
||||
func newBloomFilter(size int, k uint32) *bloomFilter {
|
||||
numElements := (size + 63) / 64
|
||||
if numElements == 0 {
|
||||
numElements = 1
|
||||
}
|
||||
return &bloomFilter{
|
||||
bitArray: make([]uint64, numElements),
|
||||
k: k,
|
||||
}
|
||||
}
|
||||
|
||||
func newBloomFilterFromArray(bitArray []uint64, k uint32) *bloomFilter {
|
||||
return &bloomFilter{
|
||||
bitArray: bitArray,
|
||||
k: k,
|
||||
}
|
||||
}
|
||||
|
||||
func (bf *bloomFilter) hashFn(item []byte, seed uint32) (uint64, uint64) {
|
||||
if len(item) == 0 {
|
||||
return 0, 1
|
||||
}
|
||||
mod := uint32(len(bf.bitArray) * 64)
|
||||
seedSplit := seed % uint32(len(item))
|
||||
hash := (crc32.ChecksumIEEE(item[:seedSplit]) ^ crc32.ChecksumIEEE(item[seedSplit:])) % mod
|
||||
return uint64(hash / 64), uint64(1 << (hash % 64))
|
||||
}
|
||||
|
||||
func (bf *bloomFilter) add(item []byte) {
|
||||
for i := uint32(0); i < bf.k; i++ {
|
||||
index, mask := bf.hashFn(item, i)
|
||||
bf.bitArray[index] |= mask
|
||||
}
|
||||
}
|
||||
|
||||
func (bf *bloomFilter) maybeContains(item []byte) bool {
|
||||
for i := uint32(0); i < bf.k; i++ {
|
||||
index, mask := bf.hashFn(item, i)
|
||||
if bf.bitArray[index]&mask == 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (bf *bloomFilter) writeTo(writer io.Writer) error {
|
||||
if err := binary.Write(writer, binary.LittleEndian, uint64(len(bf.bitArray))); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := binary.Write(writer, binary.LittleEndian, uint32(bf.k)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := binary.Write(writer, binary.LittleEndian, bf.bitArray); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func readBloomFilter(reader io.Reader) (*bloomFilter, error) {
|
||||
var bloomFilterLen uint64
|
||||
var k uint32
|
||||
|
||||
if err := binary.Read(reader, binary.LittleEndian, &bloomFilterLen); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Read(reader, binary.LittleEndian, &k); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bloomFilterArray := make([]uint64, bloomFilterLen)
|
||||
if err := binary.Read(reader, binary.LittleEndian, &bloomFilterArray); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newBloomFilterFromArray(bloomFilterArray, k), nil
|
||||
}
|
||||
875
vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
generated
vendored
875
vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
generated
vendored
File diff suppressed because it is too large
Load diff
124
vendor/github.com/containers/storage/pkg/chunked/compression_linux.go
generated
vendored
124
vendor/github.com/containers/storage/pkg/chunked/compression_linux.go
generated
vendored
|
|
@ -7,7 +7,6 @@ import (
|
|||
"io"
|
||||
"strconv"
|
||||
|
||||
"github.com/containerd/stargz-snapshotter/estargz"
|
||||
"github.com/containers/storage/pkg/chunked/internal"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/klauspost/pgzip"
|
||||
|
|
@ -33,7 +32,7 @@ func typeToTarType(t string) (byte, error) {
|
|||
return r, nil
|
||||
}
|
||||
|
||||
func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) {
|
||||
func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, tocDigest digest.Digest) ([]byte, int64, error) {
|
||||
// information on the format here https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md
|
||||
footerSize := int64(51)
|
||||
if blobSize <= footerSize {
|
||||
|
|
@ -126,91 +125,53 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
|
|||
return nil, 0, err
|
||||
}
|
||||
|
||||
d, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation])
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
if manifestDigester.Digest() != d {
|
||||
if manifestDigester.Digest() != tocDigest {
|
||||
return nil, 0, errors.New("invalid manifest checksum")
|
||||
}
|
||||
|
||||
return manifestUncompressed, tocOffset, nil
|
||||
}
|
||||
|
||||
// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must
|
||||
// be specified.
|
||||
// This function uses the io.github.containers.zstd-chunked. annotations when specified.
|
||||
func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, []byte, int64, error) {
|
||||
footerSize := int64(internal.FooterSizeSupported)
|
||||
if blobSize <= footerSize {
|
||||
return nil, nil, 0, errors.New("blob too small")
|
||||
// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream.
|
||||
// Returns (manifest blob, parsed manifest, tar-split blob, manifest offset).
|
||||
func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) ([]byte, *internal.TOC, []byte, int64, error) {
|
||||
offsetMetadata := annotations[internal.ManifestInfoKey]
|
||||
if offsetMetadata == "" {
|
||||
return nil, nil, nil, 0, fmt.Errorf("%q annotation missing", internal.ManifestInfoKey)
|
||||
}
|
||||
|
||||
var footerData internal.ZstdChunkedFooterData
|
||||
|
||||
if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" {
|
||||
var err error
|
||||
footerData, err = internal.ReadFooterDataFromAnnotations(annotations)
|
||||
if err != nil {
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
} else {
|
||||
chunk := ImageSourceChunk{
|
||||
Offset: uint64(blobSize - footerSize),
|
||||
Length: uint64(footerSize),
|
||||
}
|
||||
parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
|
||||
if err != nil {
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
var reader io.ReadCloser
|
||||
select {
|
||||
case r := <-parts:
|
||||
reader = r
|
||||
case err := <-errs:
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
footer := make([]byte, footerSize)
|
||||
if _, err := io.ReadFull(reader, footer); err != nil {
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
|
||||
footerData, err = internal.ReadFooterDataFromBlob(footer)
|
||||
if err != nil {
|
||||
return nil, nil, 0, err
|
||||
var manifestChunk ImageSourceChunk
|
||||
var manifestLengthUncompressed, manifestType uint64
|
||||
if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &manifestChunk.Offset, &manifestChunk.Length, &manifestLengthUncompressed, &manifestType); err != nil {
|
||||
return nil, nil, nil, 0, err
|
||||
}
|
||||
// The tarSplit… values are valid if tarSplitChunk.Offset > 0
|
||||
var tarSplitChunk ImageSourceChunk
|
||||
var tarSplitLengthUncompressed uint64
|
||||
if tarSplitInfoKeyAnnotation, found := annotations[internal.TarSplitInfoKey]; found {
|
||||
if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &tarSplitChunk.Offset, &tarSplitChunk.Length, &tarSplitLengthUncompressed); err != nil {
|
||||
return nil, nil, nil, 0, err
|
||||
}
|
||||
}
|
||||
|
||||
if footerData.ManifestType != internal.ManifestTypeCRFS {
|
||||
return nil, nil, 0, errors.New("invalid manifest type")
|
||||
if manifestType != internal.ManifestTypeCRFS {
|
||||
return nil, nil, nil, 0, errors.New("invalid manifest type")
|
||||
}
|
||||
|
||||
// set a reasonable limit
|
||||
if footerData.LengthCompressed > (1<<20)*50 {
|
||||
return nil, nil, 0, errors.New("manifest too big")
|
||||
if manifestChunk.Length > (1<<20)*50 {
|
||||
return nil, nil, nil, 0, errors.New("manifest too big")
|
||||
}
|
||||
if footerData.LengthUncompressed > (1<<20)*50 {
|
||||
return nil, nil, 0, errors.New("manifest too big")
|
||||
if manifestLengthUncompressed > (1<<20)*50 {
|
||||
return nil, nil, nil, 0, errors.New("manifest too big")
|
||||
}
|
||||
|
||||
chunk := ImageSourceChunk{
|
||||
Offset: footerData.Offset,
|
||||
Length: footerData.LengthCompressed,
|
||||
chunks := []ImageSourceChunk{manifestChunk}
|
||||
if tarSplitChunk.Offset > 0 {
|
||||
chunks = append(chunks, tarSplitChunk)
|
||||
}
|
||||
|
||||
chunks := []ImageSourceChunk{chunk}
|
||||
|
||||
if footerData.OffsetTarSplit > 0 {
|
||||
chunkTarSplit := ImageSourceChunk{
|
||||
Offset: footerData.OffsetTarSplit,
|
||||
Length: footerData.LengthCompressedTarSplit,
|
||||
}
|
||||
chunks = append(chunks, chunkTarSplit)
|
||||
}
|
||||
|
||||
parts, errs, err := blobStream.GetBlobAt(chunks)
|
||||
if err != nil {
|
||||
return nil, nil, 0, err
|
||||
return nil, nil, nil, 0, err
|
||||
}
|
||||
|
||||
readBlob := func(len uint64) ([]byte, error) {
|
||||
|
|
@ -233,34 +194,39 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, ann
|
|||
return blob, nil
|
||||
}
|
||||
|
||||
manifest, err := readBlob(footerData.LengthCompressed)
|
||||
manifest, err := readBlob(manifestChunk.Length)
|
||||
if err != nil {
|
||||
return nil, nil, 0, err
|
||||
return nil, nil, nil, 0, err
|
||||
}
|
||||
|
||||
decodedBlob, err := decodeAndValidateBlob(manifest, footerData.LengthUncompressed, footerData.ChecksumAnnotation)
|
||||
decodedBlob, err := decodeAndValidateBlob(manifest, manifestLengthUncompressed, tocDigest.String())
|
||||
if err != nil {
|
||||
return nil, nil, 0, err
|
||||
return nil, nil, nil, 0, fmt.Errorf("validating and decompressing TOC: %w", err)
|
||||
}
|
||||
toc, err := unmarshalToc(decodedBlob)
|
||||
if err != nil {
|
||||
return nil, nil, nil, 0, fmt.Errorf("unmarshaling TOC: %w", err)
|
||||
}
|
||||
|
||||
decodedTarSplit := []byte{}
|
||||
if footerData.OffsetTarSplit > 0 {
|
||||
tarSplit, err := readBlob(footerData.LengthCompressedTarSplit)
|
||||
if tarSplitChunk.Offset > 0 {
|
||||
tarSplit, err := readBlob(tarSplitChunk.Length)
|
||||
if err != nil {
|
||||
return nil, nil, 0, err
|
||||
return nil, nil, nil, 0, err
|
||||
}
|
||||
|
||||
decodedTarSplit, err = decodeAndValidateBlob(tarSplit, footerData.LengthUncompressedTarSplit, footerData.ChecksumAnnotationTarSplit)
|
||||
decodedTarSplit, err = decodeAndValidateBlob(tarSplit, tarSplitLengthUncompressed, toc.TarSplitDigest.String())
|
||||
if err != nil {
|
||||
return nil, nil, 0, err
|
||||
return nil, nil, nil, 0, fmt.Errorf("validating and decompressing tar-split: %w", err)
|
||||
}
|
||||
}
|
||||
return decodedBlob, decodedTarSplit, int64(footerData.Offset), err
|
||||
return decodedBlob, toc, decodedTarSplit, int64(manifestChunk.Offset), err
|
||||
}
|
||||
|
||||
func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedCompressedChecksum string) ([]byte, error) {
|
||||
d, err := digest.Parse(expectedCompressedChecksum)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("invalid digest %q: %w", expectedCompressedChecksum, err)
|
||||
}
|
||||
|
||||
blobDigester := d.Algorithm().Digester()
|
||||
|
|
|
|||
2
vendor/github.com/containers/storage/pkg/chunked/dump/dump.go
generated
vendored
2
vendor/github.com/containers/storage/pkg/chunked/dump/dump.go
generated
vendored
|
|
@ -52,7 +52,7 @@ func escaped(val string, escape int) string {
|
|||
if noescapeSpace {
|
||||
hexEscape = !unicode.IsPrint(rune(c))
|
||||
} else {
|
||||
hexEscape = !unicode.IsGraphic(rune(c))
|
||||
hexEscape = !unicode.IsPrint(rune(c)) || unicode.IsSpace(rune(c))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
77
vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
generated
vendored
77
vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
generated
vendored
|
|
@ -8,7 +8,6 @@ import (
|
|||
"archive/tar"
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
|
@ -19,11 +18,9 @@ import (
|
|||
)
|
||||
|
||||
type TOC struct {
|
||||
Version int `json:"version"`
|
||||
Entries []FileMetadata `json:"entries"`
|
||||
|
||||
// internal: used by unmarshalToc
|
||||
StringsBuf bytes.Buffer `json:"-"`
|
||||
Version int `json:"version"`
|
||||
Entries []FileMetadata `json:"entries"`
|
||||
TarSplitDigest digest.Digest `json:"tarSplitDigest,omitempty"`
|
||||
}
|
||||
|
||||
type FileMetadata struct {
|
||||
|
|
@ -48,9 +45,6 @@ type FileMetadata struct {
|
|||
ChunkOffset int64 `json:"chunkOffset,omitempty"`
|
||||
ChunkDigest string `json:"chunkDigest,omitempty"`
|
||||
ChunkType string `json:"chunkType,omitempty"`
|
||||
|
||||
// internal: computed by mergeTOCEntries.
|
||||
Chunks []*FileMetadata `json:"-"`
|
||||
}
|
||||
|
||||
const (
|
||||
|
|
@ -91,9 +85,10 @@ func GetType(t byte) (string, error) {
|
|||
const (
|
||||
ManifestChecksumKey = "io.github.containers.zstd-chunked.manifest-checksum"
|
||||
ManifestInfoKey = "io.github.containers.zstd-chunked.manifest-position"
|
||||
TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum"
|
||||
TarSplitInfoKey = "io.github.containers.zstd-chunked.tarsplit-position"
|
||||
|
||||
TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum" // Deprecated: Use the TOC.TarSplitDigest field instead, this annotation is no longer read nor written.
|
||||
|
||||
// ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file.
|
||||
ManifestTypeCRFS = 1
|
||||
|
||||
|
|
@ -140,8 +135,9 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
|
|||
manifestOffset := offset + zstdSkippableFrameHeader
|
||||
|
||||
toc := TOC{
|
||||
Version: 1,
|
||||
Entries: metadata,
|
||||
Version: 1,
|
||||
Entries: metadata,
|
||||
TarSplitDigest: tarSplitData.Digest,
|
||||
}
|
||||
|
||||
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
|
|
@ -177,7 +173,6 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
|
|||
return err
|
||||
}
|
||||
|
||||
outMetadata[TarSplitChecksumKey] = tarSplitData.Digest.String()
|
||||
tarSplitOffset := manifestOffset + uint64(len(compressedManifest)) + zstdSkippableFrameHeader
|
||||
outMetadata[TarSplitInfoKey] = fmt.Sprintf("%d:%d:%d", tarSplitOffset, len(tarSplitData.Data), tarSplitData.UncompressedSize)
|
||||
if err := appendZstdSkippableFrame(dest, tarSplitData.Data); err != nil {
|
||||
|
|
@ -189,11 +184,9 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
|
|||
Offset: manifestOffset,
|
||||
LengthCompressed: uint64(len(compressedManifest)),
|
||||
LengthUncompressed: uint64(len(manifest)),
|
||||
ChecksumAnnotation: "", // unused
|
||||
OffsetTarSplit: uint64(tarSplitOffset),
|
||||
LengthCompressedTarSplit: uint64(len(tarSplitData.Data)),
|
||||
LengthUncompressedTarSplit: uint64(tarSplitData.UncompressedSize),
|
||||
ChecksumAnnotationTarSplit: "", // unused
|
||||
}
|
||||
|
||||
manifestDataLE := footerDataToBlob(footer)
|
||||
|
|
@ -207,18 +200,22 @@ func ZstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) {
|
|||
}
|
||||
|
||||
// ZstdChunkedFooterData contains all the data stored in the zstd:chunked footer.
|
||||
// This footer exists to make the blobs self-describing, our implementation
|
||||
// never reads it:
|
||||
// Partial pull security hinges on the TOC digest, and that exists as a layer annotation;
|
||||
// so we are relying on the layer annotations anyway, and doing so means we can avoid
|
||||
// a round-trip to fetch this binary footer.
|
||||
type ZstdChunkedFooterData struct {
|
||||
ManifestType uint64
|
||||
|
||||
Offset uint64
|
||||
LengthCompressed uint64
|
||||
LengthUncompressed uint64
|
||||
ChecksumAnnotation string // Only used when reading a layer, not when creating it
|
||||
|
||||
OffsetTarSplit uint64
|
||||
LengthCompressedTarSplit uint64
|
||||
LengthUncompressedTarSplit uint64
|
||||
ChecksumAnnotationTarSplit string // Only used when reading a layer, not when creating it
|
||||
ChecksumAnnotationTarSplit string // Deprecated: This field is not a part of the footer and not used for any purpose.
|
||||
}
|
||||
|
||||
func footerDataToBlob(footer ZstdChunkedFooterData) []byte {
|
||||
|
|
@ -235,49 +232,3 @@ func footerDataToBlob(footer ZstdChunkedFooterData) []byte {
|
|||
|
||||
return manifestDataLE
|
||||
}
|
||||
|
||||
// ReadFooterDataFromAnnotations reads the zstd:chunked footer data from the given annotations.
|
||||
func ReadFooterDataFromAnnotations(annotations map[string]string) (ZstdChunkedFooterData, error) {
|
||||
var footerData ZstdChunkedFooterData
|
||||
|
||||
footerData.ChecksumAnnotation = annotations[ManifestChecksumKey]
|
||||
if footerData.ChecksumAnnotation == "" {
|
||||
return footerData, fmt.Errorf("manifest checksum annotation %q not found", ManifestChecksumKey)
|
||||
}
|
||||
|
||||
offsetMetadata := annotations[ManifestInfoKey]
|
||||
|
||||
if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &footerData.Offset, &footerData.LengthCompressed, &footerData.LengthUncompressed, &footerData.ManifestType); err != nil {
|
||||
return footerData, err
|
||||
}
|
||||
|
||||
if tarSplitInfoKeyAnnotation, found := annotations[TarSplitInfoKey]; found {
|
||||
if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &footerData.OffsetTarSplit, &footerData.LengthCompressedTarSplit, &footerData.LengthUncompressedTarSplit); err != nil {
|
||||
return footerData, err
|
||||
}
|
||||
footerData.ChecksumAnnotationTarSplit = annotations[TarSplitChecksumKey]
|
||||
}
|
||||
return footerData, nil
|
||||
}
|
||||
|
||||
// ReadFooterDataFromBlob reads the zstd:chunked footer from the binary buffer.
|
||||
func ReadFooterDataFromBlob(footer []byte) (ZstdChunkedFooterData, error) {
|
||||
var footerData ZstdChunkedFooterData
|
||||
|
||||
if len(footer) < FooterSizeSupported {
|
||||
return footerData, errors.New("blob too small")
|
||||
}
|
||||
footerData.Offset = binary.LittleEndian.Uint64(footer[0:8])
|
||||
footerData.LengthCompressed = binary.LittleEndian.Uint64(footer[8:16])
|
||||
footerData.LengthUncompressed = binary.LittleEndian.Uint64(footer[16:24])
|
||||
footerData.ManifestType = binary.LittleEndian.Uint64(footer[24:32])
|
||||
footerData.OffsetTarSplit = binary.LittleEndian.Uint64(footer[32:40])
|
||||
footerData.LengthCompressedTarSplit = binary.LittleEndian.Uint64(footer[40:48])
|
||||
footerData.LengthUncompressedTarSplit = binary.LittleEndian.Uint64(footer[48:56])
|
||||
|
||||
// the magic number is stored in the last 8 bytes
|
||||
if !bytes.Equal(ZstdChunkedFrameMagic, footer[len(footer)-len(ZstdChunkedFrameMagic):]) {
|
||||
return footerData, errors.New("invalid magic number")
|
||||
}
|
||||
return footerData, nil
|
||||
}
|
||||
|
|
|
|||
191
vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
generated
vendored
191
vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
generated
vendored
|
|
@ -25,6 +25,7 @@ import (
|
|||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/chunked/compressor"
|
||||
"github.com/containers/storage/pkg/chunked/internal"
|
||||
"github.com/containers/storage/pkg/chunked/toc"
|
||||
"github.com/containers/storage/pkg/fsverity"
|
||||
"github.com/containers/storage/pkg/idtools"
|
||||
"github.com/containers/storage/pkg/system"
|
||||
|
|
@ -58,11 +59,27 @@ const (
|
|||
copyGoRoutines = 32
|
||||
)
|
||||
|
||||
// fileMetadata is a wrapper around internal.FileMetadata with additional private fields that
|
||||
// are not part of the TOC document.
|
||||
// Type: TypeChunk entries are stored in Chunks, the primary [fileMetadata] entries never use TypeChunk.
|
||||
type fileMetadata struct {
|
||||
internal.FileMetadata
|
||||
|
||||
// chunks stores the TypeChunk entries relevant to this entry when FileMetadata.Type == TypeReg.
|
||||
chunks []*internal.FileMetadata
|
||||
|
||||
// skipSetAttrs is set when the file attributes must not be
|
||||
// modified, e.g. it is a hard link from a different source,
|
||||
// or a composefs file.
|
||||
skipSetAttrs bool
|
||||
}
|
||||
|
||||
type compressedFileType int
|
||||
|
||||
type chunkedDiffer struct {
|
||||
stream ImageSourceSeekable
|
||||
manifest []byte
|
||||
toc *internal.TOC // The parsed contents of manifest, or nil if not yet available
|
||||
tarSplit []byte
|
||||
layersCache *layersCache
|
||||
tocOffset int64
|
||||
|
|
@ -138,7 +155,8 @@ func doHardLink(srcFd int, destDirFd int, destBase string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, useHardLinks bool) (*os.File, int64, error) {
|
||||
func copyFileContent(srcFd int, fileMetadata *fileMetadata, dirfd int, mode os.FileMode, useHardLinks bool) (*os.File, int64, error) {
|
||||
destFile := fileMetadata.Name
|
||||
src := fmt.Sprintf("/proc/self/fd/%d", srcFd)
|
||||
st, err := os.Stat(src)
|
||||
if err != nil {
|
||||
|
|
@ -156,6 +174,8 @@ func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, us
|
|||
|
||||
err := doHardLink(srcFd, int(destDir.Fd()), destBase)
|
||||
if err == nil {
|
||||
// if the file was deduplicated with a hard link, skip overriding file metadata.
|
||||
fileMetadata.skipSetAttrs = true
|
||||
return nil, st.Size(), nil
|
||||
}
|
||||
}
|
||||
|
|
@ -198,15 +218,15 @@ func (f *seekableFile) GetBlobAt(chunks []ImageSourceChunk) (chan io.ReadCloser,
|
|||
return streams, errs, nil
|
||||
}
|
||||
|
||||
func convertTarToZstdChunked(destDirectory string, payload *os.File) (*seekableFile, digest.Digest, map[string]string, error) {
|
||||
func convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *seekableFile, digest.Digest, map[string]string, error) {
|
||||
diff, err := archive.DecompressStream(payload)
|
||||
if err != nil {
|
||||
return nil, "", nil, err
|
||||
return 0, nil, "", nil, err
|
||||
}
|
||||
|
||||
fd, err := unix.Open(destDirectory, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600)
|
||||
if err != nil {
|
||||
return nil, "", nil, err
|
||||
return 0, nil, "", nil, err
|
||||
}
|
||||
|
||||
f := os.NewFile(uintptr(fd), destDirectory)
|
||||
|
|
@ -216,23 +236,24 @@ func convertTarToZstdChunked(destDirectory string, payload *os.File) (*seekableF
|
|||
chunked, err := compressor.ZstdCompressor(f, newAnnotations, &level)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, "", nil, err
|
||||
return 0, nil, "", nil, err
|
||||
}
|
||||
|
||||
convertedOutputDigester := digest.Canonical.Digester()
|
||||
if _, err := io.Copy(io.MultiWriter(chunked, convertedOutputDigester.Hash()), diff); err != nil {
|
||||
copied, err := io.Copy(io.MultiWriter(chunked, convertedOutputDigester.Hash()), diff)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, "", nil, err
|
||||
return 0, nil, "", nil, err
|
||||
}
|
||||
if err := chunked.Close(); err != nil {
|
||||
f.Close()
|
||||
return nil, "", nil, err
|
||||
return 0, nil, "", nil, err
|
||||
}
|
||||
is := seekableFile{
|
||||
file: f,
|
||||
}
|
||||
|
||||
return &is, convertedOutputDigester.Digest(), newAnnotations, nil
|
||||
return copied, &is, convertedOutputDigester.Digest(), newAnnotations, nil
|
||||
}
|
||||
|
||||
// GetDiffer returns a differ than can be used with ApplyDiffWithDiffer.
|
||||
|
|
@ -246,18 +267,26 @@ func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Diges
|
|||
return nil, errors.New("enable_partial_images not configured")
|
||||
}
|
||||
|
||||
_, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey]
|
||||
_, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation]
|
||||
zstdChunkedTOCDigestString, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey]
|
||||
estargzTOCDigestString, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation]
|
||||
|
||||
if hasZstdChunkedTOC && hasEstargzTOC {
|
||||
return nil, errors.New("both zstd:chunked and eStargz TOC found")
|
||||
}
|
||||
|
||||
if hasZstdChunkedTOC {
|
||||
return makeZstdChunkedDiffer(ctx, store, blobSize, annotations, iss, &storeOpts)
|
||||
zstdChunkedTOCDigest, err := digest.Parse(zstdChunkedTOCDigestString)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing zstd:chunked TOC digest %q: %w", zstdChunkedTOCDigestString, err)
|
||||
}
|
||||
return makeZstdChunkedDiffer(ctx, store, blobSize, zstdChunkedTOCDigest, annotations, iss, &storeOpts)
|
||||
}
|
||||
if hasEstargzTOC {
|
||||
return makeEstargzChunkedDiffer(ctx, store, blobSize, annotations, iss, &storeOpts)
|
||||
estargzTOCDigest, err := digest.Parse(estargzTOCDigestString)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing estargz TOC digest %q: %w", estargzTOCDigestString, err)
|
||||
}
|
||||
return makeEstargzChunkedDiffer(ctx, store, blobSize, estargzTOCDigest, iss, &storeOpts)
|
||||
}
|
||||
|
||||
return makeConvertFromRawDiffer(ctx, store, blobDigest, blobSize, annotations, iss, &storeOpts)
|
||||
|
|
@ -285,8 +314,8 @@ func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobDige
|
|||
}, nil
|
||||
}
|
||||
|
||||
func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) {
|
||||
manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, blobSize, annotations)
|
||||
func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) {
|
||||
manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, tocDigest, annotations)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
|
||||
}
|
||||
|
|
@ -295,11 +324,6 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
|
|||
return nil, err
|
||||
}
|
||||
|
||||
tocDigest, err := digest.Parse(annotations[internal.ManifestChecksumKey])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[internal.ManifestChecksumKey], err)
|
||||
}
|
||||
|
||||
return &chunkedDiffer{
|
||||
fsVerityDigests: make(map[string]string),
|
||||
blobSize: blobSize,
|
||||
|
|
@ -308,6 +332,7 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
|
|||
fileType: fileTypeZstdChunked,
|
||||
layersCache: layersCache,
|
||||
manifest: manifest,
|
||||
toc: toc,
|
||||
storeOpts: storeOpts,
|
||||
stream: iss,
|
||||
tarSplit: tarSplit,
|
||||
|
|
@ -315,8 +340,8 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
|
|||
}, nil
|
||||
}
|
||||
|
||||
func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) {
|
||||
manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, annotations)
|
||||
func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, tocDigest digest.Digest, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) {
|
||||
manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, tocDigest)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
|
||||
}
|
||||
|
|
@ -325,11 +350,6 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize
|
|||
return nil, err
|
||||
}
|
||||
|
||||
tocDigest, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[estargz.TOCJSONDigestAnnotation], err)
|
||||
}
|
||||
|
||||
return &chunkedDiffer{
|
||||
fsVerityDigests: make(map[string]string),
|
||||
blobSize: blobSize,
|
||||
|
|
@ -354,7 +374,7 @@ func makeCopyBuffer() []byte {
|
|||
// name is the path to the file to copy in source.
|
||||
// dirfd is an open file descriptor to the destination root directory.
|
||||
// useHardLinks defines whether the deduplication can be performed using hard links.
|
||||
func copyFileFromOtherLayer(file *internal.FileMetadata, source string, name string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
|
||||
func copyFileFromOtherLayer(file *fileMetadata, source string, name string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
|
||||
srcDirfd, err := unix.Open(source, unix.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
return false, nil, 0, fmt.Errorf("open source file: %w", err)
|
||||
|
|
@ -367,7 +387,7 @@ func copyFileFromOtherLayer(file *internal.FileMetadata, source string, name str
|
|||
}
|
||||
defer srcFile.Close()
|
||||
|
||||
dstFile, written, err := copyFileContent(int(srcFile.Fd()), file.Name, dirfd, 0, useHardLinks)
|
||||
dstFile, written, err := copyFileContent(int(srcFile.Fd()), file, dirfd, 0, useHardLinks)
|
||||
if err != nil {
|
||||
return false, nil, 0, fmt.Errorf("copy content to %q: %w", file.Name, err)
|
||||
}
|
||||
|
|
@ -376,7 +396,7 @@ func copyFileFromOtherLayer(file *internal.FileMetadata, source string, name str
|
|||
|
||||
// canDedupMetadataWithHardLink says whether it is possible to deduplicate file with otherFile.
|
||||
// It checks that the two files have the same UID, GID, file mode and xattrs.
|
||||
func canDedupMetadataWithHardLink(file *internal.FileMetadata, otherFile *internal.FileMetadata) bool {
|
||||
func canDedupMetadataWithHardLink(file *fileMetadata, otherFile *fileMetadata) bool {
|
||||
if file.UID != otherFile.UID {
|
||||
return false
|
||||
}
|
||||
|
|
@ -394,7 +414,7 @@ func canDedupMetadataWithHardLink(file *internal.FileMetadata, otherFile *intern
|
|||
|
||||
// canDedupFileWithHardLink checks if the specified file can be deduplicated by an
|
||||
// open file, given its descriptor and stat data.
|
||||
func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo) bool {
|
||||
func canDedupFileWithHardLink(file *fileMetadata, fd int, s os.FileInfo) bool {
|
||||
st, ok := s.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return false
|
||||
|
|
@ -420,11 +440,13 @@ func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo
|
|||
xattrs[x] = string(v)
|
||||
}
|
||||
// fill only the attributes used by canDedupMetadataWithHardLink.
|
||||
otherFile := internal.FileMetadata{
|
||||
UID: int(st.Uid),
|
||||
GID: int(st.Gid),
|
||||
Mode: int64(st.Mode),
|
||||
Xattrs: xattrs,
|
||||
otherFile := fileMetadata{
|
||||
FileMetadata: internal.FileMetadata{
|
||||
UID: int(st.Uid),
|
||||
GID: int(st.Gid),
|
||||
Mode: int64(st.Mode),
|
||||
Xattrs: xattrs,
|
||||
},
|
||||
}
|
||||
return canDedupMetadataWithHardLink(file, &otherFile)
|
||||
}
|
||||
|
|
@ -434,7 +456,7 @@ func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo
|
|||
// ostreeRepos is a list of OSTree repos.
|
||||
// dirfd is an open fd to the destination checkout.
|
||||
// useHardLinks defines whether the deduplication can be performed using hard links.
|
||||
func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
|
||||
func findFileInOSTreeRepos(file *fileMetadata, ostreeRepos []string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
|
||||
digest, err := digest.Parse(file.Digest)
|
||||
if err != nil {
|
||||
logrus.Debugf("could not parse digest: %v", err)
|
||||
|
|
@ -467,7 +489,7 @@ func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, di
|
|||
continue
|
||||
}
|
||||
|
||||
dstFile, written, err := copyFileContent(fd, file.Name, dirfd, 0, useHardLinks)
|
||||
dstFile, written, err := copyFileContent(fd, file, dirfd, 0, useHardLinks)
|
||||
if err != nil {
|
||||
logrus.Debugf("could not copyFileContent: %v", err)
|
||||
return false, nil, 0, nil
|
||||
|
|
@ -487,7 +509,7 @@ func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, di
|
|||
// file is the file to look for.
|
||||
// dirfd is an open file descriptor to the checkout root directory.
|
||||
// useHardLinks defines whether the deduplication can be performed using hard links.
|
||||
func findFileInOtherLayers(cache *layersCache, file *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
|
||||
func findFileInOtherLayers(cache *layersCache, file *fileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
|
||||
target, name, err := cache.findFileInOtherLayers(file, useHardLinks)
|
||||
if err != nil || name == "" {
|
||||
return false, nil, 0, err
|
||||
|
|
@ -495,7 +517,7 @@ func findFileInOtherLayers(cache *layersCache, file *internal.FileMetadata, dirf
|
|||
return copyFileFromOtherLayer(file, target, name, dirfd, useHardLinks)
|
||||
}
|
||||
|
||||
func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOptions) error {
|
||||
func maybeDoIDRemap(manifest []fileMetadata, options *archive.TarOptions) error {
|
||||
if options.ChownOpts == nil && len(options.UIDMaps) == 0 || len(options.GIDMaps) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
|
@ -529,7 +551,7 @@ func mapToSlice(inputMap map[uint32]struct{}) []uint32 {
|
|||
return out
|
||||
}
|
||||
|
||||
func collectIDs(entries []internal.FileMetadata) ([]uint32, []uint32) {
|
||||
func collectIDs(entries []fileMetadata) ([]uint32, []uint32) {
|
||||
uids := make(map[uint32]struct{})
|
||||
gids := make(map[uint32]struct{})
|
||||
for _, entry := range entries {
|
||||
|
|
@ -549,7 +571,7 @@ type missingFileChunk struct {
|
|||
Gap int64
|
||||
Hole bool
|
||||
|
||||
File *internal.FileMetadata
|
||||
File *fileMetadata
|
||||
|
||||
CompressedSize int64
|
||||
UncompressedSize int64
|
||||
|
|
@ -582,7 +604,10 @@ func (o *originFile) OpenFile() (io.ReadCloser, error) {
|
|||
}
|
||||
|
||||
// setFileAttrs sets the file attributes for file given metadata
|
||||
func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions, usePath bool) error {
|
||||
func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions, usePath bool) error {
|
||||
if metadata.skipSetAttrs {
|
||||
return nil
|
||||
}
|
||||
if file == nil || file.Fd() < 0 {
|
||||
return errors.New("invalid file")
|
||||
}
|
||||
|
|
@ -944,14 +969,14 @@ type destinationFile struct {
|
|||
dirfd int
|
||||
file *os.File
|
||||
hash hash.Hash
|
||||
metadata *internal.FileMetadata
|
||||
metadata *fileMetadata
|
||||
options *archive.TarOptions
|
||||
skipValidation bool
|
||||
to io.Writer
|
||||
recordFsVerity recordFsVerityFunc
|
||||
}
|
||||
|
||||
func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions, skipValidation bool, recordFsVerity recordFsVerityFunc) (*destinationFile, error) {
|
||||
func openDestinationFile(dirfd int, metadata *fileMetadata, options *archive.TarOptions, skipValidation bool, recordFsVerity recordFsVerityFunc) (*destinationFile, error) {
|
||||
file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -1314,7 +1339,7 @@ func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest st
|
|||
return nil
|
||||
}
|
||||
|
||||
func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *internal.FileMetadata, options *archive.TarOptions) error {
|
||||
func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *fileMetadata, options *archive.TarOptions) error {
|
||||
parent := filepath.Dir(name)
|
||||
base := filepath.Base(name)
|
||||
|
||||
|
|
@ -1343,7 +1368,7 @@ func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *internal.File
|
|||
return setFileAttrs(dirfd, file, mode, metadata, options, false)
|
||||
}
|
||||
|
||||
func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error {
|
||||
func safeLink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions) error {
|
||||
sourceFile, err := openFileUnderRoot(metadata.Linkname, dirfd, unix.O_PATH|unix.O_RDONLY|unix.O_NOFOLLOW, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -1385,7 +1410,7 @@ func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, opti
|
|||
return setFileAttrs(dirfd, newFile, mode, metadata, options, false)
|
||||
}
|
||||
|
||||
func safeSymlink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error {
|
||||
func safeSymlink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions) error {
|
||||
destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name)
|
||||
destDirFd := dirfd
|
||||
if destDir != "." {
|
||||
|
|
@ -1473,7 +1498,7 @@ type hardLinkToCreate struct {
|
|||
dest string
|
||||
dirfd int
|
||||
mode os.FileMode
|
||||
metadata *internal.FileMetadata
|
||||
metadata *fileMetadata
|
||||
}
|
||||
|
||||
func parseBooleanPullOption(storeOpts *storage.StoreOptions, name string, def bool) bool {
|
||||
|
|
@ -1498,7 +1523,7 @@ func reopenFileReadOnly(f *os.File) (*os.File, error) {
|
|||
return os.NewFile(uintptr(fd), f.Name()), nil
|
||||
}
|
||||
|
||||
func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) {
|
||||
func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *fileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) {
|
||||
finalizeFile := func(dstFile *os.File) error {
|
||||
if dstFile == nil {
|
||||
return nil
|
||||
|
|
@ -1549,8 +1574,8 @@ func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, cop
|
|||
return false, nil
|
||||
}
|
||||
|
||||
func makeEntriesFlat(mergedEntries []internal.FileMetadata) ([]internal.FileMetadata, error) {
|
||||
var new []internal.FileMetadata
|
||||
func makeEntriesFlat(mergedEntries []fileMetadata) ([]fileMetadata, error) {
|
||||
var new []fileMetadata
|
||||
|
||||
hashes := make(map[string]string)
|
||||
for i := range mergedEntries {
|
||||
|
|
@ -1572,6 +1597,7 @@ func makeEntriesFlat(mergedEntries []internal.FileMetadata) ([]internal.FileMeta
|
|||
hashes[d] = d
|
||||
|
||||
mergedEntries[i].Name = fmt.Sprintf("%s/%s", d[0:2], d[2:])
|
||||
mergedEntries[i].skipSetAttrs = true
|
||||
|
||||
new = append(new, mergedEntries[i])
|
||||
}
|
||||
|
|
@ -1629,6 +1655,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
|||
stream := c.stream
|
||||
|
||||
var uncompressedDigest digest.Digest
|
||||
var convertedBlobSize int64
|
||||
|
||||
if c.convertToZstdChunked {
|
||||
fd, err := unix.Open(dest, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600)
|
||||
|
|
@ -1656,10 +1683,11 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
|||
return graphdriver.DriverWithDifferOutput{}, err
|
||||
}
|
||||
|
||||
fileSource, diffID, annotations, err := convertTarToZstdChunked(dest, blobFile)
|
||||
tarSize, fileSource, diffID, annotations, err := convertTarToZstdChunked(dest, blobFile)
|
||||
if err != nil {
|
||||
return graphdriver.DriverWithDifferOutput{}, err
|
||||
}
|
||||
convertedBlobSize = tarSize
|
||||
// fileSource is a O_TMPFILE file descriptor, so we
|
||||
// need to keep it open until the entire file is processed.
|
||||
defer fileSource.Close()
|
||||
|
|
@ -1668,7 +1696,14 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
|||
blobFile.Close()
|
||||
blobFile = nil
|
||||
|
||||
manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(fileSource, c.blobSize, annotations)
|
||||
tocDigest, err := toc.GetTOCDigest(annotations)
|
||||
if err != nil {
|
||||
return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("internal error: parsing just-created zstd:chunked TOC digest: %w", err)
|
||||
}
|
||||
if tocDigest == nil {
|
||||
return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("internal error: just-created zstd:chunked missing TOC digest")
|
||||
}
|
||||
manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(fileSource, *tocDigest, annotations)
|
||||
if err != nil {
|
||||
return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("read zstd:chunked manifest: %w", err)
|
||||
}
|
||||
|
|
@ -1679,6 +1714,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
|||
// fill the chunkedDiffer with the data we just read.
|
||||
c.fileType = fileTypeZstdChunked
|
||||
c.manifest = manifest
|
||||
c.toc = toc
|
||||
c.tarSplit = tarSplit
|
||||
c.tocOffset = tocOffset
|
||||
|
||||
|
|
@ -1699,9 +1735,13 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
|||
}
|
||||
|
||||
// Generate the manifest
|
||||
toc, err := unmarshalToc(c.manifest)
|
||||
if err != nil {
|
||||
return graphdriver.DriverWithDifferOutput{}, err
|
||||
toc := c.toc
|
||||
if toc == nil {
|
||||
toc_, err := unmarshalToc(c.manifest)
|
||||
if err != nil {
|
||||
return graphdriver.DriverWithDifferOutput{}, err
|
||||
}
|
||||
toc = toc_
|
||||
}
|
||||
|
||||
output := graphdriver.DriverWithDifferOutput{
|
||||
|
|
@ -1729,14 +1769,19 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
|||
|
||||
var missingParts []missingPart
|
||||
|
||||
output.UIDs, output.GIDs = collectIDs(toc.Entries)
|
||||
|
||||
mergedEntries, totalSize, err := c.mergeTocEntries(c.fileType, toc.Entries)
|
||||
mergedEntries, totalSizeFromTOC, err := c.mergeTocEntries(c.fileType, toc.Entries)
|
||||
if err != nil {
|
||||
return output, err
|
||||
}
|
||||
|
||||
output.Size = totalSize
|
||||
output.UIDs, output.GIDs = collectIDs(mergedEntries)
|
||||
if convertedBlobSize > 0 {
|
||||
// if the image was converted, store the original tar size, so that
|
||||
// it can be recreated correctly.
|
||||
output.Size = convertedBlobSize
|
||||
} else {
|
||||
output.Size = totalSizeFromTOC
|
||||
}
|
||||
|
||||
if err := maybeDoIDRemap(mergedEntries, options); err != nil {
|
||||
return output, err
|
||||
|
|
@ -1789,7 +1834,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
|||
njob int
|
||||
index int
|
||||
mode os.FileMode
|
||||
metadata *internal.FileMetadata
|
||||
metadata *fileMetadata
|
||||
|
||||
found bool
|
||||
err error
|
||||
|
|
@ -1961,7 +2006,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
|||
remainingSize := r.Size
|
||||
|
||||
// the file is missing, attempt to find individual chunks.
|
||||
for _, chunk := range r.Chunks {
|
||||
for _, chunk := range r.chunks {
|
||||
compressedSize := int64(chunk.EndOffset - chunk.Offset)
|
||||
size := remainingSize
|
||||
if chunk.ChunkSize > 0 {
|
||||
|
|
@ -2045,7 +2090,7 @@ func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]internal.FileMetadata, int64, error) {
|
||||
func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]fileMetadata, int64, error) {
|
||||
var totalFilesSize int64
|
||||
|
||||
countNextChunks := func(start int) int {
|
||||
|
|
@ -2069,11 +2114,11 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
|
|||
}
|
||||
}
|
||||
|
||||
mergedEntries := make([]internal.FileMetadata, size)
|
||||
mergedEntries := make([]fileMetadata, size)
|
||||
m := 0
|
||||
for i := 0; i < len(entries); i++ {
|
||||
e := entries[i]
|
||||
if mustSkipFile(fileType, e) {
|
||||
e := fileMetadata{FileMetadata: entries[i]}
|
||||
if mustSkipFile(fileType, entries[i]) {
|
||||
continue
|
||||
}
|
||||
|
||||
|
|
@ -2086,12 +2131,12 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
|
|||
if e.Type == TypeReg {
|
||||
nChunks := countNextChunks(i + 1)
|
||||
|
||||
e.Chunks = make([]*internal.FileMetadata, nChunks+1)
|
||||
e.chunks = make([]*internal.FileMetadata, nChunks+1)
|
||||
for j := 0; j <= nChunks; j++ {
|
||||
// we need a copy here, otherwise we override the
|
||||
// .Size later
|
||||
copy := entries[i+j]
|
||||
e.Chunks[j] = ©
|
||||
e.chunks[j] = ©
|
||||
e.EndOffset = entries[i+j].EndOffset
|
||||
}
|
||||
i += nChunks
|
||||
|
|
@ -2110,10 +2155,10 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
|
|||
}
|
||||
|
||||
lastChunkOffset := mergedEntries[i].EndOffset
|
||||
for j := len(mergedEntries[i].Chunks) - 1; j >= 0; j-- {
|
||||
mergedEntries[i].Chunks[j].EndOffset = lastChunkOffset
|
||||
mergedEntries[i].Chunks[j].Size = mergedEntries[i].Chunks[j].EndOffset - mergedEntries[i].Chunks[j].Offset
|
||||
lastChunkOffset = mergedEntries[i].Chunks[j].Offset
|
||||
for j := len(mergedEntries[i].chunks) - 1; j >= 0; j-- {
|
||||
mergedEntries[i].chunks[j].EndOffset = lastChunkOffset
|
||||
mergedEntries[i].chunks[j].Size = mergedEntries[i].chunks[j].EndOffset - mergedEntries[i].chunks[j].Offset
|
||||
lastChunkOffset = mergedEntries[i].chunks[j].Offset
|
||||
}
|
||||
}
|
||||
return mergedEntries, totalFilesSize, nil
|
||||
|
|
|
|||
126
vendor/github.com/containers/storage/pkg/config/config.go
generated
vendored
126
vendor/github.com/containers/storage/pkg/config/config.go
generated
vendored
|
|
@ -5,72 +5,6 @@ import (
|
|||
"os"
|
||||
)
|
||||
|
||||
// ThinpoolOptionsConfig represents the "storage.options.thinpool"
|
||||
// TOML config table.
|
||||
type ThinpoolOptionsConfig struct {
|
||||
// AutoExtendPercent determines the amount by which pool needs to be
|
||||
// grown. This is specified in terms of % of pool size. So a value of
|
||||
// 20 means that when threshold is hit, pool will be grown by 20% of
|
||||
// existing pool size.
|
||||
AutoExtendPercent string `toml:"autoextend_percent,omitempty"`
|
||||
|
||||
// AutoExtendThreshold determines the pool extension threshold in terms
|
||||
// of percentage of pool size. For example, if threshold is 60, that
|
||||
// means when pool is 60% full, threshold has been hit.
|
||||
AutoExtendThreshold string `toml:"autoextend_threshold,omitempty"`
|
||||
|
||||
// BaseSize specifies the size to use when creating the base device,
|
||||
// which limits the size of images and containers.
|
||||
BaseSize string `toml:"basesize,omitempty"`
|
||||
|
||||
// BlockSize specifies a custom blocksize to use for the thin pool.
|
||||
BlockSize string `toml:"blocksize,omitempty"`
|
||||
|
||||
// DirectLvmDevice specifies a custom block storage device to use for
|
||||
// the thin pool.
|
||||
DirectLvmDevice string `toml:"directlvm_device,omitempty"`
|
||||
|
||||
// DirectLvmDeviceForcewipes device even if device already has a
|
||||
// filesystem
|
||||
DirectLvmDeviceForce string `toml:"directlvm_device_force,omitempty"`
|
||||
|
||||
// Fs specifies the filesystem type to use for the base device.
|
||||
Fs string `toml:"fs,omitempty"`
|
||||
|
||||
// log_level sets the log level of devicemapper.
|
||||
LogLevel string `toml:"log_level,omitempty"`
|
||||
|
||||
// MetadataSize specifies the size of the metadata for the thinpool
|
||||
// It will be used with the `pvcreate --metadata` option.
|
||||
MetadataSize string `toml:"metadatasize,omitempty"`
|
||||
|
||||
// MinFreeSpace specifies the min free space percent in a thin pool
|
||||
// require for new device creation to
|
||||
MinFreeSpace string `toml:"min_free_space,omitempty"`
|
||||
|
||||
// MkfsArg specifies extra mkfs arguments to be used when creating the
|
||||
// basedevice.
|
||||
MkfsArg string `toml:"mkfsarg,omitempty"`
|
||||
|
||||
// MountOpt specifies extra mount options used when mounting the thin
|
||||
// devices.
|
||||
MountOpt string `toml:"mountopt,omitempty"`
|
||||
|
||||
// Size
|
||||
Size string `toml:"size,omitempty"`
|
||||
|
||||
// UseDeferredDeletion marks device for deferred deletion
|
||||
UseDeferredDeletion string `toml:"use_deferred_deletion,omitempty"`
|
||||
|
||||
// UseDeferredRemoval marks device for deferred removal
|
||||
UseDeferredRemoval string `toml:"use_deferred_removal,omitempty"`
|
||||
|
||||
// XfsNoSpaceMaxRetriesFreeSpace specifies the maximum number of
|
||||
// retries XFS should attempt to complete IO when ENOSPC (no space)
|
||||
// error is returned by underlying storage device.
|
||||
XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries,omitempty"`
|
||||
}
|
||||
|
||||
type AufsOptionsConfig struct {
|
||||
// MountOpt specifies extra mount options used when mounting
|
||||
MountOpt string `toml:"mountopt,omitempty"`
|
||||
|
|
@ -181,8 +115,8 @@ type OptionsConfig struct {
|
|||
// Btrfs container options to be handed to btrfs drivers
|
||||
Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs,omitempty"`
|
||||
|
||||
// Thinpool container options to be handed to thinpool drivers
|
||||
Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool,omitempty"`
|
||||
// Thinpool container options to be handed to thinpool drivers (NOP)
|
||||
Thinpool struct{} `toml:"thinpool,omitempty"`
|
||||
|
||||
// Overlay container options to be handed to overlay drivers
|
||||
Overlay struct{ OverlayOptionsConfig } `toml:"overlay,omitempty"`
|
||||
|
|
@ -231,62 +165,6 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string {
|
|||
doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size))
|
||||
}
|
||||
|
||||
case "devicemapper":
|
||||
if options.Thinpool.AutoExtendPercent != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.thinp_autoextend_percent=%s", options.Thinpool.AutoExtendPercent))
|
||||
}
|
||||
if options.Thinpool.AutoExtendThreshold != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.thinp_autoextend_threshold=%s", options.Thinpool.AutoExtendThreshold))
|
||||
}
|
||||
if options.Thinpool.BaseSize != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.basesize=%s", options.Thinpool.BaseSize))
|
||||
}
|
||||
if options.Thinpool.BlockSize != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.blocksize=%s", options.Thinpool.BlockSize))
|
||||
}
|
||||
if options.Thinpool.DirectLvmDevice != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.directlvm_device=%s", options.Thinpool.DirectLvmDevice))
|
||||
}
|
||||
if options.Thinpool.DirectLvmDeviceForce != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.directlvm_device_force=%s", options.Thinpool.DirectLvmDeviceForce))
|
||||
}
|
||||
if options.Thinpool.Fs != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.fs=%s", options.Thinpool.Fs))
|
||||
}
|
||||
if options.Thinpool.LogLevel != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.libdm_log_level=%s", options.Thinpool.LogLevel))
|
||||
}
|
||||
if options.Thinpool.MetadataSize != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.metadata_size=%s", options.Thinpool.MetadataSize))
|
||||
}
|
||||
if options.Thinpool.MinFreeSpace != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.min_free_space=%s", options.Thinpool.MinFreeSpace))
|
||||
}
|
||||
if options.Thinpool.MkfsArg != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.mkfsarg=%s", options.Thinpool.MkfsArg))
|
||||
}
|
||||
if options.Thinpool.MountOpt != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Thinpool.MountOpt))
|
||||
} else if options.MountOpt != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt))
|
||||
}
|
||||
|
||||
if options.Thinpool.Size != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Thinpool.Size))
|
||||
} else if options.Size != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size))
|
||||
}
|
||||
|
||||
if options.Thinpool.UseDeferredDeletion != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.use_deferred_deletion=%s", options.Thinpool.UseDeferredDeletion))
|
||||
}
|
||||
if options.Thinpool.UseDeferredRemoval != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.use_deferred_removal=%s", options.Thinpool.UseDeferredRemoval))
|
||||
}
|
||||
if options.Thinpool.XfsNoSpaceMaxRetries != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("dm.xfs_nospace_max_retries=%s", options.Thinpool.XfsNoSpaceMaxRetries))
|
||||
}
|
||||
|
||||
case "overlay", "overlay2":
|
||||
// Specify whether composefs must be used to mount the data layers
|
||||
if options.Overlay.IgnoreChownErrors != "" {
|
||||
|
|
|
|||
813
vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go
generated
vendored
813
vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go
generated
vendored
|
|
@ -1,813 +0,0 @@
|
|||
//go:build linux && cgo
|
||||
// +build linux,cgo
|
||||
|
||||
package devicemapper
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"unsafe"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Same as DM_DEVICE_* enum values from libdevmapper.h
|
||||
// nolint: unused
|
||||
const (
|
||||
deviceCreate TaskType = iota
|
||||
deviceReload
|
||||
deviceRemove
|
||||
deviceRemoveAll
|
||||
deviceSuspend
|
||||
deviceResume
|
||||
deviceInfo
|
||||
deviceDeps
|
||||
deviceRename
|
||||
deviceVersion
|
||||
deviceStatus
|
||||
deviceTable
|
||||
deviceWaitevent
|
||||
deviceList
|
||||
deviceClear
|
||||
deviceMknodes
|
||||
deviceListVersions
|
||||
deviceTargetMsg
|
||||
deviceSetGeometry
|
||||
)
|
||||
|
||||
const (
|
||||
addNodeOnResume AddNodeType = iota
|
||||
addNodeOnCreate
|
||||
)
|
||||
|
||||
// List of errors returned when using devicemapper.
|
||||
var (
|
||||
ErrTaskRun = errors.New("dm_task_run failed")
|
||||
ErrTaskSetName = errors.New("dm_task_set_name failed")
|
||||
ErrTaskSetMessage = errors.New("dm_task_set_message failed")
|
||||
ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed")
|
||||
ErrTaskSetRo = errors.New("dm_task_set_ro failed")
|
||||
ErrTaskAddTarget = errors.New("dm_task_add_target failed")
|
||||
ErrTaskSetSector = errors.New("dm_task_set_sector failed")
|
||||
ErrTaskGetDeps = errors.New("dm_task_get_deps failed")
|
||||
ErrTaskGetInfo = errors.New("dm_task_get_info failed")
|
||||
ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed")
|
||||
ErrTaskDeferredRemove = errors.New("dm_task_deferred_remove failed")
|
||||
ErrTaskSetCookie = errors.New("dm_task_set_cookie failed")
|
||||
ErrNilCookie = errors.New("cookie ptr can't be nil")
|
||||
ErrGetBlockSize = errors.New("Can't get block size")
|
||||
ErrUdevWait = errors.New("wait on udev cookie failed")
|
||||
ErrSetDevDir = errors.New("dm_set_dev_dir failed")
|
||||
ErrGetLibraryVersion = errors.New("dm_get_library_version failed")
|
||||
ErrCreateRemoveTask = errors.New("Can't create task of type deviceRemove")
|
||||
ErrRunRemoveDevice = errors.New("running RemoveDevice failed")
|
||||
ErrInvalidAddNode = errors.New("Invalid AddNode type")
|
||||
ErrBusy = errors.New("Device is Busy")
|
||||
ErrDeviceIDExists = errors.New("Device Id Exists")
|
||||
ErrEnxio = errors.New("No such device or address")
|
||||
)
|
||||
|
||||
var (
|
||||
dmSawBusy bool
|
||||
dmSawExist bool
|
||||
dmSawEnxio bool // No Such Device or Address
|
||||
)
|
||||
|
||||
type (
|
||||
// Task represents a devicemapper task (like lvcreate, etc.) ; a task is needed for each ioctl
|
||||
// command to execute.
|
||||
Task struct {
|
||||
unmanaged *cdmTask
|
||||
}
|
||||
// Deps represents dependents (layer) of a device.
|
||||
Deps struct {
|
||||
Count uint32
|
||||
Filler uint32
|
||||
Device []uint64
|
||||
}
|
||||
// Info represents information about a device.
|
||||
Info struct {
|
||||
Exists int
|
||||
Suspended int
|
||||
LiveTable int
|
||||
InactiveTable int
|
||||
OpenCount int32
|
||||
EventNr uint32
|
||||
Major uint32
|
||||
Minor uint32
|
||||
ReadOnly int
|
||||
TargetCount int32
|
||||
DeferredRemove int
|
||||
}
|
||||
// TaskType represents a type of task
|
||||
TaskType int
|
||||
// AddNodeType represents a type of node to be added
|
||||
AddNodeType int
|
||||
)
|
||||
|
||||
// DeviceIDExists returns whether error conveys the information about device Id already
|
||||
// exist or not. This will be true if device creation or snap creation
|
||||
// operation fails if device or snap device already exists in pool.
|
||||
// Current implementation is little crude as it scans the error string
|
||||
// for exact pattern match. Replacing it with more robust implementation
|
||||
// is desirable.
|
||||
func DeviceIDExists(err error) bool {
|
||||
return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIDExists)
|
||||
}
|
||||
|
||||
func (t *Task) destroy() {
|
||||
if t != nil {
|
||||
DmTaskDestroy(t.unmanaged)
|
||||
runtime.SetFinalizer(t, nil)
|
||||
}
|
||||
}
|
||||
|
||||
// TaskCreateNamed is a convenience function for TaskCreate when a name
|
||||
// will be set on the task as well
|
||||
func TaskCreateNamed(t TaskType, name string) (*Task, error) {
|
||||
task := TaskCreate(t)
|
||||
if task == nil {
|
||||
return nil, fmt.Errorf("devicemapper: Can't create task of type %d", int(t))
|
||||
}
|
||||
if err := task.setName(name); err != nil {
|
||||
return nil, fmt.Errorf("devicemapper: Can't set task name %s", name)
|
||||
}
|
||||
return task, nil
|
||||
}
|
||||
|
||||
// TaskCreate initializes a devicemapper task of tasktype
|
||||
func TaskCreate(tasktype TaskType) *Task {
|
||||
Ctask := DmTaskCreate(int(tasktype))
|
||||
if Ctask == nil {
|
||||
return nil
|
||||
}
|
||||
task := &Task{unmanaged: Ctask}
|
||||
runtime.SetFinalizer(task, (*Task).destroy)
|
||||
return task
|
||||
}
|
||||
|
||||
func (t *Task) run() error {
|
||||
if res := DmTaskRun(t.unmanaged); res != 1 {
|
||||
return ErrTaskRun
|
||||
}
|
||||
runtime.KeepAlive(t)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Task) setName(name string) error {
|
||||
if res := DmTaskSetName(t.unmanaged, name); res != 1 {
|
||||
return ErrTaskSetName
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Task) setMessage(message string) error {
|
||||
if res := DmTaskSetMessage(t.unmanaged, message); res != 1 {
|
||||
return ErrTaskSetMessage
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Task) setSector(sector uint64) error {
|
||||
if res := DmTaskSetSector(t.unmanaged, sector); res != 1 {
|
||||
return ErrTaskSetSector
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Task) setCookie(cookie *uint, flags uint16) error {
|
||||
if cookie == nil {
|
||||
return ErrNilCookie
|
||||
}
|
||||
if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 {
|
||||
return ErrTaskSetCookie
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// setAddNode controls when libdm creates the /dev node (on resume or on create).
func (t *Task) setAddNode(addNode AddNodeType) error {
	if addNode != addNodeOnResume && addNode != addNodeOnCreate {
		return ErrInvalidAddNode
	}
	if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 {
		return ErrTaskSetAddNode
	}
	return nil
}
|
||||
|
||||
// addTarget appends a table entry (start/size in 512-byte sectors, target
// type such as "thin" or "thin-pool", and its parameter string).
func (t *Task) addTarget(start, size uint64, ttype, params string) error {
	if res := DmTaskAddTarget(t.unmanaged, start, size,
		ttype, params); res != 1 {
		return ErrTaskAddTarget
	}
	return nil
}
|
||||
|
||||
// getDeps returns the device dependencies recorded on a completed deps task.
func (t *Task) getDeps() (*Deps, error) { //nolint:unused
	var deps *Deps
	if deps = DmTaskGetDeps(t.unmanaged); deps == nil {
		return nil, ErrTaskGetDeps
	}
	return deps, nil
}
|
||||
|
||||
// getInfo copies the dm_info of a completed task into a Go Info struct.
func (t *Task) getInfo() (*Info, error) {
	info := &Info{}
	if res := DmTaskGetInfo(t.unmanaged, info); res != 1 {
		return nil, ErrTaskGetInfo
	}
	return info, nil
}
|
||||
|
||||
// getInfoWithDeferred is like getInfo but also populates the deferred-remove
// fields (requires libdm deferred-remove support).
func (t *Task) getInfoWithDeferred() (*Info, error) {
	info := &Info{}
	if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 {
		return nil, ErrTaskGetInfo
	}
	return info, nil
}
|
||||
|
||||
// getDriverVersion returns the kernel driver version reported by a completed
// deviceVersion task; an empty string from libdm is treated as failure.
func (t *Task) getDriverVersion() (string, error) {
	res := DmTaskGetDriverVersion(t.unmanaged)
	if res == "" {
		return "", ErrTaskGetDriverVersion
	}
	return res, nil
}
|
||||
|
||||
// getNextTarget iterates over the targets of a completed status/table task.
// Pass nil to start; the returned pointer is the iteration handle for the
// next call. start/length are in 512-byte sectors.
func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64,
	length uint64, targetType string, params string,
) {
	return DmGetNextTarget(t.unmanaged, next, &start, &length,
		&targetType, &params),
		start, length, targetType, params
}
|
||||
|
||||
// UdevWait waits for any processes that are waiting for udev to complete the specified cookie.
// It also releases the System V semaphore backing the cookie; every
// successful setCookie must be paired with a UdevWait.
func UdevWait(cookie *uint) error {
	if res := DmUdevWait(*cookie); res != 1 {
		logrus.Debugf("devicemapper: Failed to wait on udev cookie %d, %d", *cookie, res)
		return ErrUdevWait
	}
	return nil
}
|
||||
|
||||
// SetDevDir sets the dev folder for the device mapper library (usually /dev).
func SetDevDir(dir string) error {
	if res := DmSetDevDir(dir); res != 1 {
		logrus.Debug("devicemapper: Error dm_set_dev_dir")
		return ErrSetDevDir
	}
	return nil
}
|
||||
|
||||
// GetLibraryVersion returns the device mapper library version.
func GetLibraryVersion() (string, error) {
	var version string
	if res := DmGetLibraryVersion(&version); res != 1 {
		return "", ErrGetLibraryVersion
	}
	return version, nil
}
|
||||
|
||||
// UdevSyncSupported returns whether device-mapper is able to sync with udev
//
// This is essential otherwise race conditions can arise where both udev and
// device-mapper attempt to create and destroy devices.
func UdevSyncSupported() bool {
	return DmUdevGetSyncSupport() != 0
}
|
||||
|
||||
// UdevSetSyncSupport allows setting whether the udev sync should be enabled.
|
||||
// The return bool indicates the state of whether the sync is enabled.
|
||||
func UdevSetSyncSupport(enable bool) bool {
|
||||
if enable {
|
||||
DmUdevSetSyncSupport(1)
|
||||
} else {
|
||||
DmUdevSetSyncSupport(0)
|
||||
}
|
||||
|
||||
return UdevSyncSupported()
|
||||
}
|
||||
|
||||
// CookieSupported returns whether the version of device-mapper supports the
// use of cookie's in the tasks.
// This is largely a lower level call that other functions use.
func CookieSupported() bool {
	return DmCookieSupported() != 0
}
|
||||
|
||||
// RemoveDevice is a useful helper for cleaning up a device.
// It maps libdm's 0/1 result onto ErrBusy/ErrEnxio by inspecting the
// dmSawBusy/dmSawEnxio flags set by the log callback.
func RemoveDevice(name string) error {
	task, err := TaskCreateNamed(deviceRemove, name)
	if task == nil {
		return err
	}

	cookie := new(uint)
	if err := task.setCookie(cookie, 0); err != nil {
		return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
	}
	// Always release the udev semaphore, even on error paths.
	defer UdevWait(cookie)

	dmSawBusy = false // reset before the task is run
	dmSawEnxio = false
	if err = task.run(); err != nil {
		if dmSawBusy {
			return ErrBusy
		}
		if dmSawEnxio {
			return ErrEnxio
		}
		return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err)
	}

	return nil
}
|
||||
|
||||
// RemoveDeviceDeferred is a useful helper for cleaning up a device, but deferred.
// The kernel removes the device once its last opener closes it.
func RemoveDeviceDeferred(name string) error {
	logrus.Debugf("devicemapper: RemoveDeviceDeferred START(%s)", name)
	defer logrus.Debugf("devicemapper: RemoveDeviceDeferred END(%s)", name)
	task, err := TaskCreateNamed(deviceRemove, name)
	if task == nil {
		return err
	}

	// libdm convention: 1 means success (the local name shadows the outer err).
	if err := DmTaskDeferredRemove(task.unmanaged); err != 1 {
		return ErrTaskDeferredRemove
	}

	// set a task cookie and disable library fallback, or else libdevmapper will
	// disable udev dm rules and delete the symlink under /dev/mapper by itself,
	// even if the removal is deferred by the kernel.
	cookie := new(uint)
	flags := uint16(DmUdevDisableLibraryFallback)
	if err := task.setCookie(cookie, flags); err != nil {
		return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
	}

	// libdevmapper and udev relies on System V semaphore for synchronization,
	// semaphores created in `task.setCookie` will be cleaned up in `UdevWait`.
	// So these two function call must come in pairs, otherwise semaphores will
	// be leaked, and the limit of number of semaphores defined in `/proc/sys/kernel/sem`
	// will be reached, which will eventually make all following calls to 'task.SetCookie'
	// fail.
	// this call will not wait for the deferred removal's final executing, since no
	// udev event will be generated, and the semaphore's value will not be incremented
	// by udev, what UdevWait is just cleaning up the semaphore.
	defer UdevWait(cookie)

	dmSawEnxio = false
	if err = task.run(); err != nil {
		if dmSawEnxio {
			return ErrEnxio
		}
		return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err)
	}

	return nil
}
|
||||
|
||||
// CancelDeferredRemove cancels a deferred remove for a device.
// Implemented as a thin-pool target message ("@cancel_deferred_remove").
func CancelDeferredRemove(deviceName string) error {
	task, err := TaskCreateNamed(deviceTargetMsg, deviceName)
	if task == nil {
		return err
	}

	if err := task.setSector(0); err != nil {
		return fmt.Errorf("devicemapper: Can't set sector %s", err)
	}

	if err := task.setMessage("@cancel_deferred_remove"); err != nil {
		return fmt.Errorf("devicemapper: Can't set message %s", err)
	}

	// Reset the log-callback flags before running so we can classify failure.
	dmSawBusy = false
	dmSawEnxio = false
	if err := task.run(); err != nil {
		// A device might be being deleted already
		if dmSawBusy {
			return ErrBusy
		} else if dmSawEnxio {
			return ErrEnxio
		}
		return fmt.Errorf("devicemapper: Error running CancelDeferredRemove %s", err)

	}
	return nil
}
|
||||
|
||||
// GetBlockDeviceSize returns the size of a block device identified by the specified file.
// Size is in bytes, obtained via the BLKGETSIZE64 ioctl.
func GetBlockDeviceSize(file *os.File) (uint64, error) {
	size, err := ioctlBlkGetSize64(file.Fd())
	if err != nil {
		logrus.Errorf("devicemapper: Error getblockdevicesize: %s", err)
		return 0, ErrGetBlockSize
	}
	return uint64(size), nil
}
|
||||
|
||||
// BlockDeviceDiscard runs discard for the given path.
// This is used as a workaround for the kernel not discarding block so
// on the thin pool when we remove a thinp device, so we do it
// manually
func BlockDeviceDiscard(path string) error {
	file, err := os.OpenFile(path, os.O_RDWR, 0)
	if err != nil {
		return err
	}
	defer file.Close()

	size, err := GetBlockDeviceSize(file)
	if err != nil {
		return err
	}

	// Discard the whole device: offset 0 through its full size.
	if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil {
		return err
	}

	// Without this sometimes the remove of the device that happens after
	// discard fails with EBUSY.
	unix.Sync()

	return nil
}
|
||||
|
||||
// CreatePool is the programmatic example of "dmsetup create".
// It creates a device with the specified poolName, data and metadata file and block size.
func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
	task, err := TaskCreateNamed(deviceCreate, poolName)
	if task == nil {
		return err
	}

	size, err := GetBlockDeviceSize(dataFile)
	if err != nil {
		return fmt.Errorf("devicemapper: Can't get data size %s", err)
	}

	// thin-pool table params: <metadata dev> <data dev> <block size>
	// <low water mark (32768)> <#features (1)> skip_block_zeroing
	params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
	// size is bytes; dm tables are addressed in 512-byte sectors.
	if err := task.addTarget(0, size/512, "thin-pool", params); err != nil {
		return fmt.Errorf("devicemapper: Can't add target %s", err)
	}

	cookie := new(uint)
	flags := uint16(DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag)
	if err := task.setCookie(cookie, flags); err != nil {
		return fmt.Errorf("devicemapper: Can't set cookie %s", err)
	}
	defer UdevWait(cookie)

	if err := task.run(); err != nil {
		return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err)
	}

	return nil
}
|
||||
|
||||
// ReloadPool is the programmatic example of "dmsetup reload".
// It reloads the table with the specified poolName, data and metadata file and block size.
func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
	task, err := TaskCreateNamed(deviceReload, poolName)
	if task == nil {
		return err
	}

	size, err := GetBlockDeviceSize(dataFile)
	if err != nil {
		return fmt.Errorf("devicemapper: Can't get data size %s", err)
	}

	// Same thin-pool parameter string as CreatePool; size is converted
	// from bytes to 512-byte sectors.
	params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
	if err := task.addTarget(0, size/512, "thin-pool", params); err != nil {
		return fmt.Errorf("devicemapper: Can't add target %s", err)
	}

	if err := task.run(); err != nil {
		return fmt.Errorf("devicemapper: Error running ReloadPool %s", err)
	}

	return nil
}
|
||||
|
||||
// GetDeps is the programmatic example of "dmsetup deps".
// It outputs a list of devices referenced by the live table for the specified device.
func GetDeps(name string) (*Deps, error) {
	task, err := TaskCreateNamed(deviceDeps, name)
	if task == nil {
		return nil, err
	}
	if err := task.run(); err != nil {
		return nil, err
	}
	return task.getDeps()
}
|
||||
|
||||
// GetInfo is the programmatic example of "dmsetup info".
// It outputs some brief information about the device.
func GetInfo(name string) (*Info, error) {
	task, err := TaskCreateNamed(deviceInfo, name)
	if task == nil {
		return nil, err
	}
	if err := task.run(); err != nil {
		return nil, err
	}
	return task.getInfo()
}
|
||||
|
||||
// GetInfoWithDeferred is the programmatic example of "dmsetup info", but deferred.
// It outputs some brief information about the device.
func GetInfoWithDeferred(name string) (*Info, error) {
	task, err := TaskCreateNamed(deviceInfo, name)
	if task == nil {
		return nil, err
	}
	if err := task.run(); err != nil {
		return nil, err
	}
	return task.getInfoWithDeferred()
}
|
||||
|
||||
// GetDriverVersion is the programmatic example of "dmsetup version".
// It outputs version information of the driver.
func GetDriverVersion() (string, error) {
	task := TaskCreate(deviceVersion)
	if task == nil {
		return "", fmt.Errorf("devicemapper: Can't create deviceVersion task")
	}
	if err := task.run(); err != nil {
		return "", err
	}
	return task.getDriverVersion()
}
|
||||
|
||||
// GetStatus is the programmatic example of "dmsetup status".
// It outputs status information for the specified device name.
// Returns (start, length, targetType, params) of the device's first target.
func GetStatus(name string) (uint64, uint64, string, string, error) {
	task, err := TaskCreateNamed(deviceStatus, name)
	if task == nil {
		logrus.Debugf("devicemapper: GetStatus() Error TaskCreateNamed: %s", err)
		return 0, 0, "", "", err
	}
	if err := task.run(); err != nil {
		logrus.Debugf("devicemapper: GetStatus() Error Run: %s", err)
		return 0, 0, "", "", err
	}

	devinfo, err := task.getInfo()
	if err != nil {
		logrus.Debugf("devicemapper: GetStatus() Error GetInfo: %s", err)
		return 0, 0, "", "", err
	}
	if devinfo.Exists == 0 {
		logrus.Debugf("devicemapper: GetStatus() Non existing device %s", name)
		return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name)
	}

	// Only the first target is reported (nil starts the iteration).
	_, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil))
	return start, length, targetType, params, nil
}
|
||||
|
||||
// GetTable is the programmatic example for "dmsetup table".
// It outputs the current table for the specified device name.
// Returns (start, length, targetType, params) of the device's first target.
func GetTable(name string) (uint64, uint64, string, string, error) {
	task, err := TaskCreateNamed(deviceTable, name)
	if task == nil {
		logrus.Debugf("devicemapper: GetTable() Error TaskCreateNamed: %s", err)
		return 0, 0, "", "", err
	}
	if err := task.run(); err != nil {
		logrus.Debugf("devicemapper: GetTable() Error Run: %s", err)
		return 0, 0, "", "", err
	}

	devinfo, err := task.getInfo()
	if err != nil {
		logrus.Debugf("devicemapper: GetTable() Error GetInfo: %s", err)
		return 0, 0, "", "", err
	}
	if devinfo.Exists == 0 {
		logrus.Debugf("devicemapper: GetTable() Non existing device %s", name)
		return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name)
	}

	// Only the first target is reported (nil starts the iteration).
	_, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil))
	return start, length, targetType, params, nil
}
|
||||
|
||||
// SetTransactionID sets a transaction id for the specified device name.
// The pool rejects the message if its current transaction id is not oldID,
// giving compare-and-swap semantics over pool metadata.
func SetTransactionID(poolName string, oldID uint64, newID uint64) error {
	task, err := TaskCreateNamed(deviceTargetMsg, poolName)
	if task == nil {
		return err
	}

	if err := task.setSector(0); err != nil {
		return fmt.Errorf("devicemapper: Can't set sector %s", err)
	}

	if err := task.setMessage(fmt.Sprintf("set_transaction_id %d %d", oldID, newID)); err != nil {
		return fmt.Errorf("devicemapper: Can't set message %s", err)
	}

	if err := task.run(); err != nil {
		return fmt.Errorf("devicemapper: Error running SetTransactionID %s", err)
	}
	return nil
}
|
||||
|
||||
// SuspendDevice is the programmatic example of "dmsetup suspend".
// It suspends the specified device.
func SuspendDevice(name string) error {
	task, err := TaskCreateNamed(deviceSuspend, name)
	if task == nil {
		return err
	}
	if err := task.run(); err != nil {
		return fmt.Errorf("devicemapper: Error running deviceSuspend %s", err)
	}
	return nil
}
|
||||
|
||||
// ResumeDevice is the programmatic example of "dmsetup resume".
// It un-suspends the specified device.
func ResumeDevice(name string) error {
	task, err := TaskCreateNamed(deviceResume, name)
	if task == nil {
		return err
	}

	// Resume generates a udev event; synchronize via a cookie.
	cookie := new(uint)
	if err := task.setCookie(cookie, 0); err != nil {
		return fmt.Errorf("devicemapper: Can't set cookie %s", err)
	}
	defer UdevWait(cookie)

	if err := task.run(); err != nil {
		return fmt.Errorf("devicemapper: Error running deviceResume %s", err)
	}

	return nil
}
|
||||
|
||||
// CreateDevice creates a device with the specified poolName with the specified device id.
// Sends the "create_thin" target message to the pool.
func CreateDevice(poolName string, deviceID int) error {
	logrus.Debugf("devicemapper: CreateDevice(poolName=%v, deviceID=%v)", poolName, deviceID)
	task, err := TaskCreateNamed(deviceTargetMsg, poolName)
	if task == nil {
		return err
	}

	if err := task.setSector(0); err != nil {
		return fmt.Errorf("devicemapper: Can't set sector %s", err)
	}

	if err := task.setMessage(fmt.Sprintf("create_thin %d", deviceID)); err != nil {
		return fmt.Errorf("devicemapper: Can't set message %s", err)
	}

	dmSawExist = false // reset before the task is run
	if err := task.run(); err != nil {
		// Caller wants to know about ErrDeviceIDExists so that it can try with a different device id.
		if dmSawExist {
			return ErrDeviceIDExists
		}

		return fmt.Errorf("devicemapper: Error running CreateDevice %s", err)

	}
	return nil
}
|
||||
|
||||
// DeleteDevice deletes a device with the specified poolName with the specified device id.
// Sends the "delete" target message to the pool; ErrBusy is surfaced when
// libdm logged a busy condition.
func DeleteDevice(poolName string, deviceID int) error {
	task, err := TaskCreateNamed(deviceTargetMsg, poolName)
	if task == nil {
		return err
	}

	if err := task.setSector(0); err != nil {
		return fmt.Errorf("devicemapper: Can't set sector %s", err)
	}

	if err := task.setMessage(fmt.Sprintf("delete %d", deviceID)); err != nil {
		return fmt.Errorf("devicemapper: Can't set message %s", err)
	}

	dmSawBusy = false
	if err := task.run(); err != nil {
		if dmSawBusy {
			return ErrBusy
		}
		return fmt.Errorf("devicemapper: Error running DeleteDevice %s", err)
	}
	return nil
}
|
||||
|
||||
// ActivateDevice activates the device identified by the specified
// poolName, name and deviceID with the specified size.
func ActivateDevice(poolName string, name string, deviceID int, size uint64) error {
	return activateDevice(poolName, name, deviceID, size, "")
}
|
||||
|
||||
// ActivateDeviceWithExternal activates the device identified by the specified
// poolName, name and deviceID with the specified size.
// external names an external origin device for the thin target.
func ActivateDeviceWithExternal(poolName string, name string, deviceID int, size uint64, external string) error {
	return activateDevice(poolName, name, deviceID, size, external)
}
|
||||
|
||||
// activateDevice creates a dm device named name backed by a "thin" target in
// poolName for deviceID, sized in bytes (converted to 512-byte sectors).
// A non-empty external is appended as the thin target's external origin.
func activateDevice(poolName string, name string, deviceID int, size uint64, external string) error {
	task, err := TaskCreateNamed(deviceCreate, name)
	if task == nil {
		return err
	}

	var params string
	if len(external) > 0 {
		params = fmt.Sprintf("%s %d %s", poolName, deviceID, external)
	} else {
		params = fmt.Sprintf("%s %d", poolName, deviceID)
	}
	if err := task.addTarget(0, size/512, "thin", params); err != nil {
		return fmt.Errorf("devicemapper: Can't add target %s", err)
	}
	if err := task.setAddNode(addNodeOnCreate); err != nil {
		return fmt.Errorf("devicemapper: Can't add node %s", err)
	}

	cookie := new(uint)
	if err := task.setCookie(cookie, 0); err != nil {
		return fmt.Errorf("devicemapper: Can't set cookie %s", err)
	}

	defer UdevWait(cookie)

	if err := task.run(); err != nil {
		return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err)
	}

	return nil
}
|
||||
|
||||
// CreateSnapDeviceRaw creates a snapshot device. Caller needs to suspend and resume the origin device if it is active.
// Sends the "create_snap" target message to the pool.
func CreateSnapDeviceRaw(poolName string, deviceID int, baseDeviceID int) error {
	task, err := TaskCreateNamed(deviceTargetMsg, poolName)
	if task == nil {
		return err
	}

	if err := task.setSector(0); err != nil {
		return fmt.Errorf("devicemapper: Can't set sector %s", err)
	}

	if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil {
		return fmt.Errorf("devicemapper: Can't set message %s", err)
	}

	dmSawExist = false // reset before the task is run
	if err := task.run(); err != nil {
		// Caller wants to know about ErrDeviceIDExists so that it can try with a different device id.
		if dmSawExist {
			return ErrDeviceIDExists
		}
		return fmt.Errorf("devicemapper: Error running deviceCreate (CreateSnapDeviceRaw) %s", err)
	}

	return nil
}
|
||||
|
||||
// CreateSnapDevice creates a snapshot based on the device identified by the baseName and baseDeviceId,
// suspending the base device around the snapshot creation when it is active,
// and always resuming it afterwards (even when snapshotting fails).
func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error {
	// GetInfo error is deliberately ignored: a missing device simply means
	// no suspend/resume is needed.
	devinfo, _ := GetInfo(baseName)
	doSuspend := devinfo != nil && devinfo.Exists != 0

	if doSuspend {
		if err := SuspendDevice(baseName); err != nil {
			return err
		}
	}

	if err := CreateSnapDeviceRaw(poolName, deviceID, baseDeviceID); err != nil {
		if doSuspend {
			if err2 := ResumeDevice(baseName); err2 != nil {
				return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: %w", err, err2)
			}
		}
		return err
	}

	if doSuspend {
		if err := ResumeDevice(baseName); err != nil {
			return err
		}
	}

	return nil
}
|
||||
123
vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go
generated
vendored
123
vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go
generated
vendored
|
|
@ -1,123 +0,0 @@
|
|||
//go:build linux && cgo
|
||||
// +build linux,cgo
|
||||
|
||||
package devicemapper
|
||||
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// DevmapperLogger defines methods required to register as a callback for
// logging events received from devicemapper. Note that devicemapper will send
// *all* logs regardless to callbacks (including debug logs) so it's
// recommended to not spam the console with the outputs.
type DevmapperLogger interface {
	// DMLog is the logging callback containing all of the information from
	// devicemapper. The interface is identical to the C libdm counterpart.
	DMLog(level int, file string, line int, dmError int, message string)
}
|
||||
|
||||
// dmLogger is the current logger in use that is being forwarded our messages.
var dmLogger DevmapperLogger

// LogInit changes the logging callback called after processing libdm logs for
// error message information. The default logger simply forwards all logs to
// logrus. Calling LogInit(nil) disables the calling of callbacks.
func LogInit(logger DevmapperLogger) {
	dmLogger = logger
}
|
||||
|
||||
// Due to the way cgo works this has to be in a separate file, as devmapper.go has
// definitions in the cgo block, which is incompatible with using "//export"

// StorageDevmapperLogCallback exports the devmapper log callback for cgo. Note that
// because we are using callbacks, this function will be called for *every* log
// in libdm (even debug ones because there's no way of setting the verbosity
// level for an external logging callback).
//
//export StorageDevmapperLogCallback
func StorageDevmapperLogCallback(level C.int, file *C.char, line, dmErrnoOrClass C.int, message *C.char) {
	msg := C.GoString(message)

	// Track what errno libdm saw, because the library only gives us 0 or 1.
	// These substring matches set the package-level flags that RemoveDevice,
	// CreateDevice, etc. inspect after a failed task.run().
	if level < LogLevelDebug {
		if strings.Contains(msg, "busy") {
			dmSawBusy = true
		}

		if strings.Contains(msg, "File exists") {
			dmSawExist = true
		}

		if strings.Contains(msg, "No such device or address") {
			dmSawEnxio = true
		}
	}

	if dmLogger != nil {
		dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg)
	}
}
|
||||
|
||||
// DefaultLogger is the default logger used by pkg/devicemapper. It forwards
// all logs that are of higher or equal priority to the given level to the
// corresponding logrus level.
type DefaultLogger struct {
	// Level corresponds to the highest libdm level that will be forwarded to
	// logrus. In order to change this, register a new DefaultLogger.
	Level int
}
|
||||
|
||||
// DMLog is the logging callback containing all of the information from
// devicemapper. The interface is identical to the C libdm counterpart.
func (l DefaultLogger) DMLog(level int, file string, line, dmError int, message string) {
	if level <= l.Level {
		// Forward the log to the correct logrus level, if allowed by dmLogLevel.
		logMsg := fmt.Sprintf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
		switch level {
		case LogLevelFatal, LogLevelErr:
			logrus.Error(logMsg)
		case LogLevelWarn:
			logrus.Warn(logMsg)
		case LogLevelNotice, LogLevelInfo:
			logrus.Info(logMsg)
		case LogLevelDebug:
			logrus.Debug(logMsg)
		default:
			// Don't drop any "unknown" levels.
			logrus.Info(logMsg)
		}
	}
}
|
||||
|
||||
// registerLogCallback registers our own logging callback function for libdm
// (which is StorageDevmapperLogCallback).
//
// Because libdm only gives us {0,1} error codes we need to parse the logs
// produced by libdm (to set dmSawBusy and so on). Note that by registering a
// callback using StorageDevmapperLogCallback, libdm will no longer output logs to
// stderr so we have to log everything ourselves. None of this handling is
// optional because we depend on log callbacks to parse the logs, and if we
// don't forward the log information we'll be in a lot of trouble when
// debugging things.
func registerLogCallback() {
	LogWithErrnoInit()
}
|
||||
|
||||
// init installs the default logger and the libdm log callback at load time.
func init() {
	// Use the default logger by default. We only allow LogLevelFatal by
	// default, because internally we mask a lot of libdm errors by retrying
	// and similar tricks. Also, libdm is very chatty and we don't want to
	// worry users for no reason.
	dmLogger = DefaultLogger{
		Level: LogLevelFatal,
	}

	// Register as early as possible so we don't miss anything.
	registerLogCallback()
}
|
||||
252
vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go
generated
vendored
252
vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go
generated
vendored
|
|
@ -1,252 +0,0 @@
|
|||
//go:build linux && cgo
|
||||
// +build linux,cgo
|
||||
|
||||
package devicemapper
|
||||
|
||||
/*
|
||||
#define _GNU_SOURCE
|
||||
#include <libdevmapper.h>
|
||||
#include <linux/fs.h> // FIXME: present only for BLKGETSIZE64, maybe we can remove it?
|
||||
|
||||
// FIXME: Can't we find a way to do the logging in pure Go?
|
||||
extern void StorageDevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str);
|
||||
|
||||
static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...)
|
||||
{
|
||||
char *buffer = NULL;
|
||||
va_list ap;
|
||||
int ret;
|
||||
|
||||
va_start(ap, f);
|
||||
ret = vasprintf(&buffer, f, ap);
|
||||
va_end(ap);
|
||||
if (ret < 0) {
|
||||
// memory allocation failed -- should never happen?
|
||||
return;
|
||||
}
|
||||
|
||||
StorageDevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer);
|
||||
free(buffer);
|
||||
}
|
||||
|
||||
static void log_with_errno_init()
|
||||
{
|
||||
dm_log_with_errno_init(log_cb);
|
||||
}
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type (
	// cdmTask is the Go alias for libdm's opaque struct dm_task handle.
	cdmTask C.struct_dm_task
)
|
||||
|
||||
// IOCTL consts
const (
	BlkGetSize64 = C.BLKGETSIZE64 // query block device size in bytes
	BlkDiscard   = C.BLKDISCARD   // discard a byte range on a block device
)
|
||||
|
||||
// Devicemapper cookie flags.
const (
	DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG
	DmUdevDisableDiskRulesFlag      = C.DM_UDEV_DISABLE_DISK_RULES_FLAG
	DmUdevDisableOtherRulesFlag     = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG
	DmUdevDisableLibraryFallback    = C.DM_UDEV_DISABLE_LIBRARY_FALLBACK
)
|
||||
|
||||
// DeviceMapper mapped functions.
// Exposed as package-level variables (rather than direct calls) so tests and
// callers can swap implementations.
var (
	DmGetLibraryVersion       = dmGetLibraryVersionFct
	DmGetNextTarget           = dmGetNextTargetFct
	DmSetDevDir               = dmSetDevDirFct
	DmTaskAddTarget           = dmTaskAddTargetFct
	DmTaskCreate              = dmTaskCreateFct
	DmTaskDestroy             = dmTaskDestroyFct
	DmTaskGetDeps             = dmTaskGetDepsFct
	DmTaskGetInfo             = dmTaskGetInfoFct
	DmTaskGetDriverVersion    = dmTaskGetDriverVersionFct
	DmTaskRun                 = dmTaskRunFct
	DmTaskSetAddNode          = dmTaskSetAddNodeFct
	DmTaskSetCookie           = dmTaskSetCookieFct
	DmTaskSetMessage          = dmTaskSetMessageFct
	DmTaskSetName             = dmTaskSetNameFct
	DmTaskSetRo               = dmTaskSetRoFct
	DmTaskSetSector           = dmTaskSetSectorFct
	DmUdevWait                = dmUdevWaitFct
	DmUdevSetSyncSupport      = dmUdevSetSyncSupportFct
	DmUdevGetSyncSupport      = dmUdevGetSyncSupportFct
	DmCookieSupported         = dmCookieSupportedFct
	LogWithErrnoInit          = logWithErrnoInitFct
	DmTaskDeferredRemove      = dmTaskDeferredRemoveFct
	DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct
)
|
||||
|
||||
// free releases a C string allocated with C.CString.
func free(p *C.char) {
	C.free(unsafe.Pointer(p))
}
|
||||
|
||||
// dmTaskDestroyFct frees the underlying libdm task.
func dmTaskDestroyFct(task *cdmTask) {
	C.dm_task_destroy((*C.struct_dm_task)(task))
}
|
||||
|
||||
// dmTaskCreateFct allocates a libdm task; returns nil on allocation failure.
func dmTaskCreateFct(taskType int) *cdmTask {
	return (*cdmTask)(C.dm_task_create(C.int(taskType)))
}
|
||||
|
||||
// dmTaskRunFct executes the task; libdm returns 1 on success.
// The errno from the cgo call is deliberately discarded — failure details
// are recovered via the registered log callback instead.
func dmTaskRunFct(task *cdmTask) int {
	ret, _ := C.dm_task_run((*C.struct_dm_task)(task))
	return int(ret)
}
|
||||
|
||||
// dmTaskSetNameFct sets the device name on the task (copies name into C memory).
func dmTaskSetNameFct(task *cdmTask, name string) int {
	Cname := C.CString(name)
	defer free(Cname)

	return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname))
}
|
||||
|
||||
// dmTaskSetMessageFct sets the target message on the task (copies into C memory).
func dmTaskSetMessageFct(task *cdmTask, message string) int {
	Cmessage := C.CString(message)
	defer free(Cmessage)

	return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage))
}
|
||||
|
||||
// dmTaskSetSectorFct sets the sector a target message applies to.
func dmTaskSetSectorFct(task *cdmTask, sector uint64) int {
	return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector)))
}
|
||||
|
||||
// dmTaskSetCookieFct registers a udev cookie; libdm may update the cookie
// value, so it is copied back to the caller's *uint on return.
func dmTaskSetCookieFct(task *cdmTask, cookie *uint, flags uint16) int {
	cCookie := C.uint32_t(*cookie)
	defer func() {
		*cookie = uint(cCookie)
	}()
	return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags)))
}
|
||||
|
||||
func dmTaskSetAddNodeFct(task *cdmTask, addNode AddNodeType) int {
|
||||
return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode)))
|
||||
}
|
||||
|
||||
func dmTaskSetRoFct(task *cdmTask) int {
|
||||
return int(C.dm_task_set_ro((*C.struct_dm_task)(task)))
|
||||
}
|
||||
|
||||
func dmTaskAddTargetFct(task *cdmTask,
|
||||
start, size uint64, ttype, params string,
|
||||
) int {
|
||||
Cttype := C.CString(ttype)
|
||||
defer free(Cttype)
|
||||
|
||||
Cparams := C.CString(params)
|
||||
defer free(Cparams)
|
||||
|
||||
return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams))
|
||||
}
|
||||
|
||||
func dmTaskGetDepsFct(task *cdmTask) *Deps {
|
||||
Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task))
|
||||
if Cdeps == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// golang issue: https://github.com/golang/go/issues/11925
|
||||
var devices []C.uint64_t
|
||||
devicesHdr := (*reflect.SliceHeader)(unsafe.Pointer(&devices))
|
||||
devicesHdr.Data = uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps)))
|
||||
devicesHdr.Len = int(Cdeps.count)
|
||||
devicesHdr.Cap = int(Cdeps.count)
|
||||
|
||||
deps := &Deps{
|
||||
Count: uint32(Cdeps.count),
|
||||
Filler: uint32(Cdeps.filler),
|
||||
}
|
||||
for _, device := range devices {
|
||||
deps.Device = append(deps.Device, uint64(device))
|
||||
}
|
||||
return deps
|
||||
}
|
||||
|
||||
func dmTaskGetInfoFct(task *cdmTask, info *Info) int {
|
||||
Cinfo := C.struct_dm_info{}
|
||||
defer func() {
|
||||
info.Exists = int(Cinfo.exists)
|
||||
info.Suspended = int(Cinfo.suspended)
|
||||
info.LiveTable = int(Cinfo.live_table)
|
||||
info.InactiveTable = int(Cinfo.inactive_table)
|
||||
info.OpenCount = int32(Cinfo.open_count)
|
||||
info.EventNr = uint32(Cinfo.event_nr)
|
||||
info.Major = uint32(Cinfo.major)
|
||||
info.Minor = uint32(Cinfo.minor)
|
||||
info.ReadOnly = int(Cinfo.read_only)
|
||||
info.TargetCount = int32(Cinfo.target_count)
|
||||
}()
|
||||
return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo))
|
||||
}
|
||||
|
||||
func dmTaskGetDriverVersionFct(task *cdmTask) string {
|
||||
buffer := C.malloc(128)
|
||||
defer C.free(buffer)
|
||||
res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128)
|
||||
if res == 0 {
|
||||
return ""
|
||||
}
|
||||
return C.GoString((*C.char)(buffer))
|
||||
}
|
||||
|
||||
func dmGetNextTargetFct(task *cdmTask, next unsafe.Pointer, start, length *uint64, target, params *string) unsafe.Pointer {
|
||||
var (
|
||||
Cstart, Clength C.uint64_t
|
||||
CtargetType, Cparams *C.char
|
||||
)
|
||||
defer func() {
|
||||
*start = uint64(Cstart)
|
||||
*length = uint64(Clength)
|
||||
*target = C.GoString(CtargetType)
|
||||
*params = C.GoString(Cparams)
|
||||
}()
|
||||
|
||||
nextp := C.dm_get_next_target((*C.struct_dm_task)(task), next, &Cstart, &Clength, &CtargetType, &Cparams)
|
||||
return nextp
|
||||
}
|
||||
|
||||
func dmUdevSetSyncSupportFct(syncWithUdev int) {
|
||||
(C.dm_udev_set_sync_support(C.int(syncWithUdev)))
|
||||
}
|
||||
|
||||
func dmUdevGetSyncSupportFct() int {
|
||||
return int(C.dm_udev_get_sync_support())
|
||||
}
|
||||
|
||||
func dmUdevWaitFct(cookie uint) int {
|
||||
return int(C.dm_udev_wait(C.uint32_t(cookie)))
|
||||
}
|
||||
|
||||
func dmCookieSupportedFct() int {
|
||||
return int(C.dm_cookie_supported())
|
||||
}
|
||||
|
||||
func logWithErrnoInitFct() {
|
||||
C.log_with_errno_init()
|
||||
}
|
||||
|
||||
func dmSetDevDirFct(dir string) int {
|
||||
Cdir := C.CString(dir)
|
||||
defer free(Cdir)
|
||||
|
||||
return int(C.dm_set_dev_dir(Cdir))
|
||||
}
|
||||
|
||||
func dmGetLibraryVersionFct(version *string) int {
|
||||
buffer := C.CString(string(make([]byte, 128)))
|
||||
defer free(buffer)
|
||||
defer func() {
|
||||
*version = C.GoString(buffer)
|
||||
}()
|
||||
return int(C.dm_get_library_version(buffer, 128))
|
||||
}
|
||||
|
|
@ -1,32 +0,0 @@
|
|||
//go:build linux && cgo && !libdm_no_deferred_remove
|
||||
// +build linux,cgo,!libdm_no_deferred_remove
|
||||
|
||||
package devicemapper
|
||||
|
||||
// #include <libdevmapper.h>
|
||||
import "C"
|
||||
|
||||
// LibraryDeferredRemovalSupport tells if the feature is enabled in the build
|
||||
const LibraryDeferredRemovalSupport = true
|
||||
|
||||
func dmTaskDeferredRemoveFct(task *cdmTask) int {
|
||||
return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task)))
|
||||
}
|
||||
|
||||
func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int {
|
||||
Cinfo := C.struct_dm_info{}
|
||||
defer func() {
|
||||
info.Exists = int(Cinfo.exists)
|
||||
info.Suspended = int(Cinfo.suspended)
|
||||
info.LiveTable = int(Cinfo.live_table)
|
||||
info.InactiveTable = int(Cinfo.inactive_table)
|
||||
info.OpenCount = int32(Cinfo.open_count)
|
||||
info.EventNr = uint32(Cinfo.event_nr)
|
||||
info.Major = uint32(Cinfo.major)
|
||||
info.Minor = uint32(Cinfo.minor)
|
||||
info.ReadOnly = int(Cinfo.read_only)
|
||||
info.TargetCount = int32(Cinfo.target_count)
|
||||
info.DeferredRemove = int(Cinfo.deferred_remove)
|
||||
}()
|
||||
return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo))
|
||||
}
|
||||
7
vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go
generated
vendored
7
vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go
generated
vendored
|
|
@ -1,7 +0,0 @@
|
|||
//go:build linux && cgo && !static_build
|
||||
// +build linux,cgo,!static_build
|
||||
|
||||
package devicemapper
|
||||
|
||||
// #cgo pkg-config: devmapper
|
||||
import "C"
|
||||
|
|
@ -1,16 +0,0 @@
|
|||
//go:build linux && cgo && libdm_no_deferred_remove
|
||||
// +build linux,cgo,libdm_no_deferred_remove
|
||||
|
||||
package devicemapper
|
||||
|
||||
// LibraryDeferredRemovalSupport tells if the feature is enabled in the build
|
||||
const LibraryDeferredRemovalSupport = false
|
||||
|
||||
func dmTaskDeferredRemoveFct(task *cdmTask) int {
|
||||
// Error. Nobody should be calling it.
|
||||
return -1
|
||||
}
|
||||
|
||||
func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int {
|
||||
return -1
|
||||
}
|
||||
7
vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go
generated
vendored
7
vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go
generated
vendored
|
|
@ -1,7 +0,0 @@
|
|||
//go:build linux && cgo && static_build
|
||||
// +build linux,cgo,static_build
|
||||
|
||||
package devicemapper
|
||||
|
||||
// #cgo pkg-config: --static devmapper
|
||||
import "C"
|
||||
29
vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go
generated
vendored
29
vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go
generated
vendored
|
|
@ -1,29 +0,0 @@
|
|||
//go:build linux && cgo
|
||||
// +build linux,cgo
|
||||
|
||||
package devicemapper
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func ioctlBlkGetSize64(fd uintptr) (int64, error) {
|
||||
var size int64
|
||||
if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 {
|
||||
return 0, err
|
||||
}
|
||||
return size, nil
|
||||
}
|
||||
|
||||
func ioctlBlkDiscard(fd uintptr, offset, length uint64) error {
|
||||
var r [2]uint64
|
||||
r[0] = offset
|
||||
r[1] = length
|
||||
|
||||
if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
11
vendor/github.com/containers/storage/pkg/devicemapper/log.go
generated
vendored
11
vendor/github.com/containers/storage/pkg/devicemapper/log.go
generated
vendored
|
|
@ -1,11 +0,0 @@
|
|||
package devicemapper
|
||||
|
||||
// definitions from lvm2 lib/log/log.h
|
||||
const (
|
||||
LogLevelFatal = 2 + iota // _LOG_FATAL
|
||||
LogLevelErr // _LOG_ERR
|
||||
LogLevelWarn // _LOG_WARN
|
||||
LogLevelNotice // _LOG_NOTICE
|
||||
LogLevelInfo // _LOG_INFO
|
||||
LogLevelDebug // _LOG_DEBUG
|
||||
)
|
||||
21
vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go
generated
vendored
21
vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go
generated
vendored
|
|
@ -1,21 +0,0 @@
|
|||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package dmesg
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Dmesg returns last messages from the kernel log, up to size bytes
|
||||
func Dmesg(size int) []byte {
|
||||
t := uintptr(3) // SYSLOG_ACTION_READ_ALL
|
||||
b := make([]byte, size)
|
||||
amt, _, err := unix.Syscall(unix.SYS_SYSLOG, t, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)))
|
||||
if err != 0 {
|
||||
return []byte{}
|
||||
}
|
||||
return b[:amt]
|
||||
}
|
||||
34
vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go
generated
vendored
Normal file
34
vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go
generated
vendored
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package fileutils
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Exists checks whether a file or directory exists at the given path.
|
||||
// If the path is a symlink, the symlink is followed.
|
||||
func Exists(path string) error {
|
||||
// It uses unix.Faccessat which is a faster operation compared to os.Stat for
|
||||
// simply checking the existence of a file.
|
||||
err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, 0)
|
||||
if err != nil {
|
||||
return &os.PathError{Op: "faccessat", Path: path, Err: err}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Lexists checks whether a file or directory exists at the given path.
|
||||
// If the path is a symlink, the symlink itself is checked.
|
||||
func Lexists(path string) error {
|
||||
// It uses unix.Faccessat which is a faster operation compared to os.Stat for
|
||||
// simply checking the existence of a file.
|
||||
err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW)
|
||||
if err != nil {
|
||||
return &os.PathError{Op: "faccessat", Path: path, Err: err}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
18
vendor/github.com/containers/storage/pkg/fileutils/exists_windows.go
generated
vendored
Normal file
18
vendor/github.com/containers/storage/pkg/fileutils/exists_windows.go
generated
vendored
Normal file
|
|
@ -0,0 +1,18 @@
|
|||
package fileutils
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// Exists checks whether a file or directory exists at the given path.
|
||||
func Exists(path string) error {
|
||||
_, err := os.Stat(path)
|
||||
return err
|
||||
}
|
||||
|
||||
// Lexists checks whether a file or directory exists at the given path, without
|
||||
// resolving symlinks
|
||||
func Lexists(path string) error {
|
||||
_, err := os.Lstat(path)
|
||||
return err
|
||||
}
|
||||
4
vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
generated
vendored
4
vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
generated
vendored
|
|
@ -344,7 +344,7 @@ func ReadSymlinkedPath(path string) (realPath string, err error) {
|
|||
if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
|
||||
return "", fmt.Errorf("failed to canonicalise path for %q: %w", path, err)
|
||||
}
|
||||
if _, err := os.Stat(realPath); err != nil {
|
||||
if err := Exists(realPath); err != nil {
|
||||
return "", fmt.Errorf("failed to stat target %q of %q: %w", realPath, path, err)
|
||||
}
|
||||
return realPath, nil
|
||||
|
|
@ -352,7 +352,7 @@ func ReadSymlinkedPath(path string) (realPath string, err error) {
|
|||
|
||||
// CreateIfNotExists creates a file or a directory only if it does not already exist.
|
||||
func CreateIfNotExists(path string, isDir bool) error {
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
if err := Exists(path); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
if isDir {
|
||||
return os.MkdirAll(path, 0o755)
|
||||
|
|
|
|||
2
vendor/github.com/containers/storage/pkg/idtools/idtools.go
generated
vendored
2
vendor/github.com/containers/storage/pkg/idtools/idtools.go
generated
vendored
|
|
@ -228,7 +228,7 @@ func getOverflowUID() int {
|
|||
return overflowUID
|
||||
}
|
||||
|
||||
// getOverflowUID returns the GID mapped to the overflow user
|
||||
// getOverflowGID returns the GID mapped to the overflow user
|
||||
func getOverflowGID() int {
|
||||
overflowGIDOnce.Do(func() {
|
||||
// 65534 is the value on older kernels where /proc/sys/kernel/overflowgid is not present
|
||||
|
|
|
|||
3
vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
generated
vendored
3
vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
generated
vendored
|
|
@ -13,6 +13,7 @@ import (
|
|||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/containers/storage/pkg/fileutils"
|
||||
"github.com/containers/storage/pkg/system"
|
||||
"github.com/moby/sys/user"
|
||||
)
|
||||
|
|
@ -55,7 +56,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown
|
|||
if dirPath == "/" {
|
||||
break
|
||||
}
|
||||
if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
|
||||
if err := fileutils.Exists(dirPath); err != nil && os.IsNotExist(err) {
|
||||
paths = append(paths, dirPath)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
56
vendor/github.com/containers/storage/pkg/lockfile/lockfile.go
generated
vendored
56
vendor/github.com/containers/storage/pkg/lockfile/lockfile.go
generated
vendored
|
|
@ -133,11 +133,25 @@ func (l *LockFile) Lock() {
|
|||
}
|
||||
}
|
||||
|
||||
// LockRead locks the lockfile as a reader.
|
||||
// RLock locks the lockfile as a reader.
|
||||
func (l *LockFile) RLock() {
|
||||
l.lock(readLock)
|
||||
}
|
||||
|
||||
// TryLock attempts to lock the lockfile as a writer. Panic if the lock is a read-only one.
|
||||
func (l *LockFile) TryLock() error {
|
||||
if l.ro {
|
||||
panic("can't take write lock on read-only lock file")
|
||||
} else {
|
||||
return l.tryLock(writeLock)
|
||||
}
|
||||
}
|
||||
|
||||
// TryRLock attempts to lock the lockfile as a reader.
|
||||
func (l *LockFile) TryRLock() error {
|
||||
return l.tryLock(readLock)
|
||||
}
|
||||
|
||||
// Unlock unlocks the lockfile.
|
||||
func (l *LockFile) Unlock() {
|
||||
l.stateMutex.Lock()
|
||||
|
|
@ -401,9 +415,47 @@ func (l *LockFile) lock(lType lockType) {
|
|||
// Optimization: only use the (expensive) syscall when
|
||||
// the counter is 0. In this case, we're either the first
|
||||
// reader lock or a writer lock.
|
||||
lockHandle(l.fd, lType)
|
||||
lockHandle(l.fd, lType, false)
|
||||
}
|
||||
l.lockType = lType
|
||||
l.locked = true
|
||||
l.counter++
|
||||
}
|
||||
|
||||
// lock locks the lockfile via syscall based on the specified type and
|
||||
// command.
|
||||
func (l *LockFile) tryLock(lType lockType) error {
|
||||
var success bool
|
||||
if lType == readLock {
|
||||
success = l.rwMutex.TryRLock()
|
||||
} else {
|
||||
success = l.rwMutex.TryLock()
|
||||
}
|
||||
if !success {
|
||||
return fmt.Errorf("resource temporarily unavailable")
|
||||
}
|
||||
l.stateMutex.Lock()
|
||||
defer l.stateMutex.Unlock()
|
||||
if l.counter == 0 {
|
||||
// If we're the first reference on the lock, we need to open the file again.
|
||||
fd, err := openLock(l.file, l.ro)
|
||||
if err != nil {
|
||||
l.rwMutex.Unlock()
|
||||
return err
|
||||
}
|
||||
l.fd = fd
|
||||
|
||||
// Optimization: only use the (expensive) syscall when
|
||||
// the counter is 0. In this case, we're either the first
|
||||
// reader lock or a writer lock.
|
||||
if err = lockHandle(l.fd, lType, true); err != nil {
|
||||
closeHandle(fd)
|
||||
l.rwMutex.Unlock()
|
||||
return err
|
||||
}
|
||||
}
|
||||
l.lockType = lType
|
||||
l.locked = true
|
||||
l.counter++
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
16
vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
generated
vendored
16
vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
generated
vendored
|
|
@ -74,7 +74,7 @@ func openHandle(path string, mode int) (fileHandle, error) {
|
|||
return fileHandle(fd), err
|
||||
}
|
||||
|
||||
func lockHandle(fd fileHandle, lType lockType) {
|
||||
func lockHandle(fd fileHandle, lType lockType, nonblocking bool) error {
|
||||
fType := unix.F_RDLCK
|
||||
if lType != readLock {
|
||||
fType = unix.F_WRLCK
|
||||
|
|
@ -85,7 +85,15 @@ func lockHandle(fd fileHandle, lType lockType) {
|
|||
Start: 0,
|
||||
Len: 0,
|
||||
}
|
||||
for unix.FcntlFlock(uintptr(fd), unix.F_SETLKW, &lk) != nil {
|
||||
cmd := unix.F_SETLKW
|
||||
if nonblocking {
|
||||
cmd = unix.F_SETLK
|
||||
}
|
||||
for {
|
||||
err := unix.FcntlFlock(uintptr(fd), cmd, &lk)
|
||||
if err == nil || nonblocking {
|
||||
return err
|
||||
}
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
|
@ -93,3 +101,7 @@ func lockHandle(fd fileHandle, lType lockType) {
|
|||
func unlockAndCloseHandle(fd fileHandle) {
|
||||
unix.Close(int(fd))
|
||||
}
|
||||
|
||||
func closeHandle(fd fileHandle) {
|
||||
unix.Close(int(fd))
|
||||
}
|
||||
|
|
|
|||
13
vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go
generated
vendored
13
vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go
generated
vendored
|
|
@ -81,19 +81,30 @@ func openHandle(path string, mode int) (fileHandle, error) {
|
|||
return fileHandle(fd), err
|
||||
}
|
||||
|
||||
func lockHandle(fd fileHandle, lType lockType) {
|
||||
func lockHandle(fd fileHandle, lType lockType, nonblocking bool) error {
|
||||
flags := 0
|
||||
if lType != readLock {
|
||||
flags = windows.LOCKFILE_EXCLUSIVE_LOCK
|
||||
}
|
||||
if nonblocking {
|
||||
flags |= windows.LOCKFILE_FAIL_IMMEDIATELY
|
||||
}
|
||||
ol := new(windows.Overlapped)
|
||||
if err := windows.LockFileEx(windows.Handle(fd), uint32(flags), reserved, allBytes, allBytes, ol); err != nil {
|
||||
if nonblocking {
|
||||
return err
|
||||
}
|
||||
panic(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func unlockAndCloseHandle(fd fileHandle) {
|
||||
ol := new(windows.Overlapped)
|
||||
windows.UnlockFileEx(windows.Handle(fd), reserved, allBytes, allBytes, ol)
|
||||
closeHandle(fd)
|
||||
}
|
||||
|
||||
func closeHandle(fd fileHandle) {
|
||||
windows.Close(windows.Handle(fd))
|
||||
}
|
||||
|
|
|
|||
75
vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go
generated
vendored
75
vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go
generated
vendored
|
|
@ -1,75 +0,0 @@
|
|||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
// Package kernel provides helper function to get, parse and compare kernel
|
||||
// versions for different platforms.
|
||||
package kernel
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// VersionInfo holds information about the kernel.
|
||||
type VersionInfo struct {
|
||||
Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4)
|
||||
Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1)
|
||||
Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2)
|
||||
Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic)
|
||||
}
|
||||
|
||||
func (k *VersionInfo) String() string {
|
||||
return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor)
|
||||
}
|
||||
|
||||
// CompareKernelVersion compares two kernel.VersionInfo structs.
|
||||
// Returns -1 if a < b, 0 if a == b, 1 it a > b
|
||||
func CompareKernelVersion(a, b VersionInfo) int {
|
||||
if a.Kernel < b.Kernel {
|
||||
return -1
|
||||
} else if a.Kernel > b.Kernel {
|
||||
return 1
|
||||
}
|
||||
|
||||
if a.Major < b.Major {
|
||||
return -1
|
||||
} else if a.Major > b.Major {
|
||||
return 1
|
||||
}
|
||||
|
||||
if a.Minor < b.Minor {
|
||||
return -1
|
||||
} else if a.Minor > b.Minor {
|
||||
return 1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
// ParseRelease parses a string and creates a VersionInfo based on it.
|
||||
func ParseRelease(release string) (*VersionInfo, error) {
|
||||
var (
|
||||
kernel, major, minor, parsed int
|
||||
flavor, partial string
|
||||
)
|
||||
|
||||
// Ignore error from Sscanf to allow an empty flavor. Instead, just
|
||||
// make sure we got all the version numbers.
|
||||
parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial)
|
||||
if parsed < 2 {
|
||||
return nil, errors.New("Can't parse kernel version " + release)
|
||||
}
|
||||
|
||||
// sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64
|
||||
parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor)
|
||||
if parsed < 1 {
|
||||
flavor = partial
|
||||
}
|
||||
|
||||
return &VersionInfo{
|
||||
Kernel: kernel,
|
||||
Major: major,
|
||||
Minor: minor,
|
||||
Flavor: flavor,
|
||||
}, nil
|
||||
}
|
||||
57
vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go
generated
vendored
57
vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go
generated
vendored
|
|
@ -1,57 +0,0 @@
|
|||
//go:build darwin
|
||||
// +build darwin
|
||||
|
||||
// Package kernel provides helper function to get, parse and compare kernel
|
||||
// versions for different platforms.
|
||||
package kernel
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/mattn/go-shellwords"
|
||||
)
|
||||
|
||||
// GetKernelVersion gets the current kernel version.
|
||||
func GetKernelVersion() (*VersionInfo, error) {
|
||||
release, err := getRelease()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ParseRelease(release)
|
||||
}
|
||||
|
||||
// getRelease uses `system_profiler SPSoftwareDataType` to get OSX kernel version
|
||||
func getRelease() (string, error) {
|
||||
cmd := exec.Command("system_profiler", "SPSoftwareDataType")
|
||||
osName, err := cmd.Output()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var release string
|
||||
data := strings.Split(string(osName), "\n")
|
||||
for _, line := range data {
|
||||
if strings.Contains(line, "Kernel Version") {
|
||||
// It has the format like ' Kernel Version: Darwin 14.5.0'
|
||||
content := strings.SplitN(line, ":", 2)
|
||||
if len(content) != 2 {
|
||||
return "", fmt.Errorf("kernel version is invalid")
|
||||
}
|
||||
|
||||
prettyNames, err := shellwords.Parse(content[1])
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("kernel version is invalid: %w", err)
|
||||
}
|
||||
|
||||
if len(prettyNames) != 2 {
|
||||
return "", fmt.Errorf("kernel version needs to be 'Darwin x.x.x' ")
|
||||
}
|
||||
release = prettyNames[1]
|
||||
}
|
||||
}
|
||||
|
||||
return release, nil
|
||||
}
|
||||
46
vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
generated
vendored
46
vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
generated
vendored
|
|
@ -1,46 +0,0 @@
|
|||
//go:build linux || freebsd || solaris || openbsd
|
||||
// +build linux freebsd solaris openbsd
|
||||
|
||||
// Package kernel provides helper function to get, parse and compare kernel
|
||||
// versions for different platforms.
|
||||
package kernel
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// GetKernelVersion gets the current kernel version.
|
||||
func GetKernelVersion() (*VersionInfo, error) {
|
||||
uts, err := uname()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
release := make([]byte, len(uts.Release))
|
||||
|
||||
i := 0
|
||||
for _, c := range uts.Release {
|
||||
release[i] = byte(c)
|
||||
i++
|
||||
}
|
||||
|
||||
// Remove the \x00 from the release for Atoi to parse correctly
|
||||
release = release[:bytes.IndexByte(release, 0)]
|
||||
|
||||
return ParseRelease(string(release))
|
||||
}
|
||||
|
||||
// CheckKernelVersion checks if current kernel is newer than (or equal to)
|
||||
// the given version.
|
||||
func CheckKernelVersion(k, major, minor int) bool {
|
||||
if v, err := GetKernelVersion(); err != nil {
|
||||
logrus.Warnf("Error getting kernel version: %s", err)
|
||||
} else {
|
||||
if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
70
vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go
generated
vendored
70
vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go
generated
vendored
|
|
@ -1,70 +0,0 @@
|
|||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package kernel
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// VersionInfo holds information about the kernel.
|
||||
type VersionInfo struct {
|
||||
kvi string // Version of the kernel (e.g. 6.1.7601.17592 -> 6)
|
||||
major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 1)
|
||||
minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601)
|
||||
build int // Build number of the kernel version (e.g. 6.1.7601.17592 -> 17592)
|
||||
}
|
||||
|
||||
func (k *VersionInfo) String() string {
|
||||
return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi)
|
||||
}
|
||||
|
||||
// GetKernelVersion gets the current kernel version.
|
||||
func GetKernelVersion() (*VersionInfo, error) {
|
||||
var (
|
||||
h windows.Handle
|
||||
dwVersion uint32
|
||||
err error
|
||||
)
|
||||
|
||||
KVI := &VersionInfo{"Unknown", 0, 0, 0}
|
||||
|
||||
if err = windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE,
|
||||
windows.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`),
|
||||
0,
|
||||
windows.KEY_READ,
|
||||
&h); err != nil {
|
||||
return KVI, err
|
||||
}
|
||||
defer windows.RegCloseKey(h)
|
||||
|
||||
var buf [1 << 10]uint16
|
||||
var typ uint32
|
||||
n := uint32(len(buf) * 2) // api expects array of bytes, not uint16
|
||||
|
||||
if err = windows.RegQueryValueEx(h,
|
||||
windows.StringToUTF16Ptr("BuildLabEx"),
|
||||
nil,
|
||||
&typ,
|
||||
(*byte)(unsafe.Pointer(&buf[0])),
|
||||
&n); err != nil {
|
||||
return KVI, err
|
||||
}
|
||||
|
||||
KVI.kvi = windows.UTF16ToString(buf[:])
|
||||
|
||||
// Important - docker.exe MUST be manifested for this API to return
|
||||
// the correct information.
|
||||
if dwVersion, err = windows.GetVersion(); err != nil {
|
||||
return KVI, err
|
||||
}
|
||||
|
||||
KVI.major = int(dwVersion & 0xFF)
|
||||
KVI.minor = int((dwVersion & 0xFF00) >> 8)
|
||||
KVI.build = int((dwVersion & 0xFFFF0000) >> 16)
|
||||
|
||||
return KVI, nil
|
||||
}
|
||||
17
vendor/github.com/containers/storage/pkg/parsers/kernel/uname_freebsd.go
generated
vendored
17
vendor/github.com/containers/storage/pkg/parsers/kernel/uname_freebsd.go
generated
vendored
|
|
@ -1,17 +0,0 @@
|
|||
package kernel
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
// Utsname represents the system name structure.
|
||||
// It is passthrough for unix.Utsname in order to make it portable with
|
||||
// other platforms where it is not available.
|
||||
type Utsname unix.Utsname
|
||||
|
||||
func uname() (*unix.Utsname, error) {
|
||||
uts := &unix.Utsname{}
|
||||
|
||||
if err := unix.Uname(uts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return uts, nil
|
||||
}
|
||||
17
vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go
generated
vendored
17
vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go
generated
vendored
|
|
@ -1,17 +0,0 @@
|
|||
package kernel
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
// Utsname represents the system name structure.
|
||||
// It is passthrough for unix.Utsname in order to make it portable with
|
||||
// other platforms where it is not available.
|
||||
type Utsname unix.Utsname
|
||||
|
||||
func uname() (*unix.Utsname, error) {
|
||||
uts := &unix.Utsname{}
|
||||
|
||||
if err := unix.Uname(uts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return uts, nil
|
||||
}
|
||||
14
vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go
generated
vendored
14
vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go
generated
vendored
|
|
@ -1,14 +0,0 @@
|
|||
package kernel
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func uname() (*unix.Utsname, error) {
|
||||
uts := &unix.Utsname{}
|
||||
|
||||
if err := unix.Uname(uts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return uts, nil
|
||||
}
|
||||
14
vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go
generated
vendored
14
vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go
generated
vendored
|
|
@ -1,14 +0,0 @@
|
|||
//go:build openbsd
|
||||
// +build openbsd
|
||||
|
||||
package kernel
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// A stub called by kernel_unix.go .
|
||||
func uname() (*Utsname, error) {
|
||||
return nil, fmt.Errorf("Kernel version detection is not available on %s", runtime.GOOS)
|
||||
}
|
||||
11
vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported_type.go
generated
vendored
11
vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported_type.go
generated
vendored
|
|
@ -1,11 +0,0 @@
|
|||
//go:build !linux && !solaris && !freebsd
|
||||
// +build !linux,!solaris,!freebsd
|
||||
|
||||
package kernel
|
||||
|
||||
// Utsname represents the system name structure.
|
||||
// It is defined here to make it portable as it is available on linux but not
|
||||
// on windows.
|
||||
type Utsname struct {
|
||||
Release [65]byte
|
||||
}
|
||||
1
vendor/github.com/containers/storage/pkg/unshare/unshare.c
generated
vendored
1
vendor/github.com/containers/storage/pkg/unshare/unshare.c
generated
vendored
|
|
@ -15,6 +15,7 @@
|
|||
#include <termios.h>
|
||||
#include <errno.h>
|
||||
#include <unistd.h>
|
||||
#include <libgen.h>
|
||||
#include <sys/vfs.h>
|
||||
#include <sys/mount.h>
|
||||
#include <linux/limits.h>
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue