go.mod: update osbuild/images to v0.156.0
tag v0.155.0
Tagger: imagebuilder-bot <imagebuilder-bots+imagebuilder-bot@redhat.com>

Changes with 0.155.0
----------------
  * Fedora 43: add shadow-utils when LockRoot is enabled, update cloud-init service name (osbuild/images#1618)
    * Author: Achilleas Koutsou, Reviewers: Gianluca Zuccarelli, Michael Vogt
  * Update osbuild dependency commit ID to latest (osbuild/images#1609)
    * Author: SchutzBot, Reviewers: Achilleas Koutsou, Simon de Vlieger, Tomáš Hozza
  * Update snapshots to 20250626 (osbuild/images#1623)
    * Author: SchutzBot, Reviewers: Achilleas Koutsou, Simon de Vlieger
  * distro/rhel9: xz compress azure-cvm image type [HMS-8587] (osbuild/images#1620)
    * Author: Achilleas Koutsou, Reviewers: Simon de Vlieger, Tomáš Hozza
  * distro/rhel: introduce new image type: Azure SAP Apps [HMS-8738] (osbuild/images#1612)
    * Author: Achilleas Koutsou, Reviewers: Simon de Vlieger, Tomáš Hozza
  * distro/rhel: move ansible-core to sap_extras_pkgset (osbuild/images#1624)
    * Author: Achilleas Koutsou, Reviewers: Brian C. Lane, Tomáš Hozza
  * github/create-tag: allow passing the version when run manually (osbuild/images#1621)
    * Author: Achilleas Koutsou, Reviewers: Lukáš Zapletal, Tomáš Hozza
  * rhel9: move image-config into pure YAML (HMS-8593) (osbuild/images#1616)
    * Author: Michael Vogt, Reviewers: Achilleas Koutsou, Simon de Vlieger
  * test: split manifest checksums into separate files (osbuild/images#1625)
    * Author: Achilleas Koutsou, Reviewers: Simon de Vlieger, Tomáš Hozza

— Somewhere on the Internet, 2025-06-30

---

tag v0.156.0
Tagger: imagebuilder-bot <imagebuilder-bots+imagebuilder-bot@redhat.com>

Changes with 0.156.0
----------------
  * Many: delete repositories for EOL distributions (HMS-7044) (osbuild/images#1607)
    * Author: Tomáš Hozza, Reviewers: Michael Vogt, Simon de Vlieger
  * RHSM/facts: add 'image-builder CLI' API type (osbuild/images#1640)
    * Author: Tomáš Hozza, Reviewers: Brian C. Lane, Simon de Vlieger
  * Update dependencies 2025-06-29 (osbuild/images#1628)
    * Author: SchutzBot, Reviewers: Simon de Vlieger, Tomáš Hozza
  * Update osbuild dependency commit ID to latest (osbuild/images#1627)
    * Author: SchutzBot, Reviewers: Simon de Vlieger, Tomáš Hozza
  * [RFC] image: drop `InstallWeakDeps` from image.DiskImage (osbuild/images#1642)
    * Author: Michael Vogt, Reviewers: Brian C. Lane, Simon de Vlieger, Tomáš Hozza
  * build(deps): bump the go-deps group across 1 directory with 3 updates (osbuild/images#1632)
    * Author: dependabot[bot], Reviewers: SchutzBot, Tomáš Hozza
  * distro/rhel10: xz compress azure-cvm image type (osbuild/images#1638)
    * Author: Achilleas Koutsou, Reviewers: Brian C. Lane, Simon de Vlieger
  * distro: cleanup/refactor distro/{defs,generic} (HMS-8744) (osbuild/images#1570)
    * Author: Michael Vogt, Reviewers: Simon de Vlieger, Tomáš Hozza
  * distro: remove some hardcoded values from generic/images.go (osbuild/images#1636)
    * Author: Michael Vogt, Reviewers: Simon de Vlieger, Tomáš Hozza
  * distro: small tweaks for the YAML based imagetypes (osbuild/images#1622)
    * Author: Michael Vogt, Reviewers: Brian C. Lane, Simon de Vlieger
  * fedora/wsl: packages and locale (osbuild/images#1635)
    * Author: Simon de Vlieger, Reviewers: Michael Vogt, Tomáš Hozza
  * image/many: make compression more generic (osbuild/images#1634)
    * Author: Simon de Vlieger, Reviewers: Brian C. Lane, Michael Vogt
  * manifest: handle content template name with spaces (osbuild/images#1641)
    * Author: Bryttanie, Reviewers: Brian C. Lane, Michael Vogt, Tomáš Hozza
  * many: implement gzip (osbuild/images#1633)
    * Author: Simon de Vlieger, Reviewers: Michael Vogt, Tomáš Hozza
  * rhel/azure: set GRUB_TERMINAL based on architecture [RHEL-91383] (osbuild/images#1626)
    * Author: Achilleas Koutsou, Reviewers: Simon de Vlieger, Tomáš Hozza

— Somewhere on the Internet, 2025-07-07

---
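The 0.156.0 notes above include "image/many: make compression more generic" and "many: implement gzip". As a rough illustration of what a gzip path for image artifacts involves, here is a minimal standard-library sketch; the compressTo helper and the file names are assumptions for illustration, not the actual osbuild/images API:

package main

import (
	"compress/gzip"
	"io"
	"log"
	"os"
)

// compressTo wraps dst in a gzip writer and streams src through it.
// The shape of this helper is illustrative only.
func compressTo(dst io.Writer, src io.Reader) error {
	zw := gzip.NewWriter(dst)
	if _, err := io.Copy(zw, src); err != nil {
		zw.Close()
		return err
	}
	return zw.Close() // Close flushes the gzip trailer
}

func main() {
	in, err := os.Open("disk.raw") // hypothetical input image
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	out, err := os.Create("disk.raw.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	if err := compressTo(out, in); err != nil {
		log.Fatal(err)
	}
}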
This commit is contained in:
parent 60c5f10af8
commit 3fd7092db5

1486 changed files with 124742 additions and 82516 deletions
vendor/github.com/containers/storage/pkg/archive/archive.go (generated, vendored; 121 lines changed)

@@ -16,6 +16,7 @@ import (
 	"strings"
 	"sync"
 	"syscall"
+	"time"
 
 	"github.com/containers/storage/pkg/fileutils"
 	"github.com/containers/storage/pkg/idtools"
@@ -52,7 +53,7 @@ type (
 		// This is additional data to be used by the converter. It will
 		// not survive a round trip through JSON, so it's primarily
 		// intended for generating archives (i.e., converting writes).
-		WhiteoutData interface{}
+		WhiteoutData any
 		// When unpacking, specifies whether overwriting a directory with a
 		// non-directory is allowed and vice versa.
 		NoOverwriteDirNonDir bool
@@ -67,6 +68,8 @@ type (
 		CopyPass bool
 		// ForceMask, if set, indicates the permission mask used for created files.
 		ForceMask *os.FileMode
+		// Timestamp, if set, will be set in each header as create/mod/access time
+		Timestamp *time.Time
 	}
 )
 
@@ -78,10 +81,9 @@ const (
 	windows = "windows"
 	darwin  = "darwin"
 	freebsd = "freebsd"
 	linux   = "linux"
 )
 
-var xattrsToIgnore = map[string]interface{}{
+var xattrsToIgnore = map[string]any{
 	"security.selinux": true,
 }
@@ -179,6 +181,7 @@ func DecompressStream(archive io.Reader) (_ io.ReadCloser, Err error) {
 
 	defer func() {
 		if Err != nil {
+			// In the normal case, the buffer is embedded in the ReadCloser return.
 			p.Put(buf)
 		}
 	}()
@@ -375,7 +378,7 @@ type nosysFileInfo struct {
 	os.FileInfo
 }
 
-func (fi nosysFileInfo) Sys() interface{} {
+func (fi nosysFileInfo) Sys() any {
 	// A Sys value of type *tar.Header is safe as it is system-independent.
 	// The tar.FileInfoHeader function copies the fields into the returned
 	// header without performing any OS lookups.
@@ -475,7 +478,7 @@ type TarWhiteoutConverter interface {
 	ConvertReadWithHandler(*tar.Header, string, TarWhiteoutHandler) (bool, error)
 }
 
-type tarAppender struct {
+type tarWriter struct {
 	TarWriter *tar.Writer
 	Buffer    *bufio.Writer
 
@@ -494,15 +497,19 @@ type tarAppender struct {
 	// from the traditional behavior/format to get features like subsecond
 	// precision in timestamps.
 	CopyPass bool
+
+	// Timestamp, if set, will be set in each header as create/mod/access time
+	Timestamp *time.Time
 }
 
-func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender {
-	return &tarAppender{
+func newTarWriter(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair, timestamp *time.Time) *tarWriter {
+	return &tarWriter{
 		SeenFiles:  make(map[uint64]string),
 		TarWriter:  tar.NewWriter(writer),
 		Buffer:     pools.BufioWriter32KPool.Get(nil),
 		IDMappings: idMapping,
 		ChownOpts:  chownOpts,
+		Timestamp:  timestamp,
 	}
 }
@@ -521,8 +528,8 @@ func canonicalTarName(name string, isDir bool) (string, error) {
 	return name, nil
 }
 
-// addTarFile adds to the tar archive a file from `path` as `name`
-func (ta *tarAppender) addTarFile(path, name string) error {
+// addFile adds a file from `path` as `name` to the tar archive.
+func (ta *tarWriter) addFile(path, name string) error {
 	fi, err := os.Lstat(path)
 	if err != nil {
 		return err
@@ -600,6 +607,13 @@ func (ta *tarAppender) addTarFile(path, name string) error {
 		hdr.Gname = ""
 	}
 
+	// if override timestamp set, replace all times with this
+	if ta.Timestamp != nil {
+		hdr.ModTime = *ta.Timestamp
+		hdr.AccessTime = *ta.Timestamp
+		hdr.ChangeTime = *ta.Timestamp
+	}
+
 	maybeTruncateHeaderModTime(hdr)
 
 	if ta.WhiteoutConverter != nil {
@@ -650,7 +664,7 @@ func (ta *tarAppender) addTarFile(path, name string) error {
 	return nil
 }
 
-func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool, forceMask *os.FileMode, buffer []byte) error {
+func extractTarFileEntry(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool, forceMask *os.FileMode, buffer []byte) error {
 	// hdr.Mode is in linux format, which we can use for sycalls,
 	// but for os.Foo() calls we need the mode converted to os.FileMode,
 	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
@@ -673,7 +687,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
 	case tar.TypeDir:
 		// Create directory unless it exists as a directory already.
 		// In that case we just want to merge the two
-		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
+		if fi, err := os.Lstat(path); err != nil || !fi.IsDir() {
 			if err := os.Mkdir(path, mask); err != nil {
 				return err
 			}
@@ -691,7 +705,9 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
 			file.Close()
 			return err
 		}
-		file.Close()
+		if err := file.Close(); err != nil {
+			return err
+		}
 
 	case tar.TypeBlock, tar.TypeChar:
 		if inUserns { // cannot create devices in a userns
@@ -845,41 +861,39 @@ func Tar(path string, compression Compression) (io.ReadCloser, error) {
 // TarWithOptions creates an archive from the directory at `path`, only including files whose relative
 // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
 func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
-	// Fix the source path to work with long path names. This is a no-op
-	// on platforms other than Windows.
-	srcPath = fixVolumePathPrefix(srcPath)
+	tarWithOptionsTo := func(dest io.WriteCloser, srcPath string, options *TarOptions) (result error) {
+		// Fix the source path to work with long path names. This is a no-op
+		// on platforms other than Windows.
+		srcPath = fixVolumePathPrefix(srcPath)
+		defer func() {
+			if err := dest.Close(); err != nil && result == nil {
+				result = err
+			}
+		}()
 
-	pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
-	if err != nil {
-		return nil, err
-	}
+		pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
+		if err != nil {
+			return err
+		}
 
-	pipeReader, pipeWriter := io.Pipe()
+		compressWriter, err := CompressStream(dest, options.Compression)
+		if err != nil {
+			return err
+		}
 
-	compressWriter, err := CompressStream(pipeWriter, options.Compression)
-	if err != nil {
-		return nil, err
-	}
-
-	go func() {
-		ta := newTarAppender(
+		ta := newTarWriter(
 			idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
 			compressWriter,
 			options.ChownOpts,
+			options.Timestamp,
 		)
 		ta.WhiteoutConverter = GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
 		ta.CopyPass = options.CopyPass
 
+		includeFiles := options.IncludeFiles
 		defer func() {
 			// Make sure to check the error on Close.
 			if err := ta.TarWriter.Close(); err != nil {
 				logrus.Errorf("Can't close tar writer: %s", err)
 			}
-			if err := compressWriter.Close(); err != nil {
-				logrus.Errorf("Can't close compress writer: %s", err)
-			}
-			if err := pipeWriter.Close(); err != nil {
-				logrus.Errorf("Can't close pipe writer: %s", err)
+			if err := compressWriter.Close(); err != nil && result == nil {
+				result = err
 			}
 		}()
@@ -893,7 +907,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
 
 		stat, err := os.Lstat(srcPath)
 		if err != nil {
-			return
+			return err
 		}
 
 		if !stat.IsDir() {
@@ -901,22 +915,22 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
 			// 'walk' will error if "file/." is stat-ed and "file" is not a
 			// directory. So, we must split the source path and use the
 			// basename as the include.
-			if len(options.IncludeFiles) > 0 {
+			if len(includeFiles) > 0 {
 				logrus.Warn("Tar: Can't archive a file with includes")
 			}
 
 			dir, base := SplitPathDirEntry(srcPath)
 			srcPath = dir
-			options.IncludeFiles = []string{base}
+			includeFiles = []string{base}
 		}
 
-		if len(options.IncludeFiles) == 0 {
-			options.IncludeFiles = []string{"."}
+		if len(includeFiles) == 0 {
+			includeFiles = []string{"."}
 		}
 
 		seen := make(map[string]bool)
 
-		for _, include := range options.IncludeFiles {
+		for _, include := range includeFiles {
 			rebaseName := options.RebaseNames[include]
 
 			walkRoot := getWalkRoot(srcPath, include)
@@ -1002,7 +1016,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
 				relFilePath = strings.Replace(relFilePath, include, replacement, 1)
 			}
 
-			if err := ta.addTarFile(filePath, relFilePath); err != nil {
+			if err := ta.addFile(filePath, relFilePath); err != nil {
 				logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
 				// if pipe is broken, stop writing tar stream to it
 				if err == io.ErrClosedPipe {
@@ -1011,10 +1025,18 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
 				}
 				return nil
 			}); err != nil {
-				logrus.Errorf("%s", err)
-				return
+				return err
 			}
 		}
+		return ta.TarWriter.Close()
+	}
+
+	pipeReader, pipeWriter := io.Pipe()
+	go func() {
+		err := tarWithOptionsTo(pipeWriter, srcPath, options)
+		if pipeErr := pipeWriter.CloseWithError(err); pipeErr != nil {
+			logrus.Errorf("Can't close pipe writer: %s", pipeErr)
+		}
+	}()
 
 	return pipeReader, nil
@@ -1110,7 +1132,7 @@ loop:
 			continue
 		}
 
-		if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+		if !fi.IsDir() || hdr.Typeflag != tar.TypeDir {
 			if err := os.RemoveAll(path); err != nil {
 				return err
 			}
@@ -1137,7 +1159,7 @@ loop:
 			chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
 		}
 
-		if err = createTarFile(path, dest, hdr, trBuf, doChown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
+		if err = extractTarFileEntry(path, dest, hdr, trBuf, doChown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
 			return err
 		}
 
@@ -1201,9 +1223,6 @@ func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decomp
 	if options == nil {
 		options = &TarOptions{}
 	}
-	if options.ExcludePatterns == nil {
-		options.ExcludePatterns = []string{}
-	}
 
 	r := tarArchive
 	if decompress {
@@ -1389,7 +1408,7 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id
 	} else if runtime.GOOS == darwin {
 		uid, gid = hdr.Uid, hdr.Gid
 		if xstat, ok := hdr.PAXRecords[PaxSchilyXattr+idtools.ContainersOverrideXattr]; ok {
-			attrs := strings.Split(string(xstat), ":")
+			attrs := strings.Split(xstat, ":")
 			if len(attrs) >= 3 {
 				val, err := strconv.ParseUint(attrs[0], 10, 32)
 				if err != nil {
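The core of the TarWithOptions rewrite above is error propagation: the body moves into a tarWithOptionsTo helper that returns an error, and a goroutine hands that error to the reader through pipeWriter.CloseWithError instead of only logging it. A minimal, simplified sketch of the io.Pipe + CloseWithError pattern (names are illustrative, the simulated failure stands in for a tar/walk error):

package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	pr, pw := io.Pipe()

	go func() {
		// The producer's error (or nil) is handed to the reader:
		// a non-nil value surfaces from the reader's next Read,
		// nil yields a normal io.EOF.
		_, err := io.WriteString(pw, "tar stream bytes")
		if err == nil {
			err = errors.New("simulated tar failure")
		}
		pw.CloseWithError(err)
	}()

	data, err := io.ReadAll(pr)
	fmt.Printf("read %q, err: %v\n", data, err)
}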
vendor/github.com/containers/storage/pkg/archive/archive_linux.go (generated, vendored; 4 lines changed)

@@ -16,7 +16,7 @@ func getOverlayOpaqueXattrName() string {
 	return GetOverlayXattrName("opaque")
}
 
-func GetWhiteoutConverter(format WhiteoutFormat, data interface{}) TarWhiteoutConverter {
+func GetWhiteoutConverter(format WhiteoutFormat, data any) TarWhiteoutConverter {
 	if format == OverlayWhiteoutFormat {
 		if rolayers, ok := data.([]string); ok && len(rolayers) > 0 {
 			return overlayWhiteoutConverter{rolayers: rolayers}
@@ -173,7 +173,7 @@ func (o overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (boo
 
 func isWhiteOut(stat os.FileInfo) bool {
 	s := stat.Sys().(*syscall.Stat_t)
-	return major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0
+	return major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 //nolint:unconvert
 }
 
 func GetFileOwner(path string) (uint32, uint32, uint32, error) {
vendor/github.com/containers/storage/pkg/archive/archive_unix.go (generated, vendored; 6 lines changed)

@@ -67,7 +67,7 @@ func chmodTarEntry(perm os.FileMode) os.FileMode {
 	return perm // noop for unix as golang APIs provide perm bits correctly
 }
 
-func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
+func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat any) (err error) {
 	s, ok := stat.(*syscall.Stat_t)
 
 	if ok {
@@ -82,7 +82,7 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (
 	return
 }
 
-func getInodeFromStat(stat interface{}) (inode uint64, err error) {
+func getInodeFromStat(stat any) (inode uint64, err error) {
 	s, ok := stat.(*syscall.Stat_t)
 
 	if ok {
@@ -92,7 +92,7 @@ func getInodeFromStat(stat interface{}) (inode uint64, err error) {
 	return
 }
 
-func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
+func getFileUIDGID(stat any) (idtools.IDPair, error) {
 	s, ok := stat.(*syscall.Stat_t)
 
 	if !ok {
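The interface{} to any rewrites that recur throughout this update are purely cosmetic: any has been a predeclared alias for interface{} since Go 1.18, so the two spellings are interchangeable everywhere, including type assertions and reflection. A tiny demonstration:

package main

import "fmt"

// any is an alias, not a new type: both declarations are identical.
func describeOld(v interface{}) string { return fmt.Sprintf("%T", v) }
func describeNew(v any) string         { return fmt.Sprintf("%T", v) }

func main() {
	fmt.Println(describeOld(42), describeNew("hi"))

	// Assignability works in both directions because the types are equal.
	var a any = 3.14
	var i interface{} = a
	fmt.Println(i)
}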
vendor/github.com/containers/storage/pkg/archive/changes.go (generated, vendored; 6 lines changed)

@@ -70,7 +70,7 @@ func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
 // files, we handle this by comparing for exact times, *or* same
 // second count and either a or b having exactly 0 nanoseconds
 func sameFsTime(a, b time.Time) bool {
-	return a == b ||
+	return a.Equal(b) ||
 		(a.Unix() == b.Unix() &&
 			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
 }
@@ -452,7 +452,7 @@ func ChangesSize(newDir string, changes []Change) int64 {
 func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
 	reader, writer := io.Pipe()
 	go func() {
-		ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil)
+		ta := newTarWriter(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil, nil)
 
 		// this buffer is needed for the duration of this piped stream
 		defer pools.BufioWriter32KPool.Put(ta.Buffer)
@@ -481,7 +481,7 @@ func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMa
 			}
 		} else {
 			path := filepath.Join(dir, change.Path)
-			if err := ta.addTarFile(path, change.Path[1:]); err != nil {
+			if err := ta.addFile(path, change.Path[1:]); err != nil {
 				logrus.Debugf("Can't add file %s to tar: %s", path, err)
 			}
 		}
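The sameFsTime fix above (a == b becomes a.Equal(b)) matters because == on time.Time compares the wall clock, the monotonic reading, and the Location field, so two representations of the same instant can compare unequal; Equal compares the instant only. A small demonstration:

package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Now()        // carries a monotonic clock reading
	stripped := now.Round(0) // Round(0) strips the monotonic reading

	fmt.Println(now == stripped)     // false: struct fields differ
	fmt.Println(now.Equal(stripped)) // true: same instant
}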
vendor/github.com/containers/storage/pkg/archive/changes_linux.go (generated, vendored; 11 lines changed)

@@ -174,14 +174,7 @@ func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
 	ix1 := 0
 	ix2 := 0
 
-	for {
-		if ix1 >= len(names1) {
-			break
-		}
-		if ix2 >= len(names2) {
-			break
-		}
-
+	for ix1 < len(names1) && ix2 < len(names2) {
 		ni1 := names1[ix1]
 		ni2 := names2[ix2]
 
@@ -304,7 +297,7 @@ func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno)
 			continue
 		}
 		builder := make([]byte, 0, dirent.Reclen)
-		for i := 0; i < len(dirent.Name); i++ {
+		for i := range len(dirent.Name) {
 			if dirent.Name[i] == 0 {
 				break
 			}
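The parseDirent change uses the range-over-int form added in Go 1.22: `for i := range n` iterates i = 0 .. n-1 and replaces the classic three-clause loop. For example:

package main

import "fmt"

func main() {
	name := []byte{'e', 't', 'c', 0, 0}

	// Go 1.22+: equivalent to `for i := 0; i < len(name); i++`.
	for i := range len(name) {
		if name[i] == 0 {
			break
		}
		fmt.Printf("%c", name[i])
	}
	fmt.Println()
}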
vendor/github.com/containers/storage/pkg/archive/diff.go (generated, vendored; 11 lines changed)

@@ -31,9 +31,6 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
 	if options == nil {
 		options = &TarOptions{}
 	}
-	if options.ExcludePatterns == nil {
-		options.ExcludePatterns = []string{}
-	}
 	idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
 
 	aufsTempdir := ""
@@ -107,7 +104,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
 				}
 				defer os.RemoveAll(aufsTempdir)
 			}
-			if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
+			if err := extractTarFileEntry(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
 				return 0, err
 			}
 		}
@@ -176,12 +173,12 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
 		// We always reset the immutable flag (if present) to allow metadata
 		// changes and to allow directory modification. The flag will be
 		// re-applied based on the contents of hdr either at the end for
-		// directories or in createTarFile otherwise.
+		// directories or in extractTarFileEntry otherwise.
 		if fi, err := os.Lstat(path); err == nil {
 			if err := resetImmutable(path, &fi); err != nil {
 				return 0, err
 			}
-			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+			if !fi.IsDir() || hdr.Typeflag != tar.TypeDir {
 				if err := os.RemoveAll(path); err != nil {
 					return 0, err
 				}
@@ -212,7 +209,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
 				return 0, err
 			}
 
-			if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
+			if err := extractTarFileEntry(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
 				return 0, err
 			}
vendor/github.com/containers/storage/pkg/chrootarchive/archive.go (generated, vendored; 3 lines changed)

@@ -69,9 +69,6 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
 		options = &archive.TarOptions{}
 		options.InUserNS = unshare.IsRootless()
 	}
-	if options.ExcludePatterns == nil {
-		options.ExcludePatterns = []string{}
-	}
 
 	idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
 	rootIDs := idMappings.RootPair()
vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go (generated, vendored; 3 lines changed)

@@ -98,9 +98,6 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions
 			options.InUserNS = true
 		}
 	}
-	if options.ExcludePatterns == nil {
-		options.ExcludePatterns = []string{}
-	}
 
 	data, err := json.Marshal(options)
 	if err != nil {
vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go (generated, vendored; file deleted, 4 lines)

@@ -1,4 +0,0 @@
-package chrootarchive
-
-func init() {
-}
vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go (generated, vendored; file deleted, 4 lines)

@@ -1,4 +0,0 @@
-package chrootarchive
-
-func init() {
-}
vendor/github.com/containers/storage/pkg/chunked/bloom_filter_linux.go (generated, vendored; 2 lines changed)

@@ -65,7 +65,7 @@ func (bf *bloomFilter) writeTo(writer io.Writer) error {
 	if err := binary.Write(writer, binary.LittleEndian, uint64(len(bf.bitArray))); err != nil {
 		return err
 	}
-	if err := binary.Write(writer, binary.LittleEndian, uint32(bf.k)); err != nil {
+	if err := binary.Write(writer, binary.LittleEndian, bf.k); err != nil {
 		return err
 	}
 	if err := binary.Write(writer, binary.LittleEndian, bf.bitArray); err != nil {
vendor/github.com/containers/storage/pkg/chunked/compression_linux.go (generated, vendored; 6 lines changed)

@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"io"
+	"maps"
 	"slices"
 	"strconv"
 	"time"
 
@@ -17,7 +18,6 @@ import (
 	"github.com/vbatts/tar-split/archive/tar"
 	"github.com/vbatts/tar-split/tar/asm"
 	"github.com/vbatts/tar-split/tar/storage"
-	expMaps "golang.org/x/exp/maps"
 )
 
 const (
@@ -87,7 +87,7 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
 		return nil, 0, fmt.Errorf("parse ToC offset: %w", err)
 	}
 
-	size := int64(blobSize - footerSize - tocOffset)
+	size := blobSize - footerSize - tocOffset
 	// set a reasonable limit
 	if size > maxTocSize {
 		// Not errFallbackCanConvert: we would still use too much memory.
@@ -310,7 +310,7 @@ func ensureTOCMatchesTarSplit(toc *minimal.TOC, tarSplit []byte) error {
 		return err
 	}
 	if len(pendingFiles) != 0 {
-		remaining := expMaps.Keys(pendingFiles)
+		remaining := slices.Collect(maps.Keys(pendingFiles))
 		if len(remaining) > 5 {
 			remaining = remaining[:5] // Just to limit the size of the output.
 		}
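The hunk above drops golang.org/x/exp/maps in favor of the standard library: since Go 1.23, maps.Keys returns an iterator and slices.Collect materializes it into a slice, which is equivalent to the old expMaps.Keys(m). For example:

package main

import (
	"fmt"
	"maps"
	"slices"
)

func main() {
	pending := map[string]int{"a": 1, "b": 2, "c": 3}

	// maps.Keys yields an iter.Seq[string]; slices.Collect turns it
	// into a []string. Order is unspecified, so sort for stable output.
	remaining := slices.Collect(maps.Keys(pending))
	slices.Sort(remaining)
	fmt.Println(remaining) // [a b c]
}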
vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go (generated, vendored; 7 lines changed)

@@ -142,10 +142,7 @@ func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) {
 	rc.IsLastChunkZeros = false
 
 	if rc.pendingHole > 0 {
-		toCopy := int64(len(b))
-		if rc.pendingHole < toCopy {
-			toCopy = rc.pendingHole
-		}
+		toCopy := min(rc.pendingHole, int64(len(b)))
 		rc.pendingHole -= toCopy
 		for i := int64(0); i < toCopy; i++ {
 			b[i] = 0
@@ -163,7 +160,7 @@ func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) {
 		return false, 0, io.EOF
 	}
 
-	for i := 0; i < len(b); i++ {
+	for i := range b {
 		holeLen, n, err := rc.reader.readByte()
 		if err != nil {
 			if err == io.EOF {
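Several hunks in this update (here, in storage_linux.go's hashHole, and in bytespipe.go) replace a hand-rolled clamp with the min builtin added in Go 1.21, which works for any ordered type. A short example of the before/after shape:

package main

import "fmt"

func main() {
	pendingHole := int64(10)
	buf := make([]byte, 4096)

	// Pre-1.21 this took a temporary plus an if statement.
	toCopy := min(pendingHole, int64(len(buf)))
	fmt.Println(toCopy) // 10
}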
vendor/github.com/containers/storage/pkg/chunked/dump/dump.go (generated, vendored; 4 lines changed)

@@ -43,7 +43,7 @@ func escaped(val []byte, escape int) string {
 	}
 
 	var result string
-	for _, c := range []byte(val) {
+	for _, c := range val {
 		hexEscape := false
 		var special string
 
@@ -214,7 +214,7 @@ func dumpNode(out io.Writer, added map[string]*minimal.FileMetadata, links map[s
 }
 
 // GenerateDump generates a dump of the TOC in the same format as `composefs-info dump`
-func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, error) {
+func GenerateDump(tocI any, verityDigests map[string]string) (io.Reader, error) {
 	toc, ok := tocI.(*minimal.TOC)
 	if !ok {
 		return nil, fmt.Errorf("invalid TOC type")
vendor/github.com/containers/storage/pkg/chunked/internal/minimal/compression.go (generated, vendored; 2 lines changed)

@@ -234,7 +234,7 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
 		Offset:                     manifestOffset,
 		LengthCompressed:           uint64(len(compressedManifest)),
 		LengthUncompressed:         uint64(len(manifest)),
-		OffsetTarSplit:             uint64(tarSplitOffset),
+		OffsetTarSplit:             tarSplitOffset,
 		LengthCompressedTarSplit:   uint64(len(tarSplitData.Data)),
 		LengthUncompressedTarSplit: uint64(tarSplitData.UncompressedSize),
 	}
vendor/github.com/containers/storage/pkg/chunked/storage_linux.go (generated, vendored; 24 lines changed)

@@ -111,7 +111,7 @@ type chunkedDiffer struct {
 	useFsVerity graphdriver.DifferFsVerity
 }
 
-var xattrsToIgnore = map[string]interface{}{
+var xattrsToIgnore = map[string]any{
 	"security.selinux": true,
 }
 
@@ -162,6 +162,8 @@ func (c *chunkedDiffer) convertTarToZstdChunked(destDirectory string, payload *o
 		return 0, nil, "", nil, err
 	}
 
+	defer diff.Close()
+
 	fd, err := unix.Open(destDirectory, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600)
 	if err != nil {
 		return 0, nil, "", nil, &fs.PathError{Op: "open", Path: destDirectory, Err: err}
@@ -696,18 +698,12 @@ func (c *chunkedDiffer) prepareCompressedStreamToFile(partCompression compressed
 
 // hashHole writes SIZE zeros to the specified hasher
 func hashHole(h hash.Hash, size int64, copyBuffer []byte) error {
-	count := int64(len(copyBuffer))
-	if size < count {
-		count = size
-	}
+	count := min(size, int64(len(copyBuffer)))
 	for i := int64(0); i < count; i++ {
 		copyBuffer[i] = 0
 	}
 	for size > 0 {
-		count = int64(len(copyBuffer))
-		if size < count {
-			count = size
-		}
+		count = min(size, int64(len(copyBuffer)))
 		if _, err := h.Write(copyBuffer[:count]); err != nil {
 			return err
 		}
@@ -1015,7 +1011,7 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
 			!missingParts[prevIndex].Hole && !missingParts[i].Hole &&
 			len(missingParts[prevIndex].Chunks) == 1 && len(missingParts[i].Chunks) == 1 &&
 			missingParts[prevIndex].Chunks[0].File.Name == missingParts[i].Chunks[0].File.Name {
-			missingParts[prevIndex].SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length
+			missingParts[prevIndex].SourceChunk.Length += gap + missingParts[i].SourceChunk.Length
 			missingParts[prevIndex].Chunks[0].CompressedSize += missingParts[i].Chunks[0].CompressedSize
 			missingParts[prevIndex].Chunks[0].UncompressedSize += missingParts[i].Chunks[0].UncompressedSize
 		} else {
@@ -1073,7 +1069,7 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
 		} else {
 			gap := getGap(missingParts, i)
 			prev := &newMissingParts[len(newMissingParts)-1]
-			prev.SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length
+			prev.SourceChunk.Length += gap + missingParts[i].SourceChunk.Length
 			prev.Hole = false
 			prev.OriginFile = nil
 			if gap > 0 {
@@ -1269,7 +1265,7 @@ func getBlobAtConverterGoroutine(stream chan streamOrErr, streams chan io.ReadCl
 	tooManyStreams := false
 	streamsSoFar := 0
 
-	err := errors.New("Unexpected error in getBlobAtGoroutine")
+	err := errors.New("unexpected error in getBlobAtGoroutine")
 
 	defer func() {
 		if err != nil {
@@ -1487,7 +1483,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 			bigDataKey:          c.manifest,
 			chunkedLayerDataKey: lcdBigData,
 		},
-		Artifacts: map[string]interface{}{
+		Artifacts: map[string]any{
 			tocKey: toc,
 		},
 		TOCDigest: c.tocDigest,
@@ -1765,7 +1761,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 
 		// the file is missing, attempt to find individual chunks.
 		for _, chunk := range r.chunks {
-			compressedSize := int64(chunk.EndOffset - chunk.Offset)
+			compressedSize := chunk.EndOffset - chunk.Offset
 			size := remainingSize
 			if chunk.ChunkSize > 0 {
 				size = chunk.ChunkSize
vendor/github.com/containers/storage/pkg/directory/directory_unix.go (generated, vendored; 6 lines changed)

@@ -42,13 +42,11 @@ func Usage(dir string) (usage *DiskUsage, err error) {
 
 		// Check inode to only count the sizes of files with multiple hard links once.
 		inode := fileInfo.Sys().(*syscall.Stat_t).Ino
-		// inode is not a uint64 on all platforms. Cast it to avoid issues.
-		if _, exists := data[uint64(inode)]; exists {
+		if _, exists := data[inode]; exists {
 			return nil
 		}
 
-		// inode is not a uint64 on all platforms. Cast it to avoid issues.
-		data[uint64(inode)] = struct{}{}
+		data[inode] = struct{}{}
 		// Ignore directory sizes
 		if entry.IsDir() {
 			return nil
vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go (generated, vendored; 4 lines changed)

@@ -13,7 +13,7 @@ import (
 func Exists(path string) error {
 	// It uses unix.Faccessat which is a faster operation compared to os.Stat for
 	// simply checking the existence of a file.
-	err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, 0)
+	err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_EACCESS)
 	if err != nil {
 		return &os.PathError{Op: "faccessat", Path: path, Err: err}
 	}
@@ -25,7 +25,7 @@ func Exists(path string) error {
 func Lexists(path string) error {
 	// It uses unix.Faccessat which is a faster operation compared to os.Stat for
 	// simply checking the existence of a file.
-	err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW)
+	err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW|unix.AT_EACCESS)
 	if err != nil {
 		return &os.PathError{Op: "faccessat", Path: path, Err: err}
 	}
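The exists_unix.go change adds unix.AT_EACCESS so faccessat(2) checks against the effective UID/GID rather than the real ones, which matters when the process runs setuid or has dropped privileges. A sketch of the resulting call, mirroring the hunk above (unix-only, requires golang.org/x/sys/unix):

//go:build unix

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func exists(path string) error {
	// F_OK checks bare existence; AT_EACCESS makes the kernel use
	// effective rather than real credentials for the access check.
	if err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_EACCESS); err != nil {
		return &os.PathError{Op: "faccessat", Path: path, Err: err}
	}
	return nil
}

func main() {
	fmt.Println(exists("/etc/hosts"))
}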
vendor/github.com/containers/storage/pkg/fileutils/reflink_linux.go (generated, vendored; new file, 20 lines)

@@ -0,0 +1,20 @@
+package fileutils
+
+import (
+	"io"
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+// ReflinkOrCopy attempts to reflink the source to the destination fd.
+// If reflinking fails or is unsupported, it falls back to io.Copy().
+func ReflinkOrCopy(src, dst *os.File) error {
+	err := unix.IoctlFileClone(int(dst.Fd()), int(src.Fd()))
+	if err == nil {
+		return nil
+	}
+
+	_, err = io.Copy(dst, src)
+	return err
+}
vendor/github.com/containers/storage/pkg/fileutils/reflink_unsupported.go (generated, vendored; new file, 15 lines)

@@ -0,0 +1,15 @@
+//go:build !linux
+
+package fileutils
+
+import (
+	"io"
+	"os"
+)
+
+// ReflinkOrCopy attempts to reflink the source to the destination fd.
+// If reflinking fails or is unsupported, it falls back to io.Copy().
+func ReflinkOrCopy(src, dst *os.File) error {
+	_, err := io.Copy(dst, src)
+	return err
+}
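The two new reflink files give callers one portable entry point: on Linux, ReflinkOrCopy first attempts a copy-on-write clone through unix.IoctlFileClone (the FICLONE ioctl, effectively free on filesystems such as Btrfs or XFS with reflink) and silently falls back to a byte copy; on other platforms it is a plain io.Copy. A usage sketch; the file names are hypothetical:

package main

import (
	"log"
	"os"

	"github.com/containers/storage/pkg/fileutils"
)

func main() {
	src, err := os.Open("layer.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	dst, err := os.Create("layer-copy.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	// Reflink when the filesystem supports it, byte copy otherwise.
	if err := fileutils.ReflinkOrCopy(src, dst); err != nil {
		log.Fatal(err)
	}
}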
vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go (generated, vendored; 2 lines changed)

@@ -25,7 +25,7 @@ func CreateIDMappedMount(source, target string, pid int) error {
 	}
 	defer userNsFile.Close()
 
-	targetDirFd, err := unix.OpenTree(0, source, unix.OPEN_TREE_CLONE)
+	targetDirFd, err := unix.OpenTree(unix.AT_FDCWD, source, unix.OPEN_TREE_CLONE)
 	if err != nil {
 		return &os.PathError{Op: "open_tree", Path: source, Err: err}
 	}
vendor/github.com/containers/storage/pkg/idtools/idtools.go (generated, vendored; 18 lines changed)

@@ -429,25 +429,25 @@ func parseOverrideXattr(xstat []byte) (Stat, error) {
 	var stat Stat
 	attrs := strings.Split(string(xstat), ":")
 	if len(attrs) < 3 {
-		return stat, fmt.Errorf("The number of parts in %s is less than 3",
+		return stat, fmt.Errorf("the number of parts in %s is less than 3",
 			ContainersOverrideXattr)
 	}
 
 	value, err := strconv.ParseUint(attrs[0], 10, 32)
 	if err != nil {
-		return stat, fmt.Errorf("Failed to parse UID: %w", err)
+		return stat, fmt.Errorf("failed to parse UID: %w", err)
 	}
 	stat.IDs.UID = int(value)
 
 	value, err = strconv.ParseUint(attrs[1], 10, 32)
 	if err != nil {
-		return stat, fmt.Errorf("Failed to parse GID: %w", err)
+		return stat, fmt.Errorf("failed to parse GID: %w", err)
 	}
 	stat.IDs.GID = int(value)
 
 	value, err = strconv.ParseUint(attrs[2], 8, 32)
 	if err != nil {
-		return stat, fmt.Errorf("Failed to parse mode: %w", err)
+		return stat, fmt.Errorf("failed to parse mode: %w", err)
 	}
 	stat.Mode = os.FileMode(value) & os.ModePerm
 	if value&0o1000 != 0 {
@@ -484,7 +484,7 @@ func parseOverrideXattr(xstat []byte) (Stat, error) {
 			return stat, err
 		}
 		} else {
-			return stat, fmt.Errorf("Invalid file type %s", typ)
+			return stat, fmt.Errorf("invalid file type %s", typ)
 		}
 	}
 	return stat, nil
@@ -494,18 +494,18 @@ func parseDevice(typ string) (int, int, error) {
 	parts := strings.Split(typ, "-")
 	// If there are more than 3 parts, just ignore them to be forward compatible
 	if len(parts) < 3 {
-		return 0, 0, fmt.Errorf("Invalid device type %s", typ)
+		return 0, 0, fmt.Errorf("invalid device type %s", typ)
 	}
 	if parts[0] != "block" && parts[0] != "char" {
-		return 0, 0, fmt.Errorf("Invalid device type %s", typ)
+		return 0, 0, fmt.Errorf("invalid device type %s", typ)
 	}
 	major, err := strconv.Atoi(parts[1])
 	if err != nil {
-		return 0, 0, fmt.Errorf("Failed to parse major number: %w", err)
+		return 0, 0, fmt.Errorf("failed to parse major number: %w", err)
 	}
 	minor, err := strconv.Atoi(parts[2])
 	if err != nil {
-		return 0, 0, fmt.Errorf("Failed to parse minor number: %w", err)
+		return 0, 0, fmt.Errorf("failed to parse minor number: %w", err)
 	}
 	return major, minor, nil
 }
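The idtools.go hunks are purely about Go's error-string convention (enforced by linters such as staticcheck ST1005): error strings should start lower-case and carry no trailing punctuation, because they are routinely wrapped into longer messages. A small sketch of why the convention reads better; parseUID is a hypothetical helper:

package main

import (
	"fmt"
	"strconv"
)

func parseUID(s string) (int, error) {
	v, err := strconv.ParseUint(s, 10, 32)
	if err != nil {
		// lower-case, no trailing period: composes cleanly when wrapped
		return 0, fmt.Errorf("failed to parse UID: %w", err)
	}
	return int(v), nil
}

func main() {
	_, err := parseUID("not-a-number")
	fmt.Println(fmt.Errorf("reading override xattr: %w", err))
}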
vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go (generated, vendored; 13 lines changed)

@@ -5,6 +5,7 @@ package idtools
 import (
 	"errors"
 	"os/user"
+	"sync"
 	"unsafe"
 )
 
@@ -13,16 +14,14 @@ import (
 #include <shadow/subid.h>
 #include <stdlib.h>
 #include <stdio.h>
-const char *Prog = "storage";
-FILE *shadow_logfd = NULL;
 
 struct subid_range get_range(struct subid_range *ranges, int i)
 {
-	shadow_logfd = stderr;
-	return ranges[i];
+	return ranges[i];
 }
 
 #if !defined(SUBID_ABI_MAJOR) || (SUBID_ABI_MAJOR < 4)
 # define subid_init libsubid_init
 # define subid_get_uid_ranges get_subuid_ranges
 # define subid_get_gid_ranges get_subgid_ranges
 #endif
@@ -30,6 +29,8 @@ struct subid_range get_range(struct subid_range *ranges, int i)
 */
 import "C"
 
+var onceInit sync.Once
+
 func readSubid(username string, isUser bool) (ranges, error) {
 	var ret ranges
 	uidstr := ""
@@ -42,6 +43,10 @@ func readSubid(username string, isUser bool) (ranges, error) {
 		uidstr = u.Uid
 	}
 
+	onceInit.Do(func() {
+		C.subid_init(C.CString("storage"), C.stderr)
+	})
+
 	cUsername := C.CString(username)
 	defer C.free(unsafe.Pointer(cUsername))
 
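The cgo change above replaces the removed Prog/shadow_logfd globals with an explicit subid_init("storage", stderr) call guarded by sync.Once, so initialization runs exactly once no matter how many goroutines reach readSubid. The same pattern in pure Go, with hypothetical names:

package main

import (
	"fmt"
	"sync"
)

var onceInit sync.Once

func initLibrary() {
	fmt.Println("initialized exactly once")
}

func lookup(name string) string {
	// Do runs initLibrary only on the first call; concurrent callers
	// block until that first run has completed.
	onceInit.Do(initLibrary)
	return "result for " + name
}

func main() {
	var wg sync.WaitGroup
	for range 3 {
		wg.Add(1)
		go func() {
			defer wg.Done()
			lookup("user")
		}()
	}
	wg.Wait()
}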
vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go (generated, vendored; 7 lines changed)

@@ -93,10 +93,7 @@ loop0:
 		}
 
 		// add new byte slice to the buffers slice and continue writing
-		nextCap := b.Cap() * 2
-		if nextCap > maxCap {
-			nextCap = maxCap
-		}
+		nextCap := min(b.Cap()*2, maxCap)
 		bp.buf = append(bp.buf, getBuffer(nextCap))
 	}
 	bp.wait.Broadcast()
@@ -178,7 +175,7 @@ func getBuffer(size int) *fixedBuffer {
 	bufPoolsLock.Lock()
 	pool, ok := bufPools[size]
 	if !ok {
-		pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
+		pool = &sync.Pool{New: func() any { return &fixedBuffer{buf: make([]byte, 0, size)} }}
 		bufPools[size] = pool
 	}
 	bufPoolsLock.Unlock()
vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go (generated, vendored; 6 lines changed)

@@ -16,8 +16,8 @@ import (
 // Loopback related errors
 var (
 	ErrAttachLoopbackDevice   = errors.New("loopback attach failed")
-	ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file")
-	ErrSetCapacity            = errors.New("Unable set loopback capacity")
+	ErrGetLoopbackBackingFile = errors.New("unable to get loopback backing file")
+	ErrSetCapacity            = errors.New("unable set loopback capacity")
 )
 
 func stringToLoopName(src string) [LoNameSize]uint8 {
@@ -113,7 +113,7 @@ func openNextAvailableLoopback(sparseName string, sparseFile *os.File) (*os.File
 		logrus.Errorf("Getting loopback backing file: %s", err)
 		return nil, ErrGetLoopbackBackingFile
 	}
-	if dev != uint64(st.Dev) || ino != st.Ino {
+	if dev != uint64(st.Dev) || ino != st.Ino { //nolint:unconvert
 		logrus.Errorf("Loopback device and filesystem disagree on device/inode for %q: %#x(%d):%#x(%d) vs %#x(%d):%#x(%d)", sparseName, dev, dev, ino, ino, st.Dev, st.Dev, st.Ino, st.Ino)
 	}
 	return loopFile, nil
vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go (generated, vendored; 26 lines changed)

@@ -2,6 +2,10 @@
 
 package loopback
 
+import (
+	"golang.org/x/sys/unix"
+)
+
 type loopInfo64 struct {
 	loDevice uint64 /* ioctl r/o */
 	loInode  uint64 /* ioctl r/o */
@@ -20,19 +24,19 @@ type loopInfo64 struct {
 
 // IOCTL consts
 const (
-	LoopSetFd       = 0x4C00
-	LoopCtlGetFree  = 0x4C82
-	LoopGetStatus64 = 0x4C05
-	LoopSetStatus64 = 0x4C04
-	LoopClrFd       = 0x4C01
-	LoopSetCapacity = 0x4C07
+	LoopSetFd       = unix.LOOP_SET_FD
+	LoopCtlGetFree  = unix.LOOP_CTL_GET_FREE
+	LoopGetStatus64 = unix.LOOP_GET_STATUS64
+	LoopSetStatus64 = unix.LOOP_SET_STATUS64
+	LoopClrFd       = unix.LOOP_CLR_FD
+	LoopSetCapacity = unix.LOOP_SET_CAPACITY
 )
 
 // LOOP consts.
 const (
-	LoFlagsAutoClear = 0x4C07
-	LoFlagsReadOnly  = 1
-	LoFlagsPartScan  = 8
-	LoKeySize        = 32
-	LoNameSize       = 64
+	LoFlagsAutoClear = unix.LO_FLAGS_AUTOCLEAR
+	LoFlagsReadOnly  = unix.LO_FLAGS_READ_ONLY
+	LoFlagsPartScan  = unix.LO_FLAGS_PARTSCAN
+	LoKeySize        = unix.LO_KEY_SIZE
+	LoNameSize       = unix.LO_NAME_SIZE
 )
vendor/github.com/containers/storage/pkg/loopback/loopback.go (generated, vendored; 4 lines changed)

@@ -36,7 +36,7 @@ func FindLoopDeviceFor(file *os.File) *os.File {
 		return nil
 	}
 	targetInode := stat.Sys().(*syscall.Stat_t).Ino
-	targetDevice := stat.Sys().(*syscall.Stat_t).Dev
+	targetDevice := uint64(stat.Sys().(*syscall.Stat_t).Dev) //nolint:unconvert
 
 	for i := 0; true; i++ {
 		path := fmt.Sprintf("/dev/loop%d", i)
@@ -53,7 +53,7 @@ func FindLoopDeviceFor(file *os.File) *os.File {
 		}
 
 		dev, inode, err := getLoopbackBackingFile(file)
-		if err == nil && dev == uint64(targetDevice) && inode == targetInode {
+		if err == nil && dev == targetDevice && inode == targetInode {
 			return file
 		}
 		file.Close()
vendor/github.com/containers/storage/pkg/pools/pools.go (generated, vendored; 4 lines changed)

@@ -40,7 +40,7 @@ func init() {
 // added here to be shared where required.
 func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
 	pool := &sync.Pool{
-		New: func() interface{} { return bufio.NewReaderSize(nil, size) },
+		New: func() any { return bufio.NewReaderSize(nil, size) },
 	}
 	return &BufioReaderPool{pool: pool}
 }
@@ -87,7 +87,7 @@ type BufioWriterPool struct {
 // added here to be shared where required.
 func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
 	pool := &sync.Pool{
-		New: func() interface{} { return bufio.NewWriterSize(nil, size) },
+		New: func() any { return bufio.NewWriterSize(nil, size) },
 	}
 	return &BufioWriterPool{pool: pool}
}
vendor/github.com/containers/storage/pkg/reexec/reexec.go (generated, vendored; 2 lines changed)

@@ -49,7 +49,7 @@ func panicIfNotInitialized() {
 	}
 }
 
-func naiveSelf() string { //nolint: unused
+func naiveSelf() string {
 	name := os.Args[0]
 	if filepath.Base(name) == name {
 		if lp, err := exec.LookPath(name); err == nil {
vendor/github.com/containers/storage/pkg/stringutils/stringutils.go (generated, vendored; 4 lines changed)

@@ -24,7 +24,7 @@ func GenerateRandomASCIIString(n int) string {
 		"ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
 		"~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "
 	res := make([]byte, n)
-	for i := 0; i < n; i++ {
+	for i := range n {
 		res[i] = chars[rand.IntN(len(chars))]
 	}
 	return string(res)
@@ -83,7 +83,7 @@ func quote(word string, buf *bytes.Buffer) {
 
 	buf.WriteString("'")
 
-	for i := 0; i < len(word); i++ {
+	for i := range len(word) {
 		b := word[i]
 		if b == '\'' {
 			// Replace literal ' with a close ', a \', and an open '
vendor/github.com/containers/storage/pkg/system/stat_linux.go (generated, vendored; 4 lines changed)

@@ -9,9 +9,9 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) {
 		mode: s.Mode,
 		uid:  s.Uid,
 		gid:  s.Gid,
-		rdev: uint64(s.Rdev),
+		rdev: uint64(s.Rdev), //nolint:unconvert
 		mtim: s.Mtim,
-		dev:  uint64(s.Dev),
+		dev:  uint64(s.Dev), //nolint:unconvert
 	}, nil
 }
 
vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go (generated, vendored; 25 lines changed)

@@ -32,9 +32,9 @@ type Cmd struct {
 	*exec.Cmd
 	UnshareFlags               int
 	UseNewuidmap               bool
-	UidMappings                []specs.LinuxIDMapping // nolint: revive,golint
+	UidMappings                []specs.LinuxIDMapping //nolint: revive
 	UseNewgidmap               bool
-	GidMappings                []specs.LinuxIDMapping // nolint: revive,golint
+	GidMappings                []specs.LinuxIDMapping //nolint: revive
 	GidMappingsEnableSetgroups bool
 	Setsid                     bool
 	Setpgrp                    bool
@@ -98,7 +98,7 @@ func IsSetID(path string, modeid os.FileMode, capid capability.Cap) (bool, error
 	return cap.Get(capability.EFFECTIVE, capid), nil
 }
 
-func (c *Cmd) Start() error {
+func (c *Cmd) Start() (retErr error) {
 	runtime.LockOSThread()
 	defer runtime.UnlockOSThread()
 
@@ -167,6 +167,15 @@ func (c *Cmd) Start() error {
 		return err
 	}
 
+	// If the function fails from here, we need to make sure the
+	// child process is killed and properly cleaned up.
+	defer func() {
+		if retErr != nil {
+			_ = c.Cmd.Process.Kill()
+			_ = c.Cmd.Wait()
+		}
+	}()
+
 	// Close the ends of the pipes that the parent doesn't need.
 	continueRead.Close()
 	continueRead = nil
@@ -240,7 +249,7 @@ func (c *Cmd) Start() error {
 		if err != nil {
 			return fmt.Errorf("finding newgidmap: %w", err)
 		}
-		cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...)
+		cmd := exec.Command(path, append([]string{pidString}, strings.Fields(g.String())...)...)
 		g.Reset()
 		cmd.Stdout = g
 		cmd.Stderr = g
@@ -258,7 +267,7 @@ func (c *Cmd) Start() error {
 			}
 			logrus.Warnf("Falling back to single mapping")
 			g.Reset()
-			g.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Getegid())))
+			fmt.Fprintf(g, "0 %d 1\n", os.Getegid())
 		}
 	}
 	if !gidmapSet {
@@ -300,7 +309,7 @@ func (c *Cmd) Start() error {
 		if err != nil {
 			return fmt.Errorf("finding newuidmap: %w", err)
 		}
-		cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...)
+		cmd := exec.Command(path, append([]string{pidString}, strings.Fields(u.String())...)...)
 		u.Reset()
 		cmd.Stdout = u
 		cmd.Stderr = u
@@ -319,7 +328,7 @@ func (c *Cmd) Start() error {
 
 		logrus.Warnf("Falling back to single mapping")
 		u.Reset()
-		u.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Geteuid())))
+		fmt.Fprintf(u, "0 %d 1\n", os.Geteuid())
 		}
 	}
 	if !uidmapSet {
@@ -459,7 +468,7 @@ type Runnable interface {
 	Run() error
 }
 
-func bailOnError(err error, format string, a ...interface{}) { // nolint: revive,goprintffuncname
+func bailOnError(err error, format string, a ...any) { //nolint:revive,goprintffuncname
 	if err != nil {
 		if format != "" {
 			logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err)
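The Cmd.Start change is a classic named-return cleanup pattern: once the child process exists, every later failure path must kill and reap it, so a deferred closure inspects the named retErr instead of duplicating cleanup at each return. A minimal unix-only sketch under assumed setup steps (the configure callback stands in for writing uid_map/gid_map):

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func start(configure func() error) (retErr error) {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		return err
	}

	// From here on, a failure must not leak the child: kill it and
	// reap it with Wait so it does not linger as a zombie.
	defer func() {
		if retErr != nil {
			_ = cmd.Process.Kill()
			_ = cmd.Wait()
		}
	}()

	if err := configure(); err != nil { // e.g. writing the uid_map fails
		return err
	}
	return nil
}

func main() {
	err := start(func() error { return errors.New("uid map write failed") })
	fmt.Println(err)
}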