go.mod: update osbuild/images to v0.69.0
parent 1cc90c6a0b
commit 8ac80e8abc
611 changed files with 28281 additions and 32629 deletions

10  vendor/github.com/Microsoft/go-winio/.golangci.yml  (generated, vendored)

@@ -1,7 +1,3 @@
-run:
-  skip-dirs:
-    - pkg/etw/sample
-
 linters:
   enable:
     # style
@@ -20,9 +16,13 @@ linters:
     - gofmt # files are gofmt'ed
     - gosec # security
     - nilerr # returns nil even with non-nil error
    - thelper # test helpers without t.Helper()
    - unparam # unused function params

 issues:
+  exclude-dirs:
+    - pkg/etw/sample
+
   exclude-rules:
     # err is very often shadowed in nested scopes
     - linters:
@@ -69,9 +69,7 @@ linters-settings:
       # struct order is often for Win32 compat
       # also, ignore pointer bytes/GC issues for now until performance becomes an issue
       - fieldalignment
-    check-shadowing: true
   nolintlint:
-    allow-leading-space: false
     require-explanation: true
     require-specific: true
   revive:

33  vendor/github.com/Microsoft/go-winio/backup.go  (generated, vendored)

@@ -10,14 +10,14 @@ import (
 	"io"
 	"os"
 	"runtime"
-	"syscall"
 	"unicode/utf16"
 
+	"github.com/Microsoft/go-winio/internal/fs"
 	"golang.org/x/sys/windows"
 )
 
-//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
-//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
+//sys backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
+//sys backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
 
 const (
 	BackupData = uint32(iota + 1)
@@ -104,7 +104,7 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) {
 		if err := binary.Read(r.r, binary.LittleEndian, name); err != nil {
 			return nil, err
 		}
-		hdr.Name = syscall.UTF16ToString(name)
+		hdr.Name = windows.UTF16ToString(name)
 	}
 	if wsi.StreamID == BackupSparseBlock {
 		if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
@@ -205,7 +205,7 @@ func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader {
 // Read reads a backup stream from the file by calling the Win32 API BackupRead().
 func (r *BackupFileReader) Read(b []byte) (int, error) {
 	var bytesRead uint32
-	err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
+	err := backupRead(windows.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
 	if err != nil {
 		return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err}
 	}
@@ -220,7 +220,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) {
 // the underlying file.
 func (r *BackupFileReader) Close() error {
 	if r.ctx != 0 {
-		_ = backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
+		_ = backupRead(windows.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
 		runtime.KeepAlive(r.f)
 		r.ctx = 0
 	}
@@ -244,7 +244,7 @@ func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
 // Write restores a portion of the file using the provided backup stream.
 func (w *BackupFileWriter) Write(b []byte) (int, error) {
 	var bytesWritten uint32
-	err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
+	err := backupWrite(windows.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
 	if err != nil {
 		return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err}
 	}
@@ -259,7 +259,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) {
 // close the underlying file.
 func (w *BackupFileWriter) Close() error {
 	if w.ctx != 0 {
-		_ = backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
+		_ = backupWrite(windows.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
 		runtime.KeepAlive(w.f)
 		w.ctx = 0
 	}
@@ -271,17 +271,14 @@ func (w *BackupFileWriter) Close() error {
 //
 // If the file opened was a directory, it cannot be used with Readdir().
 func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) {
-	winPath, err := syscall.UTF16FromString(path)
-	if err != nil {
-		return nil, err
-	}
-	h, err := syscall.CreateFile(&winPath[0],
-		access,
-		share,
+	h, err := fs.CreateFile(path,
+		fs.AccessMask(access),
+		fs.FileShareMode(share),
 		nil,
-		createmode,
-		syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT,
-		0)
+		fs.FileCreationDisposition(createmode),
+		fs.FILE_FLAG_BACKUP_SEMANTICS|fs.FILE_FLAG_OPEN_REPARSE_POINT,
+		0,
+	)
 	if err != nil {
 		err = &os.PathError{Op: "open", Path: path, Err: err}
 		return nil, err
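
The backup.go changes keep the exported OpenForBackup / BackupFileReader surface unchanged while the handle plumbing moves from syscall to the internal fs package and x/sys/windows. A minimal, hypothetical usage sketch of that public API (Windows-only, not part of this commit; the path is only an example):

//go:build windows

package main

import (
	"fmt"
	"io"

	winio "github.com/Microsoft/go-winio"
	"golang.org/x/sys/windows"
)

func main() {
	// Open a file with backup semantics; reading files the caller could not
	// otherwise access additionally requires SeBackupPrivilege.
	f, err := winio.OpenForBackup(`C:\Windows\notepad.exe`,
		windows.GENERIC_READ, windows.FILE_SHARE_READ, windows.OPEN_EXISTING)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Stream the file's backup data (BackupRead under the hood); false = skip security info.
	r := winio.NewBackupFileReader(f, false)
	defer r.Close()
	n, err := io.Copy(io.Discard, r)
	fmt.Println("backup stream bytes:", n, "err:", err)
}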

9  vendor/github.com/Microsoft/go-winio/backuptar/tar.go  (generated, vendored)

@@ -11,7 +11,6 @@ import (
 	"path/filepath"
 	"strconv"
 	"strings"
-	"syscall"
 	"time"
 
 	"github.com/Microsoft/go-winio"
@@ -106,7 +105,7 @@ func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *ta
 	hdr.PAXRecords[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)
 	hdr.PAXRecords[hdrCreationTime] = formatPAXTime(time.Unix(0, fileInfo.CreationTime.Nanoseconds()))
 
-	if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
+	if (fileInfo.FileAttributes & windows.FILE_ATTRIBUTE_DIRECTORY) != 0 {
 		hdr.Mode |= cISDIR
 		hdr.Size = 0
 		hdr.Typeflag = tar.TypeDir
@@ -378,7 +377,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
 // WriteTarFileFromBackupStream.
 func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) {
 	name = hdr.Name
-	if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
+	if hdr.Typeflag == tar.TypeReg {
 		size = hdr.Size
 	}
 	fileInfo = &winio.FileBasicInfo{
@@ -396,7 +395,7 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win
 		fileInfo.FileAttributes = uint32(attr)
 	} else {
 		if hdr.Typeflag == tar.TypeDir {
-			fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
+			fileInfo.FileAttributes |= windows.FILE_ATTRIBUTE_DIRECTORY
 		}
 	}
 	if creationTimeStr, ok := hdr.PAXRecords[hdrCreationTime]; ok {
@@ -469,7 +468,7 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
 		}
 	}
 
-	if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
+	if hdr.Typeflag == tar.TypeReg {
 		bhdr := winio.BackupHeader{
 			Id:   winio.BackupData,
 			Size: hdr.Size,

85  vendor/github.com/Microsoft/go-winio/file.go  (generated, vendored)

@@ -15,26 +15,11 @@ import (
 	"golang.org/x/sys/windows"
 )
 
-//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx
-//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
-//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
-//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
-//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult
-
-type atomicBool int32
-
-func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
-func (b *atomicBool) setFalse()   { atomic.StoreInt32((*int32)(b), 0) }
-func (b *atomicBool) setTrue()    { atomic.StoreInt32((*int32)(b), 1) }
-
-//revive:disable-next-line:predeclared Keep "new" to maintain consistency with "atomic" pkg
-func (b *atomicBool) swap(new bool) bool {
-	var newInt int32
-	if new {
-		newInt = 1
-	}
-	return atomic.SwapInt32((*int32)(b), newInt) == 1
-}
+//sys cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) = CancelIoEx
+//sys createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) = CreateIoCompletionPort
+//sys getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
+//sys setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
+//sys wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult
 
 var (
 	ErrFileClosed = errors.New("file has already been closed")
@@ -50,7 +35,7 @@ func (*timeoutError) Temporary() bool { return true }
 type timeoutChan chan struct{}
 
 var ioInitOnce sync.Once
-var ioCompletionPort syscall.Handle
+var ioCompletionPort windows.Handle
 
 // ioResult contains the result of an asynchronous IO operation.
 type ioResult struct {
@@ -60,12 +45,12 @@ type ioResult struct {
 
 // ioOperation represents an outstanding asynchronous Win32 IO.
 type ioOperation struct {
-	o  syscall.Overlapped
+	o  windows.Overlapped
 	ch chan ioResult
 }
 
 func initIO() {
-	h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff)
+	h, err := createIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff)
 	if err != nil {
 		panic(err)
 	}
@@ -76,10 +61,10 @@ func initIO() {
 // win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall.
 // It takes ownership of this handle and will close it if it is garbage collected.
 type win32File struct {
-	handle        syscall.Handle
+	handle        windows.Handle
 	wg            sync.WaitGroup
 	wgLock        sync.RWMutex
-	closing       atomicBool
+	closing       atomic.Bool
 	socket        bool
 	readDeadline  deadlineHandler
 	writeDeadline deadlineHandler
@@ -90,11 +75,11 @@ type deadlineHandler struct {
 	channel     timeoutChan
 	channelLock sync.RWMutex
 	timer       *time.Timer
-	timedout    atomicBool
+	timedout    atomic.Bool
 }
 
 // makeWin32File makes a new win32File from an existing file handle.
-func makeWin32File(h syscall.Handle) (*win32File, error) {
+func makeWin32File(h windows.Handle) (*win32File, error) {
 	f := &win32File{handle: h}
 	ioInitOnce.Do(initIO)
 	_, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff)
@@ -110,7 +95,12 @@ func makeWin32File(h syscall.Handle) (*win32File, error) {
 	return f, nil
 }
 
+// Deprecated: use NewOpenFile instead.
 func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
+	return NewOpenFile(windows.Handle(h))
+}
+
+func NewOpenFile(h windows.Handle) (io.ReadWriteCloser, error) {
 	// If we return the result of makeWin32File directly, it can result in an
 	// interface-wrapped nil, rather than a nil interface value.
 	f, err := makeWin32File(h)
@@ -124,13 +114,13 @@ func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
 func (f *win32File) closeHandle() {
 	f.wgLock.Lock()
 	// Atomically set that we are closing, releasing the resources only once.
-	if !f.closing.swap(true) {
+	if !f.closing.Swap(true) {
 		f.wgLock.Unlock()
 		// cancel all IO and wait for it to complete
 		_ = cancelIoEx(f.handle, nil)
 		f.wg.Wait()
 		// at this point, no new IO can start
-		syscall.Close(f.handle)
+		windows.Close(f.handle)
 		f.handle = 0
 	} else {
 		f.wgLock.Unlock()
@@ -145,14 +135,14 @@ func (f *win32File) Close() error {
 
 // IsClosed checks if the file has been closed.
 func (f *win32File) IsClosed() bool {
-	return f.closing.isSet()
+	return f.closing.Load()
 }
 
 // prepareIO prepares for a new IO operation.
 // The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
 func (f *win32File) prepareIO() (*ioOperation, error) {
 	f.wgLock.RLock()
-	if f.closing.isSet() {
+	if f.closing.Load() {
 		f.wgLock.RUnlock()
 		return nil, ErrFileClosed
 	}
@@ -164,12 +154,12 @@ func (f *win32File) prepareIO() (*ioOperation, error) {
 }
 
 // ioCompletionProcessor processes completed async IOs forever.
-func ioCompletionProcessor(h syscall.Handle) {
+func ioCompletionProcessor(h windows.Handle) {
 	for {
 		var bytes uint32
 		var key uintptr
 		var op *ioOperation
-		err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE)
+		err := getQueuedCompletionStatus(h, &bytes, &key, &op, windows.INFINITE)
 		if op == nil {
 			panic(err)
 		}
@@ -182,11 +172,11 @@ func ioCompletionProcessor(h syscall.Handle) {
 // asyncIO processes the return value from ReadFile or WriteFile, blocking until
 // the operation has actually completed.
 func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
-	if err != syscall.ERROR_IO_PENDING { //nolint:errorlint // err is Errno
+	if err != windows.ERROR_IO_PENDING { //nolint:errorlint // err is Errno
 		return int(bytes), err
 	}
 
-	if f.closing.isSet() {
+	if f.closing.Load() {
 		_ = cancelIoEx(f.handle, &c.o)
 	}
 
@@ -201,8 +191,8 @@ func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, er
 	select {
 	case r = <-c.ch:
 		err = r.err
-		if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
-			if f.closing.isSet() {
+		if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
+			if f.closing.Load() {
 				err = ErrFileClosed
 			}
 		} else if err != nil && f.socket {
@@ -214,7 +204,7 @@ func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, er
 		_ = cancelIoEx(f.handle, &c.o)
 		r = <-c.ch
 		err = r.err
-		if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
+		if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
 			err = ErrTimeout
 		}
 	}
@@ -235,23 +225,22 @@ func (f *win32File) Read(b []byte) (int, error) {
 	}
 	defer f.wg.Done()
 
-	if f.readDeadline.timedout.isSet() {
+	if f.readDeadline.timedout.Load() {
 		return 0, ErrTimeout
 	}
 
 	var bytes uint32
-	err = syscall.ReadFile(f.handle, b, &bytes, &c.o)
+	err = windows.ReadFile(f.handle, b, &bytes, &c.o)
 	n, err := f.asyncIO(c, &f.readDeadline, bytes, err)
 	runtime.KeepAlive(b)
 
 	// Handle EOF conditions.
 	if err == nil && n == 0 && len(b) != 0 {
 		return 0, io.EOF
-	} else if err == syscall.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno
+	} else if err == windows.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno
 		return 0, io.EOF
-	} else {
-		return n, err
 	}
+	return n, err
 }
 
 // Write writes to a file handle.
@@ -262,12 +251,12 @@ func (f *win32File) Write(b []byte) (int, error) {
 	}
 	defer f.wg.Done()
 
-	if f.writeDeadline.timedout.isSet() {
+	if f.writeDeadline.timedout.Load() {
 		return 0, ErrTimeout
 	}
 
 	var bytes uint32
-	err = syscall.WriteFile(f.handle, b, &bytes, &c.o)
+	err = windows.WriteFile(f.handle, b, &bytes, &c.o)
 	n, err := f.asyncIO(c, &f.writeDeadline, bytes, err)
 	runtime.KeepAlive(b)
 	return n, err
@@ -282,7 +271,7 @@ func (f *win32File) SetWriteDeadline(deadline time.Time) error {
 }
 
 func (f *win32File) Flush() error {
-	return syscall.FlushFileBuffers(f.handle)
+	return windows.FlushFileBuffers(f.handle)
 }
 
 func (f *win32File) Fd() uintptr {
@@ -299,7 +288,7 @@ func (d *deadlineHandler) set(deadline time.Time) error {
 		}
 		d.timer = nil
 	}
-	d.timedout.setFalse()
+	d.timedout.Store(false)
 
 	select {
 	case <-d.channel:
@@ -314,7 +303,7 @@ func (d *deadlineHandler) set(deadline time.Time) error {
 	}
 
 	timeoutIO := func() {
-		d.timedout.setTrue()
+		d.timedout.Store(true)
 		close(d.channel)
 	}
 
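
file.go also retires the hand-rolled atomicBool helper (an int32 with isSet/setTrue/swap methods) in favor of the standard library's sync/atomic.Bool, available since Go 1.19. A small, self-contained sketch of the same close-once pattern, independent of this repository:

package main

import (
	"fmt"
	"sync/atomic"
)

type conn struct {
	closing atomic.Bool // replaces a custom int32-based atomicBool
}

func (c *conn) close() {
	// Swap returns the previous value, so only the first caller runs the
	// cleanup, mirroring the closeHandle logic in the diff above.
	if !c.closing.Swap(true) {
		fmt.Println("releasing resources once")
	}
}

func main() {
	var c conn
	c.close()
	c.close() // second call is a no-op
	fmt.Println("closed:", c.closing.Load())
}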

22  vendor/github.com/Microsoft/go-winio/fileinfo.go  (generated, vendored)

@@ -18,9 +18,18 @@ type FileBasicInfo struct {
 	_ uint32 // padding
 }
 
+// alignedFileBasicInfo is a FileBasicInfo, but aligned to uint64 by containing
+// uint64 rather than windows.Filetime. Filetime contains two uint32s. uint64
+// alignment is necessary to pass this as FILE_BASIC_INFO.
+type alignedFileBasicInfo struct {
+	CreationTime, LastAccessTime, LastWriteTime, ChangeTime uint64
+	FileAttributes                                          uint32
+	_                                                       uint32 // padding
+}
+
 // GetFileBasicInfo retrieves times and attributes for a file.
 func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
-	bi := &FileBasicInfo{}
+	bi := &alignedFileBasicInfo{}
 	if err := windows.GetFileInformationByHandleEx(
 		windows.Handle(f.Fd()),
 		windows.FileBasicInfo,
@@ -30,16 +39,21 @@ func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
 		return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
 	}
 	runtime.KeepAlive(f)
-	return bi, nil
+	// Reinterpret the alignedFileBasicInfo as a FileBasicInfo so it matches the
+	// public API of this module. The data may be unnecessarily aligned.
+	return (*FileBasicInfo)(unsafe.Pointer(bi)), nil
 }
 
 // SetFileBasicInfo sets times and attributes for a file.
 func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
+	// Create an alignedFileBasicInfo based on a FileBasicInfo. The copy is
+	// suitable to pass to GetFileInformationByHandleEx.
+	biAligned := *(*alignedFileBasicInfo)(unsafe.Pointer(bi))
 	if err := windows.SetFileInformationByHandle(
 		windows.Handle(f.Fd()),
 		windows.FileBasicInfo,
-		(*byte)(unsafe.Pointer(bi)),
-		uint32(unsafe.Sizeof(*bi)),
+		(*byte)(unsafe.Pointer(&biAligned)),
+		uint32(unsafe.Sizeof(biAligned)),
 	); err != nil {
 		return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
 	}
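
The fileinfo.go hunks introduce alignedFileBasicInfo, a layout-compatible twin of FileBasicInfo whose uint64 timestamps give the 8-byte alignment FILE_BASIC_INFO requires, and convert between the two with an unsafe.Pointer cast. A generic sketch of that reinterpretation pattern, with made-up struct names and no Windows dependency:

package main

import (
	"fmt"
	"unsafe"
)

// public is the layout exposed to callers; packed holds the same bytes but with
// field types chosen for a stricter alignment requirement (as in the diff above).
type public struct {
	Lo, Hi uint32
	Attrs  uint32
	_      uint32 // padding keeps both layouts the same size
}

type packed struct {
	Both  uint64
	Attrs uint32
	_     uint32
}

func main() {
	// The reinterpretation is only safe because both structs have identical
	// size and compatible field offsets.
	fmt.Println("sizes match:", unsafe.Sizeof(public{}) == unsafe.Sizeof(packed{}))

	p := packed{Both: 0x0000000200000001, Attrs: 7}
	v := *(*public)(unsafe.Pointer(&p))
	fmt.Printf("Lo=%d Hi=%d Attrs=%d\n", v.Lo, v.Hi, v.Attrs)
}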

47  vendor/github.com/Microsoft/go-winio/hvsock.go  (generated, vendored)

@@ -10,7 +10,6 @@ import (
 	"io"
 	"net"
 	"os"
-	"syscall"
 	"time"
 	"unsafe"
 
@@ -181,13 +180,13 @@ type HvsockConn struct {
 var _ net.Conn = &HvsockConn{}
 
 func newHVSocket() (*win32File, error) {
-	fd, err := syscall.Socket(afHVSock, syscall.SOCK_STREAM, 1)
+	fd, err := windows.Socket(afHVSock, windows.SOCK_STREAM, 1)
 	if err != nil {
 		return nil, os.NewSyscallError("socket", err)
 	}
 	f, err := makeWin32File(fd)
 	if err != nil {
-		syscall.Close(fd)
+		windows.Close(fd)
 		return nil, err
 	}
 	f.socket = true
@@ -197,16 +196,24 @@ func newHVSocket() (*win32File, error) {
 // ListenHvsock listens for connections on the specified hvsock address.
 func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) {
 	l := &HvsockListener{addr: *addr}
-	sock, err := newHVSocket()
+
+	var sock *win32File
+	sock, err = newHVSocket()
 	if err != nil {
 		return nil, l.opErr("listen", err)
 	}
+	defer func() {
+		if err != nil {
+			_ = sock.Close()
+		}
+	}()
+
 	sa := addr.raw()
-	err = socket.Bind(windows.Handle(sock.handle), &sa)
+	err = socket.Bind(sock.handle, &sa)
 	if err != nil {
 		return nil, l.opErr("listen", os.NewSyscallError("socket", err))
 	}
-	err = syscall.Listen(sock.handle, 16)
+	err = windows.Listen(sock.handle, 16)
 	if err != nil {
 		return nil, l.opErr("listen", os.NewSyscallError("listen", err))
 	}
@@ -246,7 +253,7 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) {
 	var addrbuf [addrlen * 2]byte
 
 	var bytes uint32
-	err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o)
+	err = windows.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o)
 	if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil {
 		return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
 	}
@@ -263,7 +270,7 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) {
 	conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen])))
 
 	// initialize the accepted socket and update its properties with those of the listening socket
-	if err = windows.Setsockopt(windows.Handle(sock.handle),
+	if err = windows.Setsockopt(sock.handle,
 		windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT,
 		(*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil {
 		return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err))
@@ -334,7 +341,7 @@ func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *Hvsock
 	}()
 
 	sa := addr.raw()
-	err = socket.Bind(windows.Handle(sock.handle), &sa)
+	err = socket.Bind(sock.handle, &sa)
 	if err != nil {
 		return nil, conn.opErr(op, os.NewSyscallError("bind", err))
 	}
@@ -347,7 +354,7 @@ func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *Hvsock
 	var bytes uint32
 	for i := uint(0); i <= d.Retries; i++ {
 		err = socket.ConnectEx(
-			windows.Handle(sock.handle),
+			sock.handle,
 			&sa,
 			nil, // sendBuf
 			0,   // sendDataLen
@@ -367,7 +374,7 @@ func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *Hvsock
 
 	// update the connection properties, so shutdown can be used
 	if err = windows.Setsockopt(
-		windows.Handle(sock.handle),
+		sock.handle,
 		windows.SOL_SOCKET,
 		windows.SO_UPDATE_CONNECT_CONTEXT,
 		nil, // optvalue
@@ -378,7 +385,7 @@ func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *Hvsock
 
 	// get the local name
 	var sal rawHvsockAddr
-	err = socket.GetSockName(windows.Handle(sock.handle), &sal)
+	err = socket.GetSockName(sock.handle, &sal)
 	if err != nil {
 		return nil, conn.opErr(op, os.NewSyscallError("getsockname", err))
 	}
@@ -421,7 +428,7 @@ func (d *HvsockDialer) redialWait(ctx context.Context) (err error) {
 	return ctx.Err()
 }
 
-// assumes error is a plain, unwrapped syscall.Errno provided by direct syscall.
+// assumes error is a plain, unwrapped windows.Errno provided by direct syscall.
 func canRedial(err error) bool {
 	//nolint:errorlint // guaranteed to be an Errno
 	switch err {
@@ -447,9 +454,9 @@ func (conn *HvsockConn) Read(b []byte) (int, error) {
 		return 0, conn.opErr("read", err)
 	}
 	defer conn.sock.wg.Done()
-	buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
+	buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))}
 	var flags, bytes uint32
-	err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil)
+	err = windows.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil)
 	n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err)
 	if err != nil {
 		var eno windows.Errno
@@ -482,9 +489,9 @@ func (conn *HvsockConn) write(b []byte) (int, error) {
 		return 0, conn.opErr("write", err)
 	}
 	defer conn.sock.wg.Done()
-	buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
+	buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))}
 	var bytes uint32
-	err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil)
+	err = windows.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil)
 	n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err)
 	if err != nil {
 		var eno windows.Errno
@@ -511,7 +518,7 @@ func (conn *HvsockConn) shutdown(how int) error {
 		return socket.ErrSocketClosed
 	}
 
-	err := syscall.Shutdown(conn.sock.handle, how)
+	err := windows.Shutdown(conn.sock.handle, how)
 	if err != nil {
 		// If the connection was closed, shutdowns fail with "not connected"
 		if errors.Is(err, windows.WSAENOTCONN) ||
@@ -525,7 +532,7 @@ func (conn *HvsockConn) shutdown(how int) error {
 
 // CloseRead shuts down the read end of the socket, preventing future read operations.
 func (conn *HvsockConn) CloseRead() error {
-	err := conn.shutdown(syscall.SHUT_RD)
+	err := conn.shutdown(windows.SHUT_RD)
 	if err != nil {
 		return conn.opErr("closeread", err)
 	}
@@ -535,7 +542,7 @@ func (conn *HvsockConn) CloseRead() error {
 // CloseWrite shuts down the write end of the socket, preventing future write operations and
 // notifying the other endpoint that no more data will be written.
 func (conn *HvsockConn) CloseWrite() error {
-	err := conn.shutdown(syscall.SHUT_WR)
+	err := conn.shutdown(windows.SHUT_WR)
 	if err != nil {
 		return conn.opErr("closewrite", err)
 	}

72  vendor/github.com/Microsoft/go-winio/internal/fs/fs.go  (generated, vendored)

@@ -11,12 +11,14 @@ import (
 //go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go
 
 // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew
-//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW
+//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW
 
 const NullHandle windows.Handle = 0
 
 // AccessMask defines standard, specific, and generic rights.
 //
+// Used with CreateFile and NtCreateFile (and co.).
+//
 // Bitmask:
 //   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 //   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
@@ -47,6 +49,12 @@ const (
 	// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters
 	FILE_ANY_ACCESS AccessMask = 0
 
+	GENERIC_READ           AccessMask = 0x8000_0000
+	GENERIC_WRITE          AccessMask = 0x4000_0000
+	GENERIC_EXECUTE        AccessMask = 0x2000_0000
+	GENERIC_ALL            AccessMask = 0x1000_0000
+	ACCESS_SYSTEM_SECURITY AccessMask = 0x0100_0000
+
 	// Specific Object Access
 	// from ntioapi.h
 
@@ -124,14 +132,32 @@ const (
 	TRUNCATE_EXISTING FileCreationDisposition = 0x05
 )
 
+// Create disposition values for NtCreate*
+type NTFileCreationDisposition uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+	// From ntioapi.h
+
+	FILE_SUPERSEDE           NTFileCreationDisposition = 0x00
+	FILE_OPEN                NTFileCreationDisposition = 0x01
+	FILE_CREATE              NTFileCreationDisposition = 0x02
+	FILE_OPEN_IF             NTFileCreationDisposition = 0x03
+	FILE_OVERWRITE           NTFileCreationDisposition = 0x04
+	FILE_OVERWRITE_IF        NTFileCreationDisposition = 0x05
+	FILE_MAXIMUM_DISPOSITION NTFileCreationDisposition = 0x05
+)
+
 // CreateFile and co. take flags or attributes together as one parameter.
 // Define alias until we can use generics to allow both
 //
 // https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants
 type FileFlagOrAttribute uint32
 
 //nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
-const ( // from winnt.h
+const (
+	// from winnt.h
+
 	FILE_FLAG_WRITE_THROUGH  FileFlagOrAttribute = 0x8000_0000
 	FILE_FLAG_OVERLAPPED     FileFlagOrAttribute = 0x4000_0000
 	FILE_FLAG_NO_BUFFERING   FileFlagOrAttribute = 0x2000_0000
@@ -145,17 +171,51 @@ const ( // from winnt.h
 	FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000
 )
 
+// NtCreate* functions take a dedicated CreateOptions parameter.
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/Winternl/nf-winternl-ntcreatefile
+//
+// https://learn.microsoft.com/en-us/windows/win32/devnotes/nt-create-named-pipe-file
+type NTCreateOptions uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+	// From ntioapi.h
+
+	FILE_DIRECTORY_FILE            NTCreateOptions = 0x0000_0001
+	FILE_WRITE_THROUGH             NTCreateOptions = 0x0000_0002
+	FILE_SEQUENTIAL_ONLY           NTCreateOptions = 0x0000_0004
+	FILE_NO_INTERMEDIATE_BUFFERING NTCreateOptions = 0x0000_0008
+
+	FILE_SYNCHRONOUS_IO_ALERT    NTCreateOptions = 0x0000_0010
+	FILE_SYNCHRONOUS_IO_NONALERT NTCreateOptions = 0x0000_0020
+	FILE_NON_DIRECTORY_FILE      NTCreateOptions = 0x0000_0040
+	FILE_CREATE_TREE_CONNECTION  NTCreateOptions = 0x0000_0080
+
+	FILE_COMPLETE_IF_OPLOCKED NTCreateOptions = 0x0000_0100
+	FILE_NO_EA_KNOWLEDGE      NTCreateOptions = 0x0000_0200
+	FILE_DISABLE_TUNNELING    NTCreateOptions = 0x0000_0400
+	FILE_RANDOM_ACCESS        NTCreateOptions = 0x0000_0800
+
+	FILE_DELETE_ON_CLOSE        NTCreateOptions = 0x0000_1000
+	FILE_OPEN_BY_FILE_ID        NTCreateOptions = 0x0000_2000
+	FILE_OPEN_FOR_BACKUP_INTENT NTCreateOptions = 0x0000_4000
+	FILE_NO_COMPRESSION         NTCreateOptions = 0x0000_8000
+)
+
 type FileSQSFlag = FileFlagOrAttribute
 
 //nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
-const ( // from winbase.h
+const (
+	// from winbase.h
+
 	SECURITY_ANONYMOUS      FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16)
 	SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16)
 	SECURITY_IMPERSONATION  FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16)
 	SECURITY_DELEGATION     FileSQSFlag = FileSQSFlag(SecurityDelegation << 16)
 
-	SECURITY_SQOS_PRESENT     FileSQSFlag = 0x00100000
-	SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F0000
+	SECURITY_SQOS_PRESENT     FileSQSFlag = 0x0010_0000
+	SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F_0000
 )
 
 // GetFinalPathNameByHandle flags

9  vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go  (generated, vendored)

@@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
 	case errnoERROR_IO_PENDING:
 		return errERROR_IO_PENDING
 	}
-	// TODO: add more here, after collecting data on the common
-	// error values see on Windows. (perhaps when running
-	// all.bat?)
 	return e
 }
 
@@ -45,7 +42,7 @@ var (
 	procCreateFileW = modkernel32.NewProc("CreateFileW")
 )
 
-func CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
+func CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
 	var _p0 *uint16
 	_p0, err = syscall.UTF16PtrFromString(name)
 	if err != nil {
@@ -54,8 +51,8 @@ func CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.
 	return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile)
 }
 
-func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
-	r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
+func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile))
 	handle = windows.Handle(r0)
 	if handle == windows.InvalidHandle {
 		err = errnoErr(e1)

8  vendor/github.com/Microsoft/go-winio/internal/socket/socket.go  (generated, vendored)

@@ -156,9 +156,7 @@ func connectEx(
 	bytesSent *uint32,
 	overlapped *windows.Overlapped,
 ) (err error) {
-	// todo: after upgrading to 1.18, switch from syscall.Syscall9 to syscall.SyscallN
-	r1, _, e1 := syscall.Syscall9(connectExFunc.addr,
-		7,
+	r1, _, e1 := syscall.SyscallN(connectExFunc.addr,
 		uintptr(s),
 		uintptr(name),
 		uintptr(namelen),
@@ -166,8 +164,8 @@ func connectEx(
 		uintptr(sendDataLen),
 		uintptr(unsafe.Pointer(bytesSent)),
 		uintptr(unsafe.Pointer(overlapped)),
-		0,
-		0)
+	)
+
 	if r1 == 0 {
 		if e1 != 0 {
 			err = error(e1)
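
Here and in the zsyscall files the fixed-arity syscall.Syscall/Syscall6/Syscall9 calls become syscall.SyscallN (Go 1.18+), which takes only the procedure address and its actual arguments, with no count parameter and no zero padding. A minimal, Windows-only sketch of the call style; GetTickCount64 is just a convenient zero-argument example and is not something this commit touches:

//go:build windows

package main

import (
	"fmt"
	"syscall"
)

func main() {
	kernel32 := syscall.NewLazyDLL("kernel32.dll")
	getTickCount64 := kernel32.NewProc("GetTickCount64")

	// SyscallN takes the procedure address and its real arguments only;
	// no arity argument and no trailing zeros as with Syscall9.
	r1, _, _ := syscall.SyscallN(getTickCount64.Addr())
	fmt.Println("milliseconds since boot:", uint64(r1))
}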

9  vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go  (generated, vendored)

@@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
 	case errnoERROR_IO_PENDING:
 		return errERROR_IO_PENDING
 	}
-	// TODO: add more here, after collecting data on the common
-	// error values see on Windows. (perhaps when running
-	// all.bat?)
 	return e
 }
 
@@ -48,7 +45,7 @@ var (
 )
 
 func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) {
-	r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
+	r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen))
 	if r1 == socketError {
 		err = errnoErr(e1)
 	}
@@ -56,7 +53,7 @@ func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) {
 }
 
 func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) {
-	r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
+	r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
 	if r1 == socketError {
 		err = errnoErr(e1)
 	}
@@ -64,7 +61,7 @@ func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err err
 }
 
 func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) {
-	r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
+	r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
 	if r1 == socketError {
 		err = errnoErr(e1)
 	}

2  vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go  (generated, vendored)

@@ -62,7 +62,7 @@ func (b *WString) Free() {
 // ResizeTo grows the buffer to at least c and returns the new capacity, freeing the
 // previous buffer back into pool.
 func (b *WString) ResizeTo(c uint32) uint32 {
-	// allready sufficient (or n is 0)
+	// already sufficient (or n is 0)
 	if c <= b.Cap() {
 		return b.Cap()
 	}

125  vendor/github.com/Microsoft/go-winio/pipe.go  (generated, vendored)

@@ -11,7 +11,6 @@ import (
 	"net"
 	"os"
 	"runtime"
-	"syscall"
 	"time"
 	"unsafe"
 
@@ -20,20 +19,44 @@ import (
 	"github.com/Microsoft/go-winio/internal/fs"
 )
 
-//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
-//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
-//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
-//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
-//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
-//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile
+//sys connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) = ConnectNamedPipe
+//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateNamedPipeW
+//sys disconnectNamedPipe(pipe windows.Handle) (err error) = DisconnectNamedPipe
+//sys getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
+//sys getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
+//sys ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile
 //sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
 //sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U
 //sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl
 
+type PipeConn interface {
+	net.Conn
+	Disconnect() error
+	Flush() error
+}
+
+// type aliases for mkwinsyscall code
+type (
+	ntAccessMask              = fs.AccessMask
+	ntFileShareMode           = fs.FileShareMode
+	ntFileCreationDisposition = fs.NTFileCreationDisposition
+	ntFileOptions             = fs.NTCreateOptions
+)
+
 type ioStatusBlock struct {
 	Status, Information uintptr
 }
 
+// typedef struct _OBJECT_ATTRIBUTES {
+//	ULONG           Length;
+//	HANDLE          RootDirectory;
+//	PUNICODE_STRING ObjectName;
+//	ULONG           Attributes;
+//	PVOID           SecurityDescriptor;
+//	PVOID           SecurityQualityOfService;
+//	} OBJECT_ATTRIBUTES;
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_object_attributes
 type objectAttributes struct {
 	Length        uintptr
 	RootDirectory uintptr
@@ -49,6 +72,17 @@ type unicodeString struct {
 	Buffer uintptr
 }
 
+// typedef struct _SECURITY_DESCRIPTOR {
+//	BYTE                        Revision;
+//	BYTE                        Sbz1;
+//	SECURITY_DESCRIPTOR_CONTROL Control;
+//	PSID                        Owner;
+//	PSID                        Group;
+//	PACL                        Sacl;
+//	PACL                        Dacl;
+//	} SECURITY_DESCRIPTOR, *PISECURITY_DESCRIPTOR;
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-security_descriptor
 type securityDescriptor struct {
 	Revision byte
 	Sbz1     byte
@@ -80,6 +114,8 @@ type win32Pipe struct {
 	path string
 }
 
+var _ PipeConn = (*win32Pipe)(nil)
+
 type win32MessageBytePipe struct {
 	win32Pipe
 	writeClosed bool
@@ -103,6 +139,10 @@ func (f *win32Pipe) SetDeadline(t time.Time) error {
 	return f.SetWriteDeadline(t)
 }
 
+func (f *win32Pipe) Disconnect() error {
+	return disconnectNamedPipe(f.win32File.handle)
+}
+
 // CloseWrite closes the write side of a message pipe in byte mode.
 func (f *win32MessageBytePipe) CloseWrite() error {
 	if f.writeClosed {
@@ -146,7 +186,7 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
 		// zero-byte message, ensure that all future Read() calls
 		// also return EOF.
 		f.readEOF = true
-	} else if err == syscall.ERROR_MORE_DATA { //nolint:errorlint // err is Errno
+	} else if err == windows.ERROR_MORE_DATA { //nolint:errorlint // err is Errno
 		// ERROR_MORE_DATA indicates that the pipe's read mode is message mode
 		// and the message still has more bytes. Treat this as a success, since
 		// this package presents all named pipes as byte streams.
@@ -164,21 +204,20 @@ func (s pipeAddress) String() string {
 }
 
 // tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
-func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask) (syscall.Handle, error) {
+func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask, impLevel PipeImpLevel) (windows.Handle, error) {
 	for {
 		select {
 		case <-ctx.Done():
-			return syscall.Handle(0), ctx.Err()
+			return windows.Handle(0), ctx.Err()
 		default:
-			wh, err := fs.CreateFile(*path,
+			h, err := fs.CreateFile(*path,
 				access,
 				0,   // mode
 				nil, // security attributes
 				fs.OPEN_EXISTING,
-				fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.SECURITY_ANONYMOUS,
+				fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.FileSQSFlag(impLevel),
				0, // template file handle
 			)
-			h := syscall.Handle(wh)
 			if err == nil {
 				return h, nil
 			}
@@ -214,15 +253,33 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
 // DialPipeContext attempts to connect to a named pipe by `path` until `ctx`
 // cancellation or timeout.
 func DialPipeContext(ctx context.Context, path string) (net.Conn, error) {
-	return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE)
+	return DialPipeAccess(ctx, path, uint32(fs.GENERIC_READ|fs.GENERIC_WRITE))
 }
 
+// PipeImpLevel is an enumeration of impersonation levels that may be set
+// when calling DialPipeAccessImpersonation.
+type PipeImpLevel uint32
+
+const (
+	PipeImpLevelAnonymous      = PipeImpLevel(fs.SECURITY_ANONYMOUS)
+	PipeImpLevelIdentification = PipeImpLevel(fs.SECURITY_IDENTIFICATION)
+	PipeImpLevelImpersonation  = PipeImpLevel(fs.SECURITY_IMPERSONATION)
+	PipeImpLevelDelegation     = PipeImpLevel(fs.SECURITY_DELEGATION)
+)
+
 // DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx`
 // cancellation or timeout.
 func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) {
+	return DialPipeAccessImpLevel(ctx, path, access, PipeImpLevelAnonymous)
+}
+
+// DialPipeAccessImpLevel attempts to connect to a named pipe by `path` with
+// `access` at `impLevel` until `ctx` cancellation or timeout. The other
+// DialPipe* implementations use PipeImpLevelAnonymous.
+func DialPipeAccessImpLevel(ctx context.Context, path string, access uint32, impLevel PipeImpLevel) (net.Conn, error) {
 	var err error
-	var h syscall.Handle
-	h, err = tryDialPipe(ctx, &path, fs.AccessMask(access))
+	var h windows.Handle
+	h, err = tryDialPipe(ctx, &path, fs.AccessMask(access), impLevel)
 	if err != nil {
 		return nil, err
 	}
@@ -235,7 +292,7 @@ func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn,
 
 	f, err := makeWin32File(h)
 	if err != nil {
-		syscall.Close(h)
+		windows.Close(h)
 		return nil, err
 	}
 
@@ -255,7 +312,7 @@ type acceptResponse struct {
 }
 
 type win32PipeListener struct {
-	firstHandle syscall.Handle
+	firstHandle windows.Handle
 	path        string
 	config      PipeConfig
 	acceptCh    chan (chan acceptResponse)
@@ -263,8 +320,8 @@ type win32PipeListener struct {
 	doneCh chan int
 }
 
-func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
-	path16, err := syscall.UTF16FromString(path)
+func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (windows.Handle, error) {
+	path16, err := windows.UTF16FromString(path)
 	if err != nil {
 		return 0, &os.PathError{Op: "open", Path: path, Err: err}
 	}
@@ -280,16 +337,20 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
 	).Err(); err != nil {
 		return 0, &os.PathError{Op: "open", Path: path, Err: err}
 	}
-	defer localFree(ntPath.Buffer)
+	defer windows.LocalFree(windows.Handle(ntPath.Buffer)) //nolint:errcheck
 	oa.ObjectName = &ntPath
 	oa.Attributes = windows.OBJ_CASE_INSENSITIVE
 
 	// The security descriptor is only needed for the first pipe.
 	if first {
 		if sd != nil {
 			//todo: does `sdb` need to be allocated on the heap, or can go allocate it?
 			l := uint32(len(sd))
-			sdb := localAlloc(0, l)
-			defer localFree(sdb)
+			sdb, err := windows.LocalAlloc(0, l)
+			if err != nil {
+				return 0, fmt.Errorf("LocalAlloc for security descriptor with of length %d: %w", l, err)
+			}
+			defer windows.LocalFree(windows.Handle(sdb)) //nolint:errcheck
 			copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd)
 			oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb))
 		} else {
@@ -298,7 +359,7 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
 		if err := rtlDefaultNpAcl(&dacl).Err(); err != nil {
 			return 0, fmt.Errorf("getting default named pipe ACL: %w", err)
 		}
-		defer localFree(dacl)
+		defer windows.LocalFree(windows.Handle(dacl)) //nolint:errcheck
 
 		sdb := &securityDescriptor{
 			Revision: 1,
@@ -314,27 +375,27 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
 		typ |= windows.FILE_PIPE_MESSAGE_TYPE
 	}
 
-	disposition := uint32(windows.FILE_OPEN)
-	access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE)
+	disposition := fs.FILE_OPEN
+	access := fs.GENERIC_READ | fs.GENERIC_WRITE | fs.SYNCHRONIZE
 	if first {
-		disposition = windows.FILE_CREATE
+		disposition = fs.FILE_CREATE
 		// By not asking for read or write access, the named pipe file system
 		// will put this pipe into an initially disconnected state, blocking
 		// client connections until the next call with first == false.
-		access = syscall.SYNCHRONIZE
+		access = fs.SYNCHRONIZE
 	}
 
 	timeout := int64(-50 * 10000) // 50ms
 
 	var (
-		h    syscall.Handle
+		h    windows.Handle
 		iosb ioStatusBlock
 	)
 	err = ntCreateNamedPipeFile(&h,
 		access,
 		&oa,
 		&iosb,
-		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE,
+		fs.FILE_SHARE_READ|fs.FILE_SHARE_WRITE,
 		disposition,
 		0,
 		typ,
@@ -359,7 +420,7 @@ func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
 	}
 	f, err := makeWin32File(h)
 	if err != nil {
-		syscall.Close(h)
+		windows.Close(h)
 		return nil, err
 	}
 	return f, nil
@@ -418,7 +479,7 @@ func (l *win32PipeListener) listenerRoutine() {
 			closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno
 		}
 	}
-	syscall.Close(l.firstHandle)
+	windows.Close(l.firstHandle)
 	l.firstHandle = 0
 	// Notify Close() and Accept() callers that the handle has been closed.
 	close(l.doneCh)
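
pipe.go's dialing path gains an impersonation-level parameter: DialPipeAccess now forwards to the new DialPipeAccessImpLevel, and the PipeImpLevel* constants map onto the fs.SECURITY_* SQOS flags. A hypothetical client-side sketch (Windows-only; the pipe name is invented for illustration):

//go:build windows

package main

import (
	"context"
	"fmt"
	"time"

	winio "github.com/Microsoft/go-winio"
	"golang.org/x/sys/windows"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Dial a named pipe and allow the server to impersonate this client.
	conn, err := winio.DialPipeAccessImpLevel(ctx, `\\.\pipe\examplepipe`,
		uint32(windows.GENERIC_READ|windows.GENERIC_WRITE),
		winio.PipeImpLevelImpersonation)
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected to", conn.RemoteAddr())
}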

9  vendor/github.com/Microsoft/go-winio/privilege.go  (generated, vendored)

@@ -9,7 +9,6 @@ import (
 	"fmt"
 	"runtime"
 	"sync"
-	"syscall"
 	"unicode/utf16"
 
 	"golang.org/x/sys/windows"
@@ -18,8 +17,8 @@ import (
 //sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges
 //sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf
 //sys revertToSelf() (err error) = advapi32.RevertToSelf
-//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken
-//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread
+//sys openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken
+//sys getCurrentThread() (h windows.Handle) = GetCurrentThread
 //sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW
 //sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW
 //sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW
@@ -29,7 +28,7 @@ const (
 	SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED
 
 	//revive:disable-next-line:var-naming ALL_CAPS
-	ERROR_NOT_ALL_ASSIGNED syscall.Errno = windows.ERROR_NOT_ALL_ASSIGNED
+	ERROR_NOT_ALL_ASSIGNED windows.Errno = windows.ERROR_NOT_ALL_ASSIGNED
 
 	SeBackupPrivilege  = "SeBackupPrivilege"
 	SeRestorePrivilege = "SeRestorePrivilege"
@@ -177,7 +176,7 @@ func newThreadToken() (windows.Token, error) {
 	}
 
 	var token windows.Token
-	err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token)
+	err = openThreadToken(getCurrentThread(), windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, false, &token)
 	if err != nil {
 		rerr := revertToSelf()
 		if rerr != nil {
37
vendor/github.com/Microsoft/go-winio/sd.go
generated
vendored
37
vendor/github.com/Microsoft/go-winio/sd.go
generated
vendored
|
|
@ -5,7 +5,7 @@ package winio
|
|||
|
||||
import (
|
||||
"errors"
|
||||
"syscall"
|
||||
"fmt"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
|
|
@ -15,10 +15,6 @@ import (
|
|||
//sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW
|
||||
//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW
|
||||
//sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW
|
||||
//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW
|
||||
//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW
|
||||
//sys localFree(mem uintptr) = LocalFree
|
||||
//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength
|
||||
|
||||
type AccountLookupError struct {
|
||||
Name string
|
||||
|
|
@ -64,7 +60,7 @@ func LookupSidByName(name string) (sid string, err error) {
|
|||
|
||||
var sidSize, sidNameUse, refDomainSize uint32
|
||||
err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse)
|
||||
if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno
|
||||
if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno
|
||||
return "", &AccountLookupError{name, err}
|
||||
}
|
||||
sidBuffer := make([]byte, sidSize)
|
||||
|
|
@ -78,8 +74,8 @@ func LookupSidByName(name string) (sid string, err error) {
if err != nil {
return "", &AccountLookupError{name, err}
}
sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
localFree(uintptr(unsafe.Pointer(strBuffer)))
sid = windows.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
_, _ = windows.LocalFree(windows.Handle(unsafe.Pointer(strBuffer)))
return sid, nil
}
@ -100,7 +96,7 @@ func LookupNameBySid(sid string) (name string, err error) {
if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil {
return "", &AccountLookupError{sid, err}
}
defer localFree(uintptr(unsafe.Pointer(sidPtr)))
defer windows.LocalFree(windows.Handle(unsafe.Pointer(sidPtr))) //nolint:errcheck
var nameSize, refDomainSize, sidNameUse uint32
err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse)
@ -120,25 +116,18 @@ func LookupNameBySid(sid string) (name string, err error) {
}
func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
var sdBuffer uintptr
err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil)
sd, err := windows.SecurityDescriptorFromString(sddl)
if err != nil {
return nil, &SddlConversionError{sddl, err}
return nil, &SddlConversionError{Sddl: sddl, Err: err}
}
defer localFree(sdBuffer)
sd := make([]byte, getSecurityDescriptorLength(sdBuffer))
copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)])
return sd, nil
b := unsafe.Slice((*byte)(unsafe.Pointer(sd)), sd.Length())
return b, nil
}
func SecurityDescriptorToSddl(sd []byte) (string, error) {
var sddl *uint16
// The returned string length seems to include an arbitrary number of terminating NULs.
// Don't use it.
err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil)
if err != nil {
return "", err
if l := int(unsafe.Sizeof(windows.SECURITY_DESCRIPTOR{})); len(sd) < l {
return "", fmt.Errorf("SecurityDescriptor (%d) smaller than expected (%d): %w", len(sd), l, windows.ERROR_INCORRECT_SIZE)
}
defer localFree(uintptr(unsafe.Pointer(sddl)))
return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil
s := (*windows.SECURITY_DESCRIPTOR)(unsafe.Pointer(&sd[0]))
return s.String(), nil
}
5
vendor/github.com/Microsoft/go-winio/tools.go
generated
vendored
5
vendor/github.com/Microsoft/go-winio/tools.go
generated
vendored
@ -1,5 +0,0 @@
//go:build tools
package winio
import _ "golang.org/x/tools/cmd/stringer"
13
vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go
generated
vendored
13
vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go
generated
vendored
@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values see on Windows. (perhaps when running
// all.bat?)
return e
}
@ -50,7 +47,7 @@ var (
|
|||
)
|
||||
|
||||
func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)))
|
||||
r0, _, _ := syscall.SyscallN(procAttachVirtualDisk.Addr(), uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)))
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
|
|
@ -67,7 +64,7 @@ func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virt
|
|||
}
|
||||
|
||||
func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle)))
|
||||
r0, _, _ := syscall.SyscallN(procCreateVirtualDisk.Addr(), uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle)))
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
|
|
@ -75,7 +72,7 @@ func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, vi
|
|||
}
|
||||
|
||||
func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags))
|
||||
r0, _, _ := syscall.SyscallN(procDetachVirtualDisk.Addr(), uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags))
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
|
|
@ -83,7 +80,7 @@ func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, pro
|
|||
}
|
||||
|
||||
func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer)))
|
||||
r0, _, _ := syscall.SyscallN(procGetVirtualDiskPhysicalPath.Addr(), uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer)))
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
|
|
@ -100,7 +97,7 @@ func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtua
|
|||
}
|
||||
|
||||
func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
|
||||
r0, _, _ := syscall.SyscallN(procOpenVirtualDisk.Addr(), uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
211
vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
generated
vendored
211
vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
generated
vendored
@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values see on Windows. (perhaps when running
// all.bat?)
return e
}
@ -45,38 +42,34 @@ var (
|
|||
modntdll = windows.NewLazySystemDLL("ntdll.dll")
|
||||
modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")
|
||||
|
||||
procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
|
||||
procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
|
||||
procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
|
||||
procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
|
||||
procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW")
|
||||
procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength")
|
||||
procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
|
||||
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
|
||||
procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW")
|
||||
procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
|
||||
procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
|
||||
procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
|
||||
procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
|
||||
procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
|
||||
procBackupRead = modkernel32.NewProc("BackupRead")
|
||||
procBackupWrite = modkernel32.NewProc("BackupWrite")
|
||||
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
|
||||
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
|
||||
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
|
||||
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
|
||||
procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
|
||||
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
|
||||
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
|
||||
procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
|
||||
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
|
||||
procLocalFree = modkernel32.NewProc("LocalFree")
|
||||
procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
|
||||
procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
|
||||
procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl")
|
||||
procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
|
||||
procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
|
||||
procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
|
||||
procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
|
||||
procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
|
||||
procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW")
|
||||
procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
|
||||
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
|
||||
procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW")
|
||||
procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
|
||||
procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
|
||||
procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
|
||||
procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
|
||||
procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
|
||||
procBackupRead = modkernel32.NewProc("BackupRead")
|
||||
procBackupWrite = modkernel32.NewProc("BackupWrite")
|
||||
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
|
||||
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
|
||||
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
|
||||
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
|
||||
procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe")
|
||||
procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
|
||||
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
|
||||
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
|
||||
procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
|
||||
procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
|
||||
procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
|
||||
procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl")
|
||||
procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
|
||||
procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
|
||||
procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
|
||||
)
|
||||
|
||||
func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
|
||||
|
|
@ -84,7 +77,7 @@ func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, ou
|
|||
if releaseAll {
|
||||
_p0 = 1
|
||||
}
|
||||
r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
|
||||
r0, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
|
||||
success = r0 != 0
|
||||
if true {
|
||||
err = errnoErr(e1)
|
||||
|
|
@ -92,33 +85,8 @@ func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, ou
|
|||
return
|
||||
}
|
||||
|
||||
func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0)
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func convertSidToStringSid(sid *byte, str **uint16) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0)
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(str)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size)
|
||||
}
|
||||
|
||||
func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0)
|
||||
r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
|
|
@ -126,21 +94,15 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision
|
|||
}
|
||||
|
||||
func convertStringSidToSid(str *uint16, sid **byte) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid)), 0)
|
||||
r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid)))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getSecurityDescriptorLength(sd uintptr) (len uint32) {
|
||||
r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0)
|
||||
len = uint32(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func impersonateSelf(level uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0)
|
||||
r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(level))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
|
|
@ -157,7 +119,7 @@ func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSiz
|
|||
}
|
||||
|
||||
func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
|
||||
r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
|
|
@ -165,7 +127,7 @@ func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidS
|
|||
}
|
||||
|
||||
func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
|
||||
r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
|
|
@ -182,7 +144,7 @@ func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16,
|
|||
}
|
||||
|
||||
func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0)
|
||||
r1, _, e1 := syscall.SyscallN(procLookupPrivilegeDisplayNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
|
|
@ -199,7 +161,7 @@ func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *
|
|||
}
|
||||
|
||||
func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0)
|
||||
r1, _, e1 := syscall.SyscallN(procLookupPrivilegeNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
|
|
@ -221,19 +183,19 @@ func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err err
|
|||
}
|
||||
|
||||
func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
|
||||
r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
|
||||
func openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
|
||||
var _p0 uint32
|
||||
if openAsSelf {
|
||||
_p0 = 1
|
||||
}
|
||||
r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
|
||||
r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
|
|
@ -241,14 +203,14 @@ func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool,
|
|||
}
|
||||
|
||||
func revertToSelf() (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
|
||||
r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr())
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
|
||||
func backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
|
||||
var _p0 *byte
|
||||
if len(b) > 0 {
|
||||
_p0 = &b[0]
|
||||
|
|
@ -261,14 +223,14 @@ func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, proce
|
|||
if processSecurity {
|
||||
_p2 = 1
|
||||
}
|
||||
r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
|
||||
r1, _, e1 := syscall.SyscallN(procBackupRead.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
|
||||
func backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
|
||||
var _p0 *byte
|
||||
if len(b) > 0 {
|
||||
_p0 = &b[0]
|
||||
|
|
@ -281,39 +243,39 @@ func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, p
|
|||
if processSecurity {
|
||||
_p2 = 1
|
||||
}
|
||||
r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
|
||||
r1, _, e1 := syscall.SyscallN(procBackupWrite.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0)
|
||||
func cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) {
|
||||
r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(file), uintptr(unsafe.Pointer(o)))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
|
||||
func connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) {
|
||||
r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(o)))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) {
|
||||
r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0)
|
||||
newport = syscall.Handle(r0)
|
||||
func createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) {
|
||||
r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount))
|
||||
newport = windows.Handle(r0)
|
||||
if newport == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
|
||||
func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(name)
|
||||
if err != nil {
|
||||
|
|
@ -322,96 +284,93 @@ func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances ui
|
|||
return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
|
||||
}
|
||||
|
||||
func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
|
||||
r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
|
||||
handle = syscall.Handle(r0)
|
||||
if handle == syscall.InvalidHandle {
|
||||
func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) {
|
||||
r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)))
|
||||
handle = windows.Handle(r0)
|
||||
if handle == windows.InvalidHandle {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getCurrentThread() (h syscall.Handle) {
|
||||
r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0)
|
||||
h = syscall.Handle(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
|
||||
func disconnectNamedPipe(pipe windows.Handle) (err error) {
|
||||
r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
|
||||
func getCurrentThread() (h windows.Handle) {
|
||||
r0, _, _ := syscall.SyscallN(procGetCurrentThread.Addr())
|
||||
h = windows.Handle(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
|
||||
r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0)
|
||||
func getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
|
||||
r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
|
||||
r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0)
|
||||
ptr = uintptr(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func localFree(mem uintptr) {
|
||||
syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0)
|
||||
return
|
||||
}
|
||||
|
||||
func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0)
|
||||
func getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
|
||||
r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) {
|
||||
r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
|
||||
func setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) {
|
||||
r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(h), uintptr(flags))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) {
|
||||
r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)))
|
||||
status = ntStatus(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) {
|
||||
r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(dacl)))
|
||||
status = ntStatus(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) {
|
||||
r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved))
|
||||
status = ntStatus(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func rtlNtStatusToDosError(status ntStatus) (winerr error) {
|
||||
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(status))
|
||||
if r0 != 0 {
|
||||
winerr = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
|
||||
func wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
|
||||
var _p0 uint32
|
||||
if wait {
|
||||
_p0 = 1
|
||||
}
|
||||
r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
|
||||
r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
27
vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go
generated
vendored
27
vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go
generated
vendored
@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values see on Windows. (perhaps when running
// all.bat?)
return e
}
@ -75,7 +72,7 @@ func _hcsAttachLayerStorageFilter(layerPath *uint16, layerData *uint16) (hr erro
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall(procHcsAttachLayerStorageFilter.Addr(), 2, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(layerData)), 0)
|
||||
r0, _, _ := syscall.SyscallN(procHcsAttachLayerStorageFilter.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(layerData)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -104,7 +101,7 @@ func _hcsAttachOverlayFilter(volumePath *uint16, layerData *uint16) (hr error) {
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall(procHcsAttachOverlayFilter.Addr(), 2, uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData)), 0)
|
||||
r0, _, _ := syscall.SyscallN(procHcsAttachOverlayFilter.Addr(), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -128,7 +125,7 @@ func _hcsDestroyLayer(layerPath *uint16) (hr error) {
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall(procHcsDestroyLayer.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procHcsDestroyLayer.Addr(), uintptr(unsafe.Pointer(layerPath)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -152,7 +149,7 @@ func _hcsDetachLayerStorageFilter(layerPath *uint16) (hr error) {
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall(procHcsDetachLayerStorageFilter.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procHcsDetachLayerStorageFilter.Addr(), uintptr(unsafe.Pointer(layerPath)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -181,7 +178,7 @@ func _hcsDetachOverlayFilter(volumePath *uint16, layerData *uint16) (hr error) {
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall(procHcsDetachOverlayFilter.Addr(), 2, uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData)), 0)
|
||||
r0, _, _ := syscall.SyscallN(procHcsDetachOverlayFilter.Addr(), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -220,7 +217,7 @@ func _hcsExportLayer(layerPath *uint16, exportFolderPath *uint16, layerData *uin
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall6(procHcsExportLayer.Addr(), 4, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(exportFolderPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procHcsExportLayer.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(exportFolderPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -235,7 +232,7 @@ func hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) {
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall(procHcsFormatWritableLayerVhd.Addr(), 1, uintptr(handle), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procHcsFormatWritableLayerVhd.Addr(), uintptr(handle))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -250,7 +247,7 @@ func hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr e
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall(procHcsGetLayerVhdMountPath.Addr(), 2, uintptr(vhdHandle), uintptr(unsafe.Pointer(mountPath)), 0)
|
||||
r0, _, _ := syscall.SyscallN(procHcsGetLayerVhdMountPath.Addr(), uintptr(vhdHandle), uintptr(unsafe.Pointer(mountPath)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -284,7 +281,7 @@ func _hcsImportLayer(layerPath *uint16, sourceFolderPath *uint16, layerData *uin
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall(procHcsImportLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(sourceFolderPath)), uintptr(unsafe.Pointer(layerData)))
|
||||
r0, _, _ := syscall.SyscallN(procHcsImportLayer.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(sourceFolderPath)), uintptr(unsafe.Pointer(layerData)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -318,7 +315,7 @@ func _hcsInitializeWritableLayer(writableLayerPath *uint16, layerData *uint16, o
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall(procHcsInitializeWritableLayer.Addr(), 3, uintptr(unsafe.Pointer(writableLayerPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)))
|
||||
r0, _, _ := syscall.SyscallN(procHcsInitializeWritableLayer.Addr(), uintptr(unsafe.Pointer(writableLayerPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -347,7 +344,7 @@ func _hcsSetupBaseOSLayer(layerPath *uint16, handle windows.Handle, options *uin
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall(procHcsSetupBaseOSLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(handle), uintptr(unsafe.Pointer(options)))
|
||||
r0, _, _ := syscall.SyscallN(procHcsSetupBaseOSLayer.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(handle), uintptr(unsafe.Pointer(options)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -381,7 +378,7 @@ func _hcsSetupBaseOSVolume(layerPath *uint16, volumePath *uint16, options *uint1
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall(procHcsSetupBaseOSVolume.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(options)))
|
||||
r0, _, _ := syscall.SyscallN(procHcsSetupBaseOSVolume.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(options)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
4
vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go
generated
vendored
4
vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go
generated
vendored
@ -14,14 +14,14 @@ import (
"golang.org/x/sys/windows"
)
// makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles
// makeOpenFiles calls winio.NewOpenFile for each handle in a slice but closes all the handles
// if there is an error.
func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) {
fs := make([]io.ReadWriteCloser, len(hs))
for i, h := range hs {
if h != syscall.Handle(0) {
if err == nil {
fs[i], err = winio.MakeOpenFile(h)
fs[i], err = winio.NewOpenFile(windows.Handle(h))
}
if err != nil {
syscall.Close(h)
5
vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go
generated
vendored
5
vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go
generated
vendored
@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values see on Windows. (perhaps when running
// all.bat?)
return e
}
@ -69,7 +66,7 @@ func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16)
if hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0)
r0, _, _ := syscall.SyscallN(procHNSCall.Addr(), uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
5
vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go
generated
vendored
5
vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go
generated
vendored
@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values see on Windows. (perhaps when running
// all.bat?)
return e
}
@ -46,6 +43,6 @@ var (
)
func coTaskMemFree(buffer unsafe.Pointer) {
syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(buffer), 0, 0)
syscall.SyscallN(procCoTaskMemFree.Addr(), uintptr(buffer))
return
}
9
vendor/github.com/Microsoft/hcsshim/internal/security/zsyscall_windows.go
generated
vendored
9
vendor/github.com/Microsoft/hcsshim/internal/security/zsyscall_windows.go
generated
vendored
@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values see on Windows. (perhaps when running
// all.bat?)
return e
}
@ -48,7 +45,7 @@ var (
|
|||
)
|
||||
|
||||
func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0)
|
||||
r0, _, _ := syscall.SyscallN(procGetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)))
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
|
|
@ -56,7 +53,7 @@ func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidO
|
|||
}
|
||||
|
||||
func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procSetEntriesInAclW.Addr(), uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)))
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
|
|
@ -64,7 +61,7 @@ func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *
|
|||
}
|
||||
|
||||
func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procSetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl))
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
55
vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go
generated
vendored
55
vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go
generated
vendored
@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values see on Windows. (perhaps when running
// all.bat?)
return e
}
@ -75,7 +72,7 @@ func hcsCloseComputeSystem(computeSystem HcsSystem) (hr error) {
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall(procHcsCloseComputeSystem.Addr(), 1, uintptr(computeSystem), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procHcsCloseComputeSystem.Addr(), uintptr(computeSystem))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -90,7 +87,7 @@ func hcsCloseProcess(process HcsProcess) (hr error) {
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall(procHcsCloseProcess.Addr(), 1, uintptr(process), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procHcsCloseProcess.Addr(), uintptr(process))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -119,7 +116,7 @@ func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall6(procHcsCreateComputeSystem.Addr(), 5, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)), 0)
|
||||
r0, _, _ := syscall.SyscallN(procHcsCreateComputeSystem.Addr(), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -143,7 +140,7 @@ func _hcsCreateProcess(computeSystem HcsSystem, processParameters *uint16, proce
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall6(procHcsCreateProcess.Addr(), 5, uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0)
|
||||
r0, _, _ := syscall.SyscallN(procHcsCreateProcess.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -167,7 +164,7 @@ func _hcsEnumerateComputeSystems(query *uint16, computeSystems **uint16, result
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall(procHcsEnumerateComputeSystems.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result)))
|
||||
r0, _, _ := syscall.SyscallN(procHcsEnumerateComputeSystems.Addr(), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -191,7 +188,7 @@ func _hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery *uint
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall6(procHcsGetComputeSystemProperties.Addr(), 4, uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procHcsGetComputeSystemProperties.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -206,7 +203,7 @@ func hcsGetProcessInfo(process HcsProcess, processInformation *HcsProcessInforma
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall(procHcsGetProcessInfo.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result)))
|
||||
r0, _, _ := syscall.SyscallN(procHcsGetProcessInfo.Addr(), uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -221,7 +218,7 @@ func hcsGetProcessProperties(process HcsProcess, processProperties **uint16, res
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall(procHcsGetProcessProperties.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result)))
|
||||
r0, _, _ := syscall.SyscallN(procHcsGetProcessProperties.Addr(), uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -245,7 +242,7 @@ func _hcsGetServiceProperties(propertyQuery *uint16, properties **uint16, result
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall(procHcsGetServiceProperties.Addr(), 3, uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)))
|
||||
+	r0, _, _ := syscall.SyscallN(procHcsGetServiceProperties.Addr(), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -269,7 +266,7 @@ func _hcsModifyComputeSystem(computeSystem HcsSystem, configuration *uint16, res
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procHcsModifyComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procHcsModifyComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -293,7 +290,7 @@ func _hcsModifyProcess(process HcsProcess, settings *uint16, result **uint16) (h
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procHcsModifyProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procHcsModifyProcess.Addr(), uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -317,7 +314,7 @@ func _hcsModifyServiceSettings(settings *uint16, result **uint16) (hr error) {
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procHcsModifyServiceSettings.Addr(), 2, uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)), 0)
+	r0, _, _ := syscall.SyscallN(procHcsModifyServiceSettings.Addr(), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -341,7 +338,7 @@ func _hcsOpenComputeSystem(id *uint16, computeSystem *HcsSystem, result **uint16
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procHcsOpenComputeSystem.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procHcsOpenComputeSystem.Addr(), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -356,7 +353,7 @@ func hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, re
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall6(procHcsOpenProcess.Addr(), 4, uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procHcsOpenProcess.Addr(), uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -380,7 +377,7 @@ func _hcsPauseComputeSystem(computeSystem HcsSystem, options *uint16, result **u
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procHcsPauseComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procHcsPauseComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -395,7 +392,7 @@ func hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr,
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall6(procHcsRegisterComputeSystemCallback.Addr(), 4, uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procHcsRegisterComputeSystemCallback.Addr(), uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -410,7 +407,7 @@ func hcsRegisterProcessCallback(process HcsProcess, callback uintptr, context ui
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall6(procHcsRegisterProcessCallback.Addr(), 4, uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procHcsRegisterProcessCallback.Addr(), uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -434,7 +431,7 @@ func _hcsResumeComputeSystem(computeSystem HcsSystem, options *uint16, result **
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procHcsResumeComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procHcsResumeComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -458,7 +455,7 @@ func _hcsSaveComputeSystem(computeSystem HcsSystem, options *uint16, result **ui
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procHcsSaveComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procHcsSaveComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -482,7 +479,7 @@ func _hcsShutdownComputeSystem(computeSystem HcsSystem, options *uint16, result
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procHcsShutdownComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procHcsShutdownComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -506,7 +503,7 @@ func _hcsSignalProcess(process HcsProcess, options *uint16, result **uint16) (hr
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procHcsSignalProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procHcsSignalProcess.Addr(), uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -530,7 +527,7 @@ func _hcsStartComputeSystem(computeSystem HcsSystem, options *uint16, result **u
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procHcsStartComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procHcsStartComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -554,7 +551,7 @@ func _hcsTerminateComputeSystem(computeSystem HcsSystem, options *uint16, result
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procHcsTerminateComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
+	r0, _, _ := syscall.SyscallN(procHcsTerminateComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -569,7 +566,7 @@ func hcsTerminateProcess(process HcsProcess, result **uint16) (hr error) {
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 2, uintptr(process), uintptr(unsafe.Pointer(result)), 0)
+	r0, _, _ := syscall.SyscallN(procHcsTerminateProcess.Addr(), uintptr(process), uintptr(unsafe.Pointer(result)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -584,7 +581,7 @@ func hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) {
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procHcsUnregisterComputeSystemCallback.Addr(), 1, uintptr(callbackHandle), 0, 0)
+	r0, _, _ := syscall.SyscallN(procHcsUnregisterComputeSystemCallback.Addr(), uintptr(callbackHandle))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -599,7 +596,7 @@ func hcsUnregisterProcessCallback(callbackHandle HcsCallback) (hr error) {
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procHcsUnregisterProcessCallback.Addr(), 1, uintptr(callbackHandle), 0, 0)
+	r0, _, _ := syscall.SyscallN(procHcsUnregisterProcessCallback.Addr(), uintptr(callbackHandle))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
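The hunks above are purely mechanical: every fixed-arity syscall.Syscall/Syscall6 call in the generated HCS wrappers becomes a variadic syscall.SyscallN call, dropping the explicit argument count and the trailing zero padding. Below is a minimal sketch of that calling pattern, not the vendored code; GetCurrentProcessId is only a stand-in API used to keep the example self-contained. The hrToError helper mirrors the r0&0x1fff0000 == 0x00070000 check visible in the diff (an HRESULT in the FACILITY_WIN32 range), which recovers the underlying Win32 error code before it is surfaced as a syscall.Errno.

//go:build windows

package main

import (
	"fmt"
	"syscall"

	"golang.org/x/sys/windows"
)

var (
	modkernel32             = windows.NewLazySystemDLL("kernel32.dll")
	procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId")
)

// hrToError mirrors the error-folding pattern in the generated wrappers above:
// a negative HRESULT built from a Win32 error is reduced back to its Win32 code.
func hrToError(r0 uintptr) error {
	if int32(r0) >= 0 {
		return nil
	}
	if r0&0x1fff0000 == 0x00070000 { // HRESULT_FROM_WIN32 / FACILITY_WIN32
		r0 &= 0xffff
	}
	return syscall.Errno(r0)
}

func main() {
	// SyscallN takes the proc address plus exactly the arguments the API needs,
	// so callers no longer pass an argument count or pad with trailing zeros.
	r0, _, _ := syscall.SyscallN(procGetCurrentProcessId.Addr())
	fmt.Println("pid:", uint32(r0))
	_ = hrToError // shown for the HRESULT-returning HCS calls in the diff above
}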
45  vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go  (generated, vendored)
@@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
 	case errnoERROR_IO_PENDING:
 		return errERROR_IO_PENDING
 	}
-	// TODO: add more here, after collecting data on the common
-	// error values see on Windows. (perhaps when running
-	// all.bat?)
 	return e
 }
 
@@ -77,7 +74,7 @@ func getDiskFreeSpaceEx(directoryName string, freeBytesAvailableToCaller *int64,
 }
 
 func _getDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procGetDiskFreeSpaceExW.Addr(), uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -85,7 +82,7 @@ func _getDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *int6
 }
 
 func attachVirtualDisk(handle syscall.Handle, sd uintptr, flags uint32, providerFlags uint32, params uintptr, overlapped uintptr) (err error) {
-	r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(sd), uintptr(flags), uintptr(providerFlags), uintptr(params), uintptr(overlapped))
+	r1, _, e1 := syscall.SyscallN(procAttachVirtualDisk.Addr(), uintptr(handle), uintptr(sd), uintptr(flags), uintptr(providerFlags), uintptr(params), uintptr(overlapped))
 	if r1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -102,7 +99,7 @@ func openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtua
 }
 
 func _openVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) {
-	r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
+	r1, _, e1 := syscall.SyscallN(procOpenVirtualDisk.Addr(), uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
 	if r1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -123,7 +120,7 @@ func _activateLayer(info *driverInfo, id *uint16) (hr error) {
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procActivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
+	r0, _, _ := syscall.SyscallN(procActivateLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -156,7 +153,7 @@ func _copyLayer(info *driverInfo, srcId *uint16, dstId *uint16, descriptors []WC
 	if len(descriptors) > 0 {
 		_p2 = &descriptors[0]
 	}
-	r0, _, _ := syscall.Syscall6(procCopyLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(srcId)), uintptr(unsafe.Pointer(dstId)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0)
+	r0, _, _ := syscall.SyscallN(procCopyLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(srcId)), uintptr(unsafe.Pointer(dstId)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -185,7 +182,7 @@ func _createLayer(info *driverInfo, id *uint16, parent *uint16) (hr error) {
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procCreateLayer.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent)))
+	r0, _, _ := syscall.SyscallN(procCreateLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -213,7 +210,7 @@ func _createSandboxLayer(info *driverInfo, id *uint16, parent uintptr, descripto
 	if len(descriptors) > 0 {
 		_p1 = &descriptors[0]
 	}
-	r0, _, _ := syscall.Syscall6(procCreateSandboxLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(parent), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0)
+	r0, _, _ := syscall.SyscallN(procCreateSandboxLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(parent), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -237,7 +234,7 @@ func _deactivateLayer(info *driverInfo, id *uint16) (hr error) {
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procDeactivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
+	r0, _, _ := syscall.SyscallN(procDeactivateLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -261,7 +258,7 @@ func _destroyLayer(info *driverInfo, id *uint16) (hr error) {
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procDestroyLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
+	r0, _, _ := syscall.SyscallN(procDestroyLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -285,7 +282,7 @@ func _expandSandboxSize(info *driverInfo, id *uint16, size uint64) (hr error) {
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procExpandSandboxSize.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size))
+	r0, _, _ := syscall.SyscallN(procExpandSandboxSize.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -318,7 +315,7 @@ func _exportLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_L
 	if len(descriptors) > 0 {
 		_p2 = &descriptors[0]
 	}
-	r0, _, _ := syscall.Syscall6(procExportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0)
+	r0, _, _ := syscall.SyscallN(procExportLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -333,7 +330,7 @@ func getBaseImages(buffer **uint16) (hr error) {
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procGetBaseImages.Addr(), 1, uintptr(unsafe.Pointer(buffer)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetBaseImages.Addr(), uintptr(unsafe.Pointer(buffer)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -357,7 +354,7 @@ func _getLayerMountPath(info *driverInfo, id *uint16, length *uintptr, buffer *u
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall6(procGetLayerMountPath.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetLayerMountPath.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -386,7 +383,7 @@ func _grantVmAccess(vmid *uint16, filepath *uint16) (hr error) {
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procGrantVmAccess.Addr(), 2, uintptr(unsafe.Pointer(vmid)), uintptr(unsafe.Pointer(filepath)), 0)
+	r0, _, _ := syscall.SyscallN(procGrantVmAccess.Addr(), uintptr(unsafe.Pointer(vmid)), uintptr(unsafe.Pointer(filepath)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -419,7 +416,7 @@ func _importLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_L
 	if len(descriptors) > 0 {
 		_p2 = &descriptors[0]
 	}
-	r0, _, _ := syscall.Syscall6(procImportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0)
+	r0, _, _ := syscall.SyscallN(procImportLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -443,7 +440,7 @@ func _layerExists(info *driverInfo, id *uint16, exists *uint32) (hr error) {
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procLayerExists.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(exists)))
+	r0, _, _ := syscall.SyscallN(procLayerExists.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(exists)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -467,7 +464,7 @@ func _nameToGuid(name *uint16, guid *_guid) (hr error) {
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procNameToGuid.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(guid)), 0)
+	r0, _, _ := syscall.SyscallN(procNameToGuid.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(guid)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -495,7 +492,7 @@ func _prepareLayer(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPT
 	if len(descriptors) > 0 {
 		_p1 = &descriptors[0]
 	}
-	r0, _, _ := syscall.Syscall6(procPrepareLayer.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procPrepareLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -519,7 +516,7 @@ func _processBaseImage(path *uint16) (hr error) {
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procProcessBaseImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procProcessBaseImage.Addr(), uintptr(unsafe.Pointer(path)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -543,7 +540,7 @@ func _processUtilityImage(path *uint16) (hr error) {
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procProcessUtilityImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procProcessUtilityImage.Addr(), uintptr(unsafe.Pointer(path)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -567,7 +564,7 @@ func _unprepareLayer(info *driverInfo, id *uint16) (hr error) {
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall(procUnprepareLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
+	r0, _, _ := syscall.SyscallN(procUnprepareLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
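Several of the wclayer wrappers above (copyLayer, exportLayer, importLayer, prepareLayer) hand a Go slice to a Win32 API as a pointer-plus-count pair: the address of the first element is taken only when the slice is non-empty, and the pointer and len() are passed together. The sketch below restates that idiom with made-up names; the layerDescriptor struct, the ImportLayer proc name, and the function signature are placeholders for illustration, not the vendored identifiers.

//go:build windows

package wclayersketch

import (
	"syscall"
	"unsafe"

	"golang.org/x/sys/windows"
)

// layerDescriptor stands in for the real WC_LAYER_DESCRIPTOR struct.
type layerDescriptor struct {
	_ [16]byte // fields elided in this sketch
}

var (
	modvmcompute    = windows.NewLazySystemDLL("vmcompute.dll")
	procImportLayer = modvmcompute.NewProc("ImportLayer") // illustrative proc name
)

func importDescriptors(info, id, path *uint16, descriptors []layerDescriptor) error {
	// For an empty slice a nil pointer and a zero length are passed, so the
	// callee never reads descriptor memory that does not exist.
	var p *layerDescriptor
	if len(descriptors) > 0 {
		p = &descriptors[0]
	}
	r0, _, _ := syscall.SyscallN(procImportLayer.Addr(),
		uintptr(unsafe.Pointer(info)),
		uintptr(unsafe.Pointer(id)),
		uintptr(unsafe.Pointer(path)),
		uintptr(unsafe.Pointer(p)),
		uintptr(len(descriptors)))
	if int32(r0) < 0 {
		if r0&0x1fff0000 == 0x00070000 {
			r0 &= 0xffff
		}
		return syscall.Errno(r0)
	}
	return nil
}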
7  vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go  (generated, vendored)
@@ -4,7 +4,6 @@ package winapi
 
 import (
 	"errors"
-	"reflect"
 	"syscall"
 	"unsafe"
 
@@ -14,11 +13,7 @@ import (
 // Uint16BufferToSlice wraps a uint16 pointer-and-length into a slice
 // for easier interop with Go APIs
 func Uint16BufferToSlice(buffer *uint16, bufferLength int) (result []uint16) {
-	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&result))
-	hdr.Data = uintptr(unsafe.Pointer(buffer))
-	hdr.Cap = bufferLength
-	hdr.Len = bufferLength
-
+	result = unsafe.Slice(buffer, bufferLength)
 	return
 }
 
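The utils.go hunk replaces the reflect.SliceHeader construction with unsafe.Slice, which has been the supported way to build a slice from a pointer and a length since Go 1.17. A rough, self-contained equivalent follows; it uses the standard unicode/utf16 package instead of the Windows-only helpers so the sketch runs anywhere.

package main

import (
	"fmt"
	"unicode/utf16"
	"unsafe"
)

// uint16BufferToSlice mirrors the new body of Uint16BufferToSlice: the slice
// is produced directly from the pointer and length, with no reflect.SliceHeader
// manipulation that the garbage collector cannot track.
func uint16BufferToSlice(buffer *uint16, bufferLength int) []uint16 {
	return unsafe.Slice(buffer, bufferLength)
}

func main() {
	encoded := utf16.Encode([]rune("hello, layer"))
	view := uint16BufferToSlice(&encoded[0], len(encoded))
	fmt.Println(string(utf16.Decode(view))) // hello, layer
}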
115  vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go  (generated, vendored)
@@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
 	case errnoERROR_IO_PENDING:
 		return errERROR_IO_PENDING
 	}
-	// TODO: add more here, after collecting data on the common
-	// error values see on Windows. (perhaps when running
-	// all.bat?)
 	return e
 }
 
@@ -109,7 +106,7 @@ var (
 )
 
 func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) {
-	r1, _, e1 := syscall.Syscall6(procLogonUserW.Addr(), 6, uintptr(unsafe.Pointer(username)), uintptr(unsafe.Pointer(domain)), uintptr(unsafe.Pointer(password)), uintptr(logonType), uintptr(logonProvider), uintptr(unsafe.Pointer(token)))
+	r1, _, e1 := syscall.SyscallN(procLogonUserW.Addr(), uintptr(unsafe.Pointer(username)), uintptr(unsafe.Pointer(domain)), uintptr(unsafe.Pointer(password)), uintptr(logonType), uintptr(logonProvider), uintptr(unsafe.Pointer(token)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@ -121,7 +118,7 @@ func BfSetupFilter(jobHandle windows.Handle, flags uint32, virtRootPath *uint16,
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall6(procBfSetupFilter.Addr(), 6, uintptr(jobHandle), uintptr(flags), uintptr(unsafe.Pointer(virtRootPath)), uintptr(unsafe.Pointer(virtTargetPath)), uintptr(unsafe.Pointer(virtExceptions)), uintptr(virtExceptionPathCount))
|
||||
r0, _, _ := syscall.SyscallN(procBfSetupFilter.Addr(), uintptr(jobHandle), uintptr(flags), uintptr(unsafe.Pointer(virtRootPath)), uintptr(unsafe.Pointer(virtTargetPath)), uintptr(unsafe.Pointer(virtExceptions)), uintptr(virtExceptionPathCount))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -132,7 +129,7 @@ func BfSetupFilter(jobHandle windows.Handle, flags uint32, virtRootPath *uint16,
|
|||
}
|
||||
|
||||
func CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) {
|
||||
r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_PropertyW.Addr(), 6, uintptr(dnDevInst), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(unsafe.Pointer(propertyBufferSize)), uintptr(uFlags))
|
||||
r0, _, _ := syscall.SyscallN(procCM_Get_DevNode_PropertyW.Addr(), uintptr(dnDevInst), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(unsafe.Pointer(propertyBufferSize)), uintptr(uFlags))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -143,7 +140,7 @@ func CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyTyp
|
|||
}
|
||||
|
||||
func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error) {
|
||||
r0, _, _ := syscall.Syscall6(procCM_Get_Device_ID_ListA.Addr(), 4, uintptr(unsafe.Pointer(pszFilter)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(uFlags), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procCM_Get_Device_ID_ListA.Addr(), uintptr(unsafe.Pointer(pszFilter)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(uFlags))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -154,7 +151,7 @@ func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags u
|
|||
}
|
||||
|
||||
func CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) {
|
||||
r0, _, _ := syscall.Syscall(procCM_Get_Device_ID_List_SizeA.Addr(), 3, uintptr(unsafe.Pointer(pulLen)), uintptr(unsafe.Pointer(pszFilter)), uintptr(uFlags))
|
||||
r0, _, _ := syscall.SyscallN(procCM_Get_Device_ID_List_SizeA.Addr(), uintptr(unsafe.Pointer(pulLen)), uintptr(unsafe.Pointer(pszFilter)), uintptr(uFlags))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -174,7 +171,7 @@ func CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr er
|
|||
}
|
||||
|
||||
func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr error) {
|
||||
r0, _, _ := syscall.Syscall(procCM_Locate_DevNodeW.Addr(), 3, uintptr(unsafe.Pointer(pdnDevInst)), uintptr(unsafe.Pointer(pDeviceID)), uintptr(uFlags))
|
||||
r0, _, _ := syscall.SyscallN(procCM_Locate_DevNodeW.Addr(), uintptr(unsafe.Pointer(pdnDevInst)), uintptr(unsafe.Pointer(pDeviceID)), uintptr(uFlags))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -189,7 +186,7 @@ func CimCloseImage(cimFSHandle FsHandle) (err error) {
|
|||
if err != nil {
|
||||
return
|
||||
}
|
||||
syscall.Syscall(procCimCloseImage.Addr(), 1, uintptr(cimFSHandle), 0, 0)
|
||||
syscall.SyscallN(procCimCloseImage.Addr(), uintptr(cimFSHandle))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -198,7 +195,7 @@ func CimCloseStream(cimStreamHandle StreamHandle) (hr error) {
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall(procCimCloseStream.Addr(), 1, uintptr(cimStreamHandle), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procCimCloseStream.Addr(), uintptr(cimStreamHandle))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -213,7 +210,7 @@ func CimCommitImage(cimFSHandle FsHandle) (hr error) {
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall(procCimCommitImage.Addr(), 1, uintptr(cimFSHandle), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procCimCommitImage.Addr(), uintptr(cimFSHandle))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -237,7 +234,7 @@ func _CimCreateAlternateStream(cimFSHandle FsHandle, path *uint16, size uint64,
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall6(procCimCreateAlternateStream.Addr(), 4, uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(size), uintptr(unsafe.Pointer(cimStreamHandle)), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procCimCreateAlternateStream.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(size), uintptr(unsafe.Pointer(cimStreamHandle)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -261,7 +258,7 @@ func _CimCreateFile(cimFSHandle FsHandle, path *uint16, file *CimFsFileMetadata,
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall6(procCimCreateFile.Addr(), 4, uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(cimStreamHandle)), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procCimCreateFile.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(cimStreamHandle)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -290,7 +287,7 @@ func _CimCreateHardLink(cimFSHandle FsHandle, newPath *uint16, oldPath *uint16)
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall(procCimCreateHardLink.Addr(), 3, uintptr(cimFSHandle), uintptr(unsafe.Pointer(newPath)), uintptr(unsafe.Pointer(oldPath)))
|
||||
r0, _, _ := syscall.SyscallN(procCimCreateHardLink.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(newPath)), uintptr(unsafe.Pointer(oldPath)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -314,7 +311,7 @@ func _CimCreateImage(imagePath *uint16, oldFSName *uint16, newFSName *uint16, ci
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall6(procCimCreateImage.Addr(), 4, uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(oldFSName)), uintptr(unsafe.Pointer(newFSName)), uintptr(unsafe.Pointer(cimFSHandle)), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procCimCreateImage.Addr(), uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(oldFSName)), uintptr(unsafe.Pointer(newFSName)), uintptr(unsafe.Pointer(cimFSHandle)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -338,7 +335,7 @@ func _CimDeletePath(cimFSHandle FsHandle, path *uint16) (hr error) {
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall(procCimDeletePath.Addr(), 2, uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), 0)
|
||||
r0, _, _ := syscall.SyscallN(procCimDeletePath.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -353,7 +350,7 @@ func CimDismountImage(volumeID *g) (hr error) {
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall(procCimDismountImage.Addr(), 1, uintptr(unsafe.Pointer(volumeID)), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procCimDismountImage.Addr(), uintptr(unsafe.Pointer(volumeID)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -382,7 +379,7 @@ func _CimMountImage(imagePath *uint16, fsName *uint16, flags uint32, volumeID *g
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall6(procCimMountImage.Addr(), 4, uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(fsName)), uintptr(flags), uintptr(unsafe.Pointer(volumeID)), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procCimMountImage.Addr(), uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(fsName)), uintptr(flags), uintptr(unsafe.Pointer(volumeID)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -397,7 +394,7 @@ func CimWriteStream(cimStreamHandle StreamHandle, buffer uintptr, bufferSize uin
|
|||
if hr != nil {
|
||||
return
|
||||
}
|
||||
r0, _, _ := syscall.Syscall(procCimWriteStream.Addr(), 3, uintptr(cimStreamHandle), uintptr(buffer), uintptr(bufferSize))
|
||||
r0, _, _ := syscall.SyscallN(procCimWriteStream.Addr(), uintptr(cimStreamHandle), uintptr(buffer), uintptr(bufferSize))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -408,7 +405,7 @@ func CimWriteStream(cimStreamHandle StreamHandle, buffer uintptr, bufferSize uin
|
|||
}
|
||||
|
||||
func SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) {
|
||||
r0, _, _ := syscall.Syscall(procSetJobCompartmentId.Addr(), 2, uintptr(handle), uintptr(compartmentId), 0)
|
||||
r0, _, _ := syscall.SyscallN(procSetJobCompartmentId.Addr(), uintptr(handle), uintptr(compartmentId))
|
||||
if r0 != 0 {
|
||||
win32Err = syscall.Errno(r0)
|
||||
}
|
||||
|
|
@ -416,12 +413,12 @@ func SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err
|
|||
}
|
||||
|
||||
func ClosePseudoConsole(hpc windows.Handle) {
|
||||
syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(hpc), 0, 0)
|
||||
syscall.SyscallN(procClosePseudoConsole.Addr(), uintptr(hpc))
|
||||
return
|
||||
}
|
||||
|
||||
func CopyFileW(existingFileName *uint16, newFileName *uint16, failIfExists int32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procCopyFileW.Addr(), 3, uintptr(unsafe.Pointer(existingFileName)), uintptr(unsafe.Pointer(newFileName)), uintptr(failIfExists))
|
||||
r1, _, e1 := syscall.SyscallN(procCopyFileW.Addr(), uintptr(unsafe.Pointer(existingFileName)), uintptr(unsafe.Pointer(newFileName)), uintptr(failIfExists))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
|
|
@ -429,7 +426,7 @@ func CopyFileW(existingFileName *uint16, newFileName *uint16, failIfExists int32
|
|||
}
|
||||
|
||||
func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) {
|
||||
r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon)), 0)
|
||||
r0, _, _ := syscall.SyscallN(procCreatePseudoConsole.Addr(), uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon)))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -440,7 +437,7 @@ func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Han
|
|||
}
|
||||
|
||||
func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) {
|
||||
r0, _, e1 := syscall.Syscall9(procCreateRemoteThread.Addr(), 7, uintptr(process), uintptr(unsafe.Pointer(sa)), uintptr(stackSize), uintptr(startAddr), uintptr(parameter), uintptr(creationFlags), uintptr(unsafe.Pointer(threadID)), 0, 0)
|
||||
r0, _, e1 := syscall.SyscallN(procCreateRemoteThread.Addr(), uintptr(process), uintptr(unsafe.Pointer(sa)), uintptr(stackSize), uintptr(startAddr), uintptr(parameter), uintptr(creationFlags), uintptr(unsafe.Pointer(threadID)))
|
||||
handle = windows.Handle(r0)
|
||||
if handle == 0 {
|
||||
err = errnoErr(e1)
|
||||
|
|
@ -449,13 +446,13 @@ func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes,
|
|||
}
|
||||
|
||||
func GetActiveProcessorCount(groupNumber uint16) (amount uint32) {
|
||||
r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procGetActiveProcessorCount.Addr(), uintptr(groupNumber))
|
||||
amount = uint32(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result)))
|
||||
r1, _, e1 := syscall.SyscallN(procIsProcessInJob.Addr(), uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result)))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
|
|
@ -463,18 +460,18 @@ func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result
|
|||
}
|
||||
|
||||
func LocalAlloc(flags uint32, size int) (ptr uintptr) {
|
||||
r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0)
|
||||
r0, _, _ := syscall.SyscallN(procLocalAlloc.Addr(), uintptr(flags), uintptr(size))
|
||||
ptr = uintptr(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func LocalFree(ptr uintptr) {
|
||||
syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0)
|
||||
syscall.SyscallN(procLocalFree.Addr(), uintptr(ptr))
|
||||
return
|
||||
}
|
||||
|
||||
func OpenJobObject(desiredAccess uint32, inheritHandle int32, lpName *uint16) (handle windows.Handle, err error) {
|
||||
r0, _, e1 := syscall.Syscall(procOpenJobObjectW.Addr(), 3, uintptr(desiredAccess), uintptr(inheritHandle), uintptr(unsafe.Pointer(lpName)))
|
||||
r0, _, e1 := syscall.SyscallN(procOpenJobObjectW.Addr(), uintptr(desiredAccess), uintptr(inheritHandle), uintptr(unsafe.Pointer(lpName)))
|
||||
handle = windows.Handle(r0)
|
||||
if handle == 0 {
|
||||
err = errnoErr(e1)
|
||||
|
|
@ -483,7 +480,7 @@ func OpenJobObject(desiredAccess uint32, inheritHandle int32, lpName *uint16) (h
|
|||
}
|
||||
|
||||
func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0)
|
||||
r1, _, e1 := syscall.SyscallN(procQueryInformationJobObject.Addr(), uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)))
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
|
|
@ -491,7 +488,7 @@ func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobOb
|
|||
}
|
||||
|
||||
func QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) {
|
||||
r0, _, e1 := syscall.Syscall6(procQueryIoRateControlInformationJobObject.Addr(), 4, uintptr(jobHandle), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(ioRateControlInfo)), uintptr(unsafe.Pointer(infoBlockCount)), 0, 0)
|
||||
r0, _, e1 := syscall.SyscallN(procQueryIoRateControlInformationJobObject.Addr(), uintptr(jobHandle), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(ioRateControlInfo)), uintptr(unsafe.Pointer(infoBlockCount)))
|
||||
ret = uint32(r0)
|
||||
if ret == 0 {
|
||||
err = errnoErr(e1)
|
||||
|
|
@ -500,7 +497,7 @@ func QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName
|
|||
}
|
||||
|
||||
func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) {
|
||||
r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(hPc), uintptr(size), 0)
|
||||
r0, _, _ := syscall.SyscallN(procResizePseudoConsole.Addr(), uintptr(hPc), uintptr(size))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
|
|
@ -511,7 +508,7 @@ func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) {
|
|||
}
|
||||
|
||||
func SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) {
|
||||
r0, _, e1 := syscall.Syscall6(procSearchPathW.Addr(), 6, uintptr(unsafe.Pointer(lpPath)), uintptr(unsafe.Pointer(lpFileName)), uintptr(unsafe.Pointer(lpExtension)), uintptr(nBufferLength), uintptr(unsafe.Pointer(lpBuffer)), uintptr(unsafe.Pointer(lpFilePath)))
|
||||
r0, _, e1 := syscall.SyscallN(procSearchPathW.Addr(), uintptr(unsafe.Pointer(lpPath)), uintptr(unsafe.Pointer(lpFileName)), uintptr(unsafe.Pointer(lpExtension)), uintptr(nBufferLength), uintptr(unsafe.Pointer(lpBuffer)), uintptr(unsafe.Pointer(lpFilePath)))
|
||||
size = uint32(r0)
|
||||
if size == 0 {
|
||||
err = errnoErr(e1)
|
||||
|
|
@ -520,7 +517,7 @@ func SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBuffer
|
|||
}
|
||||
|
||||
func SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) {
|
||||
r0, _, e1 := syscall.Syscall(procSetIoRateControlInformationJobObject.Addr(), 2, uintptr(jobHandle), uintptr(unsafe.Pointer(ioRateControlInfo)), 0)
|
||||
r0, _, e1 := syscall.SyscallN(procSetIoRateControlInformationJobObject.Addr(), uintptr(jobHandle), uintptr(unsafe.Pointer(ioRateControlInfo)))
|
||||
ret = uint32(r0)
|
||||
if ret == 0 {
|
||||
err = errnoErr(e1)
|
||||
|
|
@ -529,7 +526,7 @@ func SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateContro
|
|||
}
|
||||
|
||||
func netLocalGroupAddMembers(serverName *uint16, groupName *uint16, level uint32, buf *byte, totalEntries uint32) (status error) {
|
||||
r0, _, _ := syscall.Syscall6(procNetLocalGroupAddMembers.Addr(), 5, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(groupName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(totalEntries), 0)
|
||||
r0, _, _ := syscall.SyscallN(procNetLocalGroupAddMembers.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(groupName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(totalEntries))
|
||||
if r0 != 0 {
|
||||
status = syscall.Errno(r0)
|
||||
}
|
||||
|
|
@ -537,7 +534,7 @@ func netLocalGroupAddMembers(serverName *uint16, groupName *uint16, level uint32
|
|||
}
|
||||
|
||||
func netLocalGroupGetInfo(serverName *uint16, groupName *uint16, level uint32, bufptr **byte) (status error) {
|
||||
r0, _, _ := syscall.Syscall6(procNetLocalGroupGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(groupName)), uintptr(level), uintptr(unsafe.Pointer(bufptr)), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procNetLocalGroupGetInfo.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(groupName)), uintptr(level), uintptr(unsafe.Pointer(bufptr)))
|
||||
if r0 != 0 {
|
||||
status = syscall.Errno(r0)
|
||||
}
|
||||
|
|
@ -545,7 +542,7 @@ func netLocalGroupGetInfo(serverName *uint16, groupName *uint16, level uint32, b
|
|||
}
|
||||
|
||||
func netUserAdd(serverName *uint16, level uint32, buf *byte, parm_err *uint32) (status error) {
|
||||
r0, _, _ := syscall.Syscall6(procNetUserAdd.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(parm_err)), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procNetUserAdd.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(parm_err)))
|
||||
if r0 != 0 {
|
||||
status = syscall.Errno(r0)
|
||||
}
|
||||
|
|
@ -553,7 +550,7 @@ func netUserAdd(serverName *uint16, level uint32, buf *byte, parm_err *uint32) (
|
|||
}
|
||||
|
||||
func netUserDel(serverName *uint16, username *uint16) (status error) {
|
||||
r0, _, _ := syscall.Syscall(procNetUserDel.Addr(), 2, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(username)), 0)
|
||||
r0, _, _ := syscall.SyscallN(procNetUserDel.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(username)))
|
||||
if r0 != 0 {
|
||||
status = syscall.Errno(r0)
|
||||
}
|
||||
|
|
@ -561,25 +558,25 @@ func netUserDel(serverName *uint16, username *uint16) (status error) {
|
|||
}
|
||||
|
||||
func NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) {
|
||||
r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0)
|
||||
r0, _, _ := syscall.SyscallN(procNtCreateFile.Addr(), uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength))
|
||||
status = uint32(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) {
|
||||
r0, _, _ := syscall.Syscall(procNtCreateJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes)))
|
||||
r0, _, _ := syscall.SyscallN(procNtCreateJobObject.Addr(), uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes)))
|
||||
status = uint32(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) {
|
||||
r0, _, _ := syscall.Syscall(procNtOpenDirectoryObject.Addr(), 3, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)))
|
||||
r0, _, _ := syscall.SyscallN(procNtOpenDirectoryObject.Addr(), uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)))
|
||||
status = uint32(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) {
|
||||
r0, _, _ := syscall.Syscall(procNtOpenJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes)))
|
||||
r0, _, _ := syscall.SyscallN(procNtOpenJobObject.Addr(), uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes)))
|
||||
status = uint32(r0)
|
||||
return
|
||||
}
|
||||
|
|
@ -593,31 +590,31 @@ func NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleE
|
|||
if restartScan {
|
||||
_p1 = 1
|
||||
}
|
||||
r0, _, _ := syscall.Syscall9(procNtQueryDirectoryObject.Addr(), 7, uintptr(handle), uintptr(unsafe.Pointer(buffer)), uintptr(length), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(returnLength)), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procNtQueryDirectoryObject.Addr(), uintptr(handle), uintptr(unsafe.Pointer(buffer)), uintptr(length), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(returnLength)))
|
||||
status = uint32(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) {
|
||||
r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength)), 0)
|
||||
r0, _, _ := syscall.SyscallN(procNtQueryInformationProcess.Addr(), uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength)))
|
||||
status = uint32(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) {
|
||||
r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procNtQuerySystemInformation.Addr(), uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)))
|
||||
status = uint32(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) {
|
||||
r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0)
|
||||
r0, _, _ := syscall.SyscallN(procNtSetInformationFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class))
|
||||
status = uint32(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func RtlNtStatusToDosError(status uint32) (winerr error) {
|
||||
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosError.Addr(), 1, uintptr(status), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosError.Addr(), uintptr(status))
|
||||
if r0 != 0 {
|
||||
winerr = syscall.Errno(r0)
|
||||
}
|
||||
|
|
@ -625,7 +622,7 @@ func RtlNtStatusToDosError(status uint32) (winerr error) {
|
|||
}
|
||||
|
||||
func ORCloseHive(handle ORHKey) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall(procORCloseHive.Addr(), 1, uintptr(handle), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procORCloseHive.Addr(), uintptr(handle))
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
|
|
@ -633,7 +630,7 @@ func ORCloseHive(handle ORHKey) (win32err error) {
|
|||
}
|
||||
|
||||
func ORCloseKey(handle ORHKey) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall(procORCloseKey.Addr(), 1, uintptr(handle), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procORCloseKey.Addr(), uintptr(handle))
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
|
|
@ -641,7 +638,7 @@ func ORCloseKey(handle ORHKey) (win32err error) {
|
|||
}
|
||||
|
||||
func ORCreateHive(key *ORHKey) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall(procORCreateHive.Addr(), 1, uintptr(unsafe.Pointer(key)), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procORCreateHive.Addr(), uintptr(unsafe.Pointer(key)))
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
|
|
@ -658,7 +655,7 @@ func ORCreateKey(handle ORHKey, subKey string, class uintptr, options uint32, se
|
|||
}
|
||||
|
||||
func _ORCreateKey(handle ORHKey, subKey *uint16, class uintptr, options uint32, securityDescriptor uintptr, result *ORHKey, disposition *uint32) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall9(procORCreateKey.Addr(), 7, uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(class), uintptr(options), uintptr(securityDescriptor), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procORCreateKey.Addr(), uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(class), uintptr(options), uintptr(securityDescriptor), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)))
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
|
|
@ -675,7 +672,7 @@ func ORDeleteKey(handle ORHKey, subKey string) (win32err error) {
|
|||
}
|
||||
|
||||
func _ORDeleteKey(handle ORHKey, subKey *uint16) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall(procORDeleteKey.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(subKey)), 0)
|
||||
r0, _, _ := syscall.SyscallN(procORDeleteKey.Addr(), uintptr(handle), uintptr(unsafe.Pointer(subKey)))
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
|
|
@ -697,7 +694,7 @@ func ORGetValue(handle ORHKey, subKey string, value string, valueType *uint32, d
|
|||
}
|
||||
|
||||
func _ORGetValue(handle ORHKey, subKey *uint16, value *uint16, valueType *uint32, data *byte, dataLen *uint32) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall6(procORGetValue.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(unsafe.Pointer(value)), uintptr(unsafe.Pointer(valueType)), uintptr(unsafe.Pointer(data)), uintptr(unsafe.Pointer(dataLen)))
|
||||
r0, _, _ := syscall.SyscallN(procORGetValue.Addr(), uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(unsafe.Pointer(value)), uintptr(unsafe.Pointer(valueType)), uintptr(unsafe.Pointer(data)), uintptr(unsafe.Pointer(dataLen)))
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
|
|
@ -709,7 +706,7 @@ func ORMergeHives(hiveHandles []ORHKey, result *ORHKey) (win32err error) {
|
|||
if len(hiveHandles) > 0 {
|
||||
_p0 = &hiveHandles[0]
|
||||
}
|
||||
r0, _, _ := syscall.Syscall(procORMergeHives.Addr(), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(hiveHandles)), uintptr(unsafe.Pointer(result)))
|
||||
r0, _, _ := syscall.SyscallN(procORMergeHives.Addr(), uintptr(unsafe.Pointer(_p0)), uintptr(len(hiveHandles)), uintptr(unsafe.Pointer(result)))
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
|
|
@ -726,7 +723,7 @@ func OROpenHive(hivePath string, result *ORHKey) (win32err error) {
|
|||
}
|
||||
|
||||
func _OROpenHive(hivePath *uint16, result *ORHKey) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall(procOROpenHive.Addr(), 2, uintptr(unsafe.Pointer(hivePath)), uintptr(unsafe.Pointer(result)), 0)
|
||||
r0, _, _ := syscall.SyscallN(procOROpenHive.Addr(), uintptr(unsafe.Pointer(hivePath)), uintptr(unsafe.Pointer(result)))
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
|
|
@ -743,7 +740,7 @@ func OROpenKey(handle ORHKey, subKey string, result *ORHKey) (win32err error) {
|
|||
}
|
||||
|
||||
func _OROpenKey(handle ORHKey, subKey *uint16, result *ORHKey) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall(procOROpenKey.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(unsafe.Pointer(result)))
|
||||
r0, _, _ := syscall.SyscallN(procOROpenKey.Addr(), uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(unsafe.Pointer(result)))
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
|
|
@ -760,7 +757,7 @@ func ORSaveHive(handle ORHKey, hivePath string, osMajorVersion uint32, osMinorVe
|
|||
}
|
||||
|
||||
func _ORSaveHive(handle ORHKey, hivePath *uint16, osMajorVersion uint32, osMinorVersion uint32) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall6(procORSaveHive.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(hivePath)), uintptr(osMajorVersion), uintptr(osMinorVersion), 0, 0)
|
||||
r0, _, _ := syscall.SyscallN(procORSaveHive.Addr(), uintptr(handle), uintptr(unsafe.Pointer(hivePath)), uintptr(osMajorVersion), uintptr(osMinorVersion))
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
}
|
||||
|
|
@@ -777,7 +774,7 @@ func ORSetValue(handle ORHKey, valueName string, valueType uint32, data *byte, d
 }
 
 func _ORSetValue(handle ORHKey, valueName *uint16, valueType uint32, data *byte, dataLen uint32) (win32err error) {
-	r0, _, _ := syscall.Syscall6(procORSetValue.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(valueName)), uintptr(valueType), uintptr(unsafe.Pointer(data)), uintptr(dataLen), 0)
+	r0, _, _ := syscall.SyscallN(procORSetValue.Addr(), uintptr(handle), uintptr(unsafe.Pointer(valueName)), uintptr(valueType), uintptr(unsafe.Pointer(data)), uintptr(dataLen))
 	if r0 != 0 {
 		win32err = syscall.Errno(r0)
 	}
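The winapi wrappers in this file mix several failure conventions, all visible in the hunks: HRESULT-returning calls check int32(r0) < 0, BOOL-style calls (CopyFileW, LogonUser, IsProcessInJob) treat r1 == 0 as failure and take the error from GetLastError via the returned Errno, and the NetLocalGroup*/NetUser*/OR* registry calls return the Win32 error code directly in r0. A compact, hypothetical illustration of the latter two conventions follows; the function names are placeholders, not vendored identifiers.

package winerrsketch

import "syscall"

// boolResult models the CopyFileW/LogonUser style: a zero return means failure
// and the accompanying Errno (GetLastError) carries the reason.
func boolResult(r1 uintptr, e1 syscall.Errno) error {
	if r1 == 0 {
		return e1
	}
	return nil
}

// win32Result models the netUserAdd/OROpenHive style: the return value itself
// is the Win32 error code, with zero meaning success.
func win32Result(r0 uintptr) error {
	if r0 != 0 {
		return syscall.Errno(r0)
	}
	return nil
}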
5  vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go  (generated, vendored)
@@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
 	case errnoERROR_IO_PENDING:
 		return errERROR_IO_PENDING
 	}
-	// TODO: add more here, after collecting data on the common
-	// error values see on Windows. (perhaps when running
-	// all.bat?)
 	return e
 }
 
@@ -46,7 +43,7 @@ var (
 )
 
 func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) {
-	r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0)
+	r0, _, _ := syscall.SyscallN(procSetCurrentThreadCompartmentId.Addr(), uintptr(compartmentId))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
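All three generated files touched above drop the same stale TODO comment from errnoErr. Sketched below from the surrounding diff context (so treat it as approximate rather than the exact vendored helper), the function exists to avoid allocating a fresh error value on hot paths: the common ERROR_IO_PENDING errno is mapped to a preallocated error, and anything else is returned as the Errno itself.

package errnosketch

import "syscall"

const errnoERROR_IO_PENDING = 997 // ERROR_IO_PENDING

var errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)

// errnoErr returns a cached error value for common Errnos so that frequent
// failures do not allocate a new interface value on every call. The vendored
// helper also special-cases an Errno of 0; callers only invoke it on failure.
func errnoErr(e syscall.Errno) error {
	switch e {
	case errnoERROR_IO_PENDING:
		return errERROR_IO_PENDING
	}
	return e
}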
80  vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go  (generated, vendored)
|
|
@ -1079,6 +1079,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "ap-southeast-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ca-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-central-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -1091,6 +1094,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "eu-west-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "sa-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-east-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -20938,6 +20944,9 @@ var awsPartition = partition{
|
|||
},
|
||||
"meetings-chime": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "af-south-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-northeast-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -20956,6 +20965,21 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "ca-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ca-central-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "meetings-chime-fips.ca-central-1.amazonaws.com",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "ca-central-1-fips",
|
||||
}: endpoint{
|
||||
Hostname: "meetings-chime-fips.ca-central-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "ca-central-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "eu-central-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -41977,6 +42001,62 @@ var awsusgovPartition = partition{
				}: endpoint{},
			},
		},
		"kinesisvideo": service{
			Endpoints: serviceEndpoints{
				endpointKey{
					Region: "fips-us-gov-east-1",
				}: endpoint{
					Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com",
					CredentialScope: credentialScope{
						Region: "us-gov-east-1",
					},
					Deprecated: boxedTrue,
				},
				endpointKey{
					Region: "fips-us-gov-west-1",
				}: endpoint{
					Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com",
					CredentialScope: credentialScope{
						Region: "us-gov-west-1",
					},
					Deprecated: boxedTrue,
				},
				endpointKey{
					Region: "us-gov-east-1",
				}: endpoint{
					Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com",
					CredentialScope: credentialScope{
						Region: "us-gov-east-1",
					},
				},
				endpointKey{
					Region: "us-gov-east-1",
					Variant: fipsVariant,
				}: endpoint{
					Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com",
					CredentialScope: credentialScope{
						Region: "us-gov-east-1",
					},
				},
				endpointKey{
					Region: "us-gov-west-1",
				}: endpoint{
					Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com",
					CredentialScope: credentialScope{
						Region: "us-gov-west-1",
					},
				},
				endpointKey{
					Region: "us-gov-west-1",
					Variant: fipsVariant,
				}: endpoint{
					Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com",
					CredentialScope: credentialScope{
						Region: "us-gov-west-1",
					},
				},
			},
		},
		"kms": service{
			Endpoints: serviceEndpoints{
				endpointKey{
2 vendor/github.com/aws/aws-sdk-go/aws/version.go (generated, vendored)
@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
const SDKVersion = "1.54.2"
const SDKVersion = "1.54.10"
227 vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go (generated, vendored)
@ -69,8 +69,7 @@ func (c *AutoScaling) AttachInstancesRequest(input *AttachInstancesInput) (req *
// groups attached to your Auto Scaling group, the instances are also registered
// with the target groups.
//
// For more information, see Attach EC2 instances to your Auto Scaling group
// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/attach-instance-asg.html)
// For more information, see Detach or attach instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-detach-attach-instances.html)
// in the Amazon EC2 Auto Scaling User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
@ -969,7 +968,7 @@ func (c *AutoScaling) CreateLaunchConfigurationRequest(input *CreateLaunchConfig
|
|||
// about updating this limit, see Quotas for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-quotas.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// For more information, see Launch configurations (https://docs.aws.amazon.com/autoscaling/ec2/userguide/LaunchConfiguration.html)
|
||||
// For more information, see Launch configurations (https://docs.aws.amazon.com/autoscaling/ec2/userguide/launch-configurations.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// Amazon EC2 Auto Scaling configures instances launched as part of an Auto
|
||||
|
|
@ -1534,7 +1533,7 @@ func (c *AutoScaling) DeletePolicyRequest(input *DeletePolicyInput) (req *reques
|
|||
// the underlying alarm action, but does not delete the alarm, even if it no
|
||||
// longer has an associated action.
|
||||
//
|
||||
// For more information, see Deleting a scaling policy (https://docs.aws.amazon.com/autoscaling/ec2/userguide/deleting-scaling-policy.html)
|
||||
// For more information, see Delete a scaling policy (https://docs.aws.amazon.com/autoscaling/ec2/userguide/deleting-scaling-policy.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
|
|
@ -3599,8 +3598,8 @@ func (c *AutoScaling) DescribeScalingActivitiesRequest(input *DescribeScalingAct
|
|||
// Gets information about the scaling activities in the account and Region.
|
||||
//
|
||||
// When scaling events occur, you see a record of the scaling activity in the
|
||||
// scaling activities. For more information, see Verifying a scaling activity
|
||||
// for an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-verify-scaling-activity.html)
|
||||
// scaling activities. For more information, see Verify a scaling activity for
|
||||
// an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-verify-scaling-activity.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// If the scaling event succeeds, the value of the StatusCode element in the
|
||||
|
|
@ -4120,8 +4119,8 @@ func (c *AutoScaling) DescribeTerminationPolicyTypesRequest(input *DescribeTermi
|
|||
//
|
||||
// Describes the termination policies supported by Amazon EC2 Auto Scaling.
|
||||
//
|
||||
// For more information, see Work with Amazon EC2 Auto Scaling termination policies
|
||||
// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-termination-policies.html)
|
||||
// For more information, see Configure termination policies for Amazon EC2 Auto
|
||||
// Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-termination-policies.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
|
|
@ -4510,8 +4509,7 @@ func (c *AutoScaling) DetachInstancesRequest(input *DetachInstancesInput) (req *
|
|||
// attached to the Auto Scaling group, the instances are deregistered from the
|
||||
// target groups.
|
||||
//
|
||||
// For more information, see Detach EC2 instances from your Auto Scaling group
|
||||
// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/detach-instance-asg.html)
|
||||
// For more information, see Detach or attach instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-detach-attach-instances.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
|
|
@ -5521,8 +5519,8 @@ func (c *AutoScaling) PutNotificationConfigurationRequest(input *PutNotification
|
|||
//
|
||||
// This configuration overwrites any existing configuration.
|
||||
//
|
||||
// For more information, see Getting Amazon SNS notifications when your Auto
|
||||
// Scaling group scales (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ASGettingNotifications.html)
|
||||
// For more information, see Amazon SNS notification options for Amazon EC2
|
||||
// Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-sns-notifications.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// If you exceed your maximum limit of SNS topics, which is 10 per Auto Scaling
|
||||
|
|
@ -5723,7 +5721,7 @@ func (c *AutoScaling) PutScheduledUpdateGroupActionRequest(input *PutScheduledUp
|
|||
//
|
||||
// Creates or updates a scheduled scaling action for an Auto Scaling group.
|
||||
//
|
||||
// For more information, see Scheduled scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/schedule_time.html)
|
||||
// For more information, see Scheduled scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-scheduled-scaling.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// You can view the scheduled actions for an Auto Scaling group using the DescribeScheduledActions
|
||||
|
|
@ -5825,20 +5823,18 @@ func (c *AutoScaling) PutWarmPoolRequest(input *PutWarmPoolInput) (req *request.
|
|||
// Creates or updates a warm pool for the specified Auto Scaling group. A warm
|
||||
// pool is a pool of pre-initialized EC2 instances that sits alongside the Auto
|
||||
// Scaling group. Whenever your application needs to scale out, the Auto Scaling
|
||||
// group can draw on the warm pool to meet its new desired capacity. For more
|
||||
// information and example configurations, see Warm pools for Amazon EC2 Auto
|
||||
// Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-warm-pools.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
// group can draw on the warm pool to meet its new desired capacity.
|
||||
//
|
||||
// This operation must be called from the Region in which the Auto Scaling group
|
||||
// was created. This operation cannot be called on an Auto Scaling group that
|
||||
// has a mixed instances policy or a launch template or launch configuration
|
||||
// that requests Spot Instances.
|
||||
// was created.
|
||||
//
|
||||
// You can view the instances in the warm pool using the DescribeWarmPool API
|
||||
// call. If you are no longer using a warm pool, you can delete it by calling
|
||||
// the DeleteWarmPool API.
|
||||
//
|
||||
// For more information, see Warm pools for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-warm-pools.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
// with awserr.Error's Code and Message methods to get detailed information about
|
||||
// the error.
|
||||
|
|
@ -6036,7 +6032,8 @@ func (c *AutoScaling) ResumeProcessesRequest(input *ScalingProcessQuery) (req *r
|
|||
// Resumes the specified suspended auto scaling processes, or all suspended
|
||||
// process, for the specified Auto Scaling group.
|
||||
//
|
||||
// For more information, see Suspending and resuming scaling processes (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html)
|
||||
// For more information, see Suspend and resume Amazon EC2 Auto Scaling processes
|
||||
// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
|
|
@ -6245,7 +6242,7 @@ func (c *AutoScaling) SetDesiredCapacityRequest(input *SetDesiredCapacityInput)
|
|||
// that is lower than the current size of the group, the Auto Scaling group
|
||||
// uses its termination policy to determine which instances to terminate.
|
||||
//
|
||||
// For more information, see Manual scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-manual-scaling.html)
|
||||
// For more information, see Manual scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-scaling-manually.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
|
|
@ -6333,7 +6330,8 @@ func (c *AutoScaling) SetInstanceHealthRequest(input *SetInstanceHealthInput) (r
|
|||
//
|
||||
// Sets the health status of the specified instance.
|
||||
//
|
||||
// For more information, see Health checks for Auto Scaling instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html)
|
||||
// For more information, see Health checks for instances in an Auto Scaling
|
||||
// group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-health-checks.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
|
|
@ -6417,9 +6415,7 @@ func (c *AutoScaling) SetInstanceProtectionRequest(input *SetInstanceProtectionI
|
|||
// Updates the instance protection settings of the specified instances. This
|
||||
// operation cannot be called on instances in a warm pool.
|
||||
//
|
||||
// For more information about preventing instances that are part of an Auto
|
||||
// Scaling group from terminating on scale in, see Using instance scale-in protection
|
||||
// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-instance-protection.html)
|
||||
// For more information, see Use instance scale-in protection (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-instance-protection.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// If you exceed your maximum limit of instance IDs, which is 50 per Auto Scaling
|
||||
|
|
@ -6630,7 +6626,7 @@ func (c *AutoScaling) SuspendProcessesRequest(input *ScalingProcessQuery) (req *
|
|||
//
|
||||
// If you suspend either the Launch or Terminate process types, it can prevent
|
||||
// other process types from functioning properly. For more information, see
|
||||
// Suspending and resuming scaling processes (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html)
|
||||
// Suspend and resume Amazon EC2 Auto Scaling processes (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// To resume processes that have been suspended, call the ResumeProcesses API.
|
||||
|
|
@ -6731,7 +6727,7 @@ func (c *AutoScaling) TerminateInstanceInAutoScalingGroupRequest(input *Terminat
|
|||
// Zones. If you decrement the desired capacity, your Auto Scaling group can
|
||||
// become unbalanced between Availability Zones. Amazon EC2 Auto Scaling tries
|
||||
// to rebalance the group, and rebalancing might terminate instances in other
|
||||
// zones. For more information, see Rebalancing activities (https://docs.aws.amazon.com/autoscaling/ec2/userguide/auto-scaling-benefits.html#AutoScalingBehavior.InstanceUsage)
|
||||
// zones. For more information, see Manual scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-scaling-manually.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
|
|
@ -8218,7 +8214,7 @@ type CreateAutoScalingGroupInput struct {
|
|||
//
|
||||
// The amount of time, in seconds, between one scaling activity ending and another
|
||||
// one starting due to simple scaling policies. For more information, see Scaling
|
||||
// cooldowns for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html)
|
||||
// cooldowns for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-scaling-cooldowns.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// Default: 300 seconds
|
||||
|
|
@ -8256,8 +8252,8 @@ type CreateAutoScalingGroupInput struct {
|
|||
|
||||
// The unit of measurement for the value specified for desired capacity. Amazon
|
||||
// EC2 Auto Scaling supports DesiredCapacityType for attribute-based instance
|
||||
// type selection only. For more information, see Creating an Auto Scaling group
|
||||
// using attribute-based instance type selection (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-asg-instance-type-requirements.html)
|
||||
// type selection only. For more information, see Create a mixed instances group
|
||||
// using attribute-based instance type selection (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-mixed-instances-group-attribute-based-instance-type-selection.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// By default, Amazon EC2 Auto Scaling specifies units, which translates into
|
||||
|
|
@ -8281,7 +8277,7 @@ type CreateAutoScalingGroupInput struct {
|
|||
//
|
||||
// The valid values are EC2, ELB, and VPC_LATTICE. EC2 is the default health
|
||||
// check and cannot be disabled. For more information, see Health checks for
|
||||
// Auto Scaling instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html)
|
||||
// instances in an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-health-checks.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// Only specify EC2 if you must clear a value that was previously set.
|
||||
|
|
@ -8291,8 +8287,8 @@ type CreateAutoScalingGroupInput struct {
|
|||
// Amazon EC2 Auto Scaling uses the configuration values from the specified
|
||||
// instance to create a new launch configuration. To get the instance ID, use
|
||||
// the Amazon EC2 DescribeInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html)
|
||||
// API operation. For more information, see Creating an Auto Scaling group using
|
||||
// an EC2 instance (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-asg-from-instance.html)
|
||||
// API operation. For more information, see Create an Auto Scaling group using
|
||||
// parameters from an existing instance (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-asg-from-instance.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
InstanceId *string `min:"1" type:"string"`
|
||||
|
||||
|
|
@ -8316,8 +8312,8 @@ type CreateAutoScalingGroupInput struct {
|
|||
// or InstanceId).
|
||||
//
|
||||
// The launch template that is specified must be configured for use with an
|
||||
// Auto Scaling group. For more information, see Creating a launch template
|
||||
// for an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-template.html)
|
||||
// Auto Scaling group. For more information, see Create a launch template for
|
||||
// an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-template.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
LaunchTemplate *LaunchTemplateSpecification `type:"structure"`
|
||||
|
||||
|
|
@ -8333,7 +8329,7 @@ type CreateAutoScalingGroupInput struct {
|
|||
// The maximum amount of time, in seconds, that an instance can be in service.
|
||||
// The default is null. If specified, the value must be either 0 or a number
|
||||
// equal to or greater than 86,400 seconds (1 day). For more information, see
|
||||
// Replacing Auto Scaling instances based on maximum instance lifetime (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-max-instance-lifetime.html)
|
||||
// Replace Auto Scaling instances based on maximum instance lifetime (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-max-instance-lifetime.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
MaxInstanceLifetime *int64 `type:"integer"`
|
||||
|
||||
|
|
@ -8360,7 +8356,7 @@ type CreateAutoScalingGroupInput struct {
|
|||
|
||||
// Indicates whether newly launched instances are protected from termination
|
||||
// by Amazon EC2 Auto Scaling when scaling in. For more information about preventing
|
||||
// instances from terminating on scale in, see Using instance scale-in protection
|
||||
// instances from terminating on scale in, see Use instance scale-in protection
|
||||
// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-instance-protection.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
NewInstancesProtectedFromScaleIn *bool `type:"boolean"`
|
||||
|
|
@ -8404,7 +8400,7 @@ type CreateAutoScalingGroupInput struct {
|
|||
|
||||
// A policy or a list of policies that are used to select the instance to terminate.
|
||||
// These policies are executed in the order that you list them. For more information,
|
||||
// see Work with Amazon EC2 Auto Scaling termination policies (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-termination-policies.html)
|
||||
// see Configure termination policies for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-termination-policies.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// Valid values: Default | AllocationStrategy | ClosestToNextInstanceHour |
|
||||
|
|
@ -8732,8 +8728,8 @@ type CreateLaunchConfigurationInput struct {
|
|||
// the option to assign a public IPv4 address on the subnet.
|
||||
//
|
||||
// If you specify true, each instance in the Auto Scaling group receives a unique
|
||||
// public IPv4 address. For more information, see Launching Auto Scaling instances
|
||||
// in a VPC (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html)
|
||||
// public IPv4 address. For more information, see Provide network connectivity
|
||||
// for your Auto Scaling instances using Amazon VPC (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// If you specify this property, you must specify at least one subnet for VPCZoneIdentifier
|
||||
|
|
@ -8759,7 +8755,7 @@ type CreateLaunchConfigurationInput struct {
|
|||
// This optimization is not available with all instance types. Additional fees
|
||||
// are incurred when you enable EBS optimization for an instance type that is
|
||||
// not EBS-optimized by default. For more information, see Amazon EBS-optimized
|
||||
// instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html)
|
||||
// instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html)
|
||||
// in the Amazon EC2 User Guide for Linux Instances.
|
||||
//
|
||||
// The default value is false.
|
||||
|
|
@ -8773,7 +8769,7 @@ type CreateLaunchConfigurationInput struct {
|
|||
IamInstanceProfile *string `min:"1" type:"string"`
|
||||
|
||||
// The ID of the Amazon Machine Image (AMI) that was assigned during registration.
|
||||
// For more information, see Finding a Linux AMI (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html)
|
||||
// For more information, see Find a Linux AMI (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html)
|
||||
// in the Amazon EC2 User Guide for Linux Instances.
|
||||
//
|
||||
// If you specify InstanceId, an ImageId is not required.
|
||||
|
|
@ -8786,8 +8782,7 @@ type CreateLaunchConfigurationInput struct {
|
|||
// To create a launch configuration with a block device mapping or override
|
||||
// any other instance attributes, specify them as part of the same request.
|
||||
//
|
||||
// For more information, see Creating a launch configuration using an EC2 instance
|
||||
// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-lc-with-instanceID.html)
|
||||
// For more information, see Create a launch configuration (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
InstanceId *string `min:"1" type:"string"`
|
||||
|
||||
|
|
@ -8799,7 +8794,7 @@ type CreateLaunchConfigurationInput struct {
|
|||
// When detailed monitoring is enabled, Amazon CloudWatch generates metrics
|
||||
// every minute and your account is charged a fee. When you disable detailed
|
||||
// monitoring, CloudWatch generates metrics every 5 minutes. For more information,
|
||||
// see Configure Monitoring for Auto Scaling Instances (https://docs.aws.amazon.com/autoscaling/latest/userguide/enable-as-instance-metrics.html)
|
||||
// see Configure monitoring for Auto Scaling instances (https://docs.aws.amazon.com/autoscaling/latest/userguide/enable-as-instance-metrics.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
InstanceMonitoring *InstanceMonitoring `type:"structure"`
|
||||
|
||||
|
|
@ -8818,7 +8813,7 @@ type CreateLaunchConfigurationInput struct {
|
|||
KernelId *string `min:"1" type:"string"`
|
||||
|
||||
// The name of the key pair. For more information, see Amazon EC2 key pairs
|
||||
// and Linux instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)
|
||||
// and Amazon EC2 instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)
|
||||
// in the Amazon EC2 User Guide for Linux Instances.
|
||||
KeyName *string `min:"1" type:"string"`
|
||||
|
||||
|
|
@ -8828,8 +8823,8 @@ type CreateLaunchConfigurationInput struct {
|
|||
// LaunchConfigurationName is a required field
|
||||
LaunchConfigurationName *string `min:"1" type:"string" required:"true"`
|
||||
|
||||
// The metadata options for the instances. For more information, see Configuring
|
||||
// the Instance Metadata Options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html#launch-configurations-imds)
|
||||
// The metadata options for the instances. For more information, see Configure
|
||||
// the instance metadata options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html#launch-configurations-imds)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
MetadataOptions *InstanceMetadataOptions `type:"structure"`
|
||||
|
||||
|
|
@ -8837,9 +8832,7 @@ type CreateLaunchConfigurationInput struct {
|
|||
// dedicated tenancy runs on isolated, single-tenant hardware and can only be
|
||||
// launched into a VPC. To launch dedicated instances into a shared tenancy
|
||||
// VPC (a VPC with the instance placement tenancy attribute set to default),
|
||||
// you must set the value of this property to dedicated. For more information,
|
||||
// see Configuring instance tenancy with Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/auto-scaling-dedicated-instances.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
// you must set the value of this property to dedicated.
|
||||
//
|
||||
// If you specify PlacementTenancy, you must specify at least one subnet for
|
||||
// VPCZoneIdentifier when you create your group.
|
||||
|
|
@ -8855,8 +8848,8 @@ type CreateLaunchConfigurationInput struct {
|
|||
RamdiskId *string `min:"1" type:"string"`
|
||||
|
||||
// A list that contains the security group IDs to assign to the instances in
|
||||
// the Auto Scaling group. For more information, see Control traffic to resources
|
||||
// using security groups (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html)
|
||||
// the Auto Scaling group. For more information, see Control traffic to your
|
||||
// Amazon Web Services resources using security groups (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html)
|
||||
// in the Amazon Virtual Private Cloud User Guide.
|
||||
SecurityGroups []*string `type:"list"`
|
||||
|
||||
|
|
@ -11984,7 +11977,7 @@ type DesiredConfiguration struct {
|
|||
|
||||
// Describes the launch template and the version of the launch template that
|
||||
// Amazon EC2 Auto Scaling uses to launch Amazon EC2 instances. For more information
|
||||
// about launch templates, see Launch templates (https://docs.aws.amazon.com/autoscaling/ec2/userguide/LaunchTemplates.html)
|
||||
// about launch templates, see Launch templates (https://docs.aws.amazon.com/autoscaling/ec2/userguide/launch-templates.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
LaunchTemplate *LaunchTemplateSpecification `type:"structure"`
|
||||
|
||||
|
|
@ -12474,7 +12467,8 @@ type DisableMetricsCollectionInput struct {
|
|||
//
|
||||
// If you omit this property, all metrics are disabled.
|
||||
//
|
||||
// For more information, see Auto Scaling group metrics (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-cloudwatch-monitoring.html#as-group-metrics)
|
||||
// For more information, see Amazon CloudWatch metrics for Amazon EC2 Auto Scaling
|
||||
// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-metrics.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
Metrics []*string `type:"list"`
|
||||
}
|
||||
|
|
@ -12558,9 +12552,9 @@ type Ebs struct {
|
|||
|
||||
// Specifies whether the volume should be encrypted. Encrypted EBS volumes can
|
||||
// only be attached to instances that support Amazon EBS encryption. For more
|
||||
// information, see Supported instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances).
|
||||
// If your AMI uses encrypted volumes, you can also only launch it on supported
|
||||
// instance types.
|
||||
// information, see Requirements for Amazon EBS encryption (https://docs.aws.amazon.com/ebs/latest/userguide/ebs-encryption-requirements.html)
|
||||
// in the Amazon EBS User Guide. If your AMI uses encrypted volumes, you can
|
||||
// also only launch it on supported instance types.
|
||||
//
|
||||
// If you are creating a volume from a snapshot, you cannot create an unencrypted
|
||||
// volume from an encrypted snapshot. Also, you cannot specify a KMS key ID
|
||||
|
|
@ -12588,7 +12582,7 @@ type Ebs struct {
|
|||
// * io1: 100-64,000 IOPS
|
||||
//
|
||||
// For io1 volumes, we guarantee 64,000 IOPS only for Instances built on the
|
||||
// Nitro System (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances).
|
||||
// Amazon Web Services Nitro System (https://docs.aws.amazon.com/ec2/latest/instancetypes/ec2-nitro-instances.html).
|
||||
// Other instance families guarantee performance up to 32,000 IOPS.
|
||||
//
|
||||
// Iops is supported when the volume type is gp3 or io1 and required only when
|
||||
|
|
@ -12619,8 +12613,8 @@ type Ebs struct {
|
|||
// the size of the snapshot.
|
||||
VolumeSize *int64 `min:"1" type:"integer"`
|
||||
|
||||
// The volume type. For more information, see Amazon EBS volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)
|
||||
// in the Amazon EC2 User Guide for Linux Instances.
|
||||
// The volume type. For more information, see Amazon EBS volume types (https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html)
|
||||
// in the Amazon EBS User Guide.
|
||||
//
|
||||
// Valid values: standard | io1 | gp2 | st1 | sc1 | gp3
|
||||
VolumeType *string `min:"1" type:"string"`
|
||||
|
|
@ -12772,7 +12766,8 @@ type EnableMetricsCollectionInput struct {
|
|||
// If you specify Granularity and don't specify any metrics, all metrics are
|
||||
// enabled.
|
||||
//
|
||||
// For more information, see Auto Scaling group metrics (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-cloudwatch-monitoring.html#as-group-metrics)
|
||||
// For more information, see Amazon CloudWatch metrics for Amazon EC2 Auto Scaling
|
||||
// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-metrics.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
Metrics []*string `type:"list"`
|
||||
}
|
||||
|
|
@ -12906,7 +12901,8 @@ type EnabledMetric struct {
|
|||
//
|
||||
// * GroupAndWarmPoolTotalCapacity
|
||||
//
|
||||
// For more information, see Auto Scaling group metrics (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-cloudwatch-monitoring.html#as-group-metrics)
|
||||
// For more information, see Amazon CloudWatch metrics for Amazon EC2 Auto Scaling
|
||||
// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-metrics.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
Metric *string `min:"1" type:"string"`
|
||||
}
|
||||
|
|
@ -13060,7 +13056,7 @@ type ExecutePolicyInput struct {
|
|||
// complete before executing the policy.
|
||||
//
|
||||
// Valid only if the policy type is SimpleScaling. For more information, see
|
||||
// Scaling cooldowns for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html)
|
||||
// Scaling cooldowns for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-scaling-cooldowns.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
HonorCooldown *bool `type:"boolean"`
|
||||
|
||||
|
|
@ -13951,7 +13947,8 @@ type Instance struct {
|
|||
LaunchTemplate *LaunchTemplateSpecification `type:"structure"`
|
||||
|
||||
// A description of the current lifecycle state. The Quarantined state is not
|
||||
// used. For information about lifecycle states, see Instance lifecycle (https://docs.aws.amazon.com/autoscaling/ec2/userguide/AutoScalingGroupLifecycle.html)
|
||||
// used. For more information, see Amazon EC2 Auto Scaling instance lifecycle
|
||||
// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-lifecycle.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// LifecycleState is a required field
|
||||
|
|
@ -14080,7 +14077,7 @@ type InstanceDetails struct {
|
|||
LaunchTemplate *LaunchTemplateSpecification `type:"structure"`
|
||||
|
||||
// The lifecycle state for the instance. The Quarantined state is not used.
|
||||
// For information about lifecycle states, see Instance lifecycle (https://docs.aws.amazon.com/autoscaling/ec2/userguide/AutoScalingGroupLifecycle.html)
|
||||
// For more information, see Amazon EC2 Auto Scaling instance lifecycle (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-lifecycle.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// Valid values: Pending | Pending:Wait | Pending:Proceed | Quarantined | InService
|
||||
|
|
@ -14255,8 +14252,8 @@ func (s *InstanceMaintenancePolicy) SetMinHealthyPercentage(v int64) *InstanceMa
|
|||
return s
|
||||
}
|
||||
|
||||
// The metadata options for the instances. For more information, see Configuring
|
||||
// the Instance Metadata Options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html#launch-configurations-imds)
|
||||
// The metadata options for the instances. For more information, see Configure
|
||||
// the instance metadata options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html#launch-configurations-imds)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
type InstanceMetadataOptions struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
|
@ -14690,8 +14687,8 @@ func (s *InstanceRefreshWarmPoolProgress) SetPercentageComplete(v int64) *Instan
|
|||
// You must specify VCpuCount and MemoryMiB. All other attributes are optional.
|
||||
// Any unspecified optional attribute is set to its default.
|
||||
//
|
||||
// For more information, see Creating an Auto Scaling group using attribute-based
|
||||
// instance type selection (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-asg-instance-type-requirements.html)
|
||||
// For more information, see Create a mixed instances group using attribute-based
|
||||
// instance type selection (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-mixed-instances-group-attribute-based-instance-type-selection.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide. For help determining which instance
|
||||
// types match your attributes before you apply them to your Auto Scaling group,
|
||||
// see Preview instance types with specified attributes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html#ec2fleet-get-instance-types-from-instance-requirements)
|
||||
|
|
@ -15366,14 +15363,15 @@ type LaunchConfiguration struct {
|
|||
// IPv4 address on the subnet. If the instance is launched into a nondefault
|
||||
// subnet, the default is not to assign a public IPv4 address, unless you enabled
|
||||
// the option to assign a public IPv4 address on the subnet. For more information,
|
||||
// see Launching Auto Scaling instances in a VPC (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html)
|
||||
// see Provide network connectivity for your Auto Scaling instances using Amazon
|
||||
// VPC (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
AssociatePublicIpAddress *bool `type:"boolean"`
|
||||
|
||||
// The block device mapping entries that define the block devices to attach
|
||||
// to the instances at launch. By default, the block devices specified in the
|
||||
// block device mapping for the AMI are used. For more information, see Block
|
||||
// Device Mapping (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html)
|
||||
// device mappings (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html)
|
||||
// in the Amazon EC2 User Guide for Linux Instances.
|
||||
BlockDeviceMappings []*BlockDeviceMapping `type:"list"`
|
||||
|
||||
|
|
@ -15389,9 +15387,9 @@ type LaunchConfiguration struct {
|
|||
CreatedTime *time.Time `type:"timestamp" required:"true"`
|
||||
|
||||
// Specifies whether the launch configuration is optimized for EBS I/O (true)
|
||||
// or not (false). For more information, see Amazon EBS-Optimized Instances
|
||||
// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) in
|
||||
// the Amazon EC2 User Guide for Linux Instances.
|
||||
// or not (false). For more information, see Amazon EBS-optimized instances
|
||||
// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html)
|
||||
// in the Amazon EC2 User Guide for Linux Instances.
|
||||
EbsOptimized *bool `type:"boolean"`
|
||||
|
||||
// The name or the Amazon Resource Name (ARN) of the instance profile associated
|
||||
|
|
@ -15411,7 +15409,7 @@ type LaunchConfiguration struct {
|
|||
// Controls whether instances in this group are launched with detailed (true)
|
||||
// or basic (false) monitoring.
|
||||
//
|
||||
// For more information, see Configure Monitoring for Auto Scaling Instances
|
||||
// For more information, see Configure monitoring for Auto Scaling instances
|
||||
// (https://docs.aws.amazon.com/autoscaling/latest/userguide/enable-as-instance-metrics.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
InstanceMonitoring *InstanceMonitoring `type:"structure"`
|
||||
|
|
@ -15428,7 +15426,7 @@ type LaunchConfiguration struct {
|
|||
|
||||
// The name of the key pair.
|
||||
//
|
||||
// For more information, see Amazon EC2 Key Pairs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)
|
||||
// For more information, see Amazon EC2 key pairs and Amazon EC2 instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)
|
||||
// in the Amazon EC2 User Guide for Linux Instances.
|
||||
KeyName *string `min:"1" type:"string"`
|
||||
|
||||
|
|
@ -15440,33 +15438,29 @@ type LaunchConfiguration struct {
|
|||
// LaunchConfigurationName is a required field
|
||||
LaunchConfigurationName *string `min:"1" type:"string" required:"true"`
|
||||
|
||||
// The metadata options for the instances. For more information, see Configuring
|
||||
// the Instance Metadata Options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html#launch-configurations-imds)
|
||||
// The metadata options for the instances. For more information, see Configure
|
||||
// the instance metadata options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html#launch-configurations-imds)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
MetadataOptions *InstanceMetadataOptions `type:"structure"`
|
||||
|
||||
// The tenancy of the instance, either default or dedicated. An instance with
|
||||
// dedicated tenancy runs on isolated, single-tenant hardware and can only be
|
||||
// launched into a VPC.
|
||||
//
|
||||
// For more information, see Configuring instance tenancy with Amazon EC2 Auto
|
||||
// Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/auto-scaling-dedicated-instances.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
PlacementTenancy *string `min:"1" type:"string"`
|
||||
|
||||
// The ID of the RAM disk associated with the AMI.
|
||||
RamdiskId *string `min:"1" type:"string"`
|
||||
|
||||
// A list that contains the security groups to assign to the instances in the
|
||||
// Auto Scaling group. For more information, see Security Groups for Your VPC
|
||||
// (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html)
|
||||
// Auto Scaling group. For more information, see Control traffic to your Amazon
|
||||
// Web Services resources using security groups (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html)
|
||||
// in the Amazon Virtual Private Cloud User Guide.
|
||||
SecurityGroups []*string `type:"list"`
|
||||
|
||||
// The maximum hourly price to be paid for any Spot Instance launched to fulfill
|
||||
// the request. Spot Instances are launched when the price you specify exceeds
|
||||
// the current Spot price. For more information, see Requesting Spot Instances
|
||||
// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-launch-spot-instances.html)
|
||||
// for fault-tolerant and flexible applications (https://docs.aws.amazon.com/autoscaling/ec2/userguide/launch-template-spot-instances.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
SpotPrice *string `min:"1" type:"string"`
|
||||
|
||||
|
|
@ -15722,7 +15716,7 @@ type LaunchTemplateOverrides struct {
|
|||
// The instance type, such as m3.xlarge. You must specify an instance type that
|
||||
// is supported in your requested Region and Availability Zones. For more information,
|
||||
// see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
|
||||
// in the Amazon Elastic Compute Cloud User Guide.
|
||||
// in the Amazon EC2 User Guide for Linux Instances.
|
||||
//
|
||||
// You can specify up to 40 instance types per Auto Scaling group.
|
||||
InstanceType *string `min:"1" type:"string"`
|
||||
|
|
@ -15748,8 +15742,8 @@ type LaunchTemplateOverrides struct {
|
|||
// For example, if there are two units remaining to fulfill capacity, and Amazon
|
||||
// EC2 Auto Scaling can only launch an instance with a WeightedCapacity of five
|
||||
// units, the instance is launched, and the desired capacity is exceeded by
|
||||
// three units. For more information, see Configuring instance weighting for
|
||||
// Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups-instance-weighting.html)
|
||||
// three units. For more information, see Configure an Auto Scaling group to
|
||||
// use instance weights (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups-instance-weighting.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide. Value must be in the range of
|
||||
// 1–999.
|
||||
//
|
||||
|
|
@ -15834,7 +15828,7 @@ func (s *LaunchTemplateOverrides) SetWeightedCapacity(v string) *LaunchTemplateO
|
|||
|
||||
// Describes the launch template and the version of the launch template that
|
||||
// Amazon EC2 Auto Scaling uses to launch Amazon EC2 instances. For more information
|
||||
// about launch templates, see Launch templates (https://docs.aws.amazon.com/autoscaling/ec2/userguide/LaunchTemplates.html)
|
||||
// about launch templates, see Launch templates (https://docs.aws.amazon.com/autoscaling/ec2/userguide/launch-templates.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
type LaunchTemplateSpecification struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
|
@ -16087,7 +16081,7 @@ type LifecycleHookSpecification struct {
|
|||
|
||||
// The ARN of the IAM role that allows the Auto Scaling group to publish to
|
||||
// the specified notification target. For information about creating this role,
|
||||
// see Configure a notification target for a lifecycle hook (https://docs.aws.amazon.com/autoscaling/ec2/userguide/prepare-for-lifecycle-notifications.html#lifecycle-hook-notification-target)
|
||||
// see Prepare to add a lifecycle hook to your Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/prepare-for-lifecycle-notifications.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// Valid only if the notification target is an Amazon SNS topic or an Amazon
|
||||
|
|
@ -17240,14 +17234,21 @@ type PredictiveScalingConfiguration struct {
	//
	// The following are possible values:
	//
	// * HonorMaxCapacity - Amazon EC2 Auto Scaling cannot scale out capacity
	// higher than the maximum capacity. The maximum capacity is enforced as
	// a hard limit.
	// * HonorMaxCapacity - Amazon EC2 Auto Scaling can't increase the maximum
	// capacity of the group when the forecast capacity is close to or exceeds
	// the maximum capacity.
	//
	// * IncreaseMaxCapacity - Amazon EC2 Auto Scaling can scale out capacity
	// higher than the maximum capacity when the forecast capacity is close to
	// or exceeds the maximum capacity. The upper limit is determined by the
	// forecasted capacity and the value for MaxCapacityBuffer.
	// * IncreaseMaxCapacity - Amazon EC2 Auto Scaling can increase the maximum
	// capacity of the group when the forecast capacity is close to or exceeds
	// the maximum capacity. The upper limit is determined by the forecasted
	// capacity and the value for MaxCapacityBuffer.
	//
	// Use caution when allowing the maximum capacity to be automatically increased.
	// This can lead to more instances being launched than intended if the increased
	// maximum capacity is not monitored and managed. The increased maximum capacity
	// then becomes the new normal maximum capacity for the Auto Scaling group until
	// you manually update it. The maximum capacity does not automatically decrease
	// back to the original maximum.
	MaxCapacityBreachBehavior *string `type:"string" enum:"PredictiveScalingMaxCapacityBreachBehavior"`

	// The size of the capacity buffer to use when the forecast capacity is close
|
|
@ -17977,7 +17978,7 @@ func (s *PredictiveScalingPredefinedScalingMetric) SetResourceLabel(v string) *P
|
|||
|
||||
// Describes a process type.
|
||||
//
|
||||
// For more information, see Scaling processes (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html#process-types)
|
||||
// For more information, see Types of processes (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html#process-types)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
type ProcessType struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
|
@ -18335,14 +18336,14 @@ type PutScalingPolicyInput struct {
|
|||
// cooldown.
|
||||
//
|
||||
// Valid only if the policy type is SimpleScaling. For more information, see
|
||||
// Scaling cooldowns for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html)
|
||||
// Scaling cooldowns for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-scaling-cooldowns.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// Default: None
|
||||
Cooldown *int64 `type:"integer"`
|
||||
|
||||
// Indicates whether the scaling policy is enabled or disabled. The default
|
||||
// is enabled. For more information, see Disabling a scaling policy for an Auto
|
||||
// is enabled. For more information, see Disable a scaling policy for an Auto
|
||||
// Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-enable-disable-scaling-policy.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
Enabled *bool `type:"boolean"`
|
||||
|
|
@ -19122,7 +19123,7 @@ type RefreshPreferences struct {
|
|||
// number must be unique. To replace all instances in the Auto Scaling group,
|
||||
// the last number in the array must be 100.
|
||||
//
|
||||
// For usage examples, see Adding checkpoints to an instance refresh (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-adding-checkpoints-instance-refresh.html)
|
||||
// For usage examples, see Add checkpoints to an instance refresh (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-adding-checkpoints-instance-refresh.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
CheckpointPercentages []*int64 `type:"list"`
|
||||
|
||||
|
|
@ -20132,9 +20133,9 @@ type SetInstanceHealthInput struct {
|
|||
// Set this to False, to have the call not respect the grace period associated
|
||||
// with the group.
|
||||
//
|
||||
// For more information about the health check grace period, see CreateAutoScalingGroup
|
||||
// (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_CreateAutoScalingGroup.html)
|
||||
// in the Amazon EC2 Auto Scaling API Reference.
|
||||
// For more information about the health check grace period, see Set the health
|
||||
// check grace period for an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/health-check-grace-period.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
ShouldRespectGracePeriod *bool `type:"boolean"`
|
||||
}
|
||||
|
||||
|
|
@ -20590,7 +20591,7 @@ func (s SuspendProcessesOutput) GoString() string {
|
|||
|
||||
// Describes an auto scaling process that has been suspended.
|
||||
//
|
||||
// For more information, see Scaling processes (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html#process-types)
|
||||
// For more information, see Types of processes (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html#process-types)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
type SuspendedProcess struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
|
@ -21442,7 +21443,7 @@ type UpdateAutoScalingGroupInput struct {
|
|||
//
|
||||
// The amount of time, in seconds, between one scaling activity ending and another
|
||||
// one starting due to simple scaling policies. For more information, see Scaling
|
||||
// cooldowns for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html)
|
||||
// cooldowns for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-scaling-cooldowns.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
DefaultCooldown *int64 `type:"integer"`
|
||||
|
||||
|
|
@ -21474,8 +21475,8 @@ type UpdateAutoScalingGroupInput struct {
|
|||
|
||||
// The unit of measurement for the value specified for desired capacity. Amazon
|
||||
// EC2 Auto Scaling supports DesiredCapacityType for attribute-based instance
|
||||
// type selection only. For more information, see Creating an Auto Scaling group
|
||||
// using attribute-based instance type selection (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-asg-instance-type-requirements.html)
|
||||
// type selection only. For more information, see Create a mixed instances group
|
||||
// using attribute-based instance type selection (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-mixed-instances-group-attribute-based-instance-type-selection.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// By default, Amazon EC2 Auto Scaling specifies units, which translates into
|
||||
|
|
@ -21497,7 +21498,7 @@ type UpdateAutoScalingGroupInput struct {
|
|||
//
|
||||
// The valid values are EC2, ELB, and VPC_LATTICE. EC2 is the default health
|
||||
// check and cannot be disabled. For more information, see Health checks for
|
||||
// Auto Scaling instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html)
|
||||
// instances in an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-health-checks.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// Only specify EC2 if you must clear a value that was previously set.
|
||||
|
|
@ -21544,7 +21545,7 @@ type UpdateAutoScalingGroupInput struct {
|
|||
|
||||
// Indicates whether newly launched instances are protected from termination
|
||||
// by Amazon EC2 Auto Scaling when scaling in. For more information about preventing
|
||||
// instances from terminating on scale in, see Using instance scale-in protection
|
||||
// instances from terminating on scale in, see Use instance scale-in protection
|
||||
// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-instance-protection.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
NewInstancesProtectedFromScaleIn *bool `type:"boolean"`
|
||||
|
|
@ -21566,7 +21567,7 @@ type UpdateAutoScalingGroupInput struct {
|
|||
|
||||
// A policy or a list of policies that are used to select the instances to terminate.
|
||||
// The policies are executed in the order that you list them. For more information,
|
||||
// see Work with Amazon EC2 Auto Scaling termination policies (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-termination-policies.html)
|
||||
// see Configure termination policies for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-termination-policies.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// Valid values: Default | AllocationStrategy | ClosestToNextInstanceHour |
|
||||
2 vendor/github.com/aws/aws-sdk-go/service/autoscaling/doc.go (generated, vendored)
@ -7,7 +7,7 @@
// EC2 instances based on user-defined scaling policies, scheduled actions,
// and health checks.
//
// For more information, see the Amazon EC2 Auto Scaling User Guide (https://docs.aws.amazon.com/autoscaling/ec2/userguide/)
// For more information, see the Amazon EC2 Auto Scaling User Guide (https://docs.aws.amazon.com/autoscaling/ec2/userguide/what-is-amazon-ec2-auto-scaling.html)
// and the Amazon EC2 Auto Scaling API Reference (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/Welcome.html).
//
// See https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01 for more information on this service.
60 vendor/github.com/aws/aws-sdk-go/service/ec2/api.go (generated, vendored)
@ -196916,6 +196916,51 @@ const (

	// InstanceTypeU7in32tb224xlarge is a InstanceType enum value
	InstanceTypeU7in32tb224xlarge = "u7in-32tb.224xlarge"

	// InstanceTypeU7ib12tb224xlarge is a InstanceType enum value
	InstanceTypeU7ib12tb224xlarge = "u7ib-12tb.224xlarge"

	// InstanceTypeC7gnMetal is a InstanceType enum value
	InstanceTypeC7gnMetal = "c7gn.metal"

	// InstanceTypeR8gMedium is a InstanceType enum value
	InstanceTypeR8gMedium = "r8g.medium"

	// InstanceTypeR8gLarge is a InstanceType enum value
	InstanceTypeR8gLarge = "r8g.large"

	// InstanceTypeR8gXlarge is a InstanceType enum value
	InstanceTypeR8gXlarge = "r8g.xlarge"

	// InstanceTypeR8g2xlarge is a InstanceType enum value
	InstanceTypeR8g2xlarge = "r8g.2xlarge"

	// InstanceTypeR8g4xlarge is a InstanceType enum value
	InstanceTypeR8g4xlarge = "r8g.4xlarge"

	// InstanceTypeR8g8xlarge is a InstanceType enum value
	InstanceTypeR8g8xlarge = "r8g.8xlarge"

	// InstanceTypeR8g12xlarge is a InstanceType enum value
	InstanceTypeR8g12xlarge = "r8g.12xlarge"

	// InstanceTypeR8g16xlarge is a InstanceType enum value
	InstanceTypeR8g16xlarge = "r8g.16xlarge"

	// InstanceTypeR8g24xlarge is a InstanceType enum value
	InstanceTypeR8g24xlarge = "r8g.24xlarge"

	// InstanceTypeR8g48xlarge is a InstanceType enum value
	InstanceTypeR8g48xlarge = "r8g.48xlarge"

	// InstanceTypeR8gMetal24xl is a InstanceType enum value
	InstanceTypeR8gMetal24xl = "r8g.metal-24xl"

	// InstanceTypeR8gMetal48xl is a InstanceType enum value
	InstanceTypeR8gMetal48xl = "r8g.metal-48xl"

	// InstanceTypeMac2M1ultraMetal is a InstanceType enum value
	InstanceTypeMac2M1ultraMetal = "mac2-m1ultra.metal"
)

// InstanceType_Values returns all elements of the InstanceType enum
|
||||
|
|
@ -197726,6 +197771,21 @@ func InstanceType_Values() []string {
		InstanceTypeU7in16tb224xlarge,
		InstanceTypeU7in24tb224xlarge,
		InstanceTypeU7in32tb224xlarge,
		InstanceTypeU7ib12tb224xlarge,
		InstanceTypeC7gnMetal,
		InstanceTypeR8gMedium,
		InstanceTypeR8gLarge,
		InstanceTypeR8gXlarge,
		InstanceTypeR8g2xlarge,
		InstanceTypeR8g4xlarge,
		InstanceTypeR8g8xlarge,
		InstanceTypeR8g12xlarge,
		InstanceTypeR8g16xlarge,
		InstanceTypeR8g24xlarge,
		InstanceTypeR8g48xlarge,
		InstanceTypeR8gMetal24xl,
		InstanceTypeR8gMetal48xl,
		InstanceTypeMac2M1ultraMetal,
	}
}
4 vendor/github.com/containers/image/v5/copy/compression.go (generated, vendored)
@ -4,6 +4,7 @@ import (
	"errors"
	"fmt"
	"io"
	"maps"

	internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
	"github.com/containers/image/v5/manifest"

@ -12,7 +13,6 @@ import (
	"github.com/containers/image/v5/types"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/sirupsen/logrus"
	"golang.org/x/exp/maps"
)

var (

@ -287,7 +287,7 @@ func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCom
	maps.Copy(*annotations, d.uploadedAnnotations)
}

// recordValidatedBlobData updates b.blobInfoCache with data about the created uploadedInfo (as returned by PutBlob)
// recordValidatedDigestData updates b.blobInfoCache with data about the created uploadedInfo (as returned by PutBlob)
// and the original srcInfo (which the caller guarantees has been validated).
// This must ONLY be called if all data has been validated by OUR code, and is not coming from third parties.
func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInfo types.BlobInfo, srcInfo types.BlobInfo,
|
||||
|
|
|
|||
2 vendor/github.com/containers/image/v5/copy/copy.go generated vendored
@ -6,6 +6,7 @@ import (
"fmt"
"io"
"os"
"slices"
"time"
"github.com/containers/image/v5/docker/reference"
@ -25,7 +26,6 @@ import (
encconfig "github.com/containers/ocicrypt/config"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
"golang.org/x/sync/semaphore"
"golang.org/x/term"
)
12 vendor/github.com/containers/image/v5/copy/encryption.go generated vendored
@ -2,13 +2,13 @@ package copy
import (
"fmt"
"maps"
"slices"
"strings"
"github.com/containers/image/v5/types"
"github.com/containers/ocicrypt"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
// isOciEncrypted returns a bool indicating if a mediatype is encrypted
@ -47,13 +47,17 @@ func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo
desc := imgspecv1.Descriptor{
Annotations: stream.info.Annotations,
}
reader, decryptedDigest, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false)
// DecryptLayer supposedly returns a digest of the decrypted stream.
// In pratice, that value is never set in the current implementation.
// And we shouldn’t use it anyway, because it is not trusted: encryption can be made to a public key,
// i.e. it doesn’t authenticate the origin of the metadata in any way.
reader, _, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false)
if err != nil {
return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err)
}
stream.reader = reader
stream.info.Digest = decryptedDigest
stream.info.Digest = ""
stream.info.Size = -1
maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool {
return strings.HasPrefix(k, "org.opencontainers.image.enc")
6 vendor/github.com/containers/image/v5/copy/manifest.go generated vendored
@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"slices"
"strings"
internalManifest "github.com/containers/image/v5/internal/manifest"
@ -13,7 +14,6 @@ import (
"github.com/containers/image/v5/types"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert.
@ -74,7 +74,7 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
srcType := in.srcMIMEType
normalizedSrcType := manifest.NormalizedMIMEType(srcType)
if srcType != normalizedSrcType {
logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType)
logrus.Debugf("Source manifest MIME type %q, treating it as %q", srcType, normalizedSrcType)
srcType = normalizedSrcType
}
@ -237,7 +237,7 @@ func (c *copier) determineListConversion(currentListMIMEType string, destSupport
}
}
logrus.Debugf("Manifest list has MIME type %s, ordered candidate list [%s]", currentListMIMEType, strings.Join(destSupportedMIMETypes, ", "))
logrus.Debugf("Manifest list has MIME type %q, ordered candidate list [%s]", currentListMIMEType, strings.Join(destSupportedMIMETypes, ", "))
if len(prioritizedTypes.list) == 0 {
return "", nil, fmt.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes)
}
|||
4
vendor/github.com/containers/image/v5/copy/multiple.go
generated
vendored
4
vendor/github.com/containers/image/v5/copy/multiple.go
generated
vendored
|
|
@ -5,6 +5,8 @@ import (
|
|||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"maps"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
|
|
@ -17,8 +19,6 @@ import (
|
|||
digest "github.com/opencontainers/go-digest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
type instanceCopyKind int
|
||||
|
|
|
|||
7 vendor/github.com/containers/image/v5/copy/progress_bars.go generated vendored
@ -4,6 +4,7 @@ import (
"context"
"fmt"
"io"
"math"
"time"
"github.com/containers/image/v5/internal/private"
@ -151,12 +152,18 @@ type blobChunkAccessorProxy struct {
// The specified chunks must be not overlapping and sorted by their offset.
// The readers must be fully consumed, in the order they are returned, before blocking
// to read the next chunk.
// If the Length for the last chunk is set to math.MaxUint64, then it
// fully fetches the remaining data from the offset to the end of the blob.
func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
start := time.Now()
rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks)
if err == nil {
total := int64(0)
for _, c := range chunks {
// do not update the progress bar if there is a chunk with unknown length.
if c.Length == math.MaxUint64 {
return rc, errs, err
}
total += int64(c.Length)
}
s.bar.EwmaIncrInt64(total, time.Since(start))
4 vendor/github.com/containers/image/v5/copy/single.go generated vendored
@ -7,6 +7,7 @@ import (
"fmt"
"io"
"reflect"
"slices"
"strings"
"sync"
@ -25,7 +26,6 @@ import (
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"github.com/vbauerster/mpb/v8"
"golang.org/x/exp/slices"
)
// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
@ -707,7 +707,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
canChangeLayerCompression := ic.src.CanChangeLayerCompression(srcInfo.MediaType)
logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v",
srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression)
canSubstitute := ic.canSubstituteBlobs && ic.src.CanChangeLayerCompression(srcInfo.MediaType)
canSubstitute := ic.canSubstituteBlobs && canChangeLayerCompression
var requiredCompression *compressiontypes.Algorithm
if ic.requireCompressionFormatMatch {
3 vendor/github.com/containers/image/v5/directory/directory_dest.go generated vendored
@ -15,6 +15,7 @@ import (
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/fileutils"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
@ -263,7 +264,7 @@ func (d *dirImageDestination) Commit(context.Context, types.UnparsedImage) error
// returns true if path exists
func pathExists(path string) (bool, error) {
_, err := os.Stat(path)
err := fileutils.Exists(path)
if err == nil {
return true, nil
}
|||
4
vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
generated
vendored
4
vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
generated
vendored
|
|
@ -4,6 +4,8 @@ import (
|
|||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/containers/storage/pkg/fileutils"
|
||||
)
|
||||
|
||||
// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path.
|
||||
|
|
@ -11,7 +13,7 @@ import (
|
|||
// a non-existent name (but not a symlink pointing to a non-existent name)
|
||||
// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc.
|
||||
func ResolvePathToFullyExplicit(path string) (string, error) {
|
||||
switch _, err := os.Lstat(path); {
|
||||
switch err := fileutils.Lexists(path); {
|
||||
case err == nil:
|
||||
return resolveExistingPathToFullyExplicit(path)
|
||||
case os.IsNotExist(err):
|
||||
|
|
|
|||
2 vendor/github.com/containers/image/v5/docker/archive/reader.go generated vendored
@ -78,7 +78,7 @@ func (r *Reader) List() ([][]types.ImageReference, error) {
}
nt, ok := parsedTag.(reference.NamedTagged)
if !ok {
return nil, fmt.Errorf("Invalid tag %s (%s): does not contain a tag", tag, parsedTag.String())
return nil, fmt.Errorf("Invalid tag %q (%s): does not contain a tag", tag, parsedTag.String())
}
ref, err := newReference(r.path, nt, -1, r.archive, nil)
if err != nil {
2 vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go generated vendored
@ -116,7 +116,7 @@ func imageLoad(ctx context.Context, c *client.Client, reader *io.PipeReader) err
return fmt.Errorf("parsing docker load progress: %w", err)
}
if msg.Error != nil {
return fmt.Errorf("docker engine reported: %s", msg.Error.Message)
return fmt.Errorf("docker engine reported: %q", msg.Error.Message)
}
}
return nil // No error reported = success
2 vendor/github.com/containers/image/v5/docker/distribution_error.go generated vendored
@ -21,10 +21,10 @@ import (
"fmt"
"io"
"net/http"
"slices"
"github.com/docker/distribution/registry/api/errcode"
dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge"
"golang.org/x/exp/slices"
)
// errNoErrorsInBody is returned when an HTTP response body parses to an empty
46 vendor/github.com/containers/image/v5/docker/docker_client.go generated vendored
@ -18,6 +18,7 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/internal/multierr"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/internal/useragent"
"github.com/containers/image/v5/manifest"
@ -25,6 +26,7 @@ import (
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/pkg/tlsclientconfig"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
"github.com/docker/distribution/registry/api/errcode"
v2 "github.com/docker/distribution/registry/api/v2"
@ -186,7 +188,7 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
}
fullCertDirPath = filepath.Join(hostCertDir, hostPort)
_, err := os.Stat(fullCertDirPath)
err := fileutils.Exists(fullCertDirPath)
if err == nil {
break
}
@ -497,8 +499,8 @@ func (c *dockerClient) resolveRequestURL(path string) (*url.URL, error) {
// Checks if the auth headers in the response contain an indication of a failed
// authorizdation because of an "insufficient_scope" error. If that's the case,
// returns the required scope to be used for fetching a new token.
func needsRetryWithUpdatedScope(err error, res *http.Response) (bool, *authScope) {
if err == nil && res.StatusCode == http.StatusUnauthorized {
func needsRetryWithUpdatedScope(res *http.Response) (bool, *authScope) {
if res.StatusCode == http.StatusUnauthorized {
challenges := parseAuthHeader(res.Header)
for _, challenge := range challenges {
if challenge.Scheme == "bearer" {
@ -557,6 +559,9 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method stri
attempts := 0
for {
res, err := c.makeRequestToResolvedURLOnce(ctx, method, requestURL, headers, stream, streamLen, auth, extraScope)
if err != nil {
return nil, err
}
attempts++
// By default we use pre-defined scopes per operation. In
@ -572,27 +577,29 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method stri
// We also cannot retry with a body (stream != nil) as stream
// was already read
if attempts == 1 && stream == nil && auth != noAuth {
if retry, newScope := needsRetryWithUpdatedScope(err, res); retry {
if retry, newScope := needsRetryWithUpdatedScope(res); retry {
logrus.Debug("Detected insufficient_scope error, will retry request with updated scope")
res.Body.Close()
// Note: This retry ignores extraScope. That’s, strictly speaking, incorrect, but we don’t currently
// expect the insufficient_scope errors to happen for those callers. If that changes, we can add support
// for more than one extra scope.
res, err = c.makeRequestToResolvedURLOnce(ctx, method, requestURL, headers, stream, streamLen, auth, newScope)
if err != nil {
return nil, err
}
extraScope = newScope
}
}
if res == nil || res.StatusCode != http.StatusTooManyRequests || // Only retry on StatusTooManyRequests, success or other failure is returned to caller immediately
if res.StatusCode != http.StatusTooManyRequests || // Only retry on StatusTooManyRequests, success or other failure is returned to caller immediately
stream != nil || // We can't retry with a body (which is not restartable in the general case)
attempts == backoffNumIterations {
return res, err
return res, nil
}
// close response body before retry or context done
res.Body.Close()
delay = parseRetryAfter(res, delay)
if delay > backoffMaxDelay {
delay = backoffMaxDelay
}
delay = min(parseRetryAfter(res, delay), backoffMaxDelay)
logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", requestURL.Redacted(), delay.Seconds())
select {
case <-ctx.Done():
@ -671,10 +678,14 @@ func parseRegistryWarningHeader(header string) string {
// warning-value = warn-code SP warn-agent SP warn-text [ SP warn-date ]
// distribution-spec requires warn-code=299, warn-agent="-", warn-date missing
if !strings.HasPrefix(header, expectedPrefix) || !strings.HasSuffix(header, expectedSuffix) {
header, ok := strings.CutPrefix(header, expectedPrefix)
if !ok {
return ""
}
header, ok = strings.CutSuffix(header, expectedSuffix)
if !ok {
return ""
}
header = header[len(expectedPrefix) : len(header)-len(expectedSuffix)]
// ”Recipients that process the value of a quoted-string MUST handle a quoted-pair
// as if it were replaced by the octet following the backslash.”, so let’s do that…
@ -1009,11 +1020,7 @@ func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.R
if remoteErrors == nil {
return nil, 0, nil // fallback to non-external blob
}
err := fmt.Errorf("failed fetching external blob from all urls: %w", remoteErrors[0])
for _, e := range remoteErrors[1:] {
err = fmt.Errorf("%s, %w", err, e)
}
return nil, 0, err
return nil, 0, fmt.Errorf("failed fetching external blob from all urls: %w", multierr.Format("", ", ", "", remoteErrors))
}
func getBlobSize(resp *http.Response) int64 {
@ -1090,6 +1097,11 @@ func isManifestUnknownError(err error) bool {
if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && e.Message == "Not Found" {
return true
}
// Harbor v2.10.2
if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && strings.Contains(strings.ToLower(e.Message), "not found") {
return true
}
// opencontainers/distribution-spec does not require the errcode.Error payloads to be used,
// but specifies that the HTTP status must be 404.
var unexpected *unexpectedHTTPResponseError
9 vendor/github.com/containers/image/v5/docker/docker_image.go generated vendored
@ -14,6 +14,7 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
// Image is a Docker-specific implementation of types.ImageCloser with a few extra methods
@ -90,6 +91,14 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
}
for _, tag := range tagsHolder.Tags {
if _, err := reference.WithTag(dr.ref, tag); err != nil { // Ensure the tag does not contain unexpected values
// Per https://github.com/containers/skopeo/issues/2346 , unknown versions of JFrog Artifactory,
// contrary to the tag format specified in
// https://github.com/opencontainers/distribution-spec/blob/8a871c8234977df058f1a14e299fe0a673853da2/spec.md?plain=1#L160 ,
// include digests in the list.
if _, err := digest.Parse(tag); err == nil {
logrus.Debugf("Ignoring invalid tag %q matching a digest format", tag)
continue
}
return nil, fmt.Errorf("registry returned invalid tag %q: %w", tag, err)
}
tags = append(tags, tag)
39 vendor/github.com/containers/image/v5/docker/docker_image_dest.go generated vendored
@ -8,10 +8,12 @@ import (
"errors"
"fmt"
"io"
"maps"
"net/http"
"net/url"
"os"
"path/filepath"
"slices"
"strings"
"github.com/containers/image/v5/docker/reference"
@ -34,8 +36,6 @@ import (
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
type dockerImageDestination struct {
@ -347,35 +347,24 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
}
// Then try reusing blobs from other locations.
candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, options.CanSubstitute)
candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, blobinfocache.CandidateLocations2Options{
CanSubstitute: options.CanSubstitute,
PossibleManifestFormats: options.PossibleManifestFormats,
RequiredCompression: options.RequiredCompression,
})
for _, candidate := range candidates {
var err error
compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName)
if err != nil {
logrus.Debugf("OperationAndAlgorithmForCompressor Failed: %v", err)
continue
}
var candidateRepo reference.Named
if !candidate.UnknownLocation {
var err error
candidateRepo, err = parseBICLocationReference(candidate.Location)
if err != nil {
logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
continue
}
}
if !impl.CandidateMatchesTryReusingBlobOptions(options, compressionAlgorithm) {
if !candidate.UnknownLocation {
logrus.Debugf("Ignoring candidate blob %s in %s, compression %s does not match required %s or MIME types %#v", candidate.Digest.String(), candidateRepo.Name(),
optionalCompressionName(compressionAlgorithm), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
} else {
logrus.Debugf("Ignoring candidate blob %s with no known location, compression %s does not match required %s or MIME types %#v", candidate.Digest.String(),
optionalCompressionName(compressionAlgorithm), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
}
continue
}
if !candidate.UnknownLocation {
if candidate.CompressorName != blobinfocache.Uncompressed {
logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name())
if candidate.CompressionAlgorithm != nil {
logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressionAlgorithm.Name(), candidateRepo.Name())
} else {
logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo %s", candidate.Digest.String(), candidateRepo.Name())
}
@ -390,8 +379,8 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
continue
}
} else {
if candidate.CompressorName != blobinfocache.Uncompressed {
logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s with no location match, checking current repo", candidate.Digest.String(), candidate.CompressorName)
if candidate.CompressionAlgorithm != nil {
logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s with no location match, checking current repo", candidate.Digest.String(), candidate.CompressionAlgorithm.Name())
} else {
logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo with no location match, checking current repo", candidate.Digest.String())
}
@ -442,8 +431,8 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
return true, private.ReusedBlob{
Digest: candidate.Digest,
Size: size,
CompressionOperation: compressionOperation,
CompressionAlgorithm: compressionAlgorithm}, nil
CompressionOperation: candidate.CompressionOperation,
CompressionAlgorithm: candidate.CompressionAlgorithm}, nil
}
return false, private.ReusedBlob{}, nil
23 vendor/github.com/containers/image/v5/docker/docker_image_src.go generated vendored
@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"io"
"math"
"mime"
"mime/multipart"
"net/http"
@ -260,9 +261,15 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error,
}
currentOffset += toSkip
}
var reader io.Reader
if c.Length == math.MaxUint64 {
reader = body
} else {
reader = io.LimitReader(body, int64(c.Length))
}
s := signalCloseReader{
closed: make(chan struct{}),
stream: io.NopCloser(io.LimitReader(body, int64(c.Length))),
stream: io.NopCloser(reader),
consumeStream: true,
}
streams <- s
@ -343,12 +350,24 @@ func parseMediaType(contentType string) (string, map[string]string, error) {
// The specified chunks must be not overlapping and sorted by their offset.
// The readers must be fully consumed, in the order they are returned, before blocking
// to read the next chunk.
// If the Length for the last chunk is set to math.MaxUint64, then it
// fully fetches the remaining data from the offset to the end of the blob.
func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
headers := make(map[string][]string)
rangeVals := make([]string, 0, len(chunks))
lastFound := false
for _, c := range chunks {
rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1))
if lastFound {
return nil, nil, fmt.Errorf("internal error: another chunk requested after an util-EOF chunk")
}
// If the Length is set to -1, then request anything after the specified offset.
if c.Length == math.MaxUint64 {
lastFound = true
rangeVals = append(rangeVals, fmt.Sprintf("%d-", c.Offset))
} else {
rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1))
}
}
headers["Range"] = []string{fmt.Sprintf("bytes=%s", strings.Join(rangeVals, ","))}
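As an aside, the following is a minimal, self-contained sketch (not part of the vendored diff) of how the updated Range-header construction above behaves; the chunk type and rangeHeader helper are illustrative stand-ins for the private ImageSourceChunk handling, and the offsets are made up.

package main

import (
	"fmt"
	"math"
	"strings"
)

// chunk mirrors the Offset/Length pair described in the diff; math.MaxUint64
// as Length means "everything from Offset to the end of the blob".
type chunk struct {
	Offset uint64
	Length uint64
}

// rangeHeader builds an HTTP Range header value the way the updated GetBlobAt
// does: a closed "start-end" range per chunk, and an open-ended "start-" range
// for a final until-EOF chunk.
func rangeHeader(chunks []chunk) (string, error) {
	vals := make([]string, 0, len(chunks))
	lastFound := false
	for _, c := range chunks {
		if lastFound {
			return "", fmt.Errorf("another chunk requested after an until-EOF chunk")
		}
		if c.Length == math.MaxUint64 {
			lastFound = true
			vals = append(vals, fmt.Sprintf("%d-", c.Offset))
		} else {
			vals = append(vals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1))
		}
	}
	return "bytes=" + strings.Join(vals, ","), nil
}

func main() {
	h, _ := rangeHeader([]chunk{{Offset: 0, Length: 1024}, {Offset: 4096, Length: math.MaxUint64}})
	fmt.Println(h) // prints: bytes=0-1023,4096-
}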
12 vendor/github.com/containers/image/v5/docker/docker_transport.go generated vendored
@ -54,16 +54,12 @@ type dockerReference struct {
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference.
func ParseReference(refString string) (types.ImageReference, error) {
if !strings.HasPrefix(refString, "//") {
refString, ok := strings.CutPrefix(refString, "//")
if !ok {
return nil, fmt.Errorf("docker: image reference %s does not start with //", refString)
}
// Check if ref has UnknownDigestSuffix suffixed to it
unknownDigest := false
if strings.HasSuffix(refString, UnknownDigestSuffix) {
unknownDigest = true
refString = strings.TrimSuffix(refString, UnknownDigestSuffix)
}
ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//"))
refString, unknownDigest := strings.CutSuffix(refString, UnknownDigestSuffix)
ref, err := reference.ParseNormalizedNamed(refString)
if err != nil {
return nil, err
}
4 vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go generated vendored
@ -231,7 +231,7 @@ func (r *Reader) openTarComponent(componentPath string) (io.ReadCloser, error) {
}
if !header.FileInfo().Mode().IsRegular() {
return nil, fmt.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
return nil, fmt.Errorf("Error reading tar archive component %q: not a regular file", header.Name)
}
succeeded = true
return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
@ -262,7 +262,7 @@ func findTarComponent(inputFile io.Reader, componentPath string) (*tar.Reader, *
func (r *Reader) readTarComponent(path string, limit int) ([]byte, error) {
file, err := r.openTarComponent(path)
if err != nil {
return nil, fmt.Errorf("loading tar component %s: %w", path, err)
return nil, fmt.Errorf("loading tar component %q: %w", path, err)
}
defer file.Close()
bytes, err := iolimits.ReadAtMost(file, limit)
10 vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go generated vendored
@ -95,10 +95,10 @@ func (s *Source) ensureCachedDataIsPresentPrivate() error {
}
var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
return fmt.Errorf("decoding tar config %s: %w", tarManifest.Config, err)
return fmt.Errorf("decoding tar config %q: %w", tarManifest.Config, err)
}
if parsedConfig.RootFS == nil {
return fmt.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config)
return fmt.Errorf("Invalid image config (rootFS is not set): %q", tarManifest.Config)
}
knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
@ -144,7 +144,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
}
layerPath := path.Clean(tarManifest.Layers[i])
if _, ok := unknownLayerSizes[layerPath]; ok {
return nil, fmt.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
return nil, fmt.Errorf("Layer tarfile %q used for two different DiffID values", layerPath)
}
li := &layerInfo{ // A new element in each iteration
path: layerPath,
@ -179,7 +179,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
// the slower method of checking if it's compressed.
uncompressedStream, isCompressed, err := compression.AutoDecompress(t)
if err != nil {
return nil, fmt.Errorf("auto-decompressing %s to determine its size: %w", layerPath, err)
return nil, fmt.Errorf("auto-decompressing %q to determine its size: %w", layerPath, err)
}
defer uncompressedStream.Close()
@ -187,7 +187,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
if isCompressed {
uncompressedSize, err = io.Copy(io.Discard, uncompressedStream)
if err != nil {
return nil, fmt.Errorf("reading %s to find its size: %w", layerPath, err)
return nil, fmt.Errorf("reading %q to find its size: %w", layerPath, err)
}
}
li.size = uncompressedSize
14 vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go generated vendored
@ -9,6 +9,7 @@ import (
"io"
"os"
"path/filepath"
"slices"
"sync"
"time"
@ -19,7 +20,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
// Writer allows creating a (docker save)-formatted tar archive containing one or more images.
@ -164,7 +164,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
return fmt.Errorf("marshaling layer config: %w", err)
}
delete(layerConfig, "layer_id")
layerID := digest.Canonical.FromBytes(b).Hex()
layerID := digest.Canonical.FromBytes(b).Encoded()
layerConfig["id"] = layerID
configBytes, err := json.Marshal(layerConfig)
@ -309,10 +309,10 @@ func (w *Writer) Close() error {
// NOTE: This is an internal implementation detail, not a format property, and can change
// any time.
func (w *Writer) configPath(configDigest digest.Digest) (string, error) {
if err := configDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
if err := configDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
return "", err
}
return configDigest.Hex() + ".json", nil
return configDigest.Encoded() + ".json", nil
}
// physicalLayerPath returns a path we choose for storing a layer with the specified digest
@ -320,15 +320,15 @@ func (w *Writer) configPath(configDigest digest.Digest) (string, error) {
// NOTE: This is an internal implementation detail, not a format property, and can change
// any time.
func (w *Writer) physicalLayerPath(layerDigest digest.Digest) (string, error) {
if err := layerDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
if err := layerDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
return "", err
}
// Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
// Note that this can't be e.g. filepath.Join(l.Digest.Encoded(), legacyLayerFileName); due to the way
// writeLegacyMetadata constructs layer IDs differently from inputinfo.Digest values (as described
// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
// tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
// in the root of the tarball.
return layerDigest.Hex() + ".tar", nil
return layerDigest.Encoded() + ".tar", nil
}
type tarFI struct {
11 vendor/github.com/containers/image/v5/docker/registries_d.go generated vendored
@ -12,6 +12,7 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/rootless"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
@ -93,7 +94,7 @@ func registriesDirPathWithHomeDir(sys *types.SystemContext, homeDir string) stri
return sys.RegistriesDirPath
}
userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir)
if _, err := os.Stat(userRegistriesDirPath); err == nil {
if err := fileutils.Exists(userRegistriesDirPath); err == nil {
return userRegistriesDirPath
}
if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
@ -139,7 +140,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
if config.DefaultDocker != nil {
if mergedConfig.DefaultDocker != nil {
return nil, fmt.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`,
return nil, fmt.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in %q and %q`,
dockerDefaultMergedFrom, configPath)
}
mergedConfig.DefaultDocker = config.DefaultDocker
@ -148,7 +149,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
for nsName, nsConfig := range config.Docker { // includes config.Docker == nil
if _, ok := mergedConfig.Docker[nsName]; ok {
return nil, fmt.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`,
return nil, fmt.Errorf(`Error parsing signature storage configuration: "docker" namespace %q defined both in %q and %q`,
nsName, nsMergedFrom[nsName], configPath)
}
mergedConfig.Docker[nsName] = nsConfig
@ -287,10 +288,10 @@ func (ns registryNamespace) signatureTopLevel(write bool) string {
// base is not nil from the caller
// NOTE: Keep this in sync with docs/signature-protocols.md!
func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) (*url.URL, error) {
if err := manifestDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in a path with ../, so validate explicitly.
if err := manifestDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
return nil, err
}
sigURL := *base
sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Encoded(), index+1)
return &sigURL, nil
}
24 vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go generated vendored
@ -1,8 +1,6 @@
package blobinfocache
import (
"github.com/containers/image/v5/pkg/compression"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
)
@ -32,7 +30,7 @@ func (bic *v1OnlyBlobInfoCache) Close() {
func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
}
func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2 {
func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2 {
return nil
}
@ -48,23 +46,3 @@ func CandidateLocationsFromV2(v2candidates []BICReplacementCandidate2) []types.B
}
return candidates
}
// OperationAndAlgorithmForCompressor returns CompressionOperation and CompressionAlgorithm
// values suitable for inclusion in a types.BlobInfo structure, based on the name of the
// compression algorithm, or Uncompressed, or UnknownCompression. This is typically used by
// TryReusingBlob() implementations to set values in the BlobInfo structure that they return
// upon success.
func OperationAndAlgorithmForCompressor(compressorName string) (types.LayerCompression, *compressiontypes.Algorithm, error) {
switch compressorName {
case Uncompressed:
return types.Decompress, nil, nil
case UnknownCompression:
return types.PreserveOriginal, nil, nil
default:
algo, err := compression.AlgorithmByName(compressorName)
if err == nil {
return types.Compress, &algo, nil
}
return types.PreserveOriginal, nil, err
}
}
26 vendor/github.com/containers/image/v5/internal/blobinfocache/types.go generated vendored
@ -1,6 +1,7 @@
package blobinfocache
import (
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
)
@ -35,19 +36,24 @@ type BlobInfoCache2 interface {
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known)
// that could possibly be reused within the specified (transport scope) (if they still
// exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2
}
// CandidateLocations2Options are used in CandidateLocations2.
type CandidateLocations2Options struct {
// If !CanSubstitute, the returned candidates will match the submitted digest exactly; if
// CanSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
// up variants of the blob which have the same uncompressed digest.
//
// The CompressorName fields in returned data must never be UnknownCompression.
CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2
CanSubstitute bool
PossibleManifestFormats []string // If set, a set of possible manifest formats; at least one should support the reused layer
RequiredCompression *compressiontypes.Algorithm // If set, only reuse layers with a matching algorithm
}
// BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2.
type BICReplacementCandidate2 struct {
Digest digest.Digest
CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression
UnknownLocation bool // is true when `Location` for this blob is not set
Location types.BICLocationReference // not set if UnknownLocation is set to `true`
Digest digest.Digest
CompressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed
CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
UnknownLocation bool // is true when `Location` for this blob is not set
Location types.BICLocationReference // not set if UnknownLocation is set to `true`
}
2 vendor/github.com/containers/image/v5/internal/image/docker_schema2.go generated vendored
@ -366,7 +366,7 @@ func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string)
if err := blobDigest.Validate(); err != nil {
return "", err
}
parts := append([]string{blobDigest.Hex()}, others...)
parts := append([]string{blobDigest.Encoded()}, others...)
v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " ")))
return hex.EncodeToString(v1IDHash[:]), nil
}
2 vendor/github.com/containers/image/v5/internal/image/manifest.go generated vendored
@ -76,7 +76,7 @@ func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src
case imgspecv1.MediaTypeImageIndex:
return manifestOCI1FromImageIndex(ctx, sys, src, manblob)
default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values.
return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
return nil, fmt.Errorf("Unimplemented manifest MIME type %q", mt)
}
}
2 vendor/github.com/containers/image/v5/internal/image/oci.go generated vendored
@ -5,6 +5,7 @@ import (
"encoding/json"
"errors"
"fmt"
"slices"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
@ -15,7 +16,6 @@ import (
ociencspec "github.com/containers/ocicrypt/spec"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/slices"
)
type manifestOCI1 struct {
39 vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go generated vendored
@ -3,40 +3,13 @@ package impl
import (
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/private"
compression "github.com/containers/image/v5/pkg/compression/types"
"golang.org/x/exp/slices"
)
// CandidateMatchesTryReusingBlobOptions validates if compression is required by the caller while selecting a blob, if it is required
// then function performs a match against the compression requested by the caller and compression of existing blob
// (which can be nil to represent uncompressed or unknown)
func CandidateMatchesTryReusingBlobOptions(options private.TryReusingBlobOptions, candidateCompression *compression.Algorithm) bool {
if options.RequiredCompression != nil {
if options.RequiredCompression.Name() == compression.ZstdChunkedAlgorithmName {
// HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs.
// The caller must re-compress to build those annotations.
return false
}
if candidateCompression == nil ||
(options.RequiredCompression.Name() != candidateCompression.Name() && options.RequiredCompression.Name() != candidateCompression.BaseVariantName()) {
return false
}
}
// For candidateCompression == nil, we can’t tell the difference between “uncompressed” and “unknown”;
// and “uncompressed” is acceptable in all known formats (well, it seems to work in practice for schema1),
// so don’t impose any restrictions if candidateCompression == nil
if options.PossibleManifestFormats != nil && candidateCompression != nil {
if !slices.ContainsFunc(options.PossibleManifestFormats, func(mt string) bool {
return manifest.MIMETypeSupportsCompressionAlgorithm(mt, *candidateCompression)
}) {
return false
}
}
return true
}
// OriginalCandidateMatchesTryReusingBlobOptions returns true if the original blob passed to TryReusingBlobWithOptions
// is acceptable based on opts.
func OriginalCandidateMatchesTryReusingBlobOptions(opts private.TryReusingBlobOptions) bool {
return CandidateMatchesTryReusingBlobOptions(opts, opts.OriginalCompression)
return manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
PossibleManifestFormats: opts.PossibleManifestFormats,
RequiredCompression: opts.RequiredCompression,
}, opts.OriginalCompression)
}
2 vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go generated vendored
@ -39,6 +39,8 @@ func (stub NoGetBlobAtInitialize) SupportsGetBlobAt() bool {
// The specified chunks must be not overlapping and sorted by their offset.
// The readers must be fully consumed, in the order they are returned, before blocking
// to read the next chunk.
// If the Length for the last chunk is set to math.MaxUint64, then it
// fully fetches the remaining data from the offset to the end of the blob.
func (stub NoGetBlobAtInitialize) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
return nil, nil, fmt.Errorf("internal error: GetBlobAt is not supported by the %q transport", stub.transportName)
}
4 vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go generated vendored
@ -3,13 +3,13 @@ package manifest
import (
"encoding/json"
"fmt"
"slices"
platform "github.com/containers/image/v5/internal/pkg/platform"
compression "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/slices"
)
// Schema2PlatformSpec describes the platform which a particular manifest is
@ -164,7 +164,7 @@ func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.
}
}
}
return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
return "", fmt.Errorf("no image found in manifest list for architecture %q, variant %q, OS %q", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
}
// Serialize returns the list in a blob format.
2 vendor/github.com/containers/image/v5/internal/manifest/list.go generated vendored
@ -129,5 +129,5 @@ func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) {
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
return nil, fmt.Errorf("Treating single images as manifest lists is not implemented")
}
return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized)
return nil, fmt.Errorf("Unimplemented manifest list MIME type %q (normalized as %q)", manifestMIMEType, normalized)
}
37 vendor/github.com/containers/image/v5/internal/manifest/manifest.go generated vendored
@ -2,6 +2,7 @@ package manifest
import (
"encoding/json"
"slices"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/libtrust"
@ -192,3 +193,39 @@ func MIMETypeSupportsCompressionAlgorithm(mimeType string, algo compressiontypes
return false
}
}
// ReuseConditions are an input to CandidateCompressionMatchesReuseConditions;
// it is a struct to allow longer and better-documented field names.
type ReuseConditions struct {
PossibleManifestFormats []string // If set, a set of possible manifest formats; at least one should support the reused layer
RequiredCompression *compressiontypes.Algorithm // If set, only reuse layers with a matching algorithm
}
// CandidateCompressionMatchesReuseConditions returns true if a layer with candidateCompression
// (which can be nil to represent uncompressed or unknown) matches reuseConditions.
func CandidateCompressionMatchesReuseConditions(c ReuseConditions, candidateCompression *compressiontypes.Algorithm) bool {
if c.RequiredCompression != nil {
if c.RequiredCompression.Name() == compressiontypes.ZstdChunkedAlgorithmName {
// HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs.
// The caller must re-compress to build those annotations.
return false
}
if candidateCompression == nil ||
(c.RequiredCompression.Name() != candidateCompression.Name() && c.RequiredCompression.Name() != candidateCompression.BaseVariantName()) {
return false
}
}
// For candidateCompression == nil, we can’t tell the difference between “uncompressed” and “unknown”;
// and “uncompressed” is acceptable in all known formats (well, it seems to work in practice for schema1),
// so don’t impose any restrictions if candidateCompression == nil
if c.PossibleManifestFormats != nil && candidateCompression != nil {
if !slices.ContainsFunc(c.PossibleManifestFormats, func(mt string) bool {
return MIMETypeSupportsCompressionAlgorithm(mt, *candidateCompression)
}) {
return false
}
}
return true
}
6 vendor/github.com/containers/image/v5/internal/manifest/oci_index.go generated vendored
@ -3,8 +3,10 @@ package manifest
import (
"encoding/json"
"fmt"
"maps"
"math"
"runtime"
"slices"
platform "github.com/containers/image/v5/internal/pkg/platform"
compression "github.com/containers/image/v5/pkg/compression/types"
@ -12,8 +14,6 @@ import (
"github.com/opencontainers/go-digest"
imgspec "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
const (
@ -260,7 +260,7 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi
if bestMatch != nil {
return bestMatch.digest, nil
}
return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
return "", fmt.Errorf("no image found in image index for architecture %q, variant %q, OS %q", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
}
func (index *OCI1Index) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
34
vendor/github.com/containers/image/v5/internal/multierr/multierr.go
generated
vendored
Normal file
34
vendor/github.com/containers/image/v5/internal/multierr/multierr.go
generated
vendored
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
package multierr

import (
	"fmt"
	"strings"
)

// Format creates an error value from the input array (which should not be empty)
// If the input contains a single error value, it is returned as is.
// If there are multiple, they are formatted as a multi-error (with Unwrap() []error) with the provided initial, separator, and ending strings.
//
// Typical usage:
//
//	var errs []error
//	// …
//	errs = append(errs, …)
//	// …
//	if errs != nil { return multierr.Format("Failures doing $FOO", "\n* ", "", errs)}
func Format(first, middle, last string, errs []error) error {
	switch len(errs) {
	case 0:
		return fmt.Errorf("internal error: multierr.Format called with 0 errors")
	case 1:
		return errs[0]
	default:
		// We have to do this — and this function only really exists — because fmt.Errorf(format, errs...) is invalid:
		// []error is not a valid parameter to a function expecting []any
		anyErrs := make([]any, 0, len(errs))
		for _, e := range errs {
			anyErrs = append(anyErrs, e)
		}
		return fmt.Errorf(first+"%w"+strings.Repeat(middle+"%w", len(errs)-1)+last, anyErrs...)
	}
}
|
||||
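A quick usage sketch of the new helper (not part of the vendored change); the output shape and the preserved error chain follow directly from the Format body above. Note that the package is internal, so this only compiles inside containers/image.

package main

import (
	"errors"
	"fmt"

	"github.com/containers/image/v5/internal/multierr"
)

func main() {
	errs := []error{errors.New("first failure"), errors.New("second failure")}
	err := multierr.Format("copying blobs:\n* ", "\n* ", "", errs)
	fmt.Println(err)
	// copying blobs:
	// * first failure
	// * second failure
	fmt.Println(errors.Is(err, errs[1])) // true: the %w verbs keep Unwrap() []error intact
}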
10
vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
generated
vendored
10
vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
generated
vendored
|
|
@ -21,12 +21,12 @@ import (
|
|||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/image/v5/types"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// For Linux, the kernel has already detected the ABI, ISA and Features.
|
||||
|
|
@ -64,8 +64,8 @@ func getCPUInfo(pattern string) (info string, err error) {
|
|||
return "", fmt.Errorf("getCPUInfo for pattern: %s not found", pattern)
|
||||
}
|
||||
|
||||
func getCPUVariantWindows(arch string) string {
|
||||
// Windows only supports v7 for ARM32 and v8 for ARM64 and so we can use
|
||||
func getCPUVariantDarwinWindows(arch string) string {
|
||||
// Darwin and Windows only support v7 for ARM32 and v8 for ARM64 and so we can use
|
||||
// runtime.GOARCH to determine the variants
|
||||
var variant string
|
||||
switch arch {
|
||||
|
|
@ -133,8 +133,8 @@ func getCPUVariantArm() string {
|
|||
}
|
||||
|
||||
func getCPUVariant(os string, arch string) string {
|
||||
if os == "windows" {
|
||||
return getCPUVariantWindows(arch)
|
||||
if os == "darwin" || os == "windows" {
|
||||
return getCPUVariantDarwinWindows(arch)
|
||||
}
|
||||
if arch == "arm" || arch == "arm64" {
|
||||
return getCPUVariantArm()
|
||||
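The body of the renamed helper is outside this hunk; judging from its comment, it presumably amounts to something like this hypothetical stand-in.

package platform // hypothetical stand-in, not the vendored implementation

func cpuVariantDarwinWindows(arch string) string {
	switch arch {
	case "arm":
		return "v7" // the only 32-bit ARM ABI these platforms ship
	case "arm64":
		return "v8"
	default:
		return "" // assumption: no variant reported for other architectures
	}
}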
|||
6
vendor/github.com/containers/image/v5/internal/private/private.go
generated
vendored
6
vendor/github.com/containers/image/v5/internal/private/private.go
generated
vendored
|
|
@ -143,7 +143,11 @@ type ReusedBlob struct {
|
|||
// ImageSourceChunk is a portion of a blob.
|
||||
// This API is experimental and can be changed without bumping the major version number.
|
||||
type ImageSourceChunk struct {
|
||||
// Offset specifies the starting position of the chunk within the source blob.
|
||||
Offset uint64
|
||||
|
||||
// Length specifies the size of the chunk. If it is set to math.MaxUint64,
|
||||
// then it refers to all the data from Offset to the end of the blob.
|
||||
Length uint64
|
||||
}
|
||||
|
||||
|
|
@ -154,6 +158,8 @@ type BlobChunkAccessor interface {
|
|||
// The specified chunks must be not overlapping and sorted by their offset.
|
||||
// The readers must be fully consumed, in the order they are returned, before blocking
|
||||
// to read the next chunk.
|
||||
// If the Length for the last chunk is set to math.MaxUint64, then it
|
||||
// fully fetches the remaining data from the offset to the end of the blob.
|
||||
GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error)
|
||||
}
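A hedged sketch of how a caller might drive this interface, assuming only what the comments above state; the helper name, chunk sizes, and the final error-channel handling are assumptions.

package main // sketch only: the private package is internal to containers/image

import (
	"context"
	"io"
	"math"

	"github.com/containers/image/v5/internal/private"
	"github.com/containers/image/v5/types"
)

// drainBlob reads a leading chunk plus "the rest of the blob" via GetBlobAt.
func drainBlob(ctx context.Context, src private.BlobChunkAccessor, info types.BlobInfo) error {
	chunks := []private.ImageSourceChunk{
		{Offset: 0, Length: 1024},              // first KiB
		{Offset: 1024, Length: math.MaxUint64}, // from there to the end of the blob
	}
	readers, errs, err := src.GetBlobAt(ctx, info, chunks)
	if err != nil {
		return err
	}
	for r := range readers { // consume fully, in order, before waiting for the next chunk
		_, copyErr := io.Copy(io.Discard, r)
		r.Close()
		if copyErr != nil {
			return copyErr
		}
	}
	return <-errs // assumption: the source reports a final status on the error channel
}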
|
|||
9
vendor/github.com/containers/image/v5/internal/signature/sigstore.go
generated
vendored
9
vendor/github.com/containers/image/v5/internal/signature/sigstore.go
generated
vendored
|
|
@ -1,10 +1,9 @@
|
|||
package signature
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/exp/slices"
|
||||
"maps"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
@ -45,7 +44,7 @@ type sigstoreJSONRepresentation struct {
|
|||
func SigstoreFromComponents(untrustedMimeType string, untrustedPayload []byte, untrustedAnnotations map[string]string) Sigstore {
|
||||
return Sigstore{
|
||||
untrustedMIMEType: untrustedMimeType,
|
||||
untrustedPayload: slices.Clone(untrustedPayload),
|
||||
untrustedPayload: bytes.Clone(untrustedPayload),
|
||||
untrustedAnnotations: maps.Clone(untrustedAnnotations),
|
||||
}
|
||||
}
|
||||
|
|
@ -79,7 +78,7 @@ func (s Sigstore) UntrustedMIMEType() string {
|
|||
return s.untrustedMIMEType
|
||||
}
|
||||
func (s Sigstore) UntrustedPayload() []byte {
|
||||
return slices.Clone(s.untrustedPayload)
|
||||
return bytes.Clone(s.untrustedPayload)
|
||||
}
|
||||
|
||||
func (s Sigstore) UntrustedAnnotations() map[string]string {
|
||||
|
|
|
|||
8
vendor/github.com/containers/image/v5/internal/signature/simple.go
generated
vendored
8
vendor/github.com/containers/image/v5/internal/signature/simple.go
generated
vendored
|
|
@ -1,6 +1,6 @@
|
|||
package signature
|
||||
|
||||
import "golang.org/x/exp/slices"
|
||||
import "bytes"
|
||||
|
||||
// SimpleSigning is a “simple signing” signature.
|
||||
type SimpleSigning struct {
|
||||
|
|
@ -10,7 +10,7 @@ type SimpleSigning struct {
|
|||
// SimpleSigningFromBlob converts a “simple signing” signature into a SimpleSigning object.
|
||||
func SimpleSigningFromBlob(blobChunk []byte) SimpleSigning {
|
||||
return SimpleSigning{
|
||||
untrustedSignature: slices.Clone(blobChunk),
|
||||
untrustedSignature: bytes.Clone(blobChunk),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -21,9 +21,9 @@ func (s SimpleSigning) FormatID() FormatID {
|
|||
// blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
|
||||
// Almost everyone should use signature.Blob() instead.
|
||||
func (s SimpleSigning) blobChunk() ([]byte, error) {
|
||||
return slices.Clone(s.untrustedSignature), nil
|
||||
return bytes.Clone(s.untrustedSignature), nil
|
||||
}
|
||||
|
||||
func (s SimpleSigning) UntrustedSignature() []byte {
|
||||
return slices.Clone(s.untrustedSignature)
|
||||
return bytes.Clone(s.untrustedSignature)
|
||||
}
|
||||
|
|
|
|||
6
vendor/github.com/containers/image/v5/manifest/common.go
generated
vendored
6
vendor/github.com/containers/image/v5/manifest/common.go
generated
vendored
|
|
@ -67,15 +67,15 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType
|
|||
return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)}
|
||||
}
|
||||
if name != mtsUncompressed {
|
||||
return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mimeType)}
|
||||
return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %q", name, mimeType)}
|
||||
}
|
||||
// We can't very well say “the idea of no compression is unknown”
|
||||
return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)}
|
||||
}
|
||||
if algorithm != nil {
|
||||
return "", fmt.Errorf("unsupported MIME type for compression: %s", mimeType)
|
||||
return "", fmt.Errorf("unsupported MIME type for compression: %q", mimeType)
|
||||
}
|
||||
return "", fmt.Errorf("unsupported MIME type for decompression: %s", mimeType)
|
||||
return "", fmt.Errorf("unsupported MIME type for decompression: %q", mimeType)
|
||||
}
|
||||
|
||||
// updatedMIMEType returns the result of applying edits in updated (MediaType, CompressionOperation) to
|
||||
|
|
|
|||
6
vendor/github.com/containers/image/v5/manifest/docker_schema1.go
generated
vendored
6
vendor/github.com/containers/image/v5/manifest/docker_schema1.go
generated
vendored
|
|
@ -4,6 +4,7 @@ import (
|
|||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
|
|
@ -15,7 +16,6 @@ import (
|
|||
"github.com/containers/storage/pkg/regexp"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1.
|
||||
|
|
@ -221,7 +221,7 @@ func (m *Schema1) fixManifestLayers() error {
|
|||
m.History = slices.Delete(m.History, i, i+1)
|
||||
m.ExtractedV1Compatibility = slices.Delete(m.ExtractedV1Compatibility, i, i+1)
|
||||
} else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID {
|
||||
return fmt.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent)
|
||||
return fmt.Errorf("Invalid parent ID. Expected %v, got %q", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
|
@ -342,5 +342,5 @@ func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) {
|
|||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return digest.FromBytes(image).Hex(), nil
|
||||
return digest.FromBytes(image).Encoded(), nil
|
||||
}
|
||||
|
|
|
|||
9
vendor/github.com/containers/image/v5/manifest/docker_schema2.go
generated
vendored
9
vendor/github.com/containers/image/v5/manifest/docker_schema2.go
generated
vendored
|
|
@ -54,9 +54,10 @@ type Schema2HealthConfig struct {
|
|||
Test []string `json:",omitempty"`
|
||||
|
||||
// Zero means to inherit. Durations are expressed as integer nanoseconds.
|
||||
StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check.
|
||||
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
|
||||
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
|
||||
StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check.
|
||||
StartInterval time.Duration `json:",omitempty"` // StartInterval is the time to wait between checks during the start period.
|
||||
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
|
||||
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
|
||||
|
||||
// Retries is the number of consecutive failures needed to consider a container as unhealthy.
|
||||
// Zero means inherit.
|
||||
|
|
@ -294,7 +295,7 @@ func (m *Schema2) ImageID([]digest.Digest) (string, error) {
|
|||
if err := m.ConfigDescriptor.Digest.Validate(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return m.ConfigDescriptor.Digest.Hex(), nil
|
||||
return m.ConfigDescriptor.Digest.Encoded(), nil
|
||||
}
|
||||
|
||||
// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
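Not part of the commit: a sketch of filling the Schema2HealthConfig hunk above, including the newly added StartInterval field; all values are illustrative.

package main

import (
	"fmt"
	"time"

	"github.com/containers/image/v5/manifest"
)

func main() {
	hc := manifest.Schema2HealthConfig{
		Test:          []string{"CMD-SHELL", "curl -fsS http://localhost:8080/healthz || exit 1"},
		StartPeriod:   30 * time.Second, // grace period after the container starts
		StartInterval: 2 * time.Second,  // new field: probe interval while still inside StartPeriod
		Interval:      10 * time.Second,
		Timeout:       3 * time.Second,
		Retries:       3, // zero would mean "inherit"
	}
	fmt.Printf("%+v\n", hc)
}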
|||
2
vendor/github.com/containers/image/v5/manifest/manifest.go
generated
vendored
2
vendor/github.com/containers/image/v5/manifest/manifest.go
generated
vendored
|
|
@ -166,5 +166,5 @@ func FromBlob(manblob []byte, mt string) (Manifest, error) {
|
|||
return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented")
|
||||
}
|
||||
// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
|
||||
return nil, fmt.Errorf("Unimplemented manifest MIME type %s (normalized as %s)", mt, nmt)
|
||||
return nil, fmt.Errorf("Unimplemented manifest MIME type %q (normalized as %q)", mt, nmt)
|
||||
}
|
||||
|
|
|
|||
17
vendor/github.com/containers/image/v5/manifest/oci.go
generated
vendored
17
vendor/github.com/containers/image/v5/manifest/oci.go
generated
vendored
|
|
@ -3,6 +3,7 @@ package manifest
|
|||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/image/v5/internal/manifest"
|
||||
|
|
@ -12,7 +13,6 @@ import (
|
|||
"github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/image-spec/specs-go"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor.
|
||||
|
|
@ -167,7 +167,7 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
|
|||
// an error if the mediatype does not support encryption
|
||||
func getEncryptedMediaType(mediatype string) (string, error) {
|
||||
if slices.Contains(strings.Split(mediatype, "+")[1:], "encrypted") {
|
||||
return "", fmt.Errorf("unsupported mediaType: %v already encrypted", mediatype)
|
||||
return "", fmt.Errorf("unsupported mediaType: %q already encrypted", mediatype)
|
||||
}
|
||||
unsuffixedMediatype := strings.Split(mediatype, "+")[0]
|
||||
switch unsuffixedMediatype {
|
||||
|
|
@ -176,17 +176,18 @@ func getEncryptedMediaType(mediatype string) (string, error) {
|
|||
return mediatype + "+encrypted", nil
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("unsupported mediaType to encrypt: %v", mediatype)
|
||||
return "", fmt.Errorf("unsupported mediaType to encrypt: %q", mediatype)
|
||||
}
|
||||
|
||||
// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return
|
||||
// getDecryptedMediaType will return the mediatype to its encrypted counterpart and return
|
||||
// an error if the mediatype does not support decryption
|
||||
func getDecryptedMediaType(mediatype string) (string, error) {
|
||||
if !strings.HasSuffix(mediatype, "+encrypted") {
|
||||
return "", fmt.Errorf("unsupported mediaType to decrypt: %v", mediatype)
|
||||
res, ok := strings.CutSuffix(mediatype, "+encrypted")
|
||||
if !ok {
|
||||
return "", fmt.Errorf("unsupported mediaType to decrypt: %q", mediatype)
|
||||
}
|
||||
|
||||
return strings.TrimSuffix(mediatype, "+encrypted"), nil
|
||||
return res, nil
|
||||
}
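getDecryptedMediaType now relies on strings.CutSuffix; a minimal standalone illustration of that standard-library call (Go 1.20+).

package main

import (
	"fmt"
	"strings"
)

func main() {
	// CutSuffix reports whether the suffix was present and strips it in one step.
	res, ok := strings.CutSuffix("application/vnd.oci.image.layer.v1.tar+gzip+encrypted", "+encrypted")
	fmt.Println(ok, res) // true application/vnd.oci.image.layer.v1.tar+gzip
	res, ok = strings.CutSuffix(res, "+encrypted")
	fmt.Println(ok, res) // false application/vnd.oci.image.layer.v1.tar+gzip
}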
|
||||
|
||||
// Serialize returns the manifest in a blob format.
|
||||
|
|
@ -259,7 +260,7 @@ func (m *OCI1) ImageID(diffIDs []digest.Digest) (string, error) {
|
|||
if err := m.Config.Digest.Validate(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return m.Config.Digest.Hex(), nil
|
||||
return m.Config.Digest.Encoded(), nil
|
||||
}
|
||||
|
||||
// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
|
||||
|
|
|
|||
2
vendor/github.com/containers/image/v5/oci/archive/oci_src.go
generated
vendored
2
vendor/github.com/containers/image/v5/oci/archive/oci_src.go
generated
vendored
|
|
@ -149,6 +149,8 @@ func (s *ociArchiveImageSource) SupportsGetBlobAt() bool {
|
|||
// The specified chunks must be not overlapping and sorted by their offset.
|
||||
// The readers must be fully consumed, in the order they are returned, before blocking
|
||||
// to read the next chunk.
|
||||
// If the Length for the last chunk is set to math.MaxUint64, then it
|
||||
// fully fetches the remaining data from the offset to the end of the blob.
|
||||
func (s *ociArchiveImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
|
||||
return s.unpackedSrc.GetBlobAt(ctx, info, chunks)
|
||||
}
|
||||
|
|
|
|||
2
vendor/github.com/containers/image/v5/oci/layout/oci_delete.go
generated
vendored
2
vendor/github.com/containers/image/v5/oci/layout/oci_delete.go
generated
vendored
|
|
@ -6,13 +6,13 @@ import (
|
|||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"slices"
|
||||
|
||||
"github.com/containers/image/v5/internal/set"
|
||||
"github.com/containers/image/v5/types"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// DeleteImage deletes the named image from the directory, if supported.
|
||||
|
|
|
|||
8
vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
generated
vendored
8
vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
generated
vendored
|
|
@ -6,9 +6,11 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"slices"
|
||||
|
||||
"github.com/containers/image/v5/internal/imagedestination/impl"
|
||||
"github.com/containers/image/v5/internal/imagedestination/stubs"
|
||||
|
|
@ -16,10 +18,10 @@ import (
|
|||
"github.com/containers/image/v5/internal/private"
|
||||
"github.com/containers/image/v5/internal/putblobdigest"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage/pkg/fileutils"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
imgspec "github.com/opencontainers/image-spec/specs-go"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
type ociImageDestination struct {
|
||||
|
|
@ -301,7 +303,7 @@ func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error
|
|||
}
|
||||
|
||||
func ensureDirectoryExists(path string) error {
|
||||
if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
|
||||
if err := fileutils.Exists(path); err != nil && errors.Is(err, fs.ErrNotExist) {
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -317,7 +319,7 @@ func ensureParentDirectoryExists(path string) error {
|
|||
// indexExists checks whether the index location specified in the OCI reference exists.
|
||||
// The implementation is opinionated, since in case of unexpected errors false is returned
|
||||
func indexExists(ref ociReference) bool {
|
||||
_, err := os.Stat(ref.indexPath())
|
||||
err := fileutils.Exists(ref.indexPath())
|
||||
if err == nil {
|
||||
return true
|
||||
}
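Both hunks above replace os.Stat with fileutils.Exists from containers/storage; a hedged sketch of the resulting three-way check (the path is hypothetical, and the Exists signature is inferred from its use above).

package main

import (
	"errors"
	"fmt"
	"io/fs"

	"github.com/containers/storage/pkg/fileutils"
)

func main() {
	switch err := fileutils.Exists("/var/lib/containers/oci/index.json"); {
	case err == nil:
		fmt.Println("present")
	case errors.Is(err, fs.ErrNotExist):
		fmt.Println("absent")
	default:
		fmt.Println("cannot tell:", err) // e.g. permission problems
	}
}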
|||
6
vendor/github.com/containers/image/v5/oci/layout/oci_src.go
generated
vendored
6
vendor/github.com/containers/image/v5/oci/layout/oci_src.go
generated
vendored
|
|
@ -182,19 +182,19 @@ func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io
|
|||
hasSupportedURL = true
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
|
||||
if err != nil {
|
||||
errWrap = fmt.Errorf("fetching %s failed %s: %w", u, err.Error(), errWrap)
|
||||
errWrap = fmt.Errorf("fetching %q failed %s: %w", u, err.Error(), errWrap)
|
||||
continue
|
||||
}
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil {
|
||||
errWrap = fmt.Errorf("fetching %s failed %s: %w", u, err.Error(), errWrap)
|
||||
errWrap = fmt.Errorf("fetching %q failed %s: %w", u, err.Error(), errWrap)
|
||||
continue
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
resp.Body.Close()
|
||||
errWrap = fmt.Errorf("fetching %s failed, response code not 200: %w", u, errWrap)
|
||||
errWrap = fmt.Errorf("fetching %q failed, response code not 200: %w", u, errWrap)
|
||||
continue
|
||||
}
|
||||
|
||||
|
|
|
|||
2
vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
generated
vendored
2
vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
generated
vendored
|
|
@ -256,5 +256,5 @@ func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (st
|
|||
} else {
|
||||
blobDir = filepath.Join(ref.dir, imgspecv1.ImageBlobsDir)
|
||||
}
|
||||
return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil
|
||||
return filepath.Join(blobDir, digest.Algorithm().String(), digest.Encoded()), nil
|
||||
}
|
||||
|
|
|
|||
33
vendor/github.com/containers/image/v5/openshift/openshift-copies.go
generated
vendored
33
vendor/github.com/containers/image/v5/openshift/openshift-copies.go
generated
vendored
|
|
@ -14,13 +14,14 @@ import (
|
|||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"dario.cat/mergo"
|
||||
"github.com/containers/image/v5/internal/multierr"
|
||||
"github.com/containers/storage/pkg/homedir"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/exp/slices"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
|
|
@ -459,12 +460,6 @@ func (config *directClientConfig) getCluster() clientcmdCluster {
|
|||
return mergedClusterInfo
|
||||
}
|
||||
|
||||
// aggregateErr is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.
|
||||
// This helper implements the error and Errors interfaces. Keeping it private
|
||||
// prevents people from making an aggregate of 0 errors, which is not
|
||||
// an error, but does satisfy the error interface.
|
||||
type aggregateErr []error
|
||||
|
||||
// newAggregate is a modified copy of k8s.io/apimachinery/pkg/util/errors.NewAggregate.
|
||||
// NewAggregate converts a slice of errors into an Aggregate interface, which
|
||||
// is itself an implementation of the error interface. If the slice is empty,
|
||||
|
|
@ -485,29 +480,9 @@ func newAggregate(errlist []error) error {
|
|||
if len(errs) == 0 {
|
||||
return nil
|
||||
}
|
||||
return aggregateErr(errs)
|
||||
return multierr.Format("[", ", ", "]", errs)
|
||||
}
|
||||
|
||||
// Error is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.Error.
|
||||
// Error is part of the error interface.
|
||||
func (agg aggregateErr) Error() string {
|
||||
if len(agg) == 0 {
|
||||
// This should never happen, really.
|
||||
return ""
|
||||
}
|
||||
if len(agg) == 1 {
|
||||
return agg[0].Error()
|
||||
}
|
||||
result := fmt.Sprintf("[%s", agg[0].Error())
|
||||
for i := 1; i < len(agg); i++ {
|
||||
result += fmt.Sprintf(", %s", agg[i].Error())
|
||||
}
|
||||
result += "]"
|
||||
return result
|
||||
}
|
||||
|
||||
// REMOVED: aggregateErr.Errors
|
||||
|
||||
// errConfigurationInvalid is a modified? copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.errConfigurationInvalid.
|
||||
// errConfigurationInvalid is a set of errors indicating the configuration is invalid.
|
||||
type errConfigurationInvalid []error
|
||||
|
|
@ -578,7 +553,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
|
|||
continue
|
||||
}
|
||||
if err != nil {
|
||||
errlist = append(errlist, fmt.Errorf("loading config file \"%s\": %w", filename, err))
|
||||
errlist = append(errlist, fmt.Errorf("loading config file %q: %w", filename, err))
|
||||
continue
|
||||
}
|
||||
|
||||
|
|
|
|||
2
vendor/github.com/containers/image/v5/openshift/openshift.go
generated
vendored
2
vendor/github.com/containers/image/v5/openshift/openshift.go
generated
vendored
|
|
@ -152,7 +152,7 @@ func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName str
|
|||
func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) {
|
||||
_, repo, gotRepo := strings.Cut(ref, "/")
|
||||
if !gotRepo {
|
||||
return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", ref)
|
||||
return "", fmt.Errorf("Invalid format of docker reference %q: missing '/'", ref)
|
||||
}
|
||||
return reference.Domain(c.ref.dockerReference) + "/" + repo, nil
|
||||
}
|
||||
|
|
|
|||
2
vendor/github.com/containers/image/v5/openshift/openshift_dest.go
generated
vendored
2
vendor/github.com/containers/image/v5/openshift/openshift_dest.go
generated
vendored
|
|
@ -9,6 +9,7 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"slices"
|
||||
|
||||
"github.com/containers/image/v5/docker"
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
|
|
@ -21,7 +22,6 @@ import (
|
|||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
type openshiftImageDestination struct {
|
||||
|
|
|
|||
20
vendor/github.com/containers/image/v5/ostree/ostree_dest.go
generated
vendored
20
vendor/github.com/containers/image/v5/ostree/ostree_dest.go
generated
vendored
|
|
@ -11,6 +11,7 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
|
|
@ -29,6 +30,7 @@ import (
|
|||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/fileutils"
|
||||
"github.com/klauspost/pgzip"
|
||||
"github.com/opencontainers/go-digest"
|
||||
selinux "github.com/opencontainers/selinux/go-selinux"
|
||||
|
|
@ -162,7 +164,7 @@ func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream
|
|||
return private.UploadedBlob{}, err
|
||||
}
|
||||
|
||||
hash := blobDigest.Hex()
|
||||
hash := blobDigest.Encoded()
|
||||
d.blobs[hash] = &blobToImport{Size: size, Digest: blobDigest, BlobPath: blobPath}
|
||||
return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
|
||||
}
|
||||
|
|
@ -280,8 +282,8 @@ func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest,
|
|||
func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, repo *otbuiltin.Repo, blob *blobToImport) error {
|
||||
// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
|
||||
|
||||
ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
|
||||
destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root")
|
||||
ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Encoded())
|
||||
destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Encoded(), "root")
|
||||
if err := ensureDirectoryExists(destinationPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -321,7 +323,7 @@ func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle,
|
|||
}
|
||||
|
||||
func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error {
|
||||
ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
|
||||
ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Encoded())
|
||||
destinationPath := filepath.Dir(blob.BlobPath)
|
||||
|
||||
return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
|
||||
|
|
@ -346,10 +348,10 @@ func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context,
|
|||
d.repo = repo
|
||||
}
|
||||
|
||||
if err := info.Digest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, so validate explicitly.
|
||||
if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
|
||||
return false, private.ReusedBlob{}, err
|
||||
}
|
||||
branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
|
||||
branch := fmt.Sprintf("ociimage/%s", info.Digest.Encoded())
|
||||
|
||||
found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
|
||||
if err != nil || !found {
|
||||
|
|
@ -477,7 +479,7 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
|
|||
if err := layer.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
|
||||
return err
|
||||
}
|
||||
hash := layer.Digest.Hex()
|
||||
hash := layer.Digest.Encoded()
|
||||
if err = checkLayer(hash); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -486,7 +488,7 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
|
|||
if err := layer.BlobSum.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
|
||||
return err
|
||||
}
|
||||
hash := layer.BlobSum.Hex()
|
||||
hash := layer.BlobSum.Encoded()
|
||||
if err = checkLayer(hash); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -514,7 +516,7 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
|
|||
}
|
||||
|
||||
func ensureDirectoryExists(path string) error {
|
||||
if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
|
||||
if err := fileutils.Exists(path); err != nil && errors.Is(err, fs.ErrNotExist) {
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
13
vendor/github.com/containers/image/v5/ostree/ostree_src.go
generated
vendored
13
vendor/github.com/containers/image/v5/ostree/ostree_src.go
generated
vendored
|
|
@ -190,7 +190,7 @@ func (o ostreeReader) Read(p []byte) (int, error) {
|
|||
if count == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
data := (*[1 << 30]byte)(unsafe.Pointer(C.g_bytes_get_data(b, nil)))[:count:count]
|
||||
data := unsafe.Slice((*byte)(C.g_bytes_get_data(b, nil)), count)
|
||||
copy(p, data)
|
||||
return count, nil
|
||||
}
|
||||
|
|
@ -289,7 +289,7 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
|
|||
if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
|
||||
return nil, -1, err
|
||||
}
|
||||
blob := info.Digest.Hex()
|
||||
blob := info.Digest.Encoded()
|
||||
|
||||
// Ensure s.compressed is initialized. It is build by LayerInfosForCopy.
|
||||
if s.compressed == nil {
|
||||
|
|
@ -301,7 +301,7 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
|
|||
}
|
||||
compressedBlob, isCompressed := s.compressed[info.Digest]
|
||||
if isCompressed {
|
||||
blob = compressedBlob.Hex()
|
||||
blob = compressedBlob.Encoded()
|
||||
}
|
||||
branch := fmt.Sprintf("ociimage/%s", blob)
|
||||
|
||||
|
|
@ -424,7 +424,7 @@ func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context, instanceDiges
|
|||
layerBlobs := man.LayerInfos()
|
||||
|
||||
for _, layerBlob := range layerBlobs {
|
||||
branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Hex())
|
||||
branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Encoded())
|
||||
found, uncompressedDigestStr, err := readMetadata(s.repo, branch, "docker.uncompressed_digest")
|
||||
if err != nil || !found {
|
||||
return nil, err
|
||||
|
|
@ -439,7 +439,10 @@ func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context, instanceDiges
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
uncompressedDigest := digest.Digest(uncompressedDigestStr)
|
||||
uncompressedDigest, err := digest.Parse(uncompressedDigestStr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blobInfo := types.BlobInfo{
|
||||
Digest: uncompressedDigest,
|
||||
Size: uncompressedSize,
|
||||
|
|
|
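The switch from a plain digest.Digest conversion to digest.Parse above adds validation; a small standalone sketch of the difference (the example digest is the SHA-256 of the empty string).

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// digest.Parse validates the string; a bare digest.Digest(...) conversion does not,
	// and later calls such as Encoded() would panic on malformed input.
	d, err := digest.Parse("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.Algorithm(), d.Encoded())
}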
|||
109
vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
generated
vendored
109
vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
generated
vendored
|
|
@ -1,13 +1,18 @@
|
|||
// Package prioritize provides utilities for prioritizing locations in
|
||||
// Package prioritize provides utilities for filtering and prioritizing locations in
|
||||
// types.BlobInfoCache.CandidateLocations.
|
||||
package prioritize
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"cmp"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/v5/internal/blobinfocache"
|
||||
"github.com/containers/image/v5/internal/manifest"
|
||||
"github.com/containers/image/v5/pkg/compression"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// replacementAttempts is the number of blob replacement candidates with known location returned by destructivelyPrioritizeReplacementCandidates,
|
||||
|
|
@ -20,28 +25,67 @@ const replacementAttempts = 5
|
|||
// This is a heuristic/guess, and could well use a different value.
|
||||
const replacementUnknownLocationAttempts = 2
|
||||
|
||||
// CandidateCompression returns (true, compressionOp, compressionAlgo) if a blob
|
||||
// with compressionName (which can be Uncompressed or UnknownCompression) is acceptable for a CandidateLocations* call with v2Options.
|
||||
//
|
||||
// v2Options can be set to nil if the call is CandidateLocations (i.e. compression is not required to be known);
|
||||
// if not nil, the call is assumed to be CandidateLocations2.
|
||||
//
|
||||
// The (compressionOp, compressionAlgo) values are suitable for BICReplacementCandidate2
|
||||
func CandidateCompression(v2Options *blobinfocache.CandidateLocations2Options, digest digest.Digest, compressorName string) (bool, types.LayerCompression, *compression.Algorithm) {
|
||||
if v2Options == nil {
|
||||
return true, types.PreserveOriginal, nil // Anything goes. The (compressionOp, compressionAlgo) values are not used.
|
||||
}
|
||||
|
||||
var op types.LayerCompression
|
||||
var algo *compression.Algorithm
|
||||
switch compressorName {
|
||||
case blobinfocache.Uncompressed:
|
||||
op = types.Decompress
|
||||
algo = nil
|
||||
case blobinfocache.UnknownCompression:
|
||||
logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unknown compression", digest.String())
|
||||
return false, types.PreserveOriginal, nil // Not allowed with CandidateLocations2
|
||||
default:
|
||||
op = types.Compress
|
||||
algo_, err := compression.AlgorithmByName(compressorName)
|
||||
if err != nil {
|
||||
logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unrecognized compression %q: %v",
|
||||
digest.String(), compressorName, err)
|
||||
return false, types.PreserveOriginal, nil // The BICReplacementCandidate2.CompressionAlgorithm field is required
|
||||
}
|
||||
algo = &algo_
|
||||
}
|
||||
if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
|
||||
PossibleManifestFormats: v2Options.PossibleManifestFormats,
|
||||
RequiredCompression: v2Options.RequiredCompression,
|
||||
}, algo) {
|
||||
requiredCompresssion := "nil"
|
||||
if v2Options.RequiredCompression != nil {
|
||||
requiredCompresssion = v2Options.RequiredCompression.Name()
|
||||
}
|
||||
logrus.Debugf("Ignoring BlobInfoCache record of digest %q, compression %q does not match required %s or MIME types %#v",
|
||||
digest.String(), compressorName, requiredCompresssion, v2Options.PossibleManifestFormats)
|
||||
return false, types.PreserveOriginal, nil
|
||||
}
|
||||
|
||||
return true, op, algo
|
||||
}
|
||||
|
||||
// CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
|
||||
type CandidateWithTime struct {
|
||||
Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate
|
||||
LastSeen time.Time // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation)
|
||||
}
|
||||
|
||||
// candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
|
||||
// along with the specially-treated digest values for the implementation of sort.Interface.Less
|
||||
// candidateSortState is a closure for a comparison used by slices.SortFunc on candidates to prioritize,
|
||||
// along with the specially-treated digest values relevant to the ordering.
|
||||
type candidateSortState struct {
|
||||
cs []CandidateWithTime // The entries to sort
|
||||
primaryDigest digest.Digest // The digest the user actually asked for
|
||||
uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest
|
||||
primaryDigest digest.Digest // The digest the user actually asked for
|
||||
uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest
|
||||
}
|
||||
|
||||
func (css *candidateSortState) Len() int {
|
||||
return len(css.cs)
|
||||
}
|
||||
|
||||
func (css *candidateSortState) Less(i, j int) bool {
|
||||
xi := css.cs[i]
|
||||
xj := css.cs[j]
|
||||
|
||||
func (css *candidateSortState) compare(xi, xj CandidateWithTime) int {
|
||||
// primaryDigest entries come first, more recent first.
|
||||
// uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first.
|
||||
// Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order)
|
||||
|
|
@ -50,43 +94,32 @@ func (css *candidateSortState) Less(i, j int) bool {
|
|||
if xi.Candidate.Digest != xj.Candidate.Digest {
|
||||
// - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter
|
||||
if xi.Candidate.Digest == css.primaryDigest {
|
||||
return true
|
||||
return -1
|
||||
}
|
||||
if xj.Candidate.Digest == css.primaryDigest {
|
||||
return false
|
||||
return 1
|
||||
}
|
||||
if css.uncompressedDigest != "" {
|
||||
if xi.Candidate.Digest == css.uncompressedDigest {
|
||||
return false
|
||||
return 1
|
||||
}
|
||||
if xj.Candidate.Digest == css.uncompressedDigest {
|
||||
return true
|
||||
return -1
|
||||
}
|
||||
}
|
||||
} else { // xi.Candidate.Digest == xj.Candidate.Digest
|
||||
// The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time
|
||||
if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) {
|
||||
return xi.LastSeen.After(xj.LastSeen)
|
||||
return -xi.LastSeen.Compare(xj.LastSeen)
|
||||
}
|
||||
}
|
||||
|
||||
// Neither of the digests are primaryDigest/uncompressedDigest:
|
||||
if !xi.LastSeen.Equal(xj.LastSeen) { // Order primarily by time
|
||||
return xi.LastSeen.After(xj.LastSeen)
|
||||
if cmp := xi.LastSeen.Compare(xj.LastSeen); cmp != 0 { // Order primarily by time
|
||||
return -cmp
|
||||
}
|
||||
// Fall back to digest, if timestamps end up _exactly_ the same (how?!)
|
||||
return xi.Candidate.Digest < xj.Candidate.Digest
|
||||
}
|
||||
|
||||
func (css *candidateSortState) Swap(i, j int) {
|
||||
css.cs[i], css.cs[j] = css.cs[j], css.cs[i]
|
||||
}
|
||||
|
||||
func min(a, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
return cmp.Compare(xi.Candidate.Digest, xj.Candidate.Digest)
|
||||
}
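The old sort.Interface implementation above is replaced by a three-way compare passed to slices.SortFunc; a self-contained illustration of the same standard-library pattern, using hypothetical data rather than the vendored types.

package main

import (
	"cmp"
	"fmt"
	"slices"
	"time"
)

type seen struct {
	digest string
	last   time.Time
}

func main() {
	now := time.Now()
	cs := []seen{
		{"sha256:bbb", now.Add(-time.Hour)},
		{"sha256:aaa", now},
		{"sha256:ccc", now},
	}
	// Same shape as candidateSortState.compare: a negative result sorts the first argument earlier.
	slices.SortFunc(cs, func(a, b seen) int {
		if c := a.last.Compare(b.last); c != 0 {
			return -c // more recent first
		}
		return cmp.Compare(a.digest, b.digest) // deterministic tie-break
	})
	fmt.Println(cs[0].digest, cs[1].digest, cs[2].digest) // sha256:aaa sha256:ccc sha256:bbb
}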
|
||||
|
||||
// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the
|
||||
|
|
@ -100,12 +133,10 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime,
|
|||
var unknownLocationCandidates []CandidateWithTime
|
||||
// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
|
||||
// compare equal.
|
||||
// FIXME: Use slices.SortFunc after we update to Go 1.20 (Go 1.21?) and Time.Compare and cmp.Compare are available.
|
||||
sort.Sort(&candidateSortState{
|
||||
cs: cs,
|
||||
slices.SortFunc(cs, (&candidateSortState{
|
||||
primaryDigest: primaryDigest,
|
||||
uncompressedDigest: uncompressedDigest,
|
||||
})
|
||||
}).compare)
|
||||
for _, candidate := range cs {
|
||||
if candidate.Candidate.UnknownLocation {
|
||||
unknownLocationCandidates = append(unknownLocationCandidates, candidate)
|
||||
|
|
@ -116,7 +147,7 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime,
|
|||
|
||||
knownLocationCandidatesUsed := min(len(knownLocationCandidates), totalLimit)
|
||||
remainingCapacity := totalLimit - knownLocationCandidatesUsed
|
||||
unknownLocationCandidatesUsed := min(noLocationLimit, min(remainingCapacity, len(unknownLocationCandidates)))
|
||||
unknownLocationCandidatesUsed := min(noLocationLimit, remainingCapacity, len(unknownLocationCandidates))
|
||||
res := make([]blobinfocache.BICReplacementCandidate2, knownLocationCandidatesUsed)
|
||||
for i := 0; i < knownLocationCandidatesUsed; i++ {
|
||||
res[i] = knownLocationCandidates[i].Candidate
|
||||
|
|
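The nested min call above collapses because Go 1.21's built-in min accepts any number of arguments; a trivial standalone check with hypothetical values.

package main

import "fmt"

func main() {
	noLocationLimit, remainingCapacity, pending := 2, 3, 1
	// Equivalent to min(noLocationLimit, min(remainingCapacity, pending)) on older Go versions.
	fmt.Println(min(noLocationLimit, remainingCapacity, pending)) // 1
}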
|
|||
55
vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
generated
vendored
55
vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
generated
vendored
|
|
@ -135,14 +135,17 @@ func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compresso
|
|||
|
||||
// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory
|
||||
// with corresponding compression info from mem.compressors, and returns the result of appending
|
||||
// them to candidates. v2Output allows including candidates with unknown location, and filters out
|
||||
// candidates with unknown compression.
|
||||
func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) []prioritize.CandidateWithTime {
|
||||
// them to candidates.
|
||||
// v2Options is not nil if the caller is CandidateLocations2: this allows including candidates with unknown location, and filters out candidates
|
||||
// with unknown compression.
|
||||
func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest,
|
||||
v2Options *blobinfocache.CandidateLocations2Options) []prioritize.CandidateWithTime {
|
||||
compressorName := blobinfocache.UnknownCompression
|
||||
if v, ok := mem.compressors[digest]; ok {
|
||||
compressorName = v
|
||||
}
|
||||
if compressorName == blobinfocache.UnknownCompression && v2Output {
|
||||
ok, compressionOp, compressionAlgo := prioritize.CandidateCompression(v2Options, digest, compressorName)
|
||||
if !ok {
|
||||
return candidates
|
||||
}
|
||||
locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
|
||||
|
|
@ -150,20 +153,22 @@ func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
|
|||
for l, t := range locations {
|
||||
candidates = append(candidates, prioritize.CandidateWithTime{
|
||||
Candidate: blobinfocache.BICReplacementCandidate2{
|
||||
Digest: digest,
|
||||
CompressorName: compressorName,
|
||||
Location: l,
|
||||
Digest: digest,
|
||||
CompressionOperation: compressionOp,
|
||||
CompressionAlgorithm: compressionAlgo,
|
||||
Location: l,
|
||||
},
|
||||
LastSeen: t,
|
||||
})
|
||||
}
|
||||
} else if v2Output {
|
||||
} else if v2Options != nil {
|
||||
candidates = append(candidates, prioritize.CandidateWithTime{
|
||||
Candidate: blobinfocache.BICReplacementCandidate2{
|
||||
Digest: digest,
|
||||
CompressorName: compressorName,
|
||||
UnknownLocation: true,
|
||||
Location: types.BICLocationReference{Opaque: ""},
|
||||
Digest: digest,
|
||||
CompressionOperation: compressionOp,
|
||||
CompressionAlgorithm: compressionAlgo,
|
||||
UnknownLocation: true,
|
||||
Location: types.BICLocationReference{Opaque: ""},
|
||||
},
|
||||
LastSeen: time.Time{},
|
||||
})
|
||||
|
|
@ -178,24 +183,24 @@ func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
|
|||
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
|
||||
// uncompressed digest.
|
||||
func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
|
||||
return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, false))
|
||||
return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, nil))
|
||||
}
|
||||
|
||||
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) that could possibly be reused
|
||||
// within the specified (transport scope) (if they still exist, which is not guaranteed).
|
||||
//
|
||||
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
|
||||
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
|
||||
// uncompressed digest.
|
||||
func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
|
||||
return mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, true)
|
||||
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known)
|
||||
// that could possibly be reused within the specified (transport scope) (if they still
|
||||
// exist, which is not guaranteed).
|
||||
func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, options blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
|
||||
return mem.candidateLocations(transport, scope, primaryDigest, options.CanSubstitute, &options)
|
||||
}
|
||||
|
||||
func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 {
|
||||
// candidateLocations implements CandidateLocations / CandidateLocations2.
|
||||
// v2Options is not nil if the caller is CandidateLocations2.
|
||||
func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool,
|
||||
v2Options *blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
|
||||
mem.mutex.Lock()
|
||||
defer mem.mutex.Unlock()
|
||||
res := []prioritize.CandidateWithTime{}
|
||||
res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, v2Output)
|
||||
res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, v2Options)
|
||||
var uncompressedDigest digest.Digest // = ""
|
||||
if canSubstitute {
|
||||
if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
|
||||
|
|
@ -203,12 +208,12 @@ func (mem *cache) candidateLocations(transport types.ImageTransport, scope types
|
|||
if otherDigests != nil {
|
||||
for _, d := range otherDigests.Values() {
|
||||
if d != primaryDigest && d != uncompressedDigest {
|
||||
res = mem.appendReplacementCandidates(res, transport, scope, d, v2Output)
|
||||
res = mem.appendReplacementCandidates(res, transport, scope, d, v2Options)
|
||||
}
|
||||
}
|
||||
}
|
||||
if uncompressedDigest != primaryDigest {
|
||||
res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, v2Output)
|
||||
res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, v2Options)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
132
vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go
generated
vendored
132
vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go
generated
vendored
|
|
@ -428,88 +428,86 @@ func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressor
|
|||
}
|
||||
|
||||
// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest),
|
||||
// and returns the result of appending them to candidates. v2Output allows including candidates with unknown
|
||||
// location, and filters out candidates with unknown compression.
|
||||
func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) ([]prioritize.CandidateWithTime, error) {
|
||||
var rows *sql.Rows
|
||||
var err error
|
||||
if v2Output {
|
||||
rows, err = tx.Query("SELECT location, time, compressor FROM KnownLocations JOIN DigestCompressors "+
|
||||
"ON KnownLocations.digest = DigestCompressors.digest "+
|
||||
"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
|
||||
transport.Name(), scope.Opaque, digest.String())
|
||||
} else {
|
||||
rows, err = tx.Query("SELECT location, time, IFNULL(compressor, ?) FROM KnownLocations "+
|
||||
"LEFT JOIN DigestCompressors ON KnownLocations.digest = DigestCompressors.digest "+
|
||||
"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
|
||||
blobinfocache.UnknownCompression,
|
||||
transport.Name(), scope.Opaque, digest.String())
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("looking up candidate locations: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
res := []prioritize.CandidateWithTime{}
|
||||
for rows.Next() {
|
||||
var location string
|
||||
var time time.Time
|
||||
var compressorName string
|
||||
if err := rows.Scan(&location, &time, &compressorName); err != nil {
|
||||
return nil, fmt.Errorf("scanning candidate: %w", err)
|
||||
}
|
||||
res = append(res, prioritize.CandidateWithTime{
|
||||
Candidate: blobinfocache.BICReplacementCandidate2{
|
||||
Digest: digest,
|
||||
CompressorName: compressorName,
|
||||
Location: types.BICLocationReference{Opaque: location},
|
||||
},
|
||||
LastSeen: time,
|
||||
})
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, fmt.Errorf("iterating through locations: %w", err)
|
||||
}
|
||||
|
||||
if len(res) == 0 && v2Output {
|
||||
// and returns the result of appending them to candidates.
|
||||
// v2Options is not nil if the caller is CandidateLocations2: this allows including candidates with unknown location, and filters out candidates
|
||||
// with unknown compression.
|
||||
func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest,
|
||||
v2Options *blobinfocache.CandidateLocations2Options) ([]prioritize.CandidateWithTime, error) {
|
||||
compressorName := blobinfocache.UnknownCompression
|
||||
if v2Options != nil {
|
||||
compressor, found, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", digest.String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("scanning compressorName: %w", err)
|
||||
}
|
||||
if found {
|
||||
res = append(res, prioritize.CandidateWithTime{
|
||||
Candidate: blobinfocache.BICReplacementCandidate2{
|
||||
Digest: digest,
|
||||
CompressorName: compressor,
|
||||
UnknownLocation: true,
|
||||
Location: types.BICLocationReference{Opaque: ""},
|
||||
},
|
||||
LastSeen: time.Time{},
|
||||
})
|
||||
compressorName = compressor
|
||||
}
|
||||
}
|
||||
candidates = append(candidates, res...)
|
||||
ok, compressionOp, compressionAlgo := prioritize.CandidateCompression(v2Options, digest, compressorName)
|
||||
if !ok {
|
||||
return candidates, nil
|
||||
}
|
||||
|
||||
rows, err := tx.Query("SELECT location, time FROM KnownLocations "+
|
||||
"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
|
||||
transport.Name(), scope.Opaque, digest.String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("looking up candidate locations: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
rowAdded := false
|
||||
for rows.Next() {
|
||||
var location string
|
||||
var time time.Time
|
||||
if err := rows.Scan(&location, &time); err != nil {
|
||||
return nil, fmt.Errorf("scanning candidate: %w", err)
|
||||
}
|
||||
candidates = append(candidates, prioritize.CandidateWithTime{
|
||||
Candidate: blobinfocache.BICReplacementCandidate2{
|
||||
Digest: digest,
|
||||
CompressionOperation: compressionOp,
|
||||
CompressionAlgorithm: compressionAlgo,
|
||||
Location: types.BICLocationReference{Opaque: location},
|
||||
},
|
||||
LastSeen: time,
|
||||
})
|
||||
rowAdded = true
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, fmt.Errorf("iterating through locations: %w", err)
|
||||
}
|
||||
|
||||
if !rowAdded && v2Options != nil {
|
||||
candidates = append(candidates, prioritize.CandidateWithTime{
|
||||
Candidate: blobinfocache.BICReplacementCandidate2{
|
||||
Digest: digest,
|
||||
CompressionOperation: compressionOp,
|
||||
CompressionAlgorithm: compressionAlgo,
|
||||
UnknownLocation: true,
|
||||
Location: types.BICLocationReference{Opaque: ""},
|
||||
},
|
||||
LastSeen: time.Time{},
|
||||
})
|
||||
}
|
||||
return candidates, nil
|
||||
}
|
||||
|
||||
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known)
|
||||
// that could possibly be reused within the specified (transport scope) (if they still
|
||||
// exist, which is not guaranteed).
|
||||
//
|
||||
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
|
||||
// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
|
||||
// up variants of the blob which have the same uncompressed digest.
|
||||
//
|
||||
// The CompressorName fields in returned data must never be UnknownCompression.
|
||||
func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
|
||||
return sqc.candidateLocations(transport, scope, digest, canSubstitute, true)
|
||||
func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
|
||||
return sqc.candidateLocations(transport, scope, digest, options.CanSubstitute, &options)
|
||||
}
|
||||
|
||||
func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 {
|
||||
// candidateLocations implements CandidateLocations / CandidateLocations2.
|
||||
// v2Options is not nil if the caller is CandidateLocations2.
|
||||
func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool,
|
||||
v2Options *blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
|
||||
var uncompressedDigest digest.Digest // = ""
|
||||
res, err := transaction(sqc, func(tx *sql.Tx) ([]prioritize.CandidateWithTime, error) {
|
||||
res := []prioritize.CandidateWithTime{}
|
||||
res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, v2Output)
|
||||
res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, v2Options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -538,7 +536,7 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
|
|||
return nil, err
|
||||
}
|
||||
if otherDigest != primaryDigest && otherDigest != uncompressedDigest {
|
||||
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Output)
|
||||
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -549,7 +547,7 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
|
|||
}
|
||||
|
||||
if uncompressedDigest != primaryDigest {
|
||||
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Output)
|
||||
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -571,5 +569,5 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
|
|||
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
|
||||
// uncompressed digest.
|
||||
func (sqc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
|
||||
return blobinfocache.CandidateLocationsFromV2(sqc.candidateLocations(transport, scope, digest, canSubstitute, false))
|
||||
return blobinfocache.CandidateLocationsFromV2(sqc.candidateLocations(transport, scope, digest, canSubstitute, nil))
|
||||
}
|
||||
|
|
|
|||
55 vendor/github.com/containers/image/v5/pkg/docker/config/config.go generated vendored

@ -13,14 +13,15 @@ import (
"strings"

"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/multierr"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/ioutils"
helperclient "github.com/docker/docker-credential-helpers/client"
"github.com/docker/docker-credential-helpers/credentials"
"github.com/hashicorp/go-multierror"
"github.com/sirupsen/logrus"
)

@ -231,7 +232,7 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (t
return types.DockerAuthConfig{}, err
}

var multiErr error
var multiErr []error
for _, helper := range helpers {
var (
creds types.DockerAuthConfig

@ -253,7 +254,7 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (t
}
if err != nil {
logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", helperKey, helper, err)
multiErr = multierror.Append(multiErr, err)
multiErr = append(multiErr, err)
continue
}
if creds != (types.DockerAuthConfig{}) {

@ -266,7 +267,7 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (t
}
}
if multiErr != nil {
return types.DockerAuthConfig{}, multiErr
return types.DockerAuthConfig{}, multierr.Format("errors looking up credentials:\n\t* ", "\n\t* ", "\n", multiErr)
}

logrus.Debugf("No credentials for %s found", key)

@ -313,7 +314,7 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s
}

// Make sure to collect all errors.
var multiErr error
var multiErr []error
for _, helper := range helpers {
var desc string
var err error

@ -345,14 +346,14 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s
}
}
if err != nil {
multiErr = multierror.Append(multiErr, err)
multiErr = append(multiErr, err)
logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", key, helper, err)
continue
}
logrus.Debugf("Stored credentials for %s in credential helper %s", key, helper)
return desc, nil
}
return "", multiErr
return "", multierr.Format("Errors storing credentials\n\t* ", "\n\t* ", "\n", multiErr)
}

func unsupportedNamespaceErr(helper string) error {

@ -376,53 +377,56 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error {
return err
}

var multiErr error
isLoggedIn := false

removeFromCredHelper := func(helper string) {
removeFromCredHelper := func(helper string) error {
if isNamespaced {
logrus.Debugf("Not removing credentials because namespaced keys are not supported for the credential helper: %s", helper)
return
return nil
}
err := deleteCredsFromCredHelper(helper, key)
if err == nil {
logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper)
isLoggedIn = true
return
return nil
}
if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
logrus.Debugf("Not logged in to %s with credential helper %s", key, helper)
return
return nil
}
multiErr = multierror.Append(multiErr, fmt.Errorf("removing credentials for %s from credential helper %s: %w", key, helper, err))
return fmt.Errorf("removing credentials for %s from credential helper %s: %w", key, helper, err)
}

var multiErr []error
for _, helper := range helpers {
var err error
switch helper {
// Special-case the built-in helper for auth files.
case sysregistriesv2.AuthenticationFileHelper:
_, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
var helperErr error
if innerHelper, exists := fileContents.CredHelpers[key]; exists {
removeFromCredHelper(innerHelper)
helperErr = removeFromCredHelper(innerHelper)
}
if _, ok := fileContents.AuthConfigs[key]; ok {
isLoggedIn = true
delete(fileContents.AuthConfigs, key)
}
return true, "", multiErr
return true, "", helperErr
})
if err != nil {
multiErr = multierror.Append(multiErr, err)
multiErr = append(multiErr, err)
}
// External helpers.
default:
removeFromCredHelper(helper)
if err := removeFromCredHelper(helper); err != nil {
multiErr = append(multiErr, err)
}
}
}

if multiErr != nil {
return multiErr
return multierr.Format("errors removing credentials\n\t* ", "\n\t*", "\n", multiErr)
}
if !isLoggedIn {
return ErrNotLoggedIn

@ -439,7 +443,7 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
return err
}

var multiErr error
var multiErr []error
for _, helper := range helpers {
var err error
switch helper {

@ -479,13 +483,16 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
}
if err != nil {
logrus.Debugf("Error removing credentials from credential helper %s: %v", helper, err)
multiErr = multierror.Append(multiErr, err)
multiErr = append(multiErr, err)
continue
}
logrus.Debugf("All credentials removed from credential helper %s", helper)
}

return multiErr
if multiErr != nil {
return multierr.Format("errors removing all credentials:\n\t* ", "\n\t* ", "\n", multiErr)
}
return nil
}

// prepareForEdit processes sys and key (if keyRelevant) to return:

@ -570,9 +577,9 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (authPath, bool,
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
if runtimeDir != "" {
// This function does not in general need to separately check that the returned path exists; that’s racy, and callers will fail accessing the file anyway.
// We are checking for os.IsNotExist here only to give the user better guidance what to do in this special case.
_, err := os.Stat(runtimeDir)
if os.IsNotExist(err) {
// We are checking for fs.ErrNotExist here only to give the user better guidance what to do in this special case.
err := fileutils.Exists(runtimeDir)
if errors.Is(err, fs.ErrNotExist) {
// This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory
// or made a typo while setting the environment variable,
// so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside.
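Several hunks above drop hashicorp/go-multierror in favour of collecting a plain []error and formatting it with the module's internal multierr.Format helper. That internal package cannot be imported from outside containers/image, so the following is a rough, self-contained sketch of the same pattern; the formatErrors helper is a hypothetical stand-in written for illustration:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// formatErrors mimics a multierr.Format(prefix, separator, suffix, errs)-style
// helper: it joins the error messages into one error, or returns nil when the
// slice is empty.
func formatErrors(prefix, sep, suffix string, errs []error) error {
	if len(errs) == 0 {
		return nil
	}
	msgs := make([]string, 0, len(errs))
	for _, err := range errs {
		msgs = append(msgs, err.Error())
	}
	return errors.New(prefix + strings.Join(msgs, sep) + suffix)
}

func main() {
	var multiErr []error // accumulate instead of returning on the first failure
	for _, helper := range []string{"helper-a", "helper-b"} {
		multiErr = append(multiErr, fmt.Errorf("looking up credentials in %s: not found", helper))
	}
	if err := formatErrors("errors looking up credentials:\n\t* ", "\n\t* ", "\n", multiErr); err != nil {
		fmt.Println(err)
	}
}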
9 vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go generated vendored

@ -2,6 +2,7 @@ package sysregistriesv2

import (
"fmt"
"maps"
"os"
"path/filepath"
"reflect"

@ -9,12 +10,12 @@ import (

"github.com/BurntSushi/toml"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/multierr"
"github.com/containers/image/v5/internal/rootless"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/lockfile"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
)

// defaultShortNameMode is the default mode of registries.conf files if the

@ -297,11 +298,7 @@ func newShortNameAliasCache(path string, conf *shortNameAliasConf) (*shortNameAl
}
}
if len(errs) > 0 {
err := errs[0]
for i := 1; i < len(errs); i++ {
err = fmt.Errorf("%v\n: %w", errs[i], err)
}
return nil, err
return nil, multierr.Format("", "\n", "", errs)
}
return &res, nil
}
3 vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go generated vendored

@ -13,6 +13,7 @@ import (
"github.com/BurntSushi/toml"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/regexp"
"github.com/sirupsen/logrus"

@ -564,7 +565,7 @@ func newConfigWrapperWithHomeDir(ctx *types.SystemContext, homeDir string) confi
// decide configPath using per-user path or system file
if ctx != nil && ctx.SystemRegistriesConfPath != "" {
wrapper.configPath = ctx.SystemRegistriesConfPath
} else if _, err := os.Stat(userRegistriesFilePath); err == nil {
} else if err := fileutils.Exists(userRegistriesFilePath); err == nil {
// per-user registries.conf exists, not reading system dir
// return config dirs from ctx or per-user one
wrapper.configPath = userRegistriesFilePath
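The registries.conf and auth-file lookups above now call fileutils.Exists instead of discarding the os.FileInfo returned by os.Stat. A rough sketch of what such an existence helper can look like (an illustrative stand-in, not the containers/storage implementation):

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// exists mimics a fileutils.Exists-style helper: nil means the path exists,
// otherwise the underlying error (for example fs.ErrNotExist) is returned.
func exists(path string) error {
	_, err := os.Stat(path)
	return err
}

func main() {
	if err := exists("/nonexistent/registries.conf"); errors.Is(err, fs.ErrNotExist) {
		fmt.Println("per-user config not present, falling back to the system config")
	}
}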
10 vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go generated vendored

@ -8,11 +8,11 @@ import (
"net/http"
"os"
"path/filepath"
"slices"
"strings"
"time"

"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)

// SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc

@ -55,9 +55,9 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
}
tlsc.RootCAs.AppendCertsFromPEM(data)
}
if strings.HasSuffix(f.Name(), ".cert") {
if base, ok := strings.CutSuffix(f.Name(), ".cert"); ok {
certName := f.Name()
keyName := certName[:len(certName)-5] + ".key"
keyName := base + ".key"
logrus.Debugf(" cert: %s", fullPath)
if !hasFile(fs, keyName) {
return fmt.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)

@ -68,9 +68,9 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
}
tlsc.Certificates = append(slices.Clone(tlsc.Certificates), cert)
}
if strings.HasSuffix(f.Name(), ".key") {
if base, ok := strings.CutSuffix(f.Name(), ".key"); ok {
keyName := f.Name()
certName := keyName[:len(keyName)-4] + ".cert"
certName := base + ".cert"
logrus.Debugf(" key: %s", fullPath)
if !hasFile(fs, certName) {
return fmt.Errorf("missing client certificate %s for key %s", certName, keyName)
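The tlsclientconfig.go hunks replace strings.HasSuffix plus manual index slicing with strings.CutSuffix (Go 1.20+), which returns the trimmed name and a bool in a single call. A small self-contained example of that pattern:

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, name := range []string{"client.cert", "client.key", "ca.crt"} {
		// CutSuffix returns the name without the suffix and whether it was present,
		// so there is no need for name[:len(name)-5]-style arithmetic.
		if base, ok := strings.CutSuffix(name, ".cert"); ok {
			fmt.Printf("certificate %s pairs with key %s\n", name, base+".key")
		}
	}
}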
2 vendor/github.com/containers/image/v5/sif/src.go generated vendored

@ -111,7 +111,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifRefere
History: []imgspecv1.History{
{
Created: &created,
CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Hex(), os.PathSeparator),
CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Encoded(), os.PathSeparator),
Comment: "imported from SIF, uuid: " + sifImg.ID(),
},
{
6 vendor/github.com/containers/image/v5/signature/docker.go generated vendored

@ -5,13 +5,13 @@ package signature
import (
"errors"
"fmt"
"slices"
"strings"

"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/signature/internal"
"github.com/opencontainers/go-digest"
"golang.org/x/exp/slices"
)

// SignOptions includes optional parameters for signing container images.

@ -76,10 +76,10 @@ func VerifyImageManifestSignatureUsingKeyIdentityList(unverifiedSignature, unver
validateSignedDockerReference: func(signedDockerReference string) error {
signedRef, err := reference.ParseNormalizedNamed(signedDockerReference)
if err != nil {
return internal.NewInvalidSignatureError(fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference))
return internal.NewInvalidSignatureError(fmt.Sprintf("Invalid docker reference %q in signature", signedDockerReference))
}
if signedRef.String() != expectedRef.String() {
return internal.NewInvalidSignatureError(fmt.Sprintf("Docker reference %s does not match %s",
return internal.NewInvalidSignatureError(fmt.Sprintf("Docker reference %q does not match %q",
signedDockerReference, expectedDockerReference))
}
return nil
4 vendor/github.com/containers/image/v5/signature/fulcio_cert.go generated vendored

@ -10,12 +10,12 @@ import (
"encoding/asn1"
"errors"
"fmt"
"slices"
"time"

"github.com/containers/image/v5/signature/internal"
"github.com/sigstore/fulcio/pkg/certificate"
"github.com/sigstore/sigstore/pkg/cryptoutils"
"golang.org/x/exp/slices"
)

// fulcioTrustRoot contains policy allow validating Fulcio-issued certificates.

@ -178,7 +178,7 @@ func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time,

// == Validate the OIDC subject
if !slices.Contains(untrustedCertificate.EmailAddresses, f.subjectEmail) {
return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %s not found (got %#v)",
return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %q not found (got %q)",
f.subjectEmail,
untrustedCertificate.EmailAddresses))
}
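docker.go and fulcio_cert.go above drop golang.org/x/exp/slices in favour of the standard library slices package (Go 1.21+); call sites such as slices.Contains stay the same. A minimal example:

package main

import (
	"fmt"
	"slices" // standard library since Go 1.21, replacing golang.org/x/exp/slices
)

func main() {
	emails := []string{"dev@example.com", "release@example.com"}
	// slices.Contains reports whether the required e-mail is present,
	// mirroring the certificate-subject check in fulcio_cert.go.
	fmt.Println(slices.Contains(emails, "release@example.com")) // true
	fmt.Println(slices.Contains(emails, "other@example.com"))   // false
}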
10 vendor/github.com/containers/image/v5/signature/internal/json.go generated vendored

@ -31,7 +31,7 @@ func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) er
return JSONFormatError(err.Error())
}
if t != json.Delim('{') {
return JSONFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t))
return JSONFormatError(fmt.Sprintf("JSON object expected, got %#v", t))
}
for {
t, err := dec.Token()

@ -45,16 +45,16 @@ func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) er
key, ok := t.(string)
if !ok {
// Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
return JSONFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
return JSONFormatError(fmt.Sprintf("Key string literal expected, got %#v", t))
}
if seenKeys.Contains(key) {
return JSONFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
return JSONFormatError(fmt.Sprintf("Duplicate key %q", key))
}
seenKeys.Add(key)

valuePtr := fieldResolver(key)
if valuePtr == nil {
return JSONFormatError(fmt.Sprintf("Unknown key \"%s\"", key))
return JSONFormatError(fmt.Sprintf("Unknown key %q", key))
}
// This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value.
if err := dec.Decode(valuePtr); err != nil {

@ -83,7 +83,7 @@ func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]
}
for key := range exactFields {
if !seenKeys.Contains(key) {
return JSONFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key))
return JSONFormatError(fmt.Sprintf(`Key %q missing in a JSON object`, key))
}
}
return nil
2 vendor/github.com/containers/image/v5/signature/internal/rekor_set.go generated vendored

@ -220,7 +220,7 @@ func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unver
return time.Time{}, NewInvalidSignatureError(`Missing "data.hash.algorithm" field in hashedrekord`)
}
// FIXME: Rekor 1.3.5 has added SHA-386 and SHA-512 as recognized values.
// Eventually we should support them as well; doing that cleanly would require updqating to Rekor 1.3.5, which requires Go 1.21.
// Eventually we should support them as well.
// Short-term, Cosign (as of 2024-02 and Cosign 2.2.3) only produces and accepts SHA-256, so right now that’s not a compatibility
// issue.
if *hashedRekordV001.Data.Hash.Algorithm != models.HashedrekordV001SchemaDataHashAlgorithmSha256 {
6 vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go generated vendored

@ -150,7 +150,11 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
}); err != nil {
return err
}
s.untrustedDockerManifestDigest = digest.Digest(digestString)
digestValue, err := digest.Parse(digestString)
if err != nil {
return NewInvalidSignatureError(fmt.Sprintf(`invalid docker-manifest-digest value %q: %v`, digestString, err))
}
s.untrustedDockerManifestDigest = digestValue

return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
"docker-reference": &s.untrustedDockerReference,
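The sif and sigstore hunks above switch from the deprecated Digest.Hex() to Digest.Encoded() and validate untrusted digest strings with digest.Parse instead of a bare type conversion. A short example with the opencontainers/go-digest package:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Parse validates the algorithm and hex length; a bare digest.Digest(s)
	// conversion would accept any string.
	d, err := digest.Parse("sha256:9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08")
	if err != nil {
		fmt.Println("invalid digest:", err)
		return
	}
	fmt.Println(d.Algorithm()) // sha256
	fmt.Println(d.Encoded())   // the hex part, preferred over the deprecated Hex()
}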
37 vendor/github.com/containers/image/v5/signature/policy_config.go generated vendored

@ -24,6 +24,7 @@ import (
"github.com/containers/image/v5/signature/internal"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/regexp"
)

@ -65,7 +66,7 @@ func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string) stri
return sys.SignaturePolicyPath
}
userPolicyFilePath := filepath.Join(homeDir, userPolicyFile)
if _, err := os.Stat(userPolicyFilePath); err == nil {
if err := fileutils.Exists(userPolicyFilePath); err == nil {
return userPolicyFilePath
}
if sys != nil && sys.RootForImplicitAbsolutePaths != "" {

@ -246,7 +247,7 @@ func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) {
case prTypeSigstoreSigned:
res = &prSigstoreSigned{}
default:
return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type))
return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type %q", typeField.Type))
}
if err := json.Unmarshal(data, &res); err != nil {
return nil, err

@ -278,7 +279,7 @@ func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error {
}

if tmp.Type != prTypeInsecureAcceptAnything {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
*pr = *newPRInsecureAcceptAnything()
return nil

@ -308,7 +309,7 @@ func (pr *prReject) UnmarshalJSON(data []byte) error {
}

if tmp.Type != prTypeReject {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
*pr = *newPRReject()
return nil

@ -317,7 +318,7 @@ func (pr *prReject) UnmarshalJSON(data []byte) error {
// newPRSignedBy returns a new prSignedBy if parameters are valid.
func newPRSignedBy(keyType sbKeyType, keyPath string, keyPaths []string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
if !keyType.IsValid() {
return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType))
return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType %q", keyType))
}
keySources := 0
if keyPath != "" {

@ -409,7 +410,7 @@ func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
}

if tmp.Type != prTypeSignedBy {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
if signedIdentity == nil {
tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()

@ -465,7 +466,7 @@ func (kt *sbKeyType) UnmarshalJSON(data []byte) error {
return err
}
if !sbKeyType(s).IsValid() {
return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value \"%s\"", s))
return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value %q", s))
}
*kt = sbKeyType(s)
return nil

@ -503,7 +504,7 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
}

if tmp.Type != prTypeSignedBaseLayer {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity)
if err != nil {

@ -539,7 +540,7 @@ func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error)
case prmTypeRemapIdentity:
res = &prmRemapIdentity{}
default:
return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type \"%s\"", typeField.Type))
return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type %q", typeField.Type))
}
if err := json.Unmarshal(data, &res); err != nil {
return nil, err

@ -571,7 +572,7 @@ func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
}

if tmp.Type != prmTypeMatchExact {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
*prm = *newPRMMatchExact()
return nil

@ -601,7 +602,7 @@ func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
}

if tmp.Type != prmTypeMatchRepoDigestOrExact {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
*prm = *newPRMMatchRepoDigestOrExact()
return nil

@ -631,7 +632,7 @@ func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
}

if tmp.Type != prmTypeMatchRepository {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
*prm = *newPRMMatchRepository()
return nil

@ -641,10 +642,10 @@ func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
func newPRMExactReference(dockerReference string) (*prmExactReference, error) {
ref, err := reference.ParseNormalizedNamed(dockerReference)
if err != nil {
return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %s: %s", dockerReference, err.Error()))
return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %q: %s", dockerReference, err.Error()))
}
if reference.IsNameOnly(ref) {
return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %s contains neither a tag nor digest", dockerReference))
return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %q contains neither a tag nor digest", dockerReference))
}
return &prmExactReference{
prmCommon: prmCommon{Type: prmTypeExactReference},

@ -672,7 +673,7 @@ func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
}

if tmp.Type != prmTypeExactReference {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}

res, err := newPRMExactReference(tmp.DockerReference)

@ -686,7 +687,7 @@ func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
// newPRMExactRepository is NewPRMExactRepository, except it returns the private type.
func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil {
return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error()))
return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %q: %s", dockerRepository, err.Error()))
}
return &prmExactRepository{
prmCommon: prmCommon{Type: prmTypeExactRepository},

@ -714,7 +715,7 @@ func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
}

if tmp.Type != prmTypeExactRepository {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}

res, err := newPRMExactRepository(tmp.DockerRepository)

@ -787,7 +788,7 @@ func (prm *prmRemapIdentity) UnmarshalJSON(data []byte) error {
}

if tmp.Type != prmTypeRemapIdentity {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}

res, err := newPRMRemapIdentity(tmp.Prefix, tmp.SignedPrefix)
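Most of the policy_config.go and signature hunks change error messages from manually quoted \"%s\" to the %q verb, which quotes and escapes the value for you. A tiny illustration:

package main

import "fmt"

func main() {
	value := `tag"with"quotes`
	fmt.Printf("Unexpected policy requirement type \"%s\"\n", value) // embedded quotes are not escaped
	fmt.Printf("Unexpected policy requirement type %q\n", value)     // %q quotes and escapes them
}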
2 vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go generated vendored

@ -176,7 +176,7 @@ func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
}

if tmp.Type != prTypeSigstoreSigned {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
if signedIdentity == nil {
tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()
10 vendor/github.com/containers/image/v5/signature/policy_eval.go generated vendored

@ -94,10 +94,10 @@ const (
pcDestroyed policyContextState = "Destroyed"
)

// changeContextState changes pc.state, or fails if the state is unexpected
// changeState changes pc.state, or fails if the state is unexpected
func (pc *PolicyContext) changeState(expected, new policyContextState) error {
if pc.state != expected {
return fmt.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
return fmt.Errorf(`Invalid PolicyContext state, expected %q, found %q`, expected, pc.state)
}
pc.state = new
return nil

@ -140,21 +140,21 @@ func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) Polic
// Look for a full match.
identity := ref.PolicyConfigurationIdentity()
if req, ok := transportScopes[identity]; ok {
logrus.Debugf(` Using transport "%s" policy section %s`, transportName, identity)
logrus.Debugf(` Using transport %q policy section %q`, transportName, identity)
return req
}

// Look for a match of the possible parent namespaces.
for _, name := range ref.PolicyConfigurationNamespaces() {
if req, ok := transportScopes[name]; ok {
logrus.Debugf(` Using transport "%s" specific policy section %s`, transportName, name)
logrus.Debugf(` Using transport %q specific policy section %q`, transportName, name)
return req
}
}

// Look for a default match for the transport.
if req, ok := transportScopes[""]; ok {
logrus.Debugf(` Using transport "%s" policy section ""`, transportName)
logrus.Debugf(` Using transport %q policy section ""`, transportName)
return req
}
}
Some files were not shown because too many files have changed in this diff.