Update osbuild/images to v0.40.0

In addition, simplify the SPEC file to not have to update the minimum
required osbuild version gazillion times, but just once.

Update the minimum required osbuild version to v109, due to changes in
grub2 stages required by the new osbuild/images version.

Update osbuild SHA in Schutzfile to v109.

Signed-off-by: Tomáš Hozza <thozza@redhat.com>
This commit is contained in:
Tomáš Hozza 2024-02-22 21:55:33 +01:00 committed by Tomáš Hozza
parent c138ea6939
commit 2f087f1a6c
190 changed files with 57031 additions and 52810 deletions

View file

@ -1,5 +1,15 @@
# Release History
## 1.9.2 (2024-02-06)
### Bugs Fixed
* `runtime.MarshalAsByteArray` and `runtime.MarshalAsJSON` will preserve the preexisting value of the `Content-Type` header.
### Other Changes
* Update to latest version of `internal`.
## 1.9.1 (2023-12-11)
### Bugs Fixed

View file

@ -125,46 +125,11 @@ func (req *Request) OperationValue(value interface{}) bool {
// SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length
// accordingly. If the ReadSeekCloser is nil or empty, Content-Length won't be set. If contentType is "",
// Content-Type won't be set.
// Content-Type won't be set, and if it was set, will be deleted.
// Use streaming.NopCloser to turn an io.ReadSeeker into an io.ReadSeekCloser.
func (req *Request) SetBody(body io.ReadSeekCloser, contentType string) error {
var err error
var size int64
if body != nil {
size, err = body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size
if err != nil {
return err
}
}
if size == 0 {
// treat an empty stream the same as a nil one: assign req a nil body
body = nil
// RFC 9110 specifies a client shouldn't set Content-Length on a request containing no content
// (Del is a no-op when the header has no value)
req.req.Header.Del(shared.HeaderContentLength)
} else {
_, err = body.Seek(0, io.SeekStart)
if err != nil {
return err
}
req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10))
req.Raw().GetBody = func() (io.ReadCloser, error) {
_, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream
return body, err
}
}
// keep a copy of the body argument. this is to handle cases
// where req.Body is replaced, e.g. httputil.DumpRequest and friends.
req.body = body
req.req.Body = body
req.req.ContentLength = size
if contentType == "" {
// Del is a no-op when the header has no value
req.req.Header.Del(shared.HeaderContentType)
} else {
req.req.Header.Set(shared.HeaderContentType, contentType)
}
return nil
// clobber the existing Content-Type to preserve behavior
return SetBody(req, body, contentType, true)
}
// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation.
@ -211,3 +176,48 @@ type PolicyFunc func(*Request) (*http.Response, error)
// Do implements the Policy interface on PolicyFunc by invoking the underlying function with req.
func (pf PolicyFunc) Do(req *Request) (*http.Response, error) {
return pf(req)
}
// SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length accordingly.
// - req is the request to modify
// - body is the request body; if nil or empty, Content-Length won't be set
// - contentType is the value for the Content-Type header; if empty, Content-Type will be deleted
// - clobberContentType when true, will overwrite the existing value of Content-Type with contentType
func SetBody(req *Request, body io.ReadSeekCloser, contentType string, clobberContentType bool) error {
var err error
var size int64
if body != nil {
size, err = body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size
if err != nil {
return err
}
}
if size == 0 {
// treat an empty stream the same as a nil one: assign req a nil body
body = nil
// RFC 9110 specifies a client shouldn't set Content-Length on a request containing no content
// (Del is a no-op when the header has no value)
req.req.Header.Del(shared.HeaderContentLength)
} else {
// rewind to the start so the body can actually be transmitted
_, err = body.Seek(0, io.SeekStart)
if err != nil {
return err
}
req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10))
// GetBody lets net/http re-create the body when a request must be replayed (e.g. redirects)
req.Raw().GetBody = func() (io.ReadCloser, error) {
_, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream
return body, err
}
}
// keep a copy of the body argument. this is to handle cases
// where req.Body is replaced, e.g. httputil.DumpRequest and friends.
req.body = body
req.req.Body = body
req.req.ContentLength = size
if contentType == "" {
// Del is a no-op when the header has no value
req.req.Header.Del(shared.HeaderContentType)
} else if req.req.Header.Get(shared.HeaderContentType) == "" || clobberContentType {
req.req.Header.Set(shared.HeaderContentType, contentType) // set only when unset, or when the caller asked to overwrite
}
return nil
}

View file

@ -40,5 +40,5 @@ const (
Module = "azcore"
// Version is the semantic version (see http://semver.org) of this module.
Version = "v1.9.1"
Version = "v1.9.2"
)

View file

@ -97,7 +97,8 @@ func EncodeByteArray(v []byte, format Base64Encoding) string {
func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) error {
// send as a JSON string
encode := fmt.Sprintf("\"%s\"", EncodeByteArray(v, format))
return req.SetBody(exported.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON)
// tsp generated code can set Content-Type so we must prefer that
return exported.SetBody(req, exported.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON, false)
}
// MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody.
@ -106,7 +107,8 @@ func MarshalAsJSON(req *policy.Request, v interface{}) error {
if err != nil {
return fmt.Errorf("error marshalling type %T: %s", v, err)
}
return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON)
// tsp generated code can set Content-Type so we must prefer that
return exported.SetBody(req, exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON, false)
}
// MarshalAsXML calls xml.Marshal() to get the XML encoding of v then calls SetBody.

View file

@ -39,6 +39,11 @@ type PayloadOptions struct {
// Subsequent reads will access the cached value.
// Exported as runtime.Payload() WITHOUT the opts parameter.
func Payload(resp *http.Response, opts *PayloadOptions) ([]byte, error) {
if resp.Body == nil {
// this shouldn't happen in real-world scenarios as a
// response with no body should set it to http.NoBody
return nil, nil
}
modifyBytes := func(b []byte) []byte { return b }
if opts != nil && opts.BytesModifier != nil {
modifyBytes = opts.BytesModifier

View file

@ -1,5 +1,26 @@
# Release History
## 1.3.0 (2024-02-12)
### Bugs Fixed
* Fix concurrency issue while Downloading File. Fixes [#22156](https://github.com/Azure/azure-sdk-for-go/issues/22156).
* Fix panic when nil options bag is passed to NewGetPageRangesPager. Fixes [#22356](https://github.com/Azure/azure-sdk-for-go/issues/22356).
* Fix file offset update after Download file. Fixes [#22297](https://github.com/Azure/azure-sdk-for-go/issues/22297).
### Other Changes
* Updated the version of `azcore` to `1.9.2`
## 1.3.0-beta.1 (2024-01-09)
### Features Added
* Updated service version to `2023-11-03`.
* Added support for Audience when OAuth is used.
### Bugs Fixed
* Block `SharedKeyCredential` authentication mode for non TLS protected endpoints. Fixes [#21841](https://github.com/Azure/azure-sdk-for-go/issues/21841).
## 1.2.1 (2023-12-13)
### Features Added

View file

@ -1,6 +1,6 @@
# Azure Blob Storage module for Go
> Service Version: 2023-08-03
> Service Version: 2023-11-03
Azure Blob Storage is Microsoft's object storage solution for the cloud. Blob
Storage is optimized for storing massive amounts of unstructured data - data that does not adhere to a particular data model or

View file

@ -35,11 +35,12 @@ type Client base.CompositeClient[generated.BlobClient, generated.AppendBlobClien
// - cred - an Azure AD credential, typically obtained via the azidentity module
// - options - client options; pass nil to accept the default values
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
authPolicy := shared.NewStorageChallengePolicy(cred)
audience := base.GetAudience((*base.ClientOptions)(options))
authPolicy := shared.NewStorageChallengePolicy(cred, audience)
conOptions := shared.GetClientOptions(options)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
azClient, err := azcore.NewClient(shared.AppendBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
@ -54,7 +55,7 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio
func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
conOptions := shared.GetClientOptions(options)
azClient, err := azcore.NewClient(shared.AppendBlobClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
@ -71,7 +72,7 @@ func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCreden
conOptions := shared.GetClientOptions(options)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
azClient, err := azcore.NewClient(shared.AppendBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
if err != nil {
return nil, err
}

View file

@ -2,5 +2,5 @@
"AssetsRepo": "Azure/azure-sdk-assets",
"AssetsRepoPrefixPath": "go",
"TagPrefix": "go/storage/azblob",
"Tag": "go/storage/azblob_0040e8284c"
"Tag": "go/storage/azblob_9f40a5a13d"
}

View file

@ -9,6 +9,7 @@ package blob
import (
"context"
"io"
"math"
"os"
"sync"
"time"
@ -36,15 +37,16 @@ type Client base.Client[generated.BlobClient]
// - cred - an Azure AD credential, typically obtained via the azidentity module
// - options - client options; pass nil to accept the default values
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
authPolicy := shared.NewStorageChallengePolicy(cred)
audience := base.GetAudience((*base.ClientOptions)(options))
authPolicy := shared.NewStorageChallengePolicy(cred, audience)
conOptions := shared.GetClientOptions(options)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
azClient, err := azcore.NewClient(shared.BlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewBlobClient(blobURL, azClient, &cred)), nil
return (*Client)(base.NewBlobClient(blobURL, azClient, &cred, (*base.ClientOptions)(conOptions))), nil
}
// NewClientWithNoCredential creates an instance of Client with the specified values.
@ -54,11 +56,11 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio
func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
conOptions := shared.GetClientOptions(options)
azClient, err := azcore.NewClient(shared.BlobClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewBlobClient(blobURL, azClient, nil)), nil
return (*Client)(base.NewBlobClient(blobURL, azClient, nil, (*base.ClientOptions)(conOptions))), nil
}
// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
@ -70,11 +72,11 @@ func NewClientWithSharedKeyCredential(blobURL string, cred *SharedKeyCredential,
conOptions := shared.GetClientOptions(options)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
azClient, err := azcore.NewClient(shared.BlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewBlobClient(blobURL, azClient, cred)), nil
return (*Client)(base.NewBlobClient(blobURL, azClient, cred, (*base.ClientOptions)(conOptions))), nil
}
// NewClientFromConnectionString creates an instance of Client with the specified values.
@ -112,6 +114,10 @@ func (b *Client) credential() any {
return base.Credential((*base.Client[generated.BlobClient])(b))
}
func (b *Client) getClientOptions() *base.ClientOptions {
return base.GetClientOptions((*base.Client[generated.BlobClient])(b))
}
// URL returns the URL endpoint used by the Client object.
func (b *Client) URL() string {
return b.generated().Endpoint()
@ -126,7 +132,7 @@ func (b *Client) WithSnapshot(snapshot string) (*Client, error) {
}
p.Snapshot = snapshot
return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential())), nil
return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential(), b.getClientOptions())), nil
}
// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id.
@ -138,7 +144,7 @@ func (b *Client) WithVersionID(versionID string) (*Client, error) {
}
p.VersionID = versionID
return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential())), nil
return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential(), b.getClientOptions())), nil
}
// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
@ -464,23 +470,18 @@ func (b *Client) downloadFile(ctx context.Context, writer io.Writer, o downloadO
buffers := shared.NewMMBPool(int(o.Concurrency), o.BlockSize)
defer buffers.Free()
acquireBuffer := func() ([]byte, error) {
select {
case b := <-buffers.Acquire():
// got a buffer
return b, nil
default:
// no buffer available; allocate a new buffer if possible
if _, err := buffers.Grow(); err != nil {
return nil, err
}
// either grab the newly allocated buffer or wait for one to become available
return <-buffers.Acquire(), nil
numChunks := uint16((count-1)/o.BlockSize + 1)
for bufferCounter := float64(0); bufferCounter < math.Min(float64(numChunks), float64(o.Concurrency)); bufferCounter++ {
if _, err := buffers.Grow(); err != nil {
return 0, err
}
}
numChunks := uint16((count-1)/o.BlockSize) + 1
acquireBuffer := func() ([]byte, error) {
return <-buffers.Acquire(), nil
}
blocks := make([]chan []byte, numChunks)
for b := range blocks {
blocks[b] = make(chan []byte)
@ -595,6 +596,11 @@ func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFil
}
do := (*downloadOptions)(o)
filePointer, err := file.Seek(0, io.SeekCurrent)
if err != nil {
return 0, err
}
// 1. Calculate the size of the destination file
var size int64
@ -623,7 +629,15 @@ func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFil
}
if size > 0 {
return b.downloadFile(ctx, file, *do)
writeSize, err := b.downloadFile(ctx, file, *do)
if err != nil {
return 0, err
}
_, err = file.Seek(filePointer, io.SeekStart)
if err != nil {
return 0, err
}
return writeSize, nil
} else { // if the blob's size is 0, there is no need in downloading it
return 0, nil
}

View file

@ -45,11 +45,12 @@ type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient
// - cred - an Azure AD credential, typically obtained via the azidentity module
// - options - client options; pass nil to accept the default values
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
authPolicy := shared.NewStorageChallengePolicy(cred)
audience := base.GetAudience((*base.ClientOptions)(options))
authPolicy := shared.NewStorageChallengePolicy(cred, audience)
conOptions := shared.GetClientOptions(options)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
azClient, err := azcore.NewClient(shared.BlockBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
@ -63,7 +64,7 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio
func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
conOptions := shared.GetClientOptions(options)
azClient, err := azcore.NewClient(shared.BlockBlobClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
@ -80,7 +81,7 @@ func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCreden
conOptions := shared.GetClientOptions(options)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
azClient, err := azcore.NewClient(shared.BlockBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
if err != nil {
return nil, err
}

View file

@ -31,11 +31,7 @@ type Client struct {
// - cred - an Azure AD credential, typically obtained via the azidentity module
// - options - client options; pass nil to accept the default values
func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
var clientOptions *service.ClientOptions
if options != nil {
clientOptions = &service.ClientOptions{ClientOptions: options.ClientOptions}
}
svcClient, err := service.NewClient(serviceURL, cred, clientOptions)
svcClient, err := service.NewClient(serviceURL, cred, (*service.ClientOptions)(options))
if err != nil {
return nil, err
}
@ -50,11 +46,7 @@ func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOp
// - serviceURL - the URL of the storage account e.g. https://<account>.blob.core.windows.net/?<sas token>
// - options - client options; pass nil to accept the default values
func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) {
var clientOptions *service.ClientOptions
if options != nil {
clientOptions = &service.ClientOptions{ClientOptions: options.ClientOptions}
}
svcClient, err := service.NewClientWithNoCredential(serviceURL, clientOptions)
svcClient, err := service.NewClientWithNoCredential(serviceURL, (*service.ClientOptions)(options))
if err != nil {
return nil, err
}
@ -83,15 +75,12 @@ func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredenti
// - connectionString - a connection string for the desired storage account
// - options - client options; pass nil to accept the default values
func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) {
if options == nil {
options = &ClientOptions{}
}
containerClient, err := service.NewClientFromConnectionString(connectionString, (*service.ClientOptions)(options))
svcClient, err := service.NewClientFromConnectionString(connectionString, (*service.ClientOptions)(options))
if err != nil {
return nil, err
}
return &Client{
svc: containerClient,
svc: svcClient,
}, nil
}

View file

@ -42,15 +42,16 @@ type Client base.Client[generated.ContainerClient]
// - cred - an Azure AD credential, typically obtained via the azidentity module
// - options - client options; pass nil to accept the default values
func NewClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
authPolicy := shared.NewStorageChallengePolicy(cred)
audience := base.GetAudience((*base.ClientOptions)(options))
authPolicy := shared.NewStorageChallengePolicy(cred, audience)
conOptions := shared.GetClientOptions(options)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
azClient, err := azcore.NewClient(shared.ContainerClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewContainerClient(containerURL, azClient, &cred)), nil
return (*Client)(base.NewContainerClient(containerURL, azClient, &cred, (*base.ClientOptions)(conOptions))), nil
}
// NewClientWithNoCredential creates an instance of Client with the specified values.
@ -60,11 +61,11 @@ func NewClient(containerURL string, cred azcore.TokenCredential, options *Client
func NewClientWithNoCredential(containerURL string, options *ClientOptions) (*Client, error) {
conOptions := shared.GetClientOptions(options)
azClient, err := azcore.NewClient(shared.ContainerClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewContainerClient(containerURL, azClient, nil)), nil
return (*Client)(base.NewContainerClient(containerURL, azClient, nil, (*base.ClientOptions)(conOptions))), nil
}
// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
@ -76,11 +77,11 @@ func NewClientWithSharedKeyCredential(containerURL string, cred *SharedKeyCreden
conOptions := shared.GetClientOptions(options)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
azClient, err := azcore.NewClient(shared.ContainerClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewContainerClient(containerURL, azClient, cred)), nil
return (*Client)(base.NewContainerClient(containerURL, azClient, cred, (*base.ClientOptions)(conOptions))), nil
}
// NewClientFromConnectionString creates an instance of Client with the specified values.
@ -122,6 +123,10 @@ func getGeneratedBlobClient(b *blob.Client) *generated.BlobClient {
return base.InnerClient((*base.Client[generated.BlobClient])(b))
}
func (c *Client) getClientOptions() *base.ClientOptions {
return base.GetClientOptions((*base.Client[generated.ContainerClient])(c))
}
// URL returns the URL endpoint used by the Client object.
func (c *Client) URL() string {
return c.generated().Endpoint()
@ -133,7 +138,7 @@ func (c *Client) URL() string {
func (c *Client) NewBlobClient(blobName string) *blob.Client {
blobName = url.PathEscape(blobName)
blobURL := runtime.JoinPaths(c.URL(), blobName)
return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().InternalClient().WithClientName(shared.BlobClient), c.credential()))
return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.credential(), c.getClientOptions()))
}
// NewAppendBlobClient creates a new appendblob.Client object by concatenating blobName to the end of
@ -142,7 +147,7 @@ func (c *Client) NewBlobClient(blobName string) *blob.Client {
func (c *Client) NewAppendBlobClient(blobName string) *appendblob.Client {
blobName = url.PathEscape(blobName)
blobURL := runtime.JoinPaths(c.URL(), blobName)
return (*appendblob.Client)(base.NewAppendBlobClient(blobURL, c.generated().InternalClient().WithClientName(shared.AppendBlobClient), c.sharedKey()))
return (*appendblob.Client)(base.NewAppendBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.sharedKey()))
}
// NewBlockBlobClient creates a new blockblob.Client object by concatenating blobName to the end of
@ -151,7 +156,7 @@ func (c *Client) NewAppendBlobClient(blobName string) *appendblob.Client {
func (c *Client) NewBlockBlobClient(blobName string) *blockblob.Client {
blobName = url.PathEscape(blobName)
blobURL := runtime.JoinPaths(c.URL(), blobName)
return (*blockblob.Client)(base.NewBlockBlobClient(blobURL, c.generated().InternalClient().WithClientName(shared.BlockBlobClient), c.sharedKey()))
return (*blockblob.Client)(base.NewBlockBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.sharedKey()))
}
// NewPageBlobClient creates a new pageblob.Client object by concatenating blobName to the end of
@ -160,7 +165,7 @@ func (c *Client) NewBlockBlobClient(blobName string) *blockblob.Client {
func (c *Client) NewPageBlobClient(blobName string) *pageblob.Client {
blobName = url.PathEscape(blobName)
blobURL := runtime.JoinPaths(c.URL(), blobName)
return (*pageblob.Client)(base.NewPageBlobClient(blobURL, c.generated().InternalClient().WithClientName(shared.PageBlobClient), c.sharedKey()))
return (*pageblob.Client)(base.NewPageBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.sharedKey()))
}
// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails.
@ -366,7 +371,7 @@ func (c *Client) NewBatchBuilder() (*BatchBuilder, error) {
switch cred := c.credential().(type) {
case *azcore.TokenCredential:
authPolicy = shared.NewStorageChallengePolicy(*cred)
authPolicy = shared.NewStorageChallengePolicy(*cred, base.GetAudience(c.getClientOptions()))
case *SharedKeyCredential:
authPolicy = exported.NewSharedKeyCredPolicy(cred)
case nil:

View file

@ -10,16 +10,24 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
"strings"
)
// ClientOptions contains the optional parameters when creating a Client.
type ClientOptions struct {
azcore.ClientOptions
// Audience to use when requesting tokens for Azure Active Directory authentication.
// Only has an effect when credential is of type TokenCredential. The value could be
// https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
Audience string
}
type Client[T any] struct {
inner *T
credential any
options *ClientOptions
}
func InnerClient[T any](client *Client[T]) *T {
@ -39,28 +47,43 @@ func Credential[T any](client *Client[T]) any {
return client.credential
}
// GetClientOptions returns the ClientOptions the client was constructed with (may be nil).
func GetClientOptions[T any](client *Client[T]) *ClientOptions {
return client.options
}
// GetAudience returns the token scope derived from the client options.
// When clOpts is nil or its Audience is empty/whitespace, the default
// storage token scope is returned; otherwise any trailing slashes are
// stripped from the audience and "/.default" is appended.
func GetAudience(clOpts *ClientOptions) string {
// prefer the explicit string comparison over len(...)==0 and
// return early instead of an else branch after a terminating return
if clOpts == nil || strings.TrimSpace(clOpts.Audience) == "" {
return shared.TokenScope
}
return strings.TrimRight(clOpts.Audience, "/") + "/.default"
}
// NewClient wraps the provided inner client in a Client with no credential
// and no options recorded.
func NewClient[T any](inner *T) *Client[T] {
wrapped := new(Client[T])
wrapped.inner = inner
return wrapped
}
func NewServiceClient(containerURL string, azClient *azcore.Client, credential any) *Client[generated.ServiceClient] {
// NewServiceClient constructs a Client wrapping a generated ServiceClient.
// - serviceURL - the URL used by the generated service client
// - azClient - the azcore.Client carrying the request pipeline
// - credential - the credential used to authorize requests; may be nil
// - options - client options; may be nil
// Renamed the first parameter from containerURL to serviceURL: this is the
// service-level constructor, and the old name was a copy-paste from the
// container variant (Go parameter names don't affect callers).
func NewServiceClient(serviceURL string, azClient *azcore.Client, credential any, options *ClientOptions) *Client[generated.ServiceClient] {
return &Client[generated.ServiceClient]{
inner: generated.NewServiceClient(serviceURL, azClient),
credential: credential,
options: options,
}
}
func NewContainerClient(containerURL string, azClient *azcore.Client, credential any) *Client[generated.ContainerClient] {
// NewContainerClient constructs a Client wrapping a generated ContainerClient,
// recording the credential and options for later retrieval (both may be nil).
func NewContainerClient(containerURL string, azClient *azcore.Client, credential any, options *ClientOptions) *Client[generated.ContainerClient] {
return &Client[generated.ContainerClient]{
inner: generated.NewContainerClient(containerURL, azClient),
credential: credential,
options: options,
}
}
func NewBlobClient(blobURL string, azClient *azcore.Client, credential any) *Client[generated.BlobClient] {
// NewBlobClient constructs a Client wrapping a generated BlobClient,
// recording the credential and options for later retrieval (both may be nil).
func NewBlobClient(blobURL string, azClient *azcore.Client, credential any, options *ClientOptions) *Client[generated.BlobClient] {
return &Client[generated.BlobClient]{
inner: generated.NewBlobClient(blobURL, azClient),
credential: credential,
options: options,
}
}

View file

@ -11,12 +11,6 @@ import (
"bytes"
"errors"
"fmt"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
"io"
"mime"
"mime/multipart"
@ -24,6 +18,13 @@ import (
"net/textproto"
"strconv"
"strings"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
)
const (
@ -45,7 +46,7 @@ func createBatchID() (string, error) {
// buildSubRequest is used for building the sub-request. Example:
// DELETE /container0/blob0 HTTP/1.1
// x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT
// Authorization: SharedKey account:G4jjBXA7LI/RnWKIOQ8i9xH4p76pAQ+4Fs4R1VxasaE=
// Authorization: SharedKey account:<redacted>
// Content-Length: 0
func buildSubRequest(req *policy.Request) []byte {
var batchSubRequest strings.Builder
@ -80,7 +81,7 @@ func buildSubRequest(req *policy.Request) []byte {
//
// DELETE /container0/blob0 HTTP/1.1
// x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT
// Authorization: SharedKey account:G4jjBXA7LI/RnWKIOQ8i9xH4p76pAQ+4Fs4R1VxasaE=
// Authorization: SharedKey account:<redacted>
// Content-Length: 0
func CreateBatchRequest(bb *BlobBatchBuilder) ([]byte, string, error) {
batchID, err := createBatchID()

View file

@ -11,7 +11,9 @@ import (
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"errors"
"fmt"
"github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
"net/http"
"net/url"
"sort"
@ -195,6 +197,17 @@ func NewSharedKeyCredPolicy(cred *SharedKeyCredential) *SharedKeyCredPolicy {
}
func (s *SharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) {
// skip adding the authorization header if no SharedKeyCredential was provided.
// this prevents a panic that might be hard to diagnose and allows testing
// against http endpoints that don't require authentication.
if s.cred == nil {
return req.Next()
}
if err := checkHTTPSForAuth(req); err != nil {
return nil, err
}
if d := getHeader(shared.HeaderXmsDate, req.Raw().Header); d == "" {
req.Raw().Header.Set(shared.HeaderXmsDate, time.Now().UTC().Format(http.TimeFormat))
}
@ -216,3 +229,10 @@ func (s *SharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) {
}
return response, err
}
// checkHTTPSForAuth returns a non-retriable error unless the request's URL
// scheme is https; credentials must not be sent over unencrypted transports.
func checkHTTPSForAuth(req *policy.Request) error {
if strings.EqualFold(req.Raw().URL.Scheme, "https") {
return nil
}
return errorinfo.NonRetriableError(errors.New("authenticated requests are not permitted for non TLS protected (https) endpoints"))
}

View file

@ -7,6 +7,6 @@
package exported
const (
ModuleName = "azblob"
ModuleVersion = "v1.2.1"
ModuleName = "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
ModuleVersion = "v1.3.0"
)

View file

@ -22,7 +22,7 @@ export-clients: true
use: "@autorest/go@4.0.0-preview.61"
```
### Updating service version to 2023-08-03
### Updating service version to 2023-11-03
```yaml
directive:
- from:
@ -36,7 +36,7 @@ directive:
transform: >-
return $.
replaceAll(`[]string{"2021-12-02"}`, `[]string{ServiceVersion}`).
replaceAll(`2021-12-02`, `2023-08-03`);
replaceAll(`2021-12-02`, `2023-11-03`);
```
### Undo breaking change with BlobName

View file

@ -6,4 +6,4 @@
package generated
const ServiceVersion = "2023-08-03"
const ServiceVersion = "2023-11-03"

View file

@ -32,7 +32,7 @@ type AppendBlobClient struct {
// AppendBlob. Append Block is supported only on version 2015-02-21 version or later.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - contentLength - The length of the request.
// - body - Initial data
// - options - AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method.
@ -201,7 +201,7 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) (
// created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - sourceURL - Specify a URL to the copy source.
// - contentLength - The length of the request.
// - options - AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL
@ -387,7 +387,7 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp
// Create - The Create Append Blob operation creates a new append blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - contentLength - The length of the request.
// - options - AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method.
// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method.
@ -560,7 +560,7 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen
// or later.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.

View file

@ -32,7 +32,7 @@ type BlobClient struct {
// blob with zero length and full metadata.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - copyID - The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation.
// - options - BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@ -104,7 +104,7 @@ func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (B
// AcquireLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite
// lease can be between 15 and 60 seconds. A lease duration cannot be changed using
// renew or change.
@ -206,7 +206,7 @@ func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobC
// BreakLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *BlobClient) BreakLease(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientBreakLeaseResponse, error) {
@ -309,7 +309,7 @@ func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobCli
// ChangeLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - leaseID - Specifies the current lease ID on the resource.
// - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed
// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID
@ -411,7 +411,7 @@ func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobCl
// until the copy is complete.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies
// a page blob snapshot. The value should be URL-encoded as it would appear in a request
// URI. The source blob must either be public or must be authenticated via a shared access signature.
@ -585,7 +585,7 @@ func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobCl
// CreateSnapshot - The Create Snapshot operation creates a read-only snapshot of a blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method.
// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
@ -724,7 +724,7 @@ func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (Blo
// return an HTTP status code of 404 (ResourceNotFound).
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@ -820,7 +820,7 @@ func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientD
// DeleteImmutabilityPolicy - The Delete Immutability Policy operation deletes the immutability policy on the blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy
// method.
func (client *BlobClient) DeleteImmutabilityPolicy(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (BlobClientDeleteImmutabilityPolicyResponse, error) {
@ -887,7 +887,7 @@ func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp
// can also call Download to read a snapshot.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
@ -1195,7 +1195,7 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien
// GetAccountInfo - Returns the sku name and account kind
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method.
func (client *BlobClient) GetAccountInfo(ctx context.Context, options *BlobClientGetAccountInfoOptions) (BlobClientGetAccountInfoResponse, error) {
var err error
@ -1262,7 +1262,7 @@ func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (Blo
// for the blob. It does not return the content of the blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
@ -1580,7 +1580,7 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob
// GetTags - The Get Tags operation enables users to get the tags associated with a blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@ -1662,7 +1662,7 @@ func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClient
// Query - The Query operation enables users to select/project on blob data by providing simple query expressions.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
@ -1896,7 +1896,7 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu
// ReleaseLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - leaseID - Specifies the current lease ID on the resource.
// - options - BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@ -1990,7 +1990,7 @@ func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobC
// RenewLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - leaseID - Specifies the current lease ID on the resource.
// - options - BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@ -2087,7 +2087,7 @@ func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobCli
// SetExpiry - Sets the time a blob will expire and be deleted.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - expiryOptions - Required. Indicates mode of the expiry time
// - options - BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method.
func (client *BlobClient) SetExpiry(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (BlobClientSetExpiryResponse, error) {
@ -2167,7 +2167,7 @@ func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClie
// SetHTTPHeaders - The Set HTTP Headers operation sets system properties on the blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method.
// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@ -2288,7 +2288,7 @@ func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (Blo
// SetImmutabilityPolicy - The Set Immutability Policy operation sets the immutability policy on the blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy
// method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@ -2374,7 +2374,7 @@ func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Respons
// SetLegalHold - The Set Legal Hold operation sets a legal hold on the blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - legalHold - Specified if a legal hold should be set on the blob.
// - options - BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method.
func (client *BlobClient) SetLegalHold(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (BlobClientSetLegalHoldResponse, error) {
@ -2449,7 +2449,7 @@ func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobC
// pairs
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
@ -2581,7 +2581,7 @@ func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobCl
// SetTags - The Set Tags operation enables users to set tags on a blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - tags - Blob tags
// - options - BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@ -2670,7 +2670,7 @@ func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClient
// storage type. This operation does not update the blob's ETag.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - tier - Indicates the tier to be set on the blob.
// - options - BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@ -2747,7 +2747,7 @@ func (client *BlobClient) setTierHandleResponse(resp *http.Response) (BlobClient
// StartCopyFromURL - The Start Copy From URL operation copies a blob or an internet resource to a new blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies
// a page blob snapshot. The value should be URL-encoded as it would appear in a request
// URI. The source blob must either be public or must be authenticated via a shared access signature.
@ -2899,7 +2899,7 @@ func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (B
// Undelete - Undelete a blob that was previously soft deleted
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method.
func (client *BlobClient) Undelete(ctx context.Context, options *BlobClientUndeleteOptions) (BlobClientUndeleteResponse, error) {
var err error

View file

@ -36,7 +36,7 @@ type BlockBlobClient struct {
// belong to.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - blocks - Blob Blocks.
// - options - BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList
// method.
@ -227,7 +227,7 @@ func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response
// GetBlockList - The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together.
// - options - BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@ -332,7 +332,7 @@ func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) (
// Block from URL API in conjunction with Put Block List.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - contentLength - The length of the request.
// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies
// a page blob snapshot. The value should be URL-encoded as it would appear in a request
@ -535,7 +535,7 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response)
// StageBlock - The Stage Block operation creates a new block to be committed as part of a blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal
// to 64 bytes in size. For a given blob, the length of the value specified for the blockid
// parameter must be the same size for each block.
@ -662,7 +662,7 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl
// are read from a URL.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal
// to 64 bytes in size. For a given blob, the length of the value specified for the blockid
// parameter must be the same size for each block.
@ -810,7 +810,7 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon
// the content of a block blob, use the Put Block List operation.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - contentLength - The length of the request.
// - body - Initial data
// - options - BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method.

View file

@ -34,7 +34,7 @@ type ContainerClient struct {
// to 60 seconds, or can be infinite
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite
// lease can be between 15 and 60 seconds. A lease duration cannot be changed using
// renew or change.
@ -129,7 +129,7 @@ func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) (
// to 60 seconds, or can be infinite
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *ContainerClient) BreakLease(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientBreakLeaseResponse, error) {
@ -225,7 +225,7 @@ func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (Co
// to 60 seconds, or can be infinite
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - leaseID - Specifies the current lease ID on the resource.
// - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed
// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID
@ -319,7 +319,7 @@ func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (C
// fails
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method.
// - ContainerCPKScopeInfo - ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method.
func (client *ContainerClient) Create(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (ContainerClientCreateResponse, error) {
@ -412,7 +412,7 @@ func (client *ContainerClient) createHandleResponse(resp *http.Response) (Contai
// deleted during garbage collection
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@ -489,7 +489,7 @@ func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (Contai
// Filter blobs searches within the given container.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - where - Filters the results to return only to return only blobs whose tags match the specified expression.
// - options - ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method.
func (client *ContainerClient) FilterBlobs(ctx context.Context, where string, options *ContainerClientFilterBlobsOptions) (ContainerClientFilterBlobsResponse, error) {
@ -570,7 +570,7 @@ func (client *ContainerClient) filterBlobsHandleResponse(resp *http.Response) (C
// be accessed publicly.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy
// method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@ -657,7 +657,7 @@ func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response
// GetAccountInfo - Returns the sku name and account kind
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo
// method.
func (client *ContainerClient) GetAccountInfo(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (ContainerClientGetAccountInfoResponse, error) {
@ -725,7 +725,7 @@ func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response)
// does not include the container's list of blobs
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
func (client *ContainerClient) GetProperties(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetPropertiesResponse, error) {
@ -854,7 +854,7 @@ func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response)
// NewListBlobFlatSegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager
// method.
//
@ -921,7 +921,7 @@ func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Resp
// NewListBlobHierarchySegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - delimiter - When the request includes this parameter, the operation returns a BlobPrefix element in the response body that
// acts as a placeholder for all blobs whose names begin with the same substring up to the
// appearance of the delimiter character. The delimiter may be a single character or a string.
@ -1014,7 +1014,7 @@ func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http
// to 60 seconds, or can be infinite
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - leaseID - Specifies the current lease ID on the resource.
// - options - ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@ -1100,7 +1100,7 @@ func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) (
// Rename - Renames an existing container.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - sourceContainerName - Required. Specifies the name of the container to rename.
// - options - ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method.
func (client *ContainerClient) Rename(ctx context.Context, sourceContainerName string, options *ContainerClientRenameOptions) (ContainerClientRenameResponse, error) {
@ -1172,7 +1172,7 @@ func (client *ContainerClient) renameHandleResponse(resp *http.Response) (Contai
// to 60 seconds, or can be infinite
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - leaseID - Specifies the current lease ID on the resource.
// - options - ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@ -1261,7 +1261,7 @@ func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (Co
// Restore - Restores a previously-deleted container.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method.
func (client *ContainerClient) Restore(ctx context.Context, options *ContainerClientRestoreOptions) (ContainerClientRestoreResponse, error) {
var err error
@ -1334,7 +1334,7 @@ func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (Conta
// may be accessed publicly.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - containerACL - the acls for the container
// - options - ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy
// method.
@ -1433,7 +1433,7 @@ func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response
// SetMetadata - operation sets one or more user-defined name-value pairs for the specified container.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@ -1524,7 +1524,7 @@ func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (C
// SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - contentLength - The length of the request.
// - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header
// value: multipart/mixed; boundary=batch_

View file

@ -30,7 +30,7 @@ type PageBlobClient struct {
// ClearPages - The Clear Pages operation clears a set of pages from a page blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - contentLength - The length of the request.
// - options - PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@ -181,7 +181,7 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag
// 2016-05-31.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies
// a page blob snapshot. The value should be URL-encoded as it would appear in a request
// URI. The source blob must either be public or must be authenticated via a shared access signature.
@ -283,7 +283,7 @@ func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response)
// Create - The Create operation creates a new page blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - contentLength - The length of the request.
// - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned
// to a 512-byte boundary.
@ -464,7 +464,7 @@ func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlo
// NewGetPageRangesPager - The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot
// of a page blob
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager
// method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@ -585,7 +585,7 @@ func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) (
// NewGetPageRangesDiffPager - The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that
// were changed between target blob and previous snapshot.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager
// method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@ -712,7 +712,7 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons
// Resize - Resize the Blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned
// to a 512-byte boundary.
// - options - PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method.
@ -831,7 +831,7 @@ func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo
// UpdateSequenceNumber - Update the sequence number of the blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - sequenceNumberAction - Required if the x-ms-blob-sequence-number header is set for the request. This property applies to
// page blobs only. This property indicates how the service should modify the blob's sequence number
// - options - PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber
@ -940,7 +940,7 @@ func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp
// UploadPages - The Upload Pages operation writes a range of pages to a page blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - contentLength - The length of the request.
// - body - Initial data
// - options - PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method.
@ -1111,7 +1111,7 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa
// a URL
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - sourceURL - Specify a URL to the copy source.
// - sourceRange - Bytes of source data in the specified range. The length of this range should match the ContentLength header
// and x-ms-range/Range destination range header.

View file

@ -33,7 +33,7 @@ type ServiceClient struct {
// be scoped within the expression to a single container.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - where - Filters the results to return only to return only blobs whose tags match the specified expression.
// - options - ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method.
func (client *ServiceClient) FilterBlobs(ctx context.Context, where string, options *ServiceClientFilterBlobsOptions) (ServiceClientFilterBlobsResponse, error) {
@ -112,7 +112,7 @@ func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (Ser
// GetAccountInfo - Returns the sku name and account kind
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method.
func (client *ServiceClient) GetAccountInfo(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (ServiceClientGetAccountInfoResponse, error) {
var err error
@ -186,7 +186,7 @@ func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) (
// CORS (Cross-Origin Resource Sharing) rules.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method.
func (client *ServiceClient) GetProperties(ctx context.Context, options *ServiceClientGetPropertiesOptions) (ServiceClientGetPropertiesResponse, error) {
var err error
@ -249,7 +249,7 @@ func (client *ServiceClient) getPropertiesHandleResponse(resp *http.Response) (S
// location endpoint when read-access geo-redundant replication is enabled for the storage account.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method.
func (client *ServiceClient) GetStatistics(ctx context.Context, options *ServiceClientGetStatisticsOptions) (ServiceClientGetStatisticsResponse, error) {
var err error
@ -319,7 +319,7 @@ func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (S
// bearer token authentication.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - keyInfo - Key information
// - options - ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey
// method.
@ -393,7 +393,7 @@ func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Respo
// NewListContainersSegmentPager - The List Containers Segment operation returns a list of the containers under the specified
// account
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - options - ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager
// method.
//
@ -451,7 +451,7 @@ func (client *ServiceClient) ListContainersSegmentHandleResponse(resp *http.Resp
// and CORS (Cross-Origin Resource Sharing) rules
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - storageServiceProperties - The StorageService properties.
// - options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method.
func (client *ServiceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (ServiceClientSetPropertiesResponse, error) {
@ -514,7 +514,7 @@ func (client *ServiceClient) setPropertiesHandleResponse(resp *http.Response) (S
// SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// Generated from API version 2023-11-03
// - contentLength - The length of the request.
// - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header
// value: multipart/mixed; boundary=batch_

View file

@ -20,9 +20,9 @@ type storageAuthorizer struct {
tenantID string
}
func NewStorageChallengePolicy(cred azcore.TokenCredential) policy.Policy {
s := storageAuthorizer{scopes: []string{TokenScope}}
return runtime.NewBearerTokenPolicy(cred, []string{TokenScope}, &policy.BearerTokenOptions{
func NewStorageChallengePolicy(cred azcore.TokenCredential, audience string) policy.Policy {
s := storageAuthorizer{scopes: []string{audience}}
return runtime.NewBearerTokenPolicy(cred, []string{audience}, &policy.BearerTokenOptions{
AuthorizationHandler: policy.AuthorizationHandler{
OnRequest: s.onRequest,
OnChallenge: s.onChallenge,

View file

@ -44,15 +44,6 @@ const (
const crc64Polynomial uint64 = 0x9A6C9329AC4BC9B5
const (
AppendBlobClient = "azblob/appendblob.Client"
BlobClient = "azblob/blob.Client"
BlockBlobClient = "azblob/blockblob.Client"
ContainerClient = "azblob/container.Client"
PageBlobClient = "azblob/pageblob.Client"
ServiceClient = "azblob/service.Client"
)
var CRC64Table = crc64.MakeTable(crc64Polynomial)
// CopyOptions returns a zero-value T if opts is nil.

View file

@ -36,11 +36,12 @@ type Client base.CompositeClient[generated.BlobClient, generated.PageBlobClient]
// - cred - an Azure AD credential, typically obtained via the azidentity module
// - options - client options; pass nil to accept the default values
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
authPolicy := shared.NewStorageChallengePolicy(cred)
audience := base.GetAudience((*base.ClientOptions)(options))
authPolicy := shared.NewStorageChallengePolicy(cred, audience)
conOptions := shared.GetClientOptions(options)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
azClient, err := azcore.NewClient(shared.PageBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
@ -54,7 +55,7 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio
func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
conOptions := shared.GetClientOptions(options)
azClient, err := azcore.NewClient(shared.PageBlobClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
@ -70,7 +71,7 @@ func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCreden
conOptions := shared.GetClientOptions(options)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
azClient, err := azcore.NewClient(shared.PageBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
if err != nil {
return nil, err
}

View file

@ -198,7 +198,7 @@ type GetPageRangesOptions struct {
func (o *GetPageRangesOptions) format() (*generated.PageBlobClientGetPageRangesOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
if o == nil {
return nil, nil, nil
return &generated.PageBlobClientGetPageRangesOptions{}, nil, nil
}
leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)

View file

@ -40,15 +40,16 @@ type Client base.Client[generated.ServiceClient]
// - cred - an Azure AD credential, typically obtained via the azidentity module
// - options - client options; pass nil to accept the default values
func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
authPolicy := shared.NewStorageChallengePolicy(cred)
audience := base.GetAudience((*base.ClientOptions)(options))
authPolicy := shared.NewStorageChallengePolicy(cred, audience)
conOptions := shared.GetClientOptions(options)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
azClient, err := azcore.NewClient(shared.ServiceClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewServiceClient(serviceURL, azClient, &cred)), nil
return (*Client)(base.NewServiceClient(serviceURL, azClient, &cred, (*base.ClientOptions)(conOptions))), nil
}
// NewClientWithNoCredential creates an instance of Client with the specified values.
@ -58,11 +59,11 @@ func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOp
func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) {
conOptions := shared.GetClientOptions(options)
azClient, err := azcore.NewClient(shared.ServiceClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewServiceClient(serviceURL, azClient, nil)), nil
return (*Client)(base.NewServiceClient(serviceURL, azClient, nil, (*base.ClientOptions)(conOptions))), nil
}
// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
@ -74,12 +75,12 @@ func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredenti
conOptions := shared.GetClientOptions(options)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
azClient, err := azcore.NewClient(shared.ServiceClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewServiceClient(serviceURL, azClient, cred)), nil
return (*Client)(base.NewServiceClient(serviceURL, azClient, cred, (*base.ClientOptions)(conOptions))), nil
}
// NewClientFromConnectionString creates an instance of Client with the specified values.
@ -136,6 +137,10 @@ func getGeneratedBlobClient(b *blob.Client) *generated.BlobClient {
return base.InnerClient((*base.Client[generated.BlobClient])(b))
}
func (s *Client) getClientOptions() *base.ClientOptions {
return base.GetClientOptions((*base.Client[generated.ServiceClient])(s))
}
// URL returns the URL endpoint used by the Client object.
func (s *Client) URL() string {
return s.generated().Endpoint()
@ -145,7 +150,7 @@ func (s *Client) URL() string {
// this Client's URL. The new container.Client uses the same request policy pipeline as the Client.
func (s *Client) NewContainerClient(containerName string) *container.Client {
containerURL := runtime.JoinPaths(s.generated().Endpoint(), containerName)
return (*container.Client)(base.NewContainerClient(containerURL, s.generated().InternalClient().WithClientName(shared.ContainerClient), s.credential()))
return (*container.Client)(base.NewContainerClient(containerURL, s.generated().InternalClient().WithClientName(exported.ModuleName), s.credential(), s.getClientOptions()))
}
// CreateContainer is a lifecycle method to creates a new container under the specified account.
@ -315,7 +320,7 @@ func (s *Client) NewBatchBuilder() (*BatchBuilder, error) {
switch cred := s.credential().(type) {
case *azcore.TokenCredential:
authPolicy = shared.NewStorageChallengePolicy(*cred)
authPolicy = shared.NewStorageChallengePolicy(*cred, base.GetAudience(s.getClientOptions()))
case *SharedKeyCredential:
authPolicy = exported.NewSharedKeyCredPolicy(cred)
case nil:

View file

@ -661,6 +661,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "acm-pca-fips.ca-central-1.amazonaws.com",
},
endpointKey{
Region: "ca-west-1",
}: endpoint{},
endpointKey{
Region: "ca-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "acm-pca-fips.ca-west-1.amazonaws.com",
},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@ -694,6 +703,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-ca-west-1",
}: endpoint{
Hostname: "acm-pca-fips.ca-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ca-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@ -4017,15 +4035,75 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "auditmanager-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-1-fips",
}: endpoint{
Hostname: "auditmanager-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-east-2",
}: endpoint{},
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "auditmanager-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-east-2-fips",
}: endpoint{
Hostname: "auditmanager-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-west-1",
}: endpoint{},
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "auditmanager-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-1-fips",
}: endpoint{
Hostname: "auditmanager-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-west-2",
}: endpoint{},
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "auditmanager-fips.us-west-2.amazonaws.com",
},
endpointKey{
Region: "us-west-2-fips",
}: endpoint{
Hostname: "auditmanager-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
},
},
"autoscaling": service{
@ -5853,6 +5931,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@ -10263,6 +10344,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "ec2-fips.ca-central-1.amazonaws.com",
},
endpointKey{
Region: "ca-west-1",
}: endpoint{},
endpointKey{
Region: "ca-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "ec2-fips.ca-west-1.amazonaws.com",
},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@ -10302,6 +10392,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-ca-west-1",
}: endpoint{
Hostname: "ec2-fips.ca-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ca-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@ -11310,6 +11409,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com",
},
endpointKey{
Region: "ca-west-1",
}: endpoint{},
endpointKey{
Region: "ca-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com",
},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@ -11490,6 +11598,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-ca-west-1",
}: endpoint{
Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ca-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-eu-central-1",
}: endpoint{
@ -19864,6 +19981,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
Region: "ap-northeast-3",
}: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@ -19873,6 +19993,12 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-4",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@ -22059,12 +22185,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@ -22971,6 +23103,19 @@ var awsPartition = partition{
},
},
},
"private-networks": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-2",
}: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
},
},
"profile": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -24433,6 +24578,12 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "redshift-serverless-fips.ca-central-1.amazonaws.com",
},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@ -24448,18 +24599,87 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "fips-ca-central-1",
}: endpoint{
Hostname: "redshift-serverless-fips.ca-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ca-central-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
Hostname: "redshift-serverless-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
Hostname: "redshift-serverless-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
Hostname: "redshift-serverless-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
Hostname: "redshift-serverless-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "redshift-serverless-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
}: endpoint{},
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "redshift-serverless-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
}: endpoint{},
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "redshift-serverless-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
}: endpoint{},
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "redshift-serverless-fips.us-west-2.amazonaws.com",
},
},
},
"rekognition": service{
@ -24783,153 +25003,64 @@ var awsPartition = partition{
},
},
"resource-explorer-2": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
DNSSuffix: "api.aws",
},
defaultKey{
Variant: fipsVariant,
}: endpoint{
Hostname: "{service}-fips.{region}.{dnsSuffix}",
DNSSuffix: "api.aws",
},
},
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
}: endpoint{
Hostname: "resource-explorer-2.af-south-1.api.aws",
},
endpointKey{
Region: "ap-east-1",
}: endpoint{
Hostname: "resource-explorer-2.ap-east-1.api.aws",
},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{
Hostname: "resource-explorer-2.ap-northeast-1.api.aws",
},
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{
Hostname: "resource-explorer-2.ap-northeast-2.api.aws",
},
}: endpoint{},
endpointKey{
Region: "ap-northeast-3",
}: endpoint{
Hostname: "resource-explorer-2.ap-northeast-3.api.aws",
},
}: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{
Hostname: "resource-explorer-2.ap-south-1.api.aws",
},
endpointKey{
Region: "ap-south-2",
}: endpoint{
Hostname: "resource-explorer-2.ap-south-2.api.aws",
},
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{
Hostname: "resource-explorer-2.ap-southeast-1.api.aws",
},
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{
Hostname: "resource-explorer-2.ap-southeast-2.api.aws",
},
}: endpoint{},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{
Hostname: "resource-explorer-2.ap-southeast-3.api.aws",
},
endpointKey{
Region: "ap-southeast-4",
}: endpoint{
Hostname: "resource-explorer-2.ap-southeast-4.api.aws",
},
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{
Hostname: "resource-explorer-2.ca-central-1.api.aws",
},
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{
Hostname: "resource-explorer-2.eu-central-1.api.aws",
},
endpointKey{
Region: "eu-central-2",
}: endpoint{
Hostname: "resource-explorer-2.eu-central-2.api.aws",
},
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{
Hostname: "resource-explorer-2.eu-north-1.api.aws",
},
endpointKey{
Region: "eu-south-1",
}: endpoint{
Hostname: "resource-explorer-2.eu-south-1.api.aws",
},
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{
Hostname: "resource-explorer-2.eu-west-1.api.aws",
},
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{
Hostname: "resource-explorer-2.eu-west-2.api.aws",
},
}: endpoint{},
endpointKey{
Region: "eu-west-3",
}: endpoint{
Hostname: "resource-explorer-2.eu-west-3.api.aws",
},
endpointKey{
Region: "il-central-1",
}: endpoint{
Hostname: "resource-explorer-2.il-central-1.api.aws",
},
endpointKey{
Region: "me-central-1",
}: endpoint{
Hostname: "resource-explorer-2.me-central-1.api.aws",
},
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{
Hostname: "resource-explorer-2.me-south-1.api.aws",
},
}: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{
Hostname: "resource-explorer-2.sa-east-1.api.aws",
},
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{
Hostname: "resource-explorer-2.us-east-1.api.aws",
},
}: endpoint{},
endpointKey{
Region: "us-east-2",
}: endpoint{
Hostname: "resource-explorer-2.us-east-2.api.aws",
},
}: endpoint{},
endpointKey{
Region: "us-west-1",
}: endpoint{
Hostname: "resource-explorer-2.us-west-1.api.aws",
},
}: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{
Hostname: "resource-explorer-2.us-west-2.api.aws",
},
}: endpoint{},
},
},
"resource-groups": service{
@ -29047,12 +29178,18 @@ var awsPartition = partition{
},
"sms-voice": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
Region: "ap-northeast-3",
}: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@ -29086,6 +29223,12 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-2",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-south-2",
}: endpoint{},
@ -29095,6 +29238,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-2",
}: endpoint{},
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "fips-ca-central-1",
}: endpoint{
@ -29113,6 +29259,24 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
Hostname: "sms-voice-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
Hostname: "sms-voice-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
@ -29128,6 +29292,12 @@ var awsPartition = partition{
endpointKey{
Region: "me-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@ -29137,6 +29307,24 @@ var awsPartition = partition{
}: endpoint{
Hostname: "sms-voice-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
}: endpoint{},
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "sms-voice-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
}: endpoint{},
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "sms-voice-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
}: endpoint{},
@ -35480,6 +35668,16 @@ var awscnPartition = partition{
}: endpoint{},
},
},
"inspector2": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
},
},
"internetmonitor": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@ -35969,31 +36167,6 @@ var awscnPartition = partition{
}: endpoint{},
},
},
"resource-explorer-2": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
DNSSuffix: "api.amazonwebservices.com.cn",
},
defaultKey{
Variant: fipsVariant,
}: endpoint{
Hostname: "{service}-fips.{region}.{dnsSuffix}",
DNSSuffix: "api.amazonwebservices.com.cn",
},
},
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
}: endpoint{
Hostname: "resource-explorer-2.cn-north-1.api.amazonwebservices.com.cn",
},
endpointKey{
Region: "cn-northwest-1",
}: endpoint{
Hostname: "resource-explorer-2.cn-northwest-1.api.amazonwebservices.com.cn",
},
},
},
"resource-groups": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -39079,6 +39252,16 @@ var awsusgovPartition = partition{
}: endpoint{},
},
},
"emr-serverless": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
},
},
"es": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -41409,31 +41592,6 @@ var awsusgovPartition = partition{
},
},
},
"resource-explorer-2": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
DNSSuffix: "api.aws",
},
defaultKey{
Variant: fipsVariant,
}: endpoint{
Hostname: "{service}-fips.{region}.{dnsSuffix}",
DNSSuffix: "api.aws",
},
},
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{
Hostname: "resource-explorer-2.us-gov-east-1.api.aws",
},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
Hostname: "resource-explorer-2.us-gov-west-1.api.aws",
},
},
},
"resource-groups": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{},
@ -42319,6 +42477,15 @@ var awsusgovPartition = partition{
},
"sms-voice": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "fips-us-gov-east-1",
}: endpoint{
Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-gov-west-1",
}: endpoint{
@ -42328,6 +42495,15 @@ var awsusgovPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com",
},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
@ -43326,6 +43502,20 @@ var awsisoPartition = partition{
},
},
},
"api.pricing": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
CredentialScope: credentialScope{
Service: "pricing",
},
},
},
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
},
},
"api.sagemaker": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -43375,6 +43565,13 @@ var awsisoPartition = partition{
}: endpoint{},
},
},
"arc-zonal-shift": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
},
},
"athena": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -44522,6 +44719,20 @@ var awsisobPartition = partition{
},
},
},
"api.pricing": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
CredentialScope: credentialScope{
Service: "pricing",
},
},
},
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-isob-east-1",
}: endpoint{},
},
},
"api.sagemaker": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -44555,6 +44766,13 @@ var awsisobPartition = partition{
}: endpoint{},
},
},
"arc-zonal-shift": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-isob-east-1",
}: endpoint{},
},
},
"autoscaling": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{

View file

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.50.9"
const SDKVersion = "1.50.24"

View file

@ -1,3 +1,3 @@
{
"v2": "2.12.0"
"v2": "2.12.1"
}

View file

@ -1,5 +1,12 @@
# Changelog
## [2.12.1](https://github.com/googleapis/gax-go/compare/v2.12.0...v2.12.1) (2024-02-13)
### Bug Fixes
* add XGoogFieldMaskHeader constant ([#321](https://github.com/googleapis/gax-go/issues/321)) ([666ee08](https://github.com/googleapis/gax-go/commit/666ee08931041b7fed56bed7132649785b2d3dfe))
## [2.12.0](https://github.com/googleapis/gax-go/compare/v2.11.0...v2.12.0) (2023-06-26)

View file

@ -38,6 +38,14 @@ import (
)
const (
// XGoogFieldMaskHeader is the canonical header key for the [System Parameter]
// that specifies the response read mask. The value(s) for this header
// must adhere to format described in [fieldmaskpb].
//
// [System Parameter]: https://cloud.google.com/apis/docs/system-parameters
// [fieldmaskpb]: https://google.golang.org/protobuf/types/known/fieldmaskpb
XGoogFieldMaskHeader = "x-goog-fieldmask"
headerKey = contextKey("header")
)

View file

@ -103,7 +103,9 @@ func goVersion() string {
return "UNKNOWN"
}
// XGoogHeader is for use by the Google Cloud Libraries only.
// XGoogHeader is for use by the Google Cloud Libraries only. See package
// [github.com/googleapis/gax-go/v2/callctx] for help setting/retrieving
// request/response headers.
//
// XGoogHeader formats key-value pairs.
// The resulting string is suitable for x-goog-api-client header.
@ -125,7 +127,8 @@ func XGoogHeader(keyval ...string) string {
}
// InsertMetadataIntoOutgoingContext is for use by the Google Cloud Libraries
// only.
// only. See package [github.com/googleapis/gax-go/v2/callctx] for help
// setting/retrieving request/response headers.
//
// InsertMetadataIntoOutgoingContext returns a new context that merges the
// provided keyvals metadata pairs with any existing metadata/headers in the
@ -137,7 +140,9 @@ func InsertMetadataIntoOutgoingContext(ctx context.Context, keyvals ...string) c
return metadata.NewOutgoingContext(ctx, insertMetadata(ctx, keyvals...))
}
// BuildHeaders is for use by the Google Cloud Libraries only.
// BuildHeaders is for use by the Google Cloud Libraries only. See package
// [github.com/googleapis/gax-go/v2/callctx] for help setting/retrieving
// request/response headers.
//
// BuildHeaders returns a new http.Header that merges the provided
// keyvals header pairs with any existing metadata/headers in the provided

View file

@ -30,4 +30,4 @@
package internal
// Version is the current tagged release of the library.
const Version = "2.12.0"
const Version = "2.12.1"

View file

@ -1,3 +1,18 @@
## v1.9.0 (2024-02-02)
New features and improvements:
* [GH-2884](https://github.com/gophercloud/gophercloud/pull/2884) [v1] Context-aware methods to ProviderClient and ServiceClient
* [GH-2887](https://github.com/gophercloud/gophercloud/pull/2887) [v1] Add support of Flavors and FlavorProfiles for Octavia
* [GH-2875](https://github.com/gophercloud/gophercloud/pull/2875) [v1] [db/v1/instance]: adding support for availability_zone for a db instance
CI changes:
* [GH-2856](https://github.com/gophercloud/gophercloud/pull/2856) [v1] Fix devstack install on EOL magnum branches
* [GH-2857](https://github.com/gophercloud/gophercloud/pull/2857) [v1] Fix networking acceptance tests
* [GH-2858](https://github.com/gophercloud/gophercloud/pull/2858) [v1] build(deps): bump actions/upload-artifact from 3 to 4
* [GH-2859](https://github.com/gophercloud/gophercloud/pull/2859) [v1] build(deps): bump github/codeql-action from 2 to 3
## v1.8.0 (2023-11-30)
New features and improvements:

View file

@ -0,0 +1,739 @@
package ctxt
// This file is a copy of the context package from the standard library that
// ships with Go v1.21.6. It has been vendored so that AfterFunc can be used.
//
// Changes made to the original file:
// * replace "internal/reflectlite" with "reflect" in the imports
// * replace "any" with "interface{}"
// * remove the atomic.Int32 type that only exists for testing and is not
// compatible with Go v1.14.
//
// https://cs.opensource.google/go/go/+/refs/tags/go1.21.6:src/context/context.go
import (
"errors"
reflectlite "reflect"
"sync"
"sync/atomic"
"time"
)
// A Context carries a deadline, a cancellation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context interface {
	// Deadline returns the time when work done on behalf of this context
	// should be canceled. Deadline returns ok==false when no deadline is
	// set. Successive calls to Deadline return the same results.
	Deadline() (deadline time.Time, ok bool)

	// Done returns a channel that's closed when work done on behalf of this
	// context should be canceled. Done may return nil if this context can
	// never be canceled. Successive calls to Done return the same value.
	// The close of the Done channel may happen asynchronously,
	// after the cancel function returns.
	//
	// WithCancel arranges for Done to be closed when cancel is called;
	// WithDeadline arranges for Done to be closed when the deadline
	// expires; WithTimeout arranges for Done to be closed when the timeout
	// elapses.
	//
	// Done is provided for use in select statements:
	//
	//	// Stream generates values with DoSomething and sends them to out
	//	// until DoSomething returns an error or ctx.Done is closed.
	//	func Stream(ctx context.Context, out chan<- Value) error {
	//		for {
	//			v, err := DoSomething(ctx)
	//			if err != nil {
	//				return err
	//			}
	//			select {
	//			case <-ctx.Done():
	//				return ctx.Err()
	//			case out <- v:
	//			}
	//		}
	//	}
	//
	// See https://blog.golang.org/pipelines for more examples of how to use
	// a Done channel for cancellation.
	Done() <-chan struct{}

	// Err reports whether and why this context is done.
	// If Done is not yet closed, Err returns nil.
	// If Done is closed, Err returns a non-nil error explaining why:
	// Canceled if the context was canceled
	// or DeadlineExceeded if the context's deadline passed.
	// After Err returns a non-nil error, successive calls to Err return the same error.
	Err() error

	// Value returns the value associated with this context for key, or nil
	// if no value is associated with key. Successive calls to Value with
	// the same key returns the same result.
	//
	// Use context values only for request-scoped data that transits
	// processes and API boundaries, not for passing optional parameters to
	// functions.
	//
	// A key identifies a specific value in a Context. Functions that wish
	// to store values in Context typically allocate a key in a global
	// variable then use that key as the argument to context.WithValue and
	// Context.Value. A key can be any type that supports equality;
	// packages should define keys as an unexported type to avoid
	// collisions.
	//
	// Packages that define a Context key should provide type-safe accessors
	// for the values stored using that key:
	//
	//	// Package user defines a User type that's stored in Contexts.
	//	package user
	//
	//	import "context"
	//
	//	// User is the type of value stored in the Contexts.
	//	type User struct {...}
	//
	//	// key is an unexported type for keys defined in this package.
	//	// This prevents collisions with keys defined in other packages.
	//	type key int
	//
	//	// userKey is the key for user.User values in Contexts. It is
	//	// unexported; clients use user.NewContext and user.FromContext
	//	// instead of using this key directly.
	//	var userKey key
	//
	//	// NewContext returns a new Context that carries value u.
	//	func NewContext(ctx context.Context, u *User) context.Context {
	//		return context.WithValue(ctx, userKey, u)
	//	}
	//
	//	// FromContext returns the User value stored in ctx, if any.
	//	func FromContext(ctx context.Context) (*User, bool) {
	//		u, ok := ctx.Value(userKey).(*User)
	//		return u, ok
	//	}
	Value(key interface{}) interface{}
}
// Canceled is the error returned by [Context.Err] when the context is canceled.
var Canceled = errors.New("context canceled")

// DeadlineExceeded is the error returned by [Context.Err] when the context's
// deadline passes.
var DeadlineExceeded error = deadlineExceededError{}

// deadlineExceededError is the concrete type behind DeadlineExceeded. Its
// Timeout and Temporary methods both report true, so callers probing the
// error through a net.Error-shaped interface see a retryable timeout.
type deadlineExceededError struct{}

func (deadlineExceededError) Error() string   { return "context deadline exceeded" }
func (deadlineExceededError) Timeout() bool   { return true }
func (deadlineExceededError) Temporary() bool { return true }

// An emptyCtx is never canceled, has no values, and has no deadline.
// It is the common base of backgroundCtx and todoCtx.
type emptyCtx struct{}

// Deadline reports no deadline (zero time, ok==false).
func (emptyCtx) Deadline() (deadline time.Time, ok bool) {
	return
}

// Done returns nil: an emptyCtx can never be canceled.
func (emptyCtx) Done() <-chan struct{} {
	return nil
}

// Err always returns nil: an emptyCtx is never done.
func (emptyCtx) Err() error {
	return nil
}

// Value always returns nil: an emptyCtx carries no values.
func (emptyCtx) Value(key interface{}) interface{} {
	return nil
}

// backgroundCtx is the concrete type returned by Background.
type backgroundCtx struct{ emptyCtx }

func (backgroundCtx) String() string {
	return "context.Background"
}

// todoCtx is the concrete type returned by TODO.
type todoCtx struct{ emptyCtx }

func (todoCtx) String() string {
	return "context.TODO"
}

// Background returns a non-nil, empty [Context]. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming
// requests.
func Background() Context {
	return backgroundCtx{}
}

// TODO returns a non-nil, empty [Context]. Code should use context.TODO when
// it's unclear which Context to use or it is not yet available (because the
// surrounding function has not yet been extended to accept a Context
// parameter).
func TODO() Context {
	return todoCtx{}
}
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// A CancelFunc may be called by multiple goroutines simultaneously.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc func()

// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
	c := withCancel(parent)
	// removeFromParent=true detaches c from its parent's children set.
	return c, func() { c.cancel(true, Canceled, nil) }
}

// A CancelCauseFunc behaves like a [CancelFunc] but additionally sets the cancellation cause.
// This cause can be retrieved by calling [Cause] on the canceled Context or on
// any of its derived Contexts.
//
// If the context has already been canceled, CancelCauseFunc does not set the cause.
// For example, if childContext is derived from parentContext:
//   - if parentContext is canceled with cause1 before childContext is canceled with cause2,
//     then Cause(parentContext) == Cause(childContext) == cause1
//   - if childContext is canceled with cause2 before parentContext is canceled with cause1,
//     then Cause(parentContext) == cause1 and Cause(childContext) == cause2
type CancelCauseFunc func(cause error)

// WithCancelCause behaves like [WithCancel] but returns a [CancelCauseFunc] instead of a [CancelFunc].
// Calling cancel with a non-nil error (the "cause") records that error in ctx;
// it can then be retrieved using Cause(ctx).
// Calling cancel with nil sets the cause to Canceled.
//
// Example use:
//
//	ctx, cancel := context.WithCancelCause(parent)
//	cancel(myError)
//	ctx.Err() // returns context.Canceled
//	context.Cause(ctx) // returns myError
func WithCancelCause(parent Context) (ctx Context, cancel CancelCauseFunc) {
	c := withCancel(parent)
	// Err is always Canceled; the caller-supplied cause is stored separately.
	return c, func(cause error) { c.cancel(true, Canceled, cause) }
}

// withCancel allocates a cancelCtx and links it into parent's cancellation
// tree (or starts a watcher goroutine for foreign Context implementations).
func withCancel(parent Context) *cancelCtx {
	if parent == nil {
		panic("cannot create context from nil parent")
	}
	c := &cancelCtx{}
	c.propagateCancel(parent, c)
	return c
}
// Cause returns a non-nil error explaining why c was canceled.
// The first cancellation of c or one of its parents sets the cause.
// If that cancellation happened via a call to CancelCauseFunc(err),
// then [Cause] returns err.
// Otherwise Cause(c) returns the same value as c.Err().
// Cause returns nil if c has not been canceled yet.
func Cause(c Context) error {
	cc, ok := c.Value(&cancelCtxKey).(*cancelCtx)
	if !ok {
		// c does not derive from a cancelCtx, so no cause was recorded.
		return nil
	}
	cc.mu.Lock()
	defer cc.mu.Unlock()
	return cc.cause
}
// AfterFunc arranges to call f in its own goroutine after ctx is done
// (cancelled or timed out).
// If ctx is already done, AfterFunc calls f immediately in its own goroutine.
//
// Multiple calls to AfterFunc on a context operate independently;
// one does not replace another.
//
// Calling the returned stop function stops the association of ctx with f.
// It returns true if the call stopped f from being run.
// If stop returns false,
// either the context is done and f has been started in its own goroutine;
// or f was already stopped.
// The stop function does not wait for f to complete before returning.
// If the caller needs to know whether f is completed,
// it must coordinate with f explicitly.
//
// If ctx has a "AfterFunc(func()) func() bool" method,
// AfterFunc will use it to schedule the call.
func AfterFunc(ctx Context, f func()) (stop func() bool) {
	a := &afterFuncCtx{
		f: f,
	}
	a.cancelCtx.propagateCancel(ctx, a)
	return func() bool {
		stopped := false
		// Winning a.once here means cancellation has not (and now never
		// will) start f; losing it means f was already scheduled.
		a.once.Do(func() {
			stopped = true
		})
		if stopped {
			// Detach from the parent so the stopped context is released.
			a.cancel(true, Canceled, nil)
		}
		return stopped
	}
}

// afterFuncer is implemented by contexts that can schedule a done-callback
// themselves; propagateCancel checks for it to avoid a watcher goroutine.
type afterFuncer interface {
	AfterFunc(func()) func() bool
}

// afterFuncCtx is the context implementation backing AfterFunc.
type afterFuncCtx struct {
	cancelCtx
	once sync.Once // either starts running f or stops f from running
	f    func()
}

// cancel cancels the embedded cancelCtx and, unless the stop function
// already won the a.once race, starts f in its own goroutine.
func (a *afterFuncCtx) cancel(removeFromParent bool, err, cause error) {
	a.cancelCtx.cancel(false, err, cause)
	if removeFromParent {
		removeChild(a.Context, a)
	}
	a.once.Do(func() {
		go a.f()
	})
}

// A stopCtx is used as the parent context of a cancelCtx when
// an AfterFunc has been registered with the parent.
// It holds the stop function used to unregister the AfterFunc.
type stopCtx struct {
	Context
	stop func() bool
}
// cancelCtxKey is an arbitrary address used as a sentinel Value key;
// a cancelCtx returns itself when asked for &cancelCtxKey.
var cancelCtxKey int

// parentCancelCtx returns the innermost enclosing *cancelCtx for parent,
// discovered via the &cancelCtxKey sentinel lookup. It reports false when
// there is none, or when that cancelCtx has been wrapped by a custom
// implementation providing a different done channel, in which case it must
// not be bypassed.
func parentCancelCtx(parent Context) (*cancelCtx, bool) {
	switch done := parent.Done(); done {
	case nil, closedchan:
		return nil, false
	default:
		p, ok := parent.Value(&cancelCtxKey).(*cancelCtx)
		if !ok {
			return nil, false
		}
		if pdone, _ := p.done.Load().(chan struct{}); pdone != done {
			return nil, false
		}
		return p, true
	}
}
// removeChild detaches child from parent's cancellation bookkeeping: for an
// AfterFunc-registered parent it invokes the stored stop function, otherwise
// it deletes child from the enclosing cancelCtx's children set.
func removeChild(parent Context, child canceler) {
	if s, ok := parent.(stopCtx); ok {
		s.stop()
		return
	}
	p, ok := parentCancelCtx(parent)
	if !ok {
		return
	}
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.children != nil {
		delete(p.children, child)
	}
}
// A canceler is a context type that can be canceled directly. The
// implementations are *cancelCtx and *timerCtx.
type canceler interface {
	cancel(removeFromParent bool, err, cause error)
	Done() <-chan struct{}
}

// closedchan is a reusable closed channel.
var closedchan = make(chan struct{})

func init() {
	close(closedchan)
}

// A cancelCtx can be canceled. When canceled, it also cancels any children
// that implement canceler.
type cancelCtx struct {
	Context

	mu       sync.Mutex            // protects following fields
	done     atomic.Value          // of chan struct{}, created lazily, closed by first cancel call
	children map[canceler]struct{} // set to nil by the first cancel call
	err      error                 // set to non-nil by the first cancel call
	cause    error                 // set to non-nil by the first cancel call
}

// Value returns c itself for the internal &cancelCtxKey sentinel (used by
// parentCancelCtx and Cause to find the innermost cancelCtx); all other
// keys are resolved by walking up the parent chain.
func (c *cancelCtx) Value(key interface{}) interface{} {
	if key == &cancelCtxKey {
		return c
	}
	return value(c.Context, key)
}

// Done lazily creates the done channel on first use, with a double-checked
// load under c.mu so concurrent callers all observe the same channel.
func (c *cancelCtx) Done() <-chan struct{} {
	d := c.done.Load()
	if d != nil {
		return d.(chan struct{})
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	d = c.done.Load()
	if d == nil {
		d = make(chan struct{})
		c.done.Store(d)
	}
	return d.(chan struct{})
}

// Err returns the cancellation error, read under the mutex because cancel
// may set it concurrently.
func (c *cancelCtx) Err() error {
	c.mu.Lock()
	err := c.err
	c.mu.Unlock()
	return err
}
// propagateCancel arranges for child to be canceled when parent is.
// It sets the parent context of cancelCtx.
func (c *cancelCtx) propagateCancel(parent Context, child canceler) {
	c.Context = parent

	done := parent.Done()
	if done == nil {
		return // parent is never canceled
	}

	select {
	case <-done:
		// parent is already canceled
		child.cancel(false, parent.Err(), Cause(parent))
		return
	default:
	}

	if p, ok := parentCancelCtx(parent); ok {
		// parent is a *cancelCtx, or derives from one.
		// Register child in the parent's children set so the parent's
		// cancel call reaches it directly, with no extra goroutine.
		p.mu.Lock()
		if p.err != nil {
			// parent has already been canceled
			child.cancel(false, p.err, p.cause)
		} else {
			if p.children == nil {
				p.children = make(map[canceler]struct{})
			}
			p.children[child] = struct{}{}
		}
		p.mu.Unlock()
		return
	}

	if a, ok := parent.(afterFuncer); ok {
		// parent implements an AfterFunc method.
		// Remember the returned stop function via a stopCtx parent so
		// removeChild can unregister the callback later.
		c.mu.Lock()
		stop := a.AfterFunc(func() {
			child.cancel(false, parent.Err(), Cause(parent))
		})
		c.Context = stopCtx{
			Context: parent,
			stop:    stop,
		}
		c.mu.Unlock()
		return
	}

	// Fallback for unknown Context implementations: watch the parent's
	// Done channel from a dedicated goroutine until either side is done.
	go func() {
		select {
		case <-parent.Done():
			child.cancel(false, parent.Err(), Cause(parent))
		case <-child.Done():
		}
	}()
}
// stringer mirrors fmt.Stringer locally so this package need not import fmt.
type stringer interface {
	String() string
}

// contextName names c for debug output: its own String() when available,
// otherwise the name of its dynamic type.
func contextName(c Context) string {
	s, ok := c.(stringer)
	if !ok {
		return reflectlite.TypeOf(c).String()
	}
	return s.String()
}

// String describes the cancel context by its parent chain.
func (c *cancelCtx) String() string {
	return contextName(c.Context) + ".WithCancel"
}
// cancel closes c.done, cancels each of c's children, and, if
// removeFromParent is true, removes c from its parent's children.
// cancel sets c.cause to cause if this is the first time c is canceled.
func (c *cancelCtx) cancel(removeFromParent bool, err, cause error) {
	if err == nil {
		panic("context: internal error: missing cancel error")
	}
	if cause == nil {
		cause = err
	}
	c.mu.Lock()
	if c.err != nil {
		c.mu.Unlock()
		return // already canceled
	}
	c.err = err
	c.cause = cause
	// Wake all waiters: close the lazily created channel, or store the
	// pre-closed sentinel if no one has called Done yet.
	d, _ := c.done.Load().(chan struct{})
	if d == nil {
		c.done.Store(closedchan)
	} else {
		close(d)
	}
	for child := range c.children {
		// NOTE: acquiring the child's lock while holding parent's lock.
		child.cancel(false, err, cause)
	}
	c.children = nil
	c.mu.Unlock()

	// Detach from the parent outside c.mu: removeChild may take the
	// parent's lock, and parent-before-child is the established order.
	if removeFromParent {
		removeChild(c.Context, c)
	}
}
// WithoutCancel returns a copy of parent that is not canceled when parent is canceled.
// The returned context returns no Deadline or Err, and its Done channel is nil.
// Calling [Cause] on the returned context returns nil.
func WithoutCancel(parent Context) Context {
	if parent == nil {
		panic("cannot create context from nil parent")
	}
	return withoutCancelCtx{parent}
}

// withoutCancelCtx keeps a parent only for its values, dropping the
// parent's deadline, done channel, and error.
type withoutCancelCtx struct {
	c Context
}

// Deadline always reports no deadline.
func (withoutCancelCtx) Deadline() (deadline time.Time, ok bool) {
	return
}

// Done always returns nil: this context can never be canceled.
func (withoutCancelCtx) Done() <-chan struct{} {
	return nil
}

// Err always returns nil.
func (withoutCancelCtx) Err() error {
	return nil
}

// Value resolves key against the chain; the withoutCancelCtx case in value
// intercepts the &cancelCtxKey sentinel so Cause reports nil.
func (c withoutCancelCtx) Value(key interface{}) interface{} {
	return value(c, key)
}

func (c withoutCancelCtx) String() string {
	return contextName(c.c) + ".WithoutCancel"
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// [Context.Done] channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this [Context] complete.
func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) {
	return WithDeadlineCause(parent, d, nil)
}

// WithDeadlineCause behaves like [WithDeadline] but also sets the cause of the
// returned Context when the deadline is exceeded. The returned [CancelFunc] does
// not set the cause.
func WithDeadlineCause(parent Context, d time.Time, cause error) (Context, CancelFunc) {
	if parent == nil {
		panic("cannot create context from nil parent")
	}
	if cur, ok := parent.Deadline(); ok && cur.Before(d) {
		// The current deadline is already sooner than the new one.
		return WithCancel(parent)
	}
	c := &timerCtx{
		deadline: d,
	}
	c.cancelCtx.propagateCancel(parent, c)
	dur := time.Until(d)
	if dur <= 0 {
		c.cancel(true, DeadlineExceeded, cause) // deadline has already passed
		return c, func() { c.cancel(false, Canceled, nil) }
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	// Arm the timer only if the context was not canceled while being set
	// up (propagateCancel can cancel it if the parent is already done).
	if c.err == nil {
		c.timer = time.AfterFunc(dur, func() {
			c.cancel(true, DeadlineExceeded, cause)
		})
	}
	return c, func() { c.cancel(true, Canceled, nil) }
}
// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
// implement Done and Err. It implements cancel by stopping its timer then
// delegating to cancelCtx.cancel.
type timerCtx struct {
	cancelCtx
	timer *time.Timer // Under cancelCtx.mu.

	deadline time.Time
}

// Deadline reports the fixed deadline this context was created with.
func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
	return c.deadline, true
}

func (c *timerCtx) String() string {
	return contextName(c.cancelCtx.Context) + ".WithDeadline(" +
		c.deadline.String() + " [" +
		time.Until(c.deadline).String() + "])"
}

// cancel cancels the embedded cancelCtx, then stops the timer (under the
// mutex) so it cannot fire after cancellation.
func (c *timerCtx) cancel(removeFromParent bool, err, cause error) {
	c.cancelCtx.cancel(false, err, cause)
	if removeFromParent {
		// Remove this timerCtx from its parent cancelCtx's children.
		removeChild(c.cancelCtx.Context, c)
	}
	c.mu.Lock()
	if c.timer != nil {
		c.timer.Stop()
		c.timer = nil
	}
	c.mu.Unlock()
}

// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this [Context] complete:
//
//	func slowOperationWithTimeout(ctx context.Context) (Result, error) {
//		ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
//		defer cancel() // releases resources if slowOperation completes before timeout elapses
//		return slowOperation(ctx)
//	}
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
	return WithDeadline(parent, time.Now().Add(timeout))
}

// WithTimeoutCause behaves like [WithTimeout] but also sets the cause of the
// returned Context when the timeout expires. The returned [CancelFunc] does
// not set the cause.
func WithTimeoutCause(parent Context, timeout time.Duration, cause error) (Context, CancelFunc) {
	return WithDeadlineCause(parent, time.Now().Add(timeout), cause)
}
// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
//
// The provided key must be comparable and should not be of type
// string or any other built-in type to avoid collisions between
// packages using context. Users of WithValue should define their own
// types for keys. To avoid allocating when assigning to an
// interface{}, context keys often have concrete type
// struct{}. Alternatively, exported context key variables' static
// type should be a pointer or interface.
func WithValue(parent Context, key, val interface{}) Context {
	switch {
	case parent == nil:
		panic("cannot create context from nil parent")
	case key == nil:
		panic("nil key")
	case !reflectlite.TypeOf(key).Comparable():
		panic("key is not comparable")
	}
	return &valueCtx{parent, key, val}
}

// A valueCtx carries a key-value pair. It implements Value for that key and
// delegates all other calls to the embedded Context.
type valueCtx struct {
	Context
	key, val interface{}
}
// stringify tries a bit to stringify v, without using fmt, since we don't
// want context depending on the unicode tables. This is only used by
// *valueCtx.String().
func stringify(v interface{}) string {
	if s, ok := v.(stringer); ok {
		return s.String()
	}
	if s, ok := v.(string); ok {
		return s
	}
	return "<not Stringer>"
}

// String renders the parent chain plus the stored key type and value.
func (c *valueCtx) String() string {
	return contextName(c.Context) + ".WithValue(type " +
		reflectlite.TypeOf(c.key).String() +
		", val " + stringify(c.val) + ")"
}

// Value returns the stored value for a matching key; any other key
// continues the lookup up the parent chain.
func (c *valueCtx) Value(key interface{}) interface{} {
	if key == c.key {
		return c.val
	}
	return value(c.Context, key)
}
// value walks up the context chain iteratively (avoiding recursion),
// handling each known implementation in place and only falling back to the
// interface's Value method for foreign types.
func value(c Context, key interface{}) interface{} {
	for {
		switch ctx := c.(type) {
		case *valueCtx:
			if key == ctx.key {
				return ctx.val
			}
			c = ctx.Context
		case *cancelCtx:
			if key == &cancelCtxKey {
				// Sentinel lookup: return the cancelCtx itself.
				return c
			}
			c = ctx.Context
		case withoutCancelCtx:
			if key == &cancelCtxKey {
				// This implements Cause(ctx) == nil
				// when ctx is created using WithoutCancel.
				return nil
			}
			c = ctx.c
		case *timerCtx:
			if key == &cancelCtxKey {
				return &ctx.cancelCtx
			}
			c = ctx.Context
		case backgroundCtx, todoCtx:
			// Root contexts carry no values.
			return nil
		default:
			return c.Value(key)
		}
	}
}

View file

@ -0,0 +1,52 @@
// Package ctxt implements context merging.
package ctxt
import (
"context"
"time"
)
// mergeContext combines two parents: the embedded Context derives from ctx1
// (and, via Merge, is canceled when either parent is), while ctx2 is kept
// to consult for values and deadlines.
type mergeContext struct {
	context.Context

	ctx2 context.Context
}

// Merge returns a context that is cancelled when at least one of the parents
// is cancelled. The returned context also returns the values of ctx1, or ctx2
// if nil.
func Merge(ctx1, ctx2 context.Context) (context.Context, context.CancelFunc) {
	ctx, cancel := WithCancelCause(ctx1)
	// Propagate ctx2's cancellation (with its cause) into the merged context.
	stop := AfterFunc(ctx2, func() {
		cancel(Cause(ctx2))
	})
	return &mergeContext{
		Context: ctx,
		ctx2:    ctx2,
	}, func() {
		// Unregister the ctx2 callback before cancelling.
		stop()
		cancel(context.Canceled)
	}
}

// Value returns ctx2's value if ctx's is nil.
func (ctx *mergeContext) Value(key interface{}) interface{} {
	if v := ctx.Context.Value(key); v != nil {
		return v
	}
	return ctx.ctx2.Value(key)
}

// Deadline returns the earlier deadline of the two parents of ctx.
func (ctx *mergeContext) Deadline() (time.Time, bool) {
	if d1, ok := ctx.Context.Deadline(); ok {
		if d2, ok := ctx.ctx2.Deadline(); ok {
			// Both parents have deadlines: pick the sooner one.
			if d1.Before(d2) {
				return d1, true
			}
			return d2, true
		}
		return d1, ok
	}
	return ctx.ctx2.Deadline()
}

View file

@ -10,11 +10,13 @@ import (
"net/http"
"strings"
"sync"
"github.com/gophercloud/gophercloud/internal/ctxt"
)
// DefaultUserAgent is the default User-Agent string set in the request header.
const (
DefaultUserAgent = "gophercloud/v1.8.0"
DefaultUserAgent = "gophercloud/v1.9.0"
DefaultMaxBackoffRetries = 60
)
@ -88,7 +90,9 @@ type ProviderClient struct {
// with the token and reauth func zeroed. Such client can be used to perform reauthorization.
Throwaway bool
// Context is the context passed to the HTTP request.
// Context is the context passed to the HTTP request. Values set on the
// per-call context, when available, override values set on this
// context.
Context context.Context
// Retry backoff func is called when rate limited.
@ -352,15 +356,20 @@ type requestState struct {
var applicationJSON = "application/json"
// Request performs an HTTP request using the ProviderClient's current HTTPClient. An authentication
// header will automatically be provided.
func (client *ProviderClient) Request(method, url string, options *RequestOpts) (*http.Response, error) {
return client.doRequest(method, url, options, &requestState{
// RequestWithContext performs an HTTP request using the ProviderClient's
// current HTTPClient. An authentication header will automatically be provided.
func (client *ProviderClient) RequestWithContext(ctx context.Context, method, url string, options *RequestOpts) (*http.Response, error) {
return client.doRequest(ctx, method, url, options, &requestState{
hasReauthenticated: false,
})
}
func (client *ProviderClient) doRequest(method, url string, options *RequestOpts, state *requestState) (*http.Response, error) {
// Request is a compatibility wrapper for Request.
func (client *ProviderClient) Request(method, url string, options *RequestOpts) (*http.Response, error) {
return client.RequestWithContext(context.Background(), method, url, options)
}
func (client *ProviderClient) doRequest(ctx context.Context, method, url string, options *RequestOpts, state *requestState) (*http.Response, error) {
var body io.Reader
var contentType *string
@ -389,14 +398,16 @@ func (client *ProviderClient) doRequest(method, url string, options *RequestOpts
body = options.RawBody
}
// Construct the http.Request.
req, err := http.NewRequest(method, url, body)
if client.Context != nil {
var cancel context.CancelFunc
ctx, cancel = ctxt.Merge(ctx, client.Context)
defer cancel()
}
req, err := http.NewRequestWithContext(ctx, method, url, body)
if err != nil {
return nil, err
}
if client.Context != nil {
req = req.WithContext(client.Context)
}
// Populate the request headers.
// Apply options.MoreHeaders and options.OmitHeaders, to give the caller the chance to
@ -432,12 +443,12 @@ func (client *ProviderClient) doRequest(method, url string, options *RequestOpts
if client.RetryFunc != nil {
var e error
state.retries = state.retries + 1
e = client.RetryFunc(client.Context, method, url, options, err, state.retries)
e = client.RetryFunc(ctx, method, url, options, err, state.retries)
if e != nil {
return nil, e
}
return client.doRequest(method, url, options, state)
return client.doRequest(ctx, method, url, options, state)
}
return nil, err
}
@ -491,7 +502,7 @@ func (client *ProviderClient) doRequest(method, url string, options *RequestOpts
}
}
state.hasReauthenticated = true
resp, err = client.doRequest(method, url, options, state)
resp, err = client.doRequest(ctx, method, url, options, state)
if err != nil {
switch err.(type) {
case *ErrUnexpectedResponseCode:
@ -556,7 +567,7 @@ func (client *ProviderClient) doRequest(method, url string, options *RequestOpts
return resp, e
}
return client.doRequest(method, url, options, state)
return client.doRequest(ctx, method, url, options, state)
}
case http.StatusInternalServerError:
err = ErrDefault500{respErr}
@ -592,7 +603,7 @@ func (client *ProviderClient) doRequest(method, url string, options *RequestOpts
return resp, e
}
return client.doRequest(method, url, options, state)
return client.doRequest(ctx, method, url, options, state)
}
return resp, err
@ -616,7 +627,7 @@ func (client *ProviderClient) doRequest(method, url string, options *RequestOpts
return resp, e
}
return client.doRequest(method, url, options, state)
return client.doRequest(ctx, method, url, options, state)
}
return nil, err
}

View file

@ -1,6 +1,7 @@
package gophercloud
import (
"context"
"io"
"net/http"
"strings"
@ -59,58 +60,88 @@ func (client *ServiceClient) initReqOpts(JSONBody interface{}, JSONResponse inte
}
}
// Get calls `Request` with the "GET" HTTP verb.
func (client *ServiceClient) Get(url string, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) {
// GetWithContext calls `Request` with the "GET" HTTP verb.
func (client *ServiceClient) GetWithContext(ctx context.Context, url string, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) {
if opts == nil {
opts = new(RequestOpts)
}
client.initReqOpts(nil, JSONResponse, opts)
return client.Request("GET", url, opts)
return client.RequestWithContext(ctx, "GET", url, opts)
}
// Post calls `Request` with the "POST" HTTP verb.
// Get is a compatibility wrapper for GetWithContext.
func (client *ServiceClient) Get(url string, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) {
return client.GetWithContext(context.Background(), url, JSONResponse, opts)
}
// PostWithContext calls `Request` with the "POST" HTTP verb.
func (client *ServiceClient) PostWithContext(ctx context.Context, url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) {
if opts == nil {
opts = new(RequestOpts)
}
client.initReqOpts(JSONBody, JSONResponse, opts)
return client.RequestWithContext(ctx, "POST", url, opts)
}
// Post is a compatibility wrapper for PostWithContext.
func (client *ServiceClient) Post(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) {
return client.PostWithContext(context.Background(), url, JSONBody, JSONResponse, opts)
}
// PutWithContext calls `Request` with the "PUT" HTTP verb.
func (client *ServiceClient) PutWithContext(ctx context.Context, url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) {
if opts == nil {
opts = new(RequestOpts)
}
client.initReqOpts(JSONBody, JSONResponse, opts)
return client.Request("POST", url, opts)
return client.RequestWithContext(ctx, "PUT", url, opts)
}
// Put calls `Request` with the "PUT" HTTP verb.
// Put is a compatibility wrapper for PutWithContext.
func (client *ServiceClient) Put(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) {
return client.PutWithContext(context.Background(), url, JSONBody, JSONResponse, opts)
}
// PatchWithContext calls `Request` with the "PATCH" HTTP verb.
func (client *ServiceClient) PatchWithContext(ctx context.Context, url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) {
if opts == nil {
opts = new(RequestOpts)
}
client.initReqOpts(JSONBody, JSONResponse, opts)
return client.Request("PUT", url, opts)
return client.RequestWithContext(ctx, "PATCH", url, opts)
}
// Patch calls `Request` with the "PATCH" HTTP verb.
// Patch is a compatibility wrapper for PatchWithContext.
func (client *ServiceClient) Patch(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) {
return client.PatchWithContext(context.Background(), url, JSONBody, JSONResponse, opts)
}
// DeleteWithContext calls `Request` with the "DELETE" HTTP verb.
func (client *ServiceClient) DeleteWithContext(ctx context.Context, url string, opts *RequestOpts) (*http.Response, error) {
if opts == nil {
opts = new(RequestOpts)
}
client.initReqOpts(JSONBody, JSONResponse, opts)
return client.Request("PATCH", url, opts)
client.initReqOpts(nil, nil, opts)
return client.RequestWithContext(ctx, "DELETE", url, opts)
}
// Delete calls `Request` with the "DELETE" HTTP verb.
// Delete is a compatibility wrapper for DeleteWithContext.
func (client *ServiceClient) Delete(url string, opts *RequestOpts) (*http.Response, error) {
if opts == nil {
opts = new(RequestOpts)
}
client.initReqOpts(nil, nil, opts)
return client.Request("DELETE", url, opts)
return client.DeleteWithContext(context.Background(), url, opts)
}
// Head calls `Request` with the "HEAD" HTTP verb.
func (client *ServiceClient) Head(url string, opts *RequestOpts) (*http.Response, error) {
// HeadWithContext calls `Request` with the "HEAD" HTTP verb.
func (client *ServiceClient) HeadWithContext(ctx context.Context, url string, opts *RequestOpts) (*http.Response, error) {
if opts == nil {
opts = new(RequestOpts)
}
client.initReqOpts(nil, nil, opts)
return client.Request("HEAD", url, opts)
return client.RequestWithContext(ctx, "HEAD", url, opts)
}
// Head is a compatibility wrapper for HeadWithContext.
func (client *ServiceClient) Head(url string, opts *RequestOpts) (*http.Response, error) {
return client.HeadWithContext(context.Background(), url, opts)
}
func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) {
@ -133,7 +164,7 @@ func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) {
}
// Request carries out the HTTP operation for the service client
func (client *ServiceClient) Request(method, url string, options *RequestOpts) (*http.Response, error) {
func (client *ServiceClient) RequestWithContext(ctx context.Context, method, url string, options *RequestOpts) (*http.Response, error) {
if options.MoreHeaders == nil {
options.MoreHeaders = make(map[string]string)
}
@ -151,7 +182,12 @@ func (client *ServiceClient) Request(method, url string, options *RequestOpts) (
options.MoreHeaders[k] = v
}
}
return client.ProviderClient.Request(method, url, options)
return client.ProviderClient.RequestWithContext(ctx, method, url, options)
}
// Request is a compatibility wrapper for RequestWithContext.
func (client *ServiceClient) Request(method, url string, options *RequestOpts) (*http.Response, error) {
return client.RequestWithContext(context.Background(), method, url, options)
}
// ParseResponse is a helper function to parse http.Response to constituents.

View file

@ -26,6 +26,7 @@ type Customizations struct {
Repositories []RepositoryCustomization `json:"repositories,omitempty" toml:"repositories,omitempty"`
FIPS *bool `json:"fips,omitempty" toml:"fips,omitempty"`
ContainersStorage *ContainerStorageCustomization `json:"containers-storage,omitempty" toml:"containers-storage,omitempty"`
Installer *InstallerCustomization `json:"installer,omitempty" toml:"installer,omitempty"`
}
type IgnitionCustomization struct {
@ -383,3 +384,10 @@ func (c *Customizations) GetContainerStorage() *ContainerStorageCustomization {
}
return c.ContainersStorage
}
// GetInstaller returns the Installer customization section, or nil when
// either the Customizations object or its Installer field is unset.
func (c *Customizations) GetInstaller() *InstallerCustomization {
	if c != nil && c.Installer != nil {
		return c.Installer
	}
	return nil
}

View file

@ -0,0 +1,6 @@
package blueprint
// InstallerCustomization holds blueprint customizations that apply only to
// installer (Anaconda-based) image types.
type InstallerCustomization struct {
	// Unattended requests a fully unattended, non-interactive installation.
	Unattended bool `json:"unattended,omitempty" toml:"unattended,omitempty"`
	// WheelSudoNopasswd — presumably lets members of the wheel group run
	// sudo without a password; NOTE(review): inferred from the name, confirm
	// against the kickstart generation code.
	WheelSudoNopasswd bool `json:"wheel-sudo-nopasswd,omitempty" toml:"wheel-sudo-nopasswd,omitempty"`
}

View file

@ -31,8 +31,8 @@ type SourceSpec struct {
StoragePath *string
}
func NewResolver(arch string) Resolver {
return Resolver{
func NewResolver(arch string) *Resolver {
return &Resolver{
ctx: context.Background(),
queue: make(chan resolveResult, 2),
Arch: arch,

View file

@ -750,3 +750,20 @@ func (pt *PartitionTable) GetBuildPackages() []string {
return packages
}
// GetMountpointSize takes a mountpoint and returns the size of the entity
// this mountpoint belongs to. It returns an error if the mountpoint does not
// exist in the partition table, and panics if the entity path contains no
// Sizeable entity (a programming error).
func (pt *PartitionTable) GetMountpointSize(mountpoint string) (uint64, error) {
	entities := entityPath(pt, mountpoint)
	if entities == nil {
		return 0, fmt.Errorf("cannot find mountpoint %s", mountpoint)
	}
	for _, e := range entities {
		sz, ok := e.(Sizeable)
		if !ok {
			continue
		}
		return sz.GetSize(), nil
	}
	panic(fmt.Sprintf("no sizeable of the entity path for mountpoint %s, this is a programming error", mountpoint))
}

View file

@ -344,8 +344,15 @@ func imageInstallerImage(workload workload.Workload,
containers []container.SourceSpec,
rng *rand.Rand) (image.ImageKind, error) {
customizations := bp.Customizations
img := image.NewAnacondaTarInstaller()
if instCust := customizations.GetInstaller(); instCust != nil {
img.WheelNoPasswd = instCust.WheelSudoNopasswd
img.UnattendedKickstart = instCust.Unattended
}
// Enable anaconda-webui for Fedora >= 38
distro := t.Arch().Distro()
if !common.VersionLessThan(distro.Releasever(), "38") {
@ -354,11 +361,20 @@ func imageInstallerImage(workload workload.Workload,
"org.fedoraproject.Anaconda.Modules.Timezone",
"org.fedoraproject.Anaconda.Modules.Localization",
}
img.AdditionalKernelOpts = []string{"inst.webui", "inst.webui.remote"}
if img.UnattendedKickstart {
// NOTE: this is not supported right now because the
// image-installer on Fedora isn't working when unattended.
// These options are probably necessary but could change.
// Unattended/non-interactive installations are better set to text
// mode since they might be running headless and a UI is
// unnecessary.
img.AdditionalKernelOpts = []string{"inst.text", "inst.noninteractive"}
} else {
img.AdditionalKernelOpts = []string{"inst.webui", "inst.webui.remote"}
}
}
img.AdditionalAnacondaModules = append(img.AdditionalAnacondaModules, "org.fedoraproject.Anaconda.Modules.Users")
customizations := bp.Customizations
img.Platform = t.platform
img.Workload = workload
img.OSCustomizations = osCustomizations(t, packageSets[osPkgsKey], containers, customizations)
@ -513,12 +529,23 @@ func iotInstallerImage(workload workload.Workload,
img.ExtraBasePackages = packageSets[installerPkgsKey]
img.Users = users.UsersFromBP(customizations.GetUsers())
img.Groups = users.GroupsFromBP(customizations.GetGroups())
img.Language, img.Keyboard = customizations.GetPrimaryLocale()
// ignore ntp servers - we don't currently support setting these in the
// kickstart though kickstart does support setting them
img.Timezone, _ = customizations.GetTimezoneSettings()
img.AdditionalAnacondaModules = []string{
"org.fedoraproject.Anaconda.Modules.Timezone",
"org.fedoraproject.Anaconda.Modules.Localization",
"org.fedoraproject.Anaconda.Modules.Users",
}
if instCust := customizations.GetInstaller(); instCust != nil {
img.WheelNoPasswd = instCust.WheelSudoNopasswd
img.UnattendedKickstart = instCust.Unattended
}
img.SquashfsCompression = "lz4"
img.ISOLabelTempl = d.isolabelTmpl

View file

@ -333,12 +333,13 @@ func (t *imageType) checkOptions(bp *blueprint.Blueprint, options distro.ImageOp
}
}
} else if t.name == "iot-installer" || t.name == "image-installer" {
allowed := []string{"User", "Group", "FIPS"}
// "Installer" is actually not allowed for image-installer right now, but this is checked at the end
allowed := []string{"User", "Group", "FIPS", "Installer", "Timezone", "Locale"}
if err := customizations.CheckAllowed(allowed...); err != nil {
return nil, fmt.Errorf(distro.UnsupportedCustomizationError, t.name, strings.Join(allowed, ", "))
}
} else if t.name == "live-installer" {
allowed := []string{}
allowed := []string{"Installer"}
if err := customizations.CheckAllowed(allowed...); err != nil {
return nil, fmt.Errorf(distro.NoCustomizationsAllowedError, t.name)
}
@ -403,5 +404,12 @@ func (t *imageType) checkOptions(bp *blueprint.Blueprint, options distro.ImageOp
return []string{w}, nil
}
if customizations.GetInstaller() != nil {
// only supported by the Anaconda installer
if slices.Index([]string{"iot-installer"}, t.name) == -1 {
return nil, fmt.Errorf("installer customizations are not supported for %q", t.name)
}
}
return nil, nil
}

View file

@ -203,8 +203,10 @@ func bootableContainerPackageSet(t *imageType) rpmmd.PackageSet {
"crun",
"cryptsetup",
"dnf",
"dosfstools",
"e2fsprogs",
"fwupd", // if you're using linux-firmware, you probably also want fwupd
"fwupd", // if you're using linux-firmware, you probably also want fwupd
"gdisk",
"iproute", "iproute-tc", // route manipulation and QoS
"iptables", "nftables", // firewall manipulation
"iptables-services", // additional firewall support

View file

@ -16,7 +16,7 @@ func amiImgTypeX86_64(rd distribution) imageType {
packageSets: map[string]packageSetFunc{
osPkgsKey: ec2CommonPackageSet,
},
defaultImageConfig: defaultAMIImageConfigX86_64(rd),
defaultImageConfig: defaultAMIImageConfigX86_64(),
kernelOptions: "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 crashkernel=auto",
bootable: true,
defaultSize: 10 * common.GibiByte,
@ -24,19 +24,13 @@ func amiImgTypeX86_64(rd distribution) imageType {
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image"},
exports: []string{"image"},
basePartitionTables: ec2BasePartitionTables,
basePartitionTables: getEc2PartitionTables(rd.osVersion, rd.isRHEL()),
}
return it
}
func ec2ImgTypeX86_64(rd distribution) imageType {
basePartitionTables := ec2BasePartitionTables
// use legacy partition tables for RHEL 8.8 and older
if common.VersionLessThan(rd.osVersion, "8.9") {
basePartitionTables = ec2LegacyBasePartitionTables
}
it := imageType{
name: "ec2",
filename: "image.raw.xz",
@ -53,18 +47,12 @@ func ec2ImgTypeX86_64(rd distribution) imageType {
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},
basePartitionTables: basePartitionTables,
basePartitionTables: getEc2PartitionTables(rd.osVersion, rd.isRHEL()),
}
return it
}
func ec2HaImgTypeX86_64(rd distribution) imageType {
basePartitionTables := ec2BasePartitionTables
// use legacy partition tables for RHEL 8.8 and older
if rd.isRHEL() && common.VersionLessThan(rd.osVersion, "8.9") {
basePartitionTables = ec2LegacyBasePartitionTables
}
it := imageType{
name: "ec2-ha",
filename: "image.raw.xz",
@ -81,7 +69,7 @@ func ec2HaImgTypeX86_64(rd distribution) imageType {
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},
basePartitionTables: basePartitionTables,
basePartitionTables: getEc2PartitionTables(rd.osVersion, rd.isRHEL()),
}
return it
}
@ -94,7 +82,7 @@ func amiImgTypeAarch64(rd distribution) imageType {
packageSets: map[string]packageSetFunc{
osPkgsKey: ec2CommonPackageSet,
},
defaultImageConfig: defaultAMIImageConfig(rd),
defaultImageConfig: defaultAMIImageConfig(),
kernelOptions: "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 iommu.strict=0 crashkernel=auto",
bootable: true,
defaultSize: 10 * common.GibiByte,
@ -102,18 +90,12 @@ func amiImgTypeAarch64(rd distribution) imageType {
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image"},
exports: []string{"image"},
basePartitionTables: ec2BasePartitionTables,
basePartitionTables: getEc2PartitionTables(rd.osVersion, rd.isRHEL()),
}
return it
}
func ec2ImgTypeAarch64(rd distribution) imageType {
basePartitionTables := ec2BasePartitionTables
// use legacy partition tables for RHEL 8.8 and older
if common.VersionLessThan(rd.osVersion, "8.9") {
basePartitionTables = ec2LegacyBasePartitionTables
}
it := imageType{
name: "ec2",
filename: "image.raw.xz",
@ -130,18 +112,12 @@ func ec2ImgTypeAarch64(rd distribution) imageType {
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},
basePartitionTables: basePartitionTables,
basePartitionTables: getEc2PartitionTables(rd.osVersion, rd.isRHEL()),
}
return it
}
func ec2SapImgTypeX86_64(rd distribution) imageType {
basePartitionTables := ec2BasePartitionTables
// use legacy partition tables for RHEL 8.8 and older
if common.VersionLessThan(rd.osVersion, "8.9") {
basePartitionTables = ec2LegacyBasePartitionTables
}
it := imageType{
name: "ec2-sap",
filename: "image.raw.xz",
@ -158,7 +134,7 @@ func ec2SapImgTypeX86_64(rd distribution) imageType {
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},
basePartitionTables: basePartitionTables,
basePartitionTables: getEc2PartitionTables(rd.osVersion, rd.isRHEL()),
}
return it
}
@ -227,7 +203,6 @@ func baseEc2ImageConfig() *distro.ImageConfig {
{
Filename: "00-getty-fixes.conf",
Config: osbuild.SystemdLogindConfigDropin{
Login: osbuild.SystemdLogindConfigLoginSection{
NAutoVTs: common.ToPtr(0),
},
@ -307,24 +282,23 @@ func defaultEc2ImageConfig(rd distribution) *distro.ImageConfig {
return ic
}
// default AMI (EC2 BYOS) images config
func defaultAMIImageConfig(rd distribution) *distro.ImageConfig {
ic := defaultEc2ImageConfig(rd)
if rd.isRHEL() {
// defaultEc2ImageConfig() adds the rhsm options only for RHEL < 8.7
// Add it unconditionally for AMI
ic = appendRHSM(ic)
}
return ic
}
func defaultEc2ImageConfigX86_64(rd distribution) *distro.ImageConfig {
ic := defaultEc2ImageConfig(rd)
return appendEC2DracutX86_64(ic)
}
func defaultAMIImageConfigX86_64(rd distribution) *distro.ImageConfig {
ic := defaultAMIImageConfig(rd).InheritFrom(defaultEc2ImageConfigX86_64(rd))
// Default AMI (custom image built by users) images config.
// The configuration does not touch the RHSM configuration at all.
// https://issues.redhat.com/browse/COMPOSER-2157
func defaultAMIImageConfig() *distro.ImageConfig {
return baseEc2ImageConfig()
}
// Default AMI x86_64 (custom image built by users) images config.
// The configuration does not touch the RHSM configuration at all.
// https://issues.redhat.com/browse/COMPOSER-2157
func defaultAMIImageConfigX86_64() *distro.ImageConfig {
ic := defaultAMIImageConfig()
return appendEC2DracutX86_64(ic)
}

View file

@ -11,7 +11,8 @@ import (
"github.com/osbuild/images/pkg/subscription"
)
const defaultAzureKernelOptions = "ro crashkernel=auto console=tty1 console=ttyS0 earlyprintk=ttyS0 rootdelay=300"
// use loglevel=3 as described in the RHEL documentation and used in existing RHEL images built by MSFT
const defaultAzureKernelOptions = "ro loglevel=3 crashkernel=auto console=tty1 console=ttyS0 earlyprintk=ttyS0 rootdelay=300"
func azureRhuiImgType() imageType {
return imageType{
@ -473,6 +474,7 @@ var azureRhuiBasePartitionTables = distro.BasePartitionTableMap{
},
}
// based on https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/deploying_rhel_8_on_microsoft_azure/assembly_deploying-a-rhel-image-as-a-virtual-machine-on-microsoft-azure_cloud-content-azure#making-configuration-changes_configure-the-image-azure
var defaultAzureImageConfig = &distro.ImageConfig{
Timezone: common.ToPtr("Etc/UTC"),
Locale: common.ToPtr("en_US.UTF-8"),
@ -584,10 +586,13 @@ var defaultAzureImageConfig = &distro.ImageConfig{
},
},
Grub2Config: &osbuild.GRUB2Config{
TerminalInput: []string{"serial", "console"},
TerminalOutput: []string{"serial", "console"},
Serial: "serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1",
Timeout: 10,
DisableRecovery: common.ToPtr(true),
DisableSubmenu: common.ToPtr(true),
Distributor: "$(sed 's, release .*$,,g' /etc/system-release)",
Terminal: []string{"serial", "console"},
Serial: "serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1",
Timeout: 10,
TimeoutStyle: osbuild.GRUB2ConfigTimeoutStyleCountdown,
},
UdevRules: &osbuild.UdevRulesStageOptions{
Filename: "/etc/udev/rules.d/68-azure-sriov-nm-unmanaged.rules",
@ -624,35 +629,12 @@ var defaultAzureImageConfig = &distro.ImageConfig{
}
// Diff of the default Image Config compare to the `defaultAzureImageConfig`
// The configuration for non-RHUI images does not touch the RHSM configuration at all.
// https://issues.redhat.com/browse/COMPOSER-2157
var defaultAzureByosImageConfig = &distro.ImageConfig{
GPGKeyFiles: []string{
"/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release",
},
RHSMConfig: map[subscription.RHSMStatus]*osbuild.RHSMStageOptions{
subscription.RHSMConfigNoSubscription: {
SubMan: &osbuild.RHSMStageOptionsSubMan{
Rhsmcertd: &osbuild.SubManConfigRHSMCERTDSection{
AutoRegistration: common.ToPtr(true),
},
// Don't disable RHSM redhat.repo management on the GCE
// image, which is BYOS and does not use RHUI for content.
// Otherwise subscribing the system manually after booting
// it would result in empty redhat.repo. Without RHUI, such
// system would have no way to get Red Hat content, but
// enable the repo management manually, which would be very
// confusing.
},
},
subscription.RHSMConfigWithSubscription: {
SubMan: &osbuild.RHSMStageOptionsSubMan{
Rhsmcertd: &osbuild.SubManConfigRHSMCERTDSection{
AutoRegistration: common.ToPtr(true),
},
// do not disable the redhat.repo management if the user
// explicitly request the system to be subscribed
},
},
},
}
// Diff of the default Image Config compare to the `defaultAzureImageConfig`

View file

@ -52,6 +52,8 @@ func gceRhuiImgType(rd distribution) imageType {
}
}
// The configuration for non-RHUI images does not touch the RHSM configuration at all.
// https://issues.redhat.com/browse/COMPOSER-2157
func defaultGceByosImageConfig(rd distribution) *distro.ImageConfig {
ic := &distro.ImageConfig{
Timezone: common.ToPtr("UTC"),
@ -156,33 +158,6 @@ func defaultGceByosImageConfig(rd distribution) *distro.ImageConfig {
)
}
if rd.isRHEL() {
ic.RHSMConfig = map[subscription.RHSMStatus]*osbuild.RHSMStageOptions{
subscription.RHSMConfigNoSubscription: {
SubMan: &osbuild.RHSMStageOptionsSubMan{
Rhsmcertd: &osbuild.SubManConfigRHSMCERTDSection{
AutoRegistration: common.ToPtr(true),
},
// Don't disable RHSM redhat.repo management on the GCE
// image, which is BYOS and does not use RHUI for content.
// Otherwise subscribing the system manually after booting
// it would result in empty redhat.repo. Without RHUI, such
// system would have no way to get Red Hat content, but
// enable the repo management manually, which would be very
// confusing.
},
},
subscription.RHSMConfigWithSubscription: {
SubMan: &osbuild.RHSMStageOptionsSubMan{
Rhsmcertd: &osbuild.SubManConfigRHSMCERTDSection{
AutoRegistration: common.ToPtr(true),
},
// do not disable the redhat.repo management if the user
// explicitly request the system to be subscribed
},
},
}
}
return ic
}

View file

@ -331,6 +331,11 @@ func imageInstallerImage(workload workload.Workload,
img.AdditionalDracutModules = []string{"prefixdevname", "prefixdevname-tools"}
img.AdditionalAnacondaModules = []string{"org.fedoraproject.Anaconda.Modules.Users"}
if instCust := customizations.GetInstaller(); instCust != nil {
img.WheelNoPasswd = instCust.WheelSudoNopasswd
img.UnattendedKickstart = instCust.Unattended
}
img.SquashfsCompression = "xz"
// put the kickstart file in the root of the iso
@ -437,6 +442,16 @@ func edgeInstallerImage(workload workload.Workload,
img.Users = users.UsersFromBP(customizations.GetUsers())
img.Groups = users.GroupsFromBP(customizations.GetGroups())
img.Language, img.Keyboard = customizations.GetPrimaryLocale()
// ignore ntp servers - we don't currently support setting these in the
// kickstart though kickstart does support setting them
img.Timezone, _ = customizations.GetTimezoneSettings()
if instCust := customizations.GetInstaller(); instCust != nil {
img.WheelNoPasswd = instCust.WheelSudoNopasswd
img.UnattendedKickstart = instCust.Unattended
}
img.SquashfsCompression = "xz"
img.AdditionalDracutModules = []string{"prefixdevname", "prefixdevname-tools"}

View file

@ -344,7 +344,7 @@ func (t *imageType) checkOptions(bp *blueprint.Blueprint, options distro.ImageOp
}
}
} else if t.name == "edge-installer" {
allowed := []string{"User", "Group", "FIPS"}
allowed := []string{"User", "Group", "FIPS", "Installer", "Timezone", "Locale"}
if err := customizations.CheckAllowed(allowed...); err != nil {
return warnings, fmt.Errorf(distro.UnsupportedCustomizationError, t.name, strings.Join(allowed, ", "))
}
@ -439,5 +439,12 @@ func (t *imageType) checkOptions(bp *blueprint.Blueprint, options distro.ImageOp
warnings = append(warnings, w)
}
if customizations.GetInstaller() != nil {
// only supported by the Anaconda installer
if slices.Index([]string{"image-installer", "edge-installer", "live-installer"}, t.name) == -1 {
return warnings, fmt.Errorf("installer customizations are not supported for %q", t.name)
}
}
return warnings, nil
}

View file

@ -117,182 +117,6 @@ var defaultBasePartitionTables = distro.BasePartitionTableMap{
},
},
}
var ec2BasePartitionTables = distro.BasePartitionTableMap{
arch.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Partitions: []disk.Partition{
{
Size: 1 * common.MebiByte,
Bootable: true,
Type: disk.BIOSBootPartitionGUID,
UUID: disk.BIOSBootPartitionUUID,
},
{
Size: 200 * common.MebiByte,
Type: disk.EFISystemPartitionGUID,
UUID: disk.EFISystemPartitionUUID,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 500 * common.MebiByte,
Type: disk.XBootLDRPartitionGUID,
UUID: disk.FilesystemDataUUID,
Payload: &disk.Filesystem{
Type: "xfs",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * common.GibiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.Filesystem{
Type: "xfs",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
arch.ARCH_AARCH64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Partitions: []disk.Partition{
{
Size: 200 * common.MebiByte,
Type: disk.EFISystemPartitionGUID,
UUID: disk.EFISystemPartitionUUID,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 500 * common.MebiByte,
Type: disk.XBootLDRPartitionGUID,
UUID: disk.FilesystemDataUUID,
Payload: &disk.Filesystem{
Type: "xfs",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * common.GibiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.Filesystem{
Type: "xfs",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
}
// ec2LegacyBasePartitionTables is the partition table layout for RHEL EC2
// images prior to 8.9. It is used for backwards compatibility.
var ec2LegacyBasePartitionTables = distro.BasePartitionTableMap{
arch.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Partitions: []disk.Partition{
{
Size: 1 * common.MebiByte,
Bootable: true,
Type: disk.BIOSBootPartitionGUID,
UUID: disk.BIOSBootPartitionUUID,
},
{
Size: 2 * common.GibiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.Filesystem{
Type: "xfs",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
arch.ARCH_AARCH64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Partitions: []disk.Partition{
{
Size: 200 * common.MebiByte,
Type: disk.EFISystemPartitionGUID,
UUID: disk.EFISystemPartitionUUID,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 512 * common.MebiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.FilesystemDataUUID,
Payload: &disk.Filesystem{
Type: "xfs",
Mountpoint: "/boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * common.GibiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.Filesystem{
Type: "xfs",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
}
var edgeBasePartitionTables = distro.BasePartitionTableMap{
arch.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
@ -423,3 +247,100 @@ var edgeBasePartitionTables = distro.BasePartitionTableMap{
},
},
}
// getEc2PartitionTables returns the base partition table map, keyed by
// architecture string, used by the EC2/AMI image types.
//
// The x86_64 layout has no separate /boot partition; the aarch64 layout
// does, and its /boot size depends on the distribution (see below).
func getEc2PartitionTables(osVersion string, isRHEL bool) distro.BasePartitionTableMap {
	// x86_64 - without /boot
	// aarch - <= 8.9 - 512MiB, 8.10 and centos: 1 GiB
	var aarch64BootSize uint64
	switch {
	case common.VersionLessThan(osVersion, "8.10") && isRHEL:
		aarch64BootSize = 512 * common.MebiByte
	default:
		aarch64BootSize = 1 * common.GibiByte
	}
	return distro.BasePartitionTableMap{
		arch.ARCH_X86_64.String(): disk.PartitionTable{
			UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
			Type: "gpt",
			Partitions: []disk.Partition{
				{
					// 1 MiB BIOS boot partition, no filesystem payload
					Size: 1 * common.MebiByte,
					Bootable: true,
					Type: disk.BIOSBootPartitionGUID,
					UUID: disk.BIOSBootPartitionUUID,
				},
				{
					// EFI system partition mounted at /boot/efi
					Size: 200 * common.MebiByte,
					Type: disk.EFISystemPartitionGUID,
					UUID: disk.EFISystemPartitionUUID,
					Payload: &disk.Filesystem{
						Type: "vfat",
						UUID: disk.EFIFilesystemUUID,
						Mountpoint: "/boot/efi",
						FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
						FSTabFreq: 0,
						FSTabPassNo: 2,
					},
				},
				{
					// root filesystem (xfs)
					Size: 2 * common.GibiByte,
					Type: disk.FilesystemDataGUID,
					UUID: disk.RootPartitionUUID,
					Payload: &disk.Filesystem{
						Type: "xfs",
						Label: "root",
						Mountpoint: "/",
						FSTabOptions: "defaults",
						FSTabFreq: 0,
						FSTabPassNo: 0,
					},
				},
			},
		},
		arch.ARCH_AARCH64.String(): disk.PartitionTable{
			UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
			Type: "gpt",
			Partitions: []disk.Partition{
				{
					// EFI system partition mounted at /boot/efi
					Size: 200 * common.MebiByte,
					Type: disk.EFISystemPartitionGUID,
					UUID: disk.EFISystemPartitionUUID,
					Payload: &disk.Filesystem{
						Type: "vfat",
						UUID: disk.EFIFilesystemUUID,
						Mountpoint: "/boot/efi",
						FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
						FSTabFreq: 0,
						FSTabPassNo: 2,
					},
				},
				{
					// /boot; size is distro-dependent (see aarch64BootSize above)
					Size: aarch64BootSize,
					Type: disk.FilesystemDataGUID,
					UUID: disk.FilesystemDataUUID,
					Payload: &disk.Filesystem{
						Type: "xfs",
						Mountpoint: "/boot",
						FSTabOptions: "defaults",
						FSTabFreq: 0,
						FSTabPassNo: 0,
					},
				},
				{
					// root filesystem (xfs)
					Size: 2 * common.GibiByte,
					Type: disk.FilesystemDataGUID,
					UUID: disk.RootPartitionUUID,
					Payload: &disk.Filesystem{
						Type: "xfs",
						Label: "root",
						Mountpoint: "/",
						FSTabOptions: "defaults",
						FSTabFreq: 0,
						FSTabPassNo: 0,
					},
				},
			},
		},
	}
}

View file

@ -189,7 +189,6 @@ func baseEc2ImageConfig() *distro.ImageConfig {
{
Filename: "00-getty-fixes.conf",
Config: osbuild.SystemdLogindConfigDropin{
Login: osbuild.SystemdLogindConfigLoginSection{
NAutoVTs: common.ToPtr(0),
},
@ -266,24 +265,23 @@ func defaultEc2ImageConfig(osVersion string, rhsm bool) *distro.ImageConfig {
return ic
}
// default AMI (EC2 BYOS) images config
func defaultAMIImageConfig(osVersion string, rhsm bool) *distro.ImageConfig {
ic := defaultEc2ImageConfig(osVersion, rhsm)
if rhsm {
// defaultEc2ImageConfig() adds the rhsm options only for RHEL < 9.1
// Add it unconditionally for AMI
ic = appendRHSM(ic)
}
return ic
}
func defaultEc2ImageConfigX86_64(osVersion string, rhsm bool) *distro.ImageConfig {
ic := defaultEc2ImageConfig(osVersion, rhsm)
return appendEC2DracutX86_64(ic)
}
func defaultAMIImageConfigX86_64(osVersion string, rhsm bool) *distro.ImageConfig {
ic := defaultAMIImageConfig(osVersion, rhsm).InheritFrom(defaultEc2ImageConfigX86_64(osVersion, rhsm))
// Default AMI (custom image built by users) images config.
// The configuration does not touch the RHSM configuration at all.
// https://issues.redhat.com/browse/COMPOSER-2157
func defaultAMIImageConfig() *distro.ImageConfig {
return baseEc2ImageConfig()
}
// Default AMI x86_64 (custom image built by users) images config: the
// default AMI config with the x86_64-specific dracut configuration appended.
// The configuration does not touch the RHSM configuration at all.
// https://issues.redhat.com/browse/COMPOSER-2157
func defaultAMIImageConfigX86_64() *distro.ImageConfig {
	return appendEC2DracutX86_64(defaultAMIImageConfig())
}
@ -418,9 +416,9 @@ func mkEc2ImgTypeX86_64(osVersion string, rhsm bool) imageType {
return it
}
func mkAMIImgTypeX86_64(osVersion string, rhsm bool) imageType {
func mkAMIImgTypeX86_64() imageType {
it := amiImgTypeX86_64
ic := defaultAMIImageConfigX86_64(osVersion, rhsm)
ic := defaultAMIImageConfigX86_64()
it.defaultImageConfig = ic
return it
}
@ -438,9 +436,9 @@ func mkEc2HaImgTypeX86_64(osVersion string, rhsm bool) imageType {
return it
}
func mkAMIImgTypeAarch64(osVersion string, rhsm bool) imageType {
func mkAMIImgTypeAarch64() imageType {
it := amiImgTypeAarch64
ic := defaultAMIImageConfig(osVersion, rhsm)
ic := defaultAMIImageConfig()
it.defaultImageConfig = ic
return it
}

View file

@ -414,8 +414,10 @@ func azureRhuiBasePartitionTables(t *imageType) (disk.PartitionTable, bool) {
}
}
var defaultAzureKernelOptions = "ro console=tty1 console=ttyS0 earlyprintk=ttyS0 rootdelay=300"
// use loglevel=3 as described in the RHEL documentation and used in existing RHEL images built by MSFT
var defaultAzureKernelOptions = "ro loglevel=3 console=tty1 console=ttyS0 earlyprintk=ttyS0 rootdelay=300"
// based on https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/deploying_rhel_9_on_microsoft_azure/assembly_deploying-a-rhel-image-as-a-virtual-machine-on-microsoft-azure_cloud-content-azure#making-configuration-changes_configure-the-image-azure
var defaultAzureImageConfig = &distro.ImageConfig{
Timezone: common.ToPtr("Etc/UTC"),
Locale: common.ToPtr("en_US.UTF-8"),
@ -456,6 +458,12 @@ var defaultAzureImageConfig = &distro.ImageConfig{
osbuild.NewModprobeConfigCmdBlacklist("amdgpu"),
},
},
{
Filename: "blacklist-intel-cstate.conf",
Commands: osbuild.ModprobeConfigCmdList{
osbuild.NewModprobeConfigCmdBlacklist("intel_cstate"),
},
},
{
Filename: "blacklist-floppy.conf",
Commands: osbuild.ModprobeConfigCmdList{
@ -469,6 +477,12 @@ var defaultAzureImageConfig = &distro.ImageConfig{
osbuild.NewModprobeConfigCmdBlacklist("lbm-nouveau"),
},
},
{
Filename: "blacklist-skylake-edac.conf",
Commands: osbuild.ModprobeConfigCmdList{
osbuild.NewModprobeConfigCmdBlacklist("skx_edac"),
},
},
},
CloudInit: []*osbuild.CloudInitStageOptions{
{
@ -515,10 +529,13 @@ var defaultAzureImageConfig = &distro.ImageConfig{
},
},
Grub2Config: &osbuild.GRUB2Config{
TerminalInput: []string{"serial", "console"},
TerminalOutput: []string{"serial", "console"},
Serial: "serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1",
Timeout: 10,
DisableRecovery: common.ToPtr(true),
DisableSubmenu: common.ToPtr(true),
Distributor: "$(sed 's, release .*$,,g' /etc/system-release)",
Terminal: []string{"serial", "console"},
Serial: "serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1",
Timeout: 10,
TimeoutStyle: osbuild.GRUB2ConfigTimeoutStyleCountdown,
},
UdevRules: &osbuild.UdevRulesStageOptions{
Filename: "/etc/udev/rules.d/68-azure-sriov-nm-unmanaged.rules",
@ -555,35 +572,12 @@ var defaultAzureImageConfig = &distro.ImageConfig{
}
// Diff of the default Image Config compare to the `defaultAzureImageConfig`
// The configuration for non-RHUI images does not touch the RHSM configuration at all.
// https://issues.redhat.com/browse/COMPOSER-2157
// defaultAzureByosImageConfig is applied on top of the default Azure image
// config for BYOS (non-RHUI) images: Red Hat GPG keys plus RHSM
// auto-registration settings.
var defaultAzureByosImageConfig = &distro.ImageConfig{
	GPGKeyFiles: []string{
		"/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release",
	},
	RHSMConfig: map[subscription.RHSMStatus]*osbuild.RHSMStageOptions{
		subscription.RHSMConfigNoSubscription: {
			SubMan: &osbuild.RHSMStageOptionsSubMan{
				Rhsmcertd: &osbuild.SubManConfigRHSMCERTDSection{
					AutoRegistration: common.ToPtr(true),
				},
				// Don't disable RHSM redhat.repo management on this
				// image, which is BYOS and does not use RHUI for content.
				// Otherwise subscribing the system manually after booting
				// it would result in empty redhat.repo. Without RHUI, such
				// system would have no way to get Red Hat content, but
				// enable the repo management manually, which would be very
				// confusing.
				// NOTE(review): the comment originally said "GCE" here,
				// but this is the Azure BYOS config — it appears to have
				// been copy-pasted from the GCE config.
			},
		},
		subscription.RHSMConfigWithSubscription: {
			SubMan: &osbuild.RHSMStageOptionsSubMan{
				Rhsmcertd: &osbuild.SubManConfigRHSMCERTDSection{
					AutoRegistration: common.ToPtr(true),
				},
				// do not disable the redhat.repo management if the user
				// explicitly request the system to be subscribed
			},
		},
	},
}
// Diff of the default Image Config compare to the `defaultAzureImageConfig`

View file

@ -253,7 +253,7 @@ func newDistro(name string, minor int) *distribution {
}
x86_64.addImageTypes(
ec2X86Platform,
mkAMIImgTypeX86_64(rd.osVersion, rd.isRHEL()),
mkAMIImgTypeX86_64(),
)
gceX86Platform := &platform.X86{
@ -264,7 +264,7 @@ func newDistro(name string, minor int) *distribution {
}
x86_64.addImageTypes(
gceX86Platform,
mkGCEImageType(rd.isRHEL()),
mkGCEImageType(),
)
x86_64.addImageTypes(
@ -391,7 +391,7 @@ func newDistro(name string, minor int) *distribution {
ImageFormat: platform.FORMAT_RAW,
},
},
mkAMIImgTypeAarch64(rd.osVersion, rd.isRHEL()),
mkAMIImgTypeAarch64(),
)
ppc64le.addImageTypes(
@ -455,7 +455,7 @@ func newDistro(name string, minor int) *distribution {
)
// add GCE RHUI image to RHEL only
x86_64.addImageTypes(gceX86Platform, mkGCERHUIImageType(rd.isRHEL()))
x86_64.addImageTypes(gceX86Platform, mkGCERHUIImageType())
} else {
x86_64.addImageTypes(azureX64Platform, azureImgType)
aarch64.addImageTypes(azureAarch64Platform, azureImgType)

View file

@ -48,19 +48,21 @@ var (
}
)
func mkGCEImageType(rhsm bool) imageType {
func mkGCEImageType() imageType {
it := gceImgType
it.defaultImageConfig = baseGCEImageConfig(rhsm)
// The configuration for non-RHUI images does not touch the RHSM configuration at all.
// https://issues.redhat.com/browse/COMPOSER-2157
it.defaultImageConfig = baseGCEImageConfig()
return it
}
func mkGCERHUIImageType(rhsm bool) imageType {
func mkGCERHUIImageType() imageType {
it := gceRhuiImgType
it.defaultImageConfig = defaultGceRhuiImageConfig(rhsm)
it.defaultImageConfig = defaultGceRhuiImageConfig()
return it
}
func baseGCEImageConfig(rhsm bool) *distro.ImageConfig {
func baseGCEImageConfig() *distro.ImageConfig {
ic := &distro.ImageConfig{
Timezone: common.ToPtr("UTC"),
TimeSynchronization: &osbuild.ChronyStageOptions{
@ -154,37 +156,10 @@ func baseGCEImageConfig(rhsm bool) *distro.ImageConfig {
},
}
if rhsm {
ic.RHSMConfig = map[subscription.RHSMStatus]*osbuild.RHSMStageOptions{
subscription.RHSMConfigNoSubscription: {
SubMan: &osbuild.RHSMStageOptionsSubMan{
Rhsmcertd: &osbuild.SubManConfigRHSMCERTDSection{
AutoRegistration: common.ToPtr(true),
},
// Don't disable RHSM redhat.repo management on the GCE
// image, which is BYOS and does not use RHUI for content.
// Otherwise subscribing the system manually after booting
// it would result in empty redhat.repo. Without RHUI, such
// system would have no way to get Red Hat content, but
// enable the repo management manually, which would be very
// confusing.
},
},
subscription.RHSMConfigWithSubscription: {
SubMan: &osbuild.RHSMStageOptionsSubMan{
Rhsmcertd: &osbuild.SubManConfigRHSMCERTDSection{
AutoRegistration: common.ToPtr(true),
},
// do not disable the redhat.repo management if the user
// explicitly request the system to be subscribed
},
},
}
}
return ic
}
func defaultGceRhuiImageConfig(rhsm bool) *distro.ImageConfig {
func defaultGceRhuiImageConfig() *distro.ImageConfig {
ic := &distro.ImageConfig{
RHSMConfig: map[subscription.RHSMStatus]*osbuild.RHSMStageOptions{
subscription.RHSMConfigNoSubscription: {
@ -208,7 +183,7 @@ func defaultGceRhuiImageConfig(rhsm bool) *distro.ImageConfig {
},
},
}
return ic.InheritFrom(baseGCEImageConfig(rhsm))
return ic.InheritFrom(baseGCEImageConfig())
}
func gceCommonPackageSet(t *imageType) rpmmd.PackageSet {

View file

@ -390,6 +390,16 @@ func edgeInstallerImage(workload workload.Workload,
img.Users = users.UsersFromBP(customizations.GetUsers())
img.Groups = users.GroupsFromBP(customizations.GetGroups())
img.Language, img.Keyboard = customizations.GetPrimaryLocale()
// ignore ntp servers - we don't currently support setting these in the
// kickstart though kickstart does support setting them
img.Timezone, _ = customizations.GetTimezoneSettings()
if instCust := customizations.GetInstaller(); instCust != nil {
img.WheelNoPasswd = instCust.WheelSudoNopasswd
img.UnattendedKickstart = instCust.Unattended
}
img.SquashfsCompression = "xz"
img.AdditionalDracutModules = []string{
"nvdimm", // non-volatile DIMM firmware (provides nfit, cuse, and nd_e820)
@ -595,6 +605,11 @@ func imageInstallerImage(workload workload.Workload,
img.AdditionalDrivers = []string{"cuse", "ipmi_devintf", "ipmi_msghandler"}
img.AdditionalAnacondaModules = []string{"org.fedoraproject.Anaconda.Modules.Users"}
if instCust := customizations.GetInstaller(); instCust != nil {
img.WheelNoPasswd = instCust.WheelSudoNopasswd
img.UnattendedKickstart = instCust.Unattended
}
img.SquashfsCompression = "xz"
// put the kickstart file in the root of the iso

View file

@ -167,7 +167,12 @@ func (t *imageType) getPartitionTable(
partitioningMode := options.PartitioningMode
if t.rpmOstree {
// Edge supports only LVM, force it.
// Raw is not supported, return an error if it is requested
// TODO Need a central location for logic like this
if partitioningMode == disk.RawPartitioningMode {
return nil, fmt.Errorf("partitioning mode raw not supported for %s on %s", t.Name(), t.arch.Name())
}
partitioningMode = disk.LVMPartitioningMode
}
@ -317,7 +322,7 @@ func (t *imageType) checkOptions(bp *blueprint.Blueprint, options distro.ImageOp
}
if t.name == "edge-simplified-installer" {
allowed := []string{"InstallationDevice", "FDO", "Ignition", "Kernel", "User", "Group", "FIPS", "Filesystem"}
allowed := []string{"InstallationDevice", "FDO", "Ignition", "Kernel", "User", "Group", "FIPS"}
if err := customizations.CheckAllowed(allowed...); err != nil {
return warnings, fmt.Errorf(distro.UnsupportedCustomizationError, t.name, strings.Join(allowed, ", "))
}
@ -355,7 +360,7 @@ func (t *imageType) checkOptions(bp *blueprint.Blueprint, options distro.ImageOp
}
}
} else if t.name == "edge-installer" {
allowed := []string{"User", "Group", "FIPS"}
allowed := []string{"User", "Group", "FIPS", "Installer", "Timezone", "Locale"}
if err := customizations.CheckAllowed(allowed...); err != nil {
return warnings, fmt.Errorf(distro.UnsupportedCustomizationError, t.name, strings.Join(allowed, ", "))
}
@ -367,7 +372,8 @@ func (t *imageType) checkOptions(bp *blueprint.Blueprint, options distro.ImageOp
if options.OSTree == nil || options.OSTree.URL == "" {
return warnings, fmt.Errorf("%q images require specifying a URL from which to retrieve the OSTree commit", t.name)
}
allowed := []string{"Ignition", "Kernel", "User", "Group", "FIPS", "Filesystem"}
allowed := []string{"Ignition", "Kernel", "User", "Group", "FIPS"}
if err := customizations.CheckAllowed(allowed...); err != nil {
return warnings, fmt.Errorf(distro.UnsupportedCustomizationError, t.name, strings.Join(allowed, ", "))
}
@ -394,14 +400,9 @@ func (t *imageType) checkOptions(bp *blueprint.Blueprint, options distro.ImageOp
}
mountpoints := customizations.GetFilesystems()
if mountpoints != nil && t.rpmOstree && (t.name == "edge-container" || t.name == "edge-commit") {
return warnings, fmt.Errorf("Custom mountpoints are not supported for edge-container and edge-commit")
} else if mountpoints != nil && t.rpmOstree && !(t.name == "edge-container" || t.name == "edge-commit") {
//customization allowed for edge-raw-image,edge-ami,edge-vsphere,edge-simplified-installer
err := blueprint.CheckMountpointsPolicy(mountpoints, policies.OstreeMountpointPolicies)
if err != nil {
return warnings, err
}
if mountpoints != nil && t.rpmOstree {
return warnings, fmt.Errorf("Custom mountpoints are not supported for ostree types")
}
err := blueprint.CheckMountpointsPolicy(mountpoints, policies.MountpointPolicies)
@ -454,5 +455,12 @@ func (t *imageType) checkOptions(bp *blueprint.Blueprint, options distro.ImageOp
warnings = append(warnings, w)
}
if customizations.GetInstaller() != nil {
// only supported by the Anaconda installer
if slices.Index([]string{"image-installer", "edge-installer", "live-installer"}, t.name) == -1 {
return warnings, fmt.Errorf("installer customizations are not supported for %q", t.name)
}
}
return warnings, nil
}

View file

@ -7,10 +7,17 @@ import (
)
func defaultBasePartitionTables(t *imageType) (disk.PartitionTable, bool) {
// RHEL >= 9.3 needs to have a bigger /boot, see RHEL-7999
bootSize := uint64(600) * common.MebiByte
if common.VersionLessThan(t.arch.distro.osVersion, "9.3") && t.arch.distro.isRHEL() {
var bootSize uint64
switch {
case common.VersionLessThan(t.arch.distro.osVersion, "9.3") && t.arch.distro.isRHEL():
// RHEL <= 9.2 had only 500 MiB /boot
bootSize = 500 * common.MebiByte
case common.VersionLessThan(t.arch.distro.osVersion, "9.4") && t.arch.distro.isRHEL():
// RHEL 9.3 had 600 MiB /boot, see RHEL-7999
bootSize = 600 * common.MebiByte
default:
// RHEL >= 9.4 needs to have even a bigger /boot, see COMPOSER-2155
bootSize = 1 * common.GibiByte
}
switch t.platform.GetArch() {

View file

@ -10,6 +10,7 @@ import (
"github.com/osbuild/images/pkg/container"
"github.com/osbuild/images/pkg/customizations/users"
"github.com/osbuild/images/pkg/manifest"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
"github.com/osbuild/images/pkg/rpmmd"
"github.com/osbuild/images/pkg/runner"
@ -98,6 +99,8 @@ func (img *AnacondaContainerInstaller) InstantiateManifest(m *manifest.Manifest,
bootTreePipeline.Platform = img.Platform
bootTreePipeline.UEFIVendor = img.Platform.GetUEFIVendor()
bootTreePipeline.ISOLabel = isoLabel
kspath := osbuild.KickstartPathOSBuild
bootTreePipeline.KernelOpts = []string{fmt.Sprintf("inst.stage2=hd:LABEL=%s", isoLabel), fmt.Sprintf("inst.ks=hd:LABEL=%s:%s", isoLabel, kspath)}
if img.FIPS {
bootTreePipeline.KernelOpts = append(bootTreePipeline.KernelOpts, "fips=1")

View file

@ -9,6 +9,7 @@ import (
"github.com/osbuild/images/pkg/artifact"
"github.com/osbuild/images/pkg/customizations/users"
"github.com/osbuild/images/pkg/manifest"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/ostree"
"github.com/osbuild/images/pkg/platform"
"github.com/osbuild/images/pkg/rpmmd"
@ -22,6 +23,16 @@ type AnacondaOSTreeInstaller struct {
Users []users.User
Groups []users.Group
Language *string
Keyboard *string
Timezone *string
// Create a sudoers drop-in file for wheel group with NOPASSWD option
WheelNoPasswd bool
// Add kickstart options to make the installation fully unattended
UnattendedKickstart bool
SquashfsCompression string
ISOLabelTempl string
@ -93,6 +104,8 @@ func (img *AnacondaOSTreeInstaller) InstantiateManifest(m *manifest.Manifest,
bootTreePipeline.Platform = img.Platform
bootTreePipeline.UEFIVendor = img.Platform.GetUEFIVendor()
bootTreePipeline.ISOLabel = isoLabel
kspath := osbuild.KickstartPathOSBuild
bootTreePipeline.KernelOpts = []string{fmt.Sprintf("inst.stage2=hd:LABEL=%s", isoLabel), fmt.Sprintf("inst.ks=hd:LABEL=%s:%s", isoLabel, kspath)}
if img.FIPS {
bootTreePipeline.KernelOpts = append(bootTreePipeline.KernelOpts, "fips=1")
@ -108,8 +121,12 @@ func (img *AnacondaOSTreeInstaller) InstantiateManifest(m *manifest.Manifest,
isoTreePipeline.Remote = img.Remote
isoTreePipeline.Users = img.Users
isoTreePipeline.Groups = img.Groups
isoTreePipeline.WheelNoPasswd = img.WheelNoPasswd
isoTreePipeline.UnattendedKickstart = img.UnattendedKickstart
isoTreePipeline.SquashfsCompression = img.SquashfsCompression
isoTreePipeline.Language = img.Language
isoTreePipeline.Keyboard = img.Keyboard
isoTreePipeline.Timezone = img.Timezone
// For ostree installers, always put the kickstart file in the root of the ISO
isoTreePipeline.KSPath = kspath

View file

@ -13,13 +13,12 @@ import (
"github.com/osbuild/images/pkg/customizations/users"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/manifest"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
"github.com/osbuild/images/pkg/rpmmd"
"github.com/osbuild/images/pkg/runner"
)
const kspath = "/osbuild.ks"
func efiBootPartitionTable(rng *rand.Rand) *disk.PartitionTable {
var efibootImageSize uint64 = 20 * common.MebiByte
return &disk.PartitionTable{
@ -49,11 +48,22 @@ type AnacondaTarInstaller struct {
Users []users.User
Groups []users.Group
// If set, the kickstart file will be added to the bootiso-tree as
// /osbuild.ks, otherwise any kickstart options will be configured in the
// default /usr/share/anaconda/interactive-defaults.ks in the rootfs.
// If set, the kickstart file will be added to the bootiso-tree at the
// default path for osbuild, otherwise any kickstart options will be
// configured in the default location for interactive defaults in the
// rootfs. Enabling UnattendedKickstart automatically enables this option
// because automatic installations cannot be configured using interactive
// defaults.
ISORootKickstart bool
// Create a sudoers drop-in file for wheel group with NOPASSWD option
WheelNoPasswd bool
// Add kickstart options to make the installation fully unattended.
// Enabling this option also automatically enables the ISORootKickstart
// option.
UnattendedKickstart bool
SquashfsCompression string
ISOLabelTempl string
@ -84,6 +94,12 @@ func (img *AnacondaTarInstaller) InstantiateManifest(m *manifest.Manifest,
buildPipeline := manifest.NewBuild(m, runner, repos, nil)
buildPipeline.Checkpoint()
if img.UnattendedKickstart {
// if we're building an unattended installer, override the
// ISORootKickstart option
img.ISORootKickstart = true
}
anacondaPipeline := manifest.NewAnacondaInstaller(
manifest.AnacondaInstallerTypePayload,
buildPipeline,
@ -131,6 +147,7 @@ func (img *AnacondaTarInstaller) InstantiateManifest(m *manifest.Manifest,
bootTreePipeline.UEFIVendor = img.Platform.GetUEFIVendor()
bootTreePipeline.ISOLabel = isoLabel
kspath := osbuild.KickstartPathOSBuild
kernelOpts := []string{fmt.Sprintf("inst.stage2=hd:LABEL=%s", isoLabel)}
if img.ISORootKickstart {
kernelOpts = append(kernelOpts, fmt.Sprintf("inst.ks=hd:LABEL=%s:%s", isoLabel, kspath))
@ -150,16 +167,27 @@ func (img *AnacondaTarInstaller) InstantiateManifest(m *manifest.Manifest,
isoLinuxEnabled := img.Platform.GetArch() == arch.ARCH_X86_64
isoTreePipeline := manifest.NewAnacondaInstallerISOTree(buildPipeline, anacondaPipeline, rootfsImagePipeline, bootTreePipeline)
// TODO: the partition table is required - make it a ctor arg or set a default one in the pipeline
isoTreePipeline.PartitionTable = efiBootPartitionTable(rng)
isoTreePipeline.Release = img.Release
isoTreePipeline.OSName = img.OSName
isoTreePipeline.Users = img.Users
isoTreePipeline.Groups = img.Groups
isoTreePipeline.Keyboard = img.OSCustomizations.Keyboard
if img.OSCustomizations.Language != "" {
isoTreePipeline.Language = &img.OSCustomizations.Language
}
if img.OSCustomizations.Timezone != "" {
isoTreePipeline.Timezone = &img.OSCustomizations.Timezone
}
isoTreePipeline.PayloadPath = tarPath
if img.ISORootKickstart {
isoTreePipeline.KSPath = kspath
}
isoTreePipeline.WheelNoPasswd = img.WheelNoPasswd
isoTreePipeline.UnattendedKickstart = img.UnattendedKickstart
isoTreePipeline.SquashfsCompression = img.SquashfsCompression
isoTreePipeline.OSPipeline = osPipeline

View file

@ -3,6 +3,8 @@ package image
import (
"fmt"
"math/rand"
"path/filepath"
"strings"
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/internal/environment"
@ -87,8 +89,14 @@ func (img *DiskImage) InstantiateManifest(m *manifest.Manifest,
ovfPipeline := manifest.NewOVF(buildPipeline, vmdkPipeline)
tarPipeline := manifest.NewTar(buildPipeline, ovfPipeline, "archive")
tarPipeline.Format = osbuild.TarArchiveFormatUstar
tarPipeline.RootNode = osbuild.TarRootNodeOmit
tarPipeline.SetFilename(img.Filename)
extLess := strings.TrimSuffix(img.Filename, filepath.Ext(img.Filename))
// The .ovf descriptor needs to be the first file in the archive
tarPipeline.Paths = []string{
fmt.Sprintf("%s.ovf", extLess),
fmt.Sprintf("%s.mf", extLess),
fmt.Sprintf("%s.vmdk", extLess),
}
imagePipeline = tarPipeline
case platform.FORMAT_GCE:
// NOTE(akoutsou): temporary workaround; filename required for GCP

View file

@ -63,6 +63,7 @@ func (img *OSTreeArchive) InstantiateManifest(m *manifest.Manifest,
var artifact *artifact.Artifact
if img.BootContainer {
osPipeline.Bootupd = true
encapsulatePipeline := manifest.NewOSTreeEncapsulate(buildPipeline, ostreeCommitPipeline, "ostree-encapsulate")
encapsulatePipeline.SetFilename(img.Filename)
artifact = encapsulatePipeline.Export()

View file

@ -4,6 +4,7 @@ import (
"fmt"
"os"
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/container"
"github.com/osbuild/images/pkg/customizations/fsnode"
@ -188,26 +189,17 @@ func (p *AnacondaInstaller) serializeEnd() {
p.packageSpecs = nil
}
// installerRootUser returns the user entry for the installer environment's
// root account: a root user with an empty (but set) password.
func installerRootUser() osbuild.UsersStageOptionsUser {
	emptyPassword := ""
	return osbuild.UsersStageOptionsUser{Password: &emptyPassword}
}
func (p *AnacondaInstaller) serialize() osbuild.Pipeline {
if len(p.packageSpecs) == 0 {
panic("serialization not started")
}
// Let's do a bunch of sanity checks that are dependent on the installer type
// being serialized
if p.Type == AnacondaInstallerTypeLive {
if len(p.Users) != 0 || len(p.Groups) != 0 {
panic("anaconda installer type payload does not support users and groups customization")
}
if p.InteractiveDefaults != nil {
panic("anaconda installer type payload does not support interactive defaults")
}
} else if p.Type == AnacondaInstallerTypePayload {
} else {
panic("invalid anaconda installer type")
}
pipeline := p.Base.serialize()
pipeline.AddStage(osbuild.NewRPMStage(osbuild.NewRPMStageOptions(p.repos), osbuild.NewRpmStageSourceFilesInputs(p.packageSpecs)))
@ -220,130 +212,143 @@ func (p *AnacondaInstaller) serialize() osbuild.Pipeline {
}))
pipeline.AddStage(osbuild.NewLocaleStage(&osbuild.LocaleStageOptions{Language: "en_US.UTF-8"}))
rootPassword := ""
rootUser := osbuild.UsersStageOptionsUser{
Password: &rootPassword,
}
var usersStageOptions *osbuild.UsersStageOptions
if p.Type == AnacondaInstallerTypePayload {
installUID := 0
installGID := 0
installHome := "/root"
installShell := "/usr/libexec/anaconda/run-anaconda"
installPassword := ""
installUser := osbuild.UsersStageOptionsUser{
UID: &installUID,
GID: &installGID,
Home: &installHome,
Shell: &installShell,
Password: &installPassword,
// Let's do a bunch of sanity checks that are dependent on the installer type
// being serialized
switch p.Type {
case AnacondaInstallerTypeLive:
if len(p.Users) != 0 || len(p.Groups) != 0 {
panic("anaconda installer type live does not support users and groups customization")
}
usersStageOptions = &osbuild.UsersStageOptions{
Users: map[string]osbuild.UsersStageOptionsUser{
"root": rootUser,
"install": installUser,
},
if p.InteractiveDefaults != nil {
panic("anaconda installer type live does not support interactive defaults")
}
} else if p.Type == AnacondaInstallerTypeLive {
usersStageOptions = &osbuild.UsersStageOptions{
Users: map[string]osbuild.UsersStageOptionsUser{
"root": rootUser,
},
}
}
pipeline.AddStage(osbuild.NewUsersStage(usersStageOptions))
if p.Type == AnacondaInstallerTypeLive {
systemdStageOptions := &osbuild.SystemdStageOptions{
EnabledServices: []string{
"livesys.service",
"livesys-late.service",
},
}
pipeline.AddStage(osbuild.NewSystemdStage(systemdStageOptions))
livesysMode := os.FileMode(int(0644))
livesysFile, err := fsnode.NewFile("/etc/sysconfig/livesys", &livesysMode, "root", "root", []byte("livesys_session=\"gnome\""))
if err != nil {
panic(err)
}
p.Files = []*fsnode.File{livesysFile}
pipeline.AddStages(osbuild.GenFileNodesStages(p.Files)...)
}
if p.Type == AnacondaInstallerTypePayload {
var LoraxPath string
if p.UseRHELLoraxTemplates {
LoraxPath = "80-rhel/runtime-postinstall.tmpl"
} else {
LoraxPath = "99-generic/runtime-postinstall.tmpl"
}
pipeline.AddStage(osbuild.NewAnacondaStage(osbuild.NewAnacondaStageOptions(p.AdditionalAnacondaModules)))
pipeline.AddStage(osbuild.NewLoraxScriptStage(&osbuild.LoraxScriptStageOptions{
Path: LoraxPath,
BaseArch: p.platform.GetArch().String(),
}))
}
var dracutModules []string
if p.Type == AnacondaInstallerTypePayload {
dracutModules = append(
p.AdditionalDracutModules,
"anaconda",
"rdma",
"rngd",
"multipath",
"fcoe",
"fcoe-uefi",
"iscsi",
"lunmask",
"nfs",
)
} else if p.Type == AnacondaInstallerTypeLive {
dracutModules = append(
p.AdditionalDracutModules,
"anaconda",
"rdma",
"rngd",
)
} else {
pipeline.AddStages(p.liveStages()...)
case AnacondaInstallerTypePayload:
pipeline.AddStages(p.payloadStages()...)
default:
panic("invalid anaconda installer type")
}
dracutOptions := dracutStageOptions(p.kernelVer, p.Biosdevname, dracutModules)
dracutOptions.AddDrivers = p.AdditionalDrivers
pipeline.AddStage(osbuild.NewDracutStage(dracutOptions))
pipeline.AddStage(osbuild.NewSELinuxConfigStage(&osbuild.SELinuxConfigStageOptions{State: osbuild.SELinuxStatePermissive}))
return pipeline
}
if p.Type == AnacondaInstallerTypePayload {
if p.InteractiveDefaults != nil {
kickstartOptions, err := osbuild.NewKickstartStageOptionsWithLiveIMG(
"/usr/share/anaconda/interactive-defaults.ks",
p.Users,
p.Groups,
p.InteractiveDefaults.TarPath,
)
func (p *AnacondaInstaller) payloadStages() []*osbuild.Stage {
stages := make([]*osbuild.Stage, 0)
if err != nil {
panic("failed to create kickstartstage options for interactive defaults")
}
pipeline.AddStage(osbuild.NewKickstartStage(kickstartOptions))
}
installUID := 0
installGID := 0
installHome := "/root"
installShell := "/usr/libexec/anaconda/run-anaconda"
installPassword := ""
installUser := osbuild.UsersStageOptionsUser{
UID: &installUID,
GID: &installGID,
Home: &installHome,
Shell: &installShell,
Password: &installPassword,
}
return pipeline
usersStageOptions := &osbuild.UsersStageOptions{
Users: map[string]osbuild.UsersStageOptionsUser{
"root": installerRootUser(),
"install": installUser,
},
}
stages = append(stages, osbuild.NewUsersStage(usersStageOptions))
var LoraxPath string
if p.UseRHELLoraxTemplates {
LoraxPath = "80-rhel/runtime-postinstall.tmpl"
} else {
LoraxPath = "99-generic/runtime-postinstall.tmpl"
}
stages = append(stages, osbuild.NewAnacondaStage(osbuild.NewAnacondaStageOptions(p.AdditionalAnacondaModules)))
stages = append(stages, osbuild.NewLoraxScriptStage(&osbuild.LoraxScriptStageOptions{
Path: LoraxPath,
BaseArch: p.platform.GetArch().String(),
}))
dracutModules := append(
p.AdditionalDracutModules,
"anaconda",
"rdma",
"rngd",
"multipath",
"fcoe",
"fcoe-uefi",
"iscsi",
"lunmask",
"nfs",
)
dracutOptions := dracutStageOptions(p.kernelVer, p.Biosdevname, dracutModules)
dracutOptions.AddDrivers = p.AdditionalDrivers
stages = append(stages, osbuild.NewDracutStage(dracutOptions))
stages = append(stages, osbuild.NewSELinuxConfigStage(&osbuild.SELinuxConfigStageOptions{State: osbuild.SELinuxStatePermissive}))
if p.InteractiveDefaults != nil {
kickstartOptions, err := osbuild.NewKickstartStageOptionsWithLiveIMG(
osbuild.KickstartPathInteractiveDefaults,
p.Users,
p.Groups,
p.InteractiveDefaults.TarPath,
)
if err != nil {
panic(fmt.Sprintf("failed to create kickstart stage options for interactive defaults: %v", err))
}
stages = append(stages, osbuild.NewKickstartStage(kickstartOptions))
}
return stages
}
// liveStages returns the stages specific to live installers: a passwordless
// root user, the livesys services with their configuration file, the live
// initrd, and SELinux in permissive mode.
func (p *AnacondaInstaller) liveStages() []*osbuild.Stage {
	stages := make([]*osbuild.Stage, 0)

	// Live images get only a root user with an empty password.
	usersStageOptions := &osbuild.UsersStageOptions{
		Users: map[string]osbuild.UsersStageOptionsUser{
			"root": installerRootUser(),
		},
	}
	stages = append(stages, osbuild.NewUsersStage(usersStageOptions))

	// Enable the livesys services that set up the live session on boot.
	systemdStageOptions := &osbuild.SystemdStageOptions{
		EnabledServices: []string{
			"livesys.service",
			"livesys-late.service",
		},
	}
	stages = append(stages, osbuild.NewSystemdStage(systemdStageOptions))

	// /etc/sysconfig/livesys selects the live session ("gnome" here).
	livesysMode := os.FileMode(int(0644))
	livesysFile, err := fsnode.NewFile("/etc/sysconfig/livesys", &livesysMode, "root", "root", []byte("livesys_session=\"gnome\""))
	if err != nil {
		// all arguments above are static, so a failure is a programmer error
		panic(err)
	}
	p.Files = []*fsnode.File{livesysFile}
	stages = append(stages, osbuild.GenFileNodesStages(p.Files)...)

	// Live initrd modules (no install-time storage/network modules needed).
	dracutModules := append(
		p.AdditionalDracutModules,
		"anaconda",
		"rdma",
		"rngd",
	)
	dracutOptions := dracutStageOptions(p.kernelVer, p.Biosdevname, dracutModules)
	dracutOptions.AddDrivers = p.AdditionalDrivers
	stages = append(stages, osbuild.NewDracutStage(dracutOptions))

	stages = append(stages, osbuild.NewSELinuxConfigStage(&osbuild.SELinuxConfigStageOptions{State: osbuild.SELinuxStatePermissive}))

	return stages
}
func dracutStageOptions(kernelVer string, biosdevname bool, additionalModules []string) *osbuild.DracutStageOptions {

View file

@ -4,6 +4,7 @@ import (
"fmt"
"path"
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/container"
"github.com/osbuild/images/pkg/customizations/fsnode"
"github.com/osbuild/images/pkg/customizations/users"
@ -27,6 +28,16 @@ type AnacondaInstallerISOTree struct {
Users []users.User
Groups []users.Group
Language *string
Keyboard *string
Timezone *string
// Create a sudoers drop-in file for wheel group with NOPASSWD option
WheelNoPasswd bool
// Add kickstart options to make the installation fully unattended
UnattendedKickstart bool
PartitionTable *disk.PartitionTable
anacondaPipeline *AnacondaInstaller
@ -322,112 +333,19 @@ func (p *AnacondaInstallerISOTree) serialize() osbuild.Pipeline {
copyInputs,
))
if p.ostreeCommitSpec != nil {
// Set up the payload ostree repo
pipeline.AddStage(osbuild.NewOSTreeInitStage(&osbuild.OSTreeInitStageOptions{Path: p.PayloadPath}))
pipeline.AddStage(osbuild.NewOSTreePullStage(
&osbuild.OSTreePullStageOptions{Repo: p.PayloadPath},
osbuild.NewOstreePullStageInputs("org.osbuild.source", p.ostreeCommitSpec.Checksum, p.ostreeCommitSpec.Ref),
))
// Configure the kickstart file with the payload and any user options
kickstartOptions, err := osbuild.NewKickstartStageOptionsWithOSTreeCommit(
p.KSPath,
p.Users,
p.Groups,
makeISORootPath(p.PayloadPath),
p.ostreeCommitSpec.Ref,
p.Remote,
p.OSName)
if err != nil {
panic("failed to create kickstartstage options")
}
pipeline.AddStage(osbuild.NewKickstartStage(kickstartOptions))
}
if p.containerSpec != nil {
images := osbuild.NewContainersInputForSources([]container.Spec{*p.containerSpec})
pipeline.AddStage(osbuild.NewMkdirStage(&osbuild.MkdirStageOptions{
Paths: []osbuild.MkdirStagePath{
{
Path: p.PayloadPath,
},
},
}))
// copy the container in
pipeline.AddStage(osbuild.NewSkopeoStageWithOCI(
p.PayloadPath,
images,
nil))
// do what we can in our kickstart stage
kickstartOptions, err := osbuild.NewKickstartStageOptionsWithOSTreeContainer(
"/osbuild-base.ks",
p.Users,
p.Groups,
path.Join("/run/install/repo", p.PayloadPath),
"oci",
"",
"")
if err != nil {
panic("failed to create kickstartstage options")
}
pipeline.AddStage(osbuild.NewKickstartStage(kickstartOptions))
// and what we can't do in a separate kickstart that we include
kickstartFile, err := fsnode.NewFile(p.KSPath, nil, nil, nil, []byte(`
%include /run/install/repo/osbuild-base.ks
rootpw --lock
lang en_US.UTF-8
keyboard us
timezone UTC
clearpart --all
reqpart --add-boot
part swap --fstype=swap --size=1024
part / --fstype=ext4 --grow
reboot --eject
`))
if err != nil {
panic(err)
}
p.Files = []*fsnode.File{kickstartFile}
pipeline.AddStages(osbuild.GenFileNodesStages(p.Files)...)
}
if p.OSPipeline != nil {
// Create the payload tarball
pipeline.AddStage(osbuild.NewTarStage(&osbuild.TarStageOptions{Filename: p.PayloadPath}, p.OSPipeline.name))
// If the KSPath is set, we need to add the kickstart stage to this (bootiso-tree) pipeline.
// If it's not specified here, it should have been added to the InteractiveDefaults in the anaconda-tree.
if p.KSPath != "" {
kickstartOptions, err := osbuild.NewKickstartStageOptionsWithLiveIMG(
p.KSPath,
p.Users,
p.Groups,
makeISORootPath(p.PayloadPath))
if err != nil {
panic("failed to create kickstartstage options")
}
pipeline.AddStage(osbuild.NewKickstartStage(kickstartOptions))
if p.anacondaPipeline.Type == AnacondaInstallerTypePayload {
// the following pipelines are only relevant for payload installers
switch {
case p.ostreeCommitSpec != nil:
pipeline.AddStages(p.ostreeCommitStages()...)
case p.containerSpec != nil:
pipeline.AddStages(p.ostreeContainerStages()...)
case p.OSPipeline != nil:
pipeline.AddStages(p.tarPayloadStages()...)
default:
// this should have been caught at the top of the function, but
// let's check again in case we refactor the function.
panic("missing ostree, container, or ospipeline parameters in ISO tree pipeline")
}
}
@ -439,6 +357,210 @@ reboot --eject
return pipeline
}
// ostreeCommitStages returns the stages that embed an ostree commit as the
// installer payload: a repository is initialized at p.PayloadPath, the commit
// is pulled into it, and kickstart stages are generated that deploy from it.
func (p *AnacondaInstallerISOTree) ostreeCommitStages() []*osbuild.Stage {
	var stages []*osbuild.Stage

	// Initialize the payload ostree repo on the ISO and pull the commit in.
	stages = append(stages,
		osbuild.NewOSTreeInitStage(&osbuild.OSTreeInitStageOptions{Path: p.PayloadPath}),
		osbuild.NewOSTreePullStage(
			&osbuild.OSTreePullStageOptions{Repo: p.PayloadPath},
			osbuild.NewOstreePullStageInputs("org.osbuild.source", p.ostreeCommitSpec.Checksum, p.ostreeCommitSpec.Ref),
		),
	)

	// Point the kickstart at the payload repo, along with any user options.
	ksOptions, err := osbuild.NewKickstartStageOptionsWithOSTreeCommit(
		p.KSPath,
		p.Users,
		p.Groups,
		makeISORootPath(p.PayloadPath),
		p.ostreeCommitSpec.Ref,
		p.Remote,
		p.OSName)
	if err != nil {
		panic(fmt.Sprintf("failed to create kickstart stage options: %v", err))
	}

	return append(stages, p.makeKickstartStages(ksOptions)...)
}
// ostreeContainerStages returns the stages that embed a bootable container as
// the installer payload: the container is copied into p.PayloadPath in OCI
// layout, a kickstart is generated that installs from it, and a raw kickstart
// extension adds partitioning plus a post-install `bootc switch` workaround.
//
// Side effect: p.Files is replaced with the generated raw kickstart file.
func (p *AnacondaInstallerISOTree) ostreeContainerStages() []*osbuild.Stage {
	stages := make([]*osbuild.Stage, 0)

	images := osbuild.NewContainersInputForSources([]container.Spec{*p.containerSpec})

	// create the destination directory for the container payload on the ISO
	stages = append(stages, osbuild.NewMkdirStage(&osbuild.MkdirStageOptions{
		Paths: []osbuild.MkdirStagePath{
			{
				Path: p.PayloadPath,
			},
		},
	}))

	// copy the container in
	stages = append(stages, osbuild.NewSkopeoStageWithOCI(
		p.PayloadPath,
		images,
		nil))

	// do what we can in our kickstart stage
	kickstartOptions, err := osbuild.NewKickstartStageOptionsWithOSTreeContainer(
		p.KSPath,
		p.Users,
		p.Groups,
		path.Join("/run/install/repo", p.PayloadPath),
		"oci",
		"",
		"")
	if err != nil {
		panic(fmt.Sprintf("failed to create kickstart stage options: %v", err))
	}

	// NOTE: these are similar to the unattended kickstart options in the
	// other two payload configurations but partitioning is different and
	// we need to add that separately, so we can't use makeKickstartStage
	kickstartOptions.RootPassword = &osbuild.RootPasswordOptions{
		Lock: true,
	}

	// NOTE: These were decided somewhat arbitrarily for the BIB installer. We
	// might want to drop them here and move them into the bib code as
	// project-specific defaults.
	kickstartOptions.Lang = "en_US.UTF-8"
	kickstartOptions.Keyboard = "us"
	kickstartOptions.Timezone = "UTC"
	kickstartOptions.ClearPart = &osbuild.ClearPartOptions{
		All: true,
	}

	stages = append(stages, osbuild.NewKickstartStage(kickstartOptions))

	// and what we can't do in a separate kickstart that we include
	targetContainerTransport := "registry"
	if p.containerSpec.ContainersTransport != nil {
		targetContainerTransport = *p.containerSpec.ContainersTransport
	}
	// Canonicalize to registry, as that's what the bootc stack wants
	if targetContainerTransport == "docker://" {
		targetContainerTransport = "registry"
	}

	// Because osbuild core only supports a subset of options, we append to the
	// base here with some more hardcoded defaults
	// that should very likely become configurable.
	hardcodedKickstartBits := `
reqpart --add-boot
part swap --fstype=swap --size=1024
part / --fstype=ext4 --grow
reboot --eject
`

	// Workaround for lack of --target-imgref in Anaconda, xref https://github.com/osbuild/images/issues/380
	hardcodedKickstartBits += fmt.Sprintf(`%%post
bootc switch --mutate-in-place --transport %s %s
%%end
`, targetContainerTransport, p.containerSpec.LocalName)

	kickstartFile, err := kickstartOptions.IncludeRaw(hardcodedKickstartBits)
	if err != nil {
		panic(err)
	}
	p.Files = []*fsnode.File{kickstartFile}

	stages = append(stages, osbuild.GenFileNodesStages(p.Files)...)
	return stages
}
// tarPayloadStages returns the stages that package the OS pipeline tree as a
// tarball payload and, when p.KSPath is set, the kickstart stages that
// install the payload via liveimg.
func (p *AnacondaInstallerISOTree) tarPayloadStages() []*osbuild.Stage {
	// Archive the OS tree as the installer payload.
	stages := []*osbuild.Stage{
		osbuild.NewTarStage(&osbuild.TarStageOptions{Filename: p.PayloadPath}, p.OSPipeline.name),
	}

	// If KSPath is empty, the kickstart should have been added to the
	// InteractiveDefaults in the anaconda-tree pipeline instead of here
	// (the bootiso-tree pipeline).
	if p.KSPath == "" {
		return stages
	}

	ksOptions, err := osbuild.NewKickstartStageOptionsWithLiveIMG(
		p.KSPath,
		p.Users,
		p.Groups,
		makeISORootPath(p.PayloadPath))
	if err != nil {
		panic(fmt.Sprintf("failed to create kickstart stage options: %v", err))
	}
	return append(stages, p.makeKickstartStages(ksOptions)...)
}
// makeKickstartStages builds the base kickstart stage from the given options,
// first filling in unattended-installation defaults when
// p.UnattendedKickstart is set, and appends an extra raw-kickstart file stage
// when p.WheelNoPasswd requires passwordless sudo for the wheel group.
//
// Side effect: when p.WheelNoPasswd is set, p.Files is replaced with the
// generated raw kickstart file.
func (p *AnacondaInstallerISOTree) makeKickstartStages(kickstartOptions *osbuild.KickstartStageOptions) []*osbuild.Stage {
	var stages []*osbuild.Stage

	if p.UnattendedKickstart {
		// pick the image-type/user override when present, else the default
		orDefault := func(override *string, def string) string {
			if override != nil {
				return *override
			}
			return def
		}

		// defaults for an unattended installation
		kickstartOptions.DisplayMode = "text"
		kickstartOptions.Lang = orDefault(p.Language, "en_US.UTF-8")
		kickstartOptions.Keyboard = orDefault(p.Keyboard, "us")
		kickstartOptions.Timezone = orDefault(p.Timezone, "UTC")
		kickstartOptions.Reboot = &osbuild.RebootOptions{Eject: true}
		kickstartOptions.RootPassword = &osbuild.RootPasswordOptions{Lock: true}
		kickstartOptions.ZeroMBR = true
		kickstartOptions.ClearPart = &osbuild.ClearPartOptions{All: true, InitLabel: true}
		kickstartOptions.AutoPart = &osbuild.AutoPartOptions{Type: "plain", FSType: "xfs", NoHome: true}
		kickstartOptions.Network = []osbuild.NetworkOptions{
			{BootProto: "dhcp", Device: "link", Activate: common.ToPtr(true), OnBoot: "on"},
		}
	}

	stages = append(stages, osbuild.NewKickstartStage(kickstartOptions))

	if p.WheelNoPasswd {
		// Because osbuild core only supports a subset of options,
		// we append to the base here with hardcoded wheel group with NOPASSWD option
		hardcodedKickstartBits := `
%post
echo -e "%wheel\tALL=(ALL)\tNOPASSWD: ALL" > "/etc/sudoers.d/wheel"
chmod 0440 /etc/sudoers.d/wheel
restorecon -rvF /etc/sudoers.d
%end
`
		kickstartFile, err := kickstartOptions.IncludeRaw(hardcodedKickstartBits)
		if err != nil {
			panic(err)
		}
		p.Files = []*fsnode.File{kickstartFile}
		stages = append(stages, osbuild.GenFileNodesStages(p.Files)...)
	}

	return stages
}
// makeISORootPath return a path that can be used to address files and folders
// in the root of the iso
func makeISORootPath(p string) string {

View file

@ -152,6 +152,12 @@ type OS struct {
// OSTreeParent source spec (optional). If nil the new commit (if
// applicable) will have no parent
OSTreeParent *ostree.SourceSpec
// Enabling Bootupd runs bootupctl generate-update-metadata in the tree to
// transform /usr/lib/ostree-boot into a bootupd-compatible update
// payload. Only works with ostree-based images.
Bootupd bool
// Partition table, if nil the tree cannot be put on a partitioned disk
PartitionTable *disk.PartitionTable
@ -605,7 +611,6 @@ func (p *OS) serialize() osbuild.Pipeline {
Kernel: []string{p.kernelVer},
AddModules: []string{"fips"},
}))
p.Files = append(p.Files, osbuild.GenFIPSFiles()...)
}
if !p.KernelOptionsBootloader {
@ -727,6 +732,7 @@ func (p *OS) serialize() osbuild.Pipeline {
}
if p.FIPS {
p.Files = append(p.Files, osbuild.GenFIPSFiles()...)
for _, stage := range osbuild.GenFIPSStages() {
pipeline.AddStage(stage)
}
@ -768,6 +774,13 @@ func (p *OS) serialize() osbuild.Pipeline {
"wheel", "docker",
},
}))
if p.Bootupd {
pipeline.AddStage(osbuild.NewBootupdGenMetadataStage())
}
} else {
if p.Bootupd {
panic("bootupd is only compatible with ostree-based images, this is a programming error")
}
}
return pipeline

View file

@ -292,7 +292,6 @@ func (p *OSTreeDeployment) serialize() osbuild.Pipeline {
if p.FIPS {
kernelOpts = append(kernelOpts, osbuild.GenFIPSKernelOptions(p.PartitionTable)...)
p.Files = append(p.Files, osbuild.GenFIPSFiles()...)
}
var ref string
@ -408,6 +407,7 @@ func (p *OSTreeDeployment) serialize() osbuild.Pipeline {
}
if p.FIPS {
p.Files = append(p.Files, osbuild.GenFIPSFiles()...)
for _, stage := range osbuild.GenFIPSStages() {
stage.MountOSTree(p.osName, ref, 0)
pipeline.AddStage(stage)

View file

@ -122,7 +122,10 @@ func (p *RawOSTreeImage) serialize() osbuild.Pipeline {
func (p *RawOSTreeImage) addBootupdStage(pipeline *osbuild.Pipeline) {
pt := p.treePipeline.PartitionTable
treeBootupdDevices, treeBootupdMounts := osbuild.GenBootupdDevicesMounts(p.Filename(), pt)
treeBootupdDevices, treeBootupdMounts, err := osbuild.GenBootupdDevicesMounts(p.Filename(), pt)
if err != nil {
panic(err)
}
opts := &osbuild.BootupdStageOptions{
Deployment: &osbuild.OSTreeDeployment{
OSName: p.treePipeline.osName,

View file

@ -12,6 +12,7 @@ type Tar struct {
Format osbuild.TarArchiveFormat
RootNode osbuild.TarRootNode
Paths []string
ACLs *bool
SELinux *bool
Xattrs *bool
@ -50,6 +51,7 @@ func (p *Tar) serialize() osbuild.Pipeline {
SELinux: p.SELinux,
Xattrs: p.Xattrs,
RootNode: p.RootNode,
Paths: p.Paths,
}
tarStage := osbuild.NewTarStage(tarOptions, p.inputPipeline.Name())
pipeline.AddStage(tarStage)

View file

@ -0,0 +1,30 @@
package osbuild
import (
"fmt"
)
// NewBootcInstallToFilesystemStage creates a new stage for the
// org.osbuild.bootc.install-to-filesystem stage.
//
// It requires a mount setup so that bootupd can be run by bootc, i.e.
// "/", "/boot" and "/boot/efi" need to be set up so that bootc/bootupd
// find and install all required bootloader bits.
//
// The mounts input should be generated with GenBootupdDevicesMounts.
// Exactly one container input must be supplied; anything else is an error.
func NewBootcInstallToFilesystemStage(inputs ContainersInput, devices map[string]Device, mounts []Mount) (*Stage, error) {
	if err := validateBootupdMounts(mounts); err != nil {
		return nil, err
	}
	if nrefs := len(inputs.References); nrefs != 1 {
		return nil, fmt.Errorf("expected exactly one container input but got: %v (%v)", nrefs, inputs.References)
	}

	return &Stage{
		Type:    "org.osbuild.bootc.install-to-filesystem",
		Inputs:  inputs,
		Devices: devices,
		Mounts:  mounts,
	}, nil
}

View file

@ -0,0 +1,7 @@
package osbuild
// NewBootupdGenMetadataStage creates a new org.osbuild.bootupd.gen-metadata
// stage. The stage takes no options; it transforms /usr/lib/ostree-boot in
// the tree into a bootupd-compatible update payload (see the Bootupd flag on
// the OS pipeline).
func NewBootupdGenMetadataStage() *Stage {
	return &Stage{
		Type: "org.osbuild.bootupd.gen-metadata",
	}
}

View file

@ -76,17 +76,56 @@ func NewBootupdStage(opts *BootupdStageOptions, devices map[string]Device, mount
}, nil
}
func GenBootupdDevicesMounts(filename string, pt *disk.PartitionTable) (map[string]Device, []Mount) {
_, mounts, devices, err := genMountsDevicesFromPt(filename, pt)
if err != nil {
panic(err)
func genMountsForBootupd(source string, pt *disk.PartitionTable) ([]Mount, error) {
mounts := make([]Mount, 0, len(pt.Partitions))
// note that we are not using pt.forEachMountable() here because we
// need to keep track of the partition number (even if it's not
// mountable)
for idx, part := range pt.Partitions {
if part.Payload == nil {
continue
}
// TODO: support things like LVM here via supporting "disk.Container"
mnt, ok := part.Payload.(disk.Mountable)
if !ok {
return nil, fmt.Errorf("type %v not supported by bootupd handling yet", mnt)
}
partNum := idx + 1
name := fmt.Sprintf("part%v", partNum)
mount, err := genOsbuildMount(name, source, mnt)
if err != nil {
return nil, err
}
mount.Partition = &partNum
mounts = append(mounts, *mount)
}
devices["disk"] = Device{
Type: "org.osbuild.loopback",
Options: &LoopbackDeviceOptions{
Filename: filename,
// this must be sorted in so that mounts do not shadow each other
sort.Slice(mounts, func(i, j int) bool {
return mounts[i].Target < mounts[j].Target
})
return mounts, nil
}
func GenBootupdDevicesMounts(filename string, pt *disk.PartitionTable) (map[string]Device, []Mount, error) {
devName := "disk"
devices := map[string]Device{
devName: Device{
Type: "org.osbuild.loopback",
Options: &LoopbackDeviceOptions{
Filename: filename,
Partscan: true,
},
},
}
mounts, err := genMountsForBootupd(devName, pt)
if err != nil {
return nil, nil, err
}
if err := validateBootupdMounts(mounts); err != nil {
return nil, nil, err
}
return devices, mounts
return devices, mounts, nil
}

View file

@ -225,6 +225,23 @@ func pathEscape(path string) string {
return strings.ReplaceAll(path, "/", "-")
}
// genOsbuildMount creates an osbuild Mount for the given mountable
// filesystem: the mount is named "name", backed by the device "source", and
// targets the filesystem's own mountpoint.
//
// An error is returned for filesystem types that have no corresponding
// osbuild mount service.
func genOsbuildMount(name, source string, mnt disk.Mountable) (*Mount, error) {
	mountpoint := mnt.GetMountpoint()
	t := mnt.GetFSType()
	switch t {
	case "xfs":
		return NewXfsMount(name, source, mountpoint), nil
	case "vfat":
		return NewFATMount(name, source, mountpoint), nil
	case "ext4":
		return NewExt4Mount(name, source, mountpoint), nil
	case "btrfs":
		return NewBtrfsMount(name, source, mountpoint), nil
	default:
		// Use a format verb instead of building the format string by
		// concatenation: a non-constant format string trips `go vet` and
		// would misbehave if the fs type ever contained a '%'.
		return nil, fmt.Errorf("unknown fs type %s", t)
	}
}
func genMountsDevicesFromPt(filename string, pt *disk.PartitionTable) (string, []Mount, map[string]Device, error) {
devices := make(map[string]Device, len(pt.Partitions))
mounts := make([]Mount, 0, len(pt.Partitions))
@ -232,24 +249,13 @@ func genMountsDevicesFromPt(filename string, pt *disk.PartitionTable) (string, [
genMounts := func(mnt disk.Mountable, path []disk.Entity) error {
stageDevices, name := getDevices(path, filename, false)
mountpoint := mnt.GetMountpoint()
if mountpoint == "/" {
fsRootMntName = name
}
var mount *Mount
t := mnt.GetFSType()
switch t {
case "xfs":
mount = NewXfsMount(name, name, mountpoint)
case "vfat":
mount = NewFATMount(name, name, mountpoint)
case "ext4":
mount = NewExt4Mount(name, name, mountpoint)
case "btrfs":
mount = NewBtrfsMount(name, name, mountpoint)
default:
return fmt.Errorf("unknown fs type " + t)
mount, err := genOsbuildMount(name, name, mnt)
if err != nil {
return err
}
mounts = append(mounts, *mount)

View file

@ -36,13 +36,5 @@ func NewGroupsStageOptions(groups []users.Group) *GroupsStageOptions {
}
func GenGroupsStage(groups []users.Group) *Stage {
options := &GroupsStageOptions{
Groups: make(map[string]GroupsStageOptionsGroup, len(groups)),
}
for _, group := range groups {
options.Groups[group.Name] = GroupsStageOptionsGroup{
GID: group.GID,
}
}
return NewGroupsStage(options)
return NewGroupsStage(NewGroupsStageOptions(groups))
}

View file

@ -83,8 +83,7 @@ type GRUB2BIOS struct {
type GRUB2LegacyConfig struct {
GRUB2Config
CmdLine string `json:"cmdline,omitempty"`
Distributor string `json:"distributor,omitempty"`
CmdLine string `json:"cmdline,omitempty"`
}
type GRUB2LegacyStageOptions struct {
@ -145,8 +144,7 @@ func NewGrub2LegacyStageOptions(cfg *GRUB2Config,
RootFS: GRUB2FSDesc{UUID: &rootFsUUID},
Entries: entries,
Config: &GRUB2LegacyConfig{
CmdLine: kopts,
Distributor: "$(sed 's, release .*$,,g' /etc/system-release)",
CmdLine: kopts,
},
}
@ -154,6 +152,13 @@ func NewGrub2LegacyStageOptions(cfg *GRUB2Config,
stageOptions.Config.GRUB2Config = *cfg
}
// NB: previously, the distributor was part of the GRUB2LegacyConfig struct and
// was always set. Now it is part of GRUB2Config, which could override it above.
// Set it here if it is not set.
if stageOptions.Config.Distributor == "" {
stageOptions.Config.Distributor = "$(sed 's, release .*$,,g' /etc/system-release)"
}
bootFs := pt.FindMountable("/boot")
if bootFs != nil {
bootFsUUID := uuid.MustParse(bootFs.GetFSSpec().UUID)

View file

@ -34,12 +34,25 @@ type GRUB2UEFI struct {
Unified bool `json:"unified,omitempty"`
}
// GRUB2ConfigTimeoutStyle is the value of the grub2 'timeout_style'
// configuration key, which controls how the boot menu behaves while the
// timeout counts down.
type GRUB2ConfigTimeoutStyle string

// Valid 'timeout_style' values.
const (
	GRUB2ConfigTimeoutStyleCountdown GRUB2ConfigTimeoutStyle = "countdown"
	GRUB2ConfigTimeoutStyleHidden    GRUB2ConfigTimeoutStyle = "hidden"
	GRUB2ConfigTimeoutStyleMenu      GRUB2ConfigTimeoutStyle = "menu"
)
type GRUB2Config struct {
Default string `json:"default,omitempty"`
TerminalInput []string `json:"terminal_input,omitempty"`
TerminalOutput []string `json:"terminal_output,omitempty"`
Timeout int `json:"timeout,omitempty"`
Serial string `json:"serial,omitempty"`
Default string `json:"default,omitempty"`
DisableRecovery *bool `json:"disable_recovery,omitempty"`
DisableSubmenu *bool `json:"disable_submenu,omitempty"`
Distributor string `json:"distributor,omitempty"`
Terminal []string `json:"terminal,omitempty"`
TerminalInput []string `json:"terminal_input,omitempty"`
TerminalOutput []string `json:"terminal_output,omitempty"`
Timeout int `json:"timeout,omitempty"`
TimeoutStyle GRUB2ConfigTimeoutStyle `json:"timeout_style,omitempty"`
Serial string `json:"serial,omitempty"`
}
func (GRUB2StageOptions) isStageOptions() {}

View file

@ -1,6 +1,18 @@
package osbuild
import "github.com/osbuild/images/pkg/customizations/users"
import (
"fmt"
"path/filepath"
"strings"
"github.com/osbuild/images/pkg/customizations/fsnode"
"github.com/osbuild/images/pkg/customizations/users"
)
// Well-known destinations for generated kickstart files.
const (
	// KickstartPathInteractiveDefaults is the Anaconda interactive-defaults
	// kickstart location.
	KickstartPathInteractiveDefaults = "/usr/share/anaconda/interactive-defaults.ks"

	// KickstartPathOSBuild is the default path for the osbuild-generated
	// kickstart.
	KickstartPathOSBuild = "/osbuild.ks"
)
type KickstartStageOptions struct {
// Where to place the kickstart file
@ -14,6 +26,17 @@ type KickstartStageOptions struct {
Users map[string]UsersStageOptionsUser `json:"users,omitempty"`
Groups map[string]GroupsStageOptionsGroup `json:"groups,omitempty"`
Lang string `json:"lang,omitempty"`
Keyboard string `json:"keyboard,omitempty"`
Timezone string `json:"timezone,omitempty"`
DisplayMode string `json:"display_mode,omitempty"`
Reboot *RebootOptions `json:"reboot,omitempty"`
RootPassword *RootPasswordOptions `json:"rootpw,omitempty"`
ZeroMBR bool `json:"zerombr,omitempty"`
ClearPart *ClearPartOptions `json:"clearpart,omitempty"`
AutoPart *AutoPartOptions `json:"autopart,omitempty"`
Network []NetworkOptions `json:"network,omitempty"`
}
type LiveIMGOptions struct {
@ -36,6 +59,60 @@ type OSTreeContainerOptions struct {
SignatureVerification bool `json:"signatureverification"`
}
// RebootOptions configures the kickstart 'reboot' command.
type RebootOptions struct {
	Eject bool `json:"eject,omitempty"` // --eject
	KExec bool `json:"kexec,omitempty"` // --kexec
}
// ClearPartOptions configures the kickstart 'clearpart' command.
type ClearPartOptions struct {
	All       bool     `json:"all,omitempty"`       // --all
	InitLabel bool     `json:"initlabel,omitempty"` // --initlabel
	Drives    []string `json:"drives,omitempty"`    // --drives
	List      []string `json:"list,omitempty"`      // --list
	Linux     bool     `json:"linux,omitempty"`     // --linux
}
// AutoPartOptions configures the kickstart 'autopart' command, including its
// LUKS encryption flags. Each field maps to the autopart option named by its
// JSON tag.
type AutoPartOptions struct {
	Type             string `json:"type,omitempty"`             // --type (e.g. "plain")
	FSType           string `json:"fstype,omitempty"`           // --fstype
	NoLVM            bool   `json:"nolvm,omitempty"`            // --nolvm
	Encrypted        bool   `json:"encrypted,omitempty"`        // --encrypted
	PassPhrase       string `json:"passphrase,omitempty"`       // --passphrase
	EscrowCert       string `json:"escrowcert,omitempty"`       // --escrowcert
	BackupPassPhrase bool   `json:"backuppassphrase,omitempty"` // --backuppassphrase
	Cipher           string `json:"cipher,omitempty"`           // --cipher
	LuksVersion      string `json:"luks-version,omitempty"`     // --luks-version
	PBKdf            string `json:"pbkdf,omitempty"`            // --pbkdf
	PBKdfMemory      int    `json:"pbkdf-memory,omitempty"`     // --pbkdf-memory
	PBKdfTime        int    `json:"pbkdf-time,omitempty"`       // --pbkdf-time
	PBKdfIterations  int    `json:"pbkdf-iterations,omitempty"` // --pbkdf-iterations
	NoHome           bool   `json:"nohome,omitempty"`           // --nohome
}
// NetworkOptions configures a single kickstart 'network' line. Each field
// maps to the network option named by its JSON tag.
type NetworkOptions struct {
	Activate    *bool    `json:"activate,omitempty"` // --activate / --no-activate (nil leaves it unset)
	BootProto   string   `json:"bootproto,omitempty"`
	Device      string   `json:"device,omitempty"`
	OnBoot      string   `json:"onboot,omitempty"`
	IP          string   `json:"ip,omitempty"`
	IPV6        string   `json:"ipv6,omitempty"`
	Gateway     string   `json:"gateway,omitempty"`
	IPV6Gateway string   `json:"ipv6gateway,omitempty"`
	Nameservers []string `json:"nameservers,omitempty"`
	Netmask     string   `json:"netmask,omitempty"`
	Hostname    string   `json:"hostname,omitempty"`
	ESSid       string   `json:"essid,omitempty"` // wireless network name
	WPAKey      string   `json:"wpakey,omitempty"`
}
// RootPasswordOptions configures the kickstart 'rootpw' command.
type RootPasswordOptions struct {
	Lock      bool   `json:"lock,omitempty"`      // --lock
	PlainText bool   `json:"plaintext,omitempty"` // --plaintext
	IsCrypted bool   `json:"iscrypted,omitempty"` // --iscrypted
	AllowSSH  bool   `json:"allow_ssh,omitempty"` // --allow-ssh
	Password  string `json:"password,omitempty"`
}
// isStageOptions marks KickstartStageOptions as a valid StageOptions
// implementation.
func (KickstartStageOptions) isStageOptions() {}
// Creates an Anaconda kickstart file
@ -153,3 +230,26 @@ func NewKickstartStageOptionsWithLiveIMG(
return options, nil
}
// IncludeRaw is used for adding raw text as an extension to the kickstart
// file. First it changes the filename of the existing kickstart stage options
// and then creates a new file with the given raw content and an %include
// statement at the top that points to the renamed file. The new raw content is
// generated in place of the original file and is returned as an fsnode.File.
// The raw content *should not* contain the %include statement.
func (options *KickstartStageOptions) IncludeRaw(raw string) (*fsnode.File, error) {
	ksPath := options.Path
	ksName := filepath.Base(ksPath)
	ksExt := filepath.Ext(ksName)

	// Rename the existing kickstart: file.ext -> file-base.ext.
	baseName := strings.TrimSuffix(ksName, ksExt) + "-base" + ksExt
	options.Path = filepath.Join("/", baseName)

	// The %include must reference the renamed file's full path as seen from
	// the booted installer environment.
	content := fmt.Sprintf("%%include %s\n%s", filepath.Join("/run/install/repo", baseName), raw)
	return fsnode.NewFile(ksPath, nil, nil, nil, []byte(content))
}

View file

@ -17,6 +17,9 @@ type LoopbackDeviceOptions struct {
// Lock (bsd lock) the device after opening it
Lock bool `json:"lock,omitempty"`
// Enable partition scanning as an option
Partscan bool `json:"partscan,omitempty"`
}
func (LoopbackDeviceOptions) isDeviceOptions() {}

View file

@ -1,11 +1,12 @@
package osbuild
type Mount struct {
Name string `json:"name"`
Type string `json:"type"`
Source string `json:"source,omitempty"`
Target string `json:"target,omitempty"`
Options MountOptions `json:"options,omitempty"`
Name string `json:"name"`
Type string `json:"type"`
Source string `json:"source,omitempty"`
Target string `json:"target,omitempty"`
Options MountOptions `json:"options,omitempty"`
Partition *int `json:"partition,omitempty"`
}
type MountOptions interface {

View file

@ -1,24 +0,0 @@
package osbuild
// The ScriptStageOptions specifies a custom script to run in the image
type ScriptStageOptions struct {
Script string `json:"script"`
}
func (ScriptStageOptions) isStageOptions() {}
// NewScriptStageOptions creates a new script stage options object, with
// the mandatory fields set.
func NewScriptStageOptions(script string) *ScriptStageOptions {
return &ScriptStageOptions{
Script: script,
}
}
// NewScriptStage creates a new Script Stage object.
func NewScriptStage(options *ScriptStageOptions) *Stage {
return &Stage{
Type: "org.osbuild.script",
Options: options,
}
}

View file

@ -39,6 +39,9 @@ type TarStageOptions struct {
// How to handle the root node: include or omit
RootNode TarRootNode `json:"root-node,omitempty"`
// List of paths to include, instead of the whole tree
Paths []string `json:"paths,omitempty"`
}
func (TarStageOptions) isStageOptions() {}
@ -81,6 +84,10 @@ func (o TarStageOptions) validate() error {
}
}
if len(o.Paths) > 0 && o.RootNode != "" {
return fmt.Errorf("'paths' cannot be combined with 'root-node'")
}
return nil
}

View file

@ -71,41 +71,9 @@ func NewUsersStageOptions(userCustomizations []users.User, omitKey bool) (*Users
}
func GenUsersStage(users []users.User, omitKey bool) (*Stage, error) {
options := &UsersStageOptions{
Users: make(map[string]UsersStageOptionsUser, len(users)),
options, err := NewUsersStageOptions(users, omitKey)
if err != nil {
return nil, err
}
for _, user := range users {
// Don't hash empty passwords, set to nil to lock account
if user.Password != nil && len(*user.Password) == 0 {
user.Password = nil
}
// Hash non-empty un-hashed passwords
if user.Password != nil && !crypt.PasswordIsCrypted(*user.Password) {
cryptedPassword, err := crypt.CryptSHA512(*user.Password)
if err != nil {
return nil, err
}
user.Password = &cryptedPassword
}
userOptions := UsersStageOptionsUser{
UID: user.UID,
GID: user.GID,
Groups: user.Groups,
Description: user.Description,
Home: user.Home,
Shell: user.Shell,
Password: user.Password,
Key: nil,
}
if !omitKey {
userOptions.Key = user.Key
}
options.Users[user.Name] = userOptions
}
return NewUsersStage(options), nil
}

View file

@ -9,13 +9,6 @@ type ZiplStageOptions struct {
func (ZiplStageOptions) isStageOptions() {}
// NewZiplStageOptions creates a new ZiplStageOptions object with no timeout
func NewZiplStageOptions() *ZiplStageOptions {
return &ZiplStageOptions{
Timeout: 0,
}
}
// NewZiplStage creates a new zipl Stage object.
func NewZiplStage(options *ZiplStageOptions) *Stage {
return &Stage{

View file

@ -9,7 +9,6 @@ Adam Chalkley <atc0005@users.noreply.github.com>
Adam Fowler <adam@adamfowler.org>
Adam Shannon <adamkshannon@gmail.com>
Akanksha Panse <pansea@vmware.com>
akutz <akutz@users.noreply.github.com>
Al Biheiri <abiheiri@apple.com>
Alessandro Cortiana <alessandro.cortiana@gmail.com>
Alex <puzo2002@gmail.com>
@ -49,15 +48,20 @@ Benjamin Vickers <bvickers@vmware.com>
Bhavya Choudhary <bhavyac@vmware.com>
Bob Killen <killen.bob@gmail.com>
Brad Fitzpatrick <bradfitz@golang.org>
Brian McClain <brianmmcclain@gmail.com>
Brian Rak <brak@vmware.com>
brian57860 <brian57860@users.noreply.github.com>
Bruce Downs <bruceadowns@gmail.com>
Bruno Meneguello <1322552+bkmeneguello@users.noreply.github.com>
Bryan Venteicher <bryanventeicher@gmail.com>
C S P Nanda <cspn@google.com>
Carsten Grohmann <mail@carstengrohmann.de>
Cheng Cheng <chengch@vmware.com>
Chethan Venkatesh <chethanv@vmware.com>
Choudhury Sarada Prasanna Nanda <cspn@google.com>
Chris Marchesi <chrism@vancluevertech.com>
Christian Höltje <docwhat@gerf.org>
Christian Schlotter <christi.schlotter@gmail.com>
Clint Greenwood <cgreenwood@vmware.com>
cpiment <pimentel.carlos@gmail.com>
CuiHaozhi <cuihaozhi@chinacloud.com.cn>
@ -76,25 +80,27 @@ Davide Agnello <dagnello@hp.com>
Davinder Kumar <davinderk@vmware.com>
Defa <zhoudefa666@163.com>
demarey <christophe.demarey@inria.fr>
dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Deric Crago <deric.crago@gmail.com>
Deyan Popov <deyan.popov@gmail.com>
Dinesh Bhat <35480850+dbhat-arkin@users.noreply.github.com>
ditsuke <ditsuke@protonmail.com>
Divyen Patel <divyenp@vmware.com>
Dnyanesh Gate <dnyanesh.gate@druva.com>
Doug MacEachern <dougm@vmware.com>
East <60801291+houfangdong@users.noreply.github.com>
Eloy Coto <eloy.coto@gmail.com>
embano1 <embano1@users.noreply.github.com>
Eng Zer Jun <engzerjun@gmail.com>
Eric Edens <ericedens@google.com>
Eric Graham <16710890+Pheric@users.noreply.github.com>
Eric Gray <egray@vmware.com>
Eric Yutao <eric.yutao@gmail.com>
Erik Hollensbe <github@hollensbe.org>
Erik Lund <info@erikjensen.it>
Essodjolo KAHANAM <essodjolo@kahanam.com>
Ethan Kaley <ethan.kaley@emc.com>
Evan Chu <echu@vmware.com>
Fabio Rapposelli <fabio@vmware.com>
fabriziopandini <fpandini@vmware.com>
Faiyaz Ahmed <faiyaza@vmware.com>
Federico Pellegatta <12744504+federico-pellegatta@users.noreply.github.com>
forkbomber <forkbomber@users.noreply.github.com>
@ -103,8 +109,10 @@ freebsdly <qinhuajun@outlook.com>
Gavin Gray <gavin@infinio.com>
Gavrie Philipson <gavrie.philipson@elastifile.com>
George Hicken <ghicken@vmware.com>
George Hicken <hickeng@users.noreply.github.com>
Gerrit Renker <Gerrit.Renker@ctl.io>
gthombare <gthombare@vmware.com>
guoguangwu <guoguangwu@magic-shield.com>
Hakan Halil <hhalil@vmware.com>
HakanSunay <hakansunay@abv.bg>
Hasan Mahmood <mahmoodh@vmware.com>
@ -127,18 +135,24 @@ Jiatong Wang <wjiatong@vmware.com>
jingyizPensando <jingyiz@pensando.io>
Jonas Ausevicius <jonas.ausevicius@virtustream.com>
Jorge Sevilla <jorge.sevilla@rstor.io>
Joseph LeBlanc <jsleblanc@users.noreply.github.com>
João Pereira <joaodrp@gmail.com>
Julien PILLON <jpillon@lesalternatives.org>
Justin J. Novack <jnovack@users.noreply.github.com>
kayrus <kay.diam@gmail.com>
Keenan Brock <keenan@thebrocks.net>
Kevin George <georgek@vmware.com>
Kiril Karaatanassov <kkaraatanassov@vmware.com>
Knappek <andy.knapp.ak@gmail.com>
Kristian Alvestad <kristian.alvestad@statnett.no>
Lars Lehtonen <lars.lehtonen@gmail.com>
Leslie Wang <qiwa@pensando.io>
leslie-qiwa <leslie.qiwa@gmail.com>
Lintong Jiang <lintongj@vmware.com>
Liping Xue <lipingx@vmware.com>
liron.levin <liron.levin@wiz.io>
Louie Jiang <jiangl@vmware.com>
Lubron Zhan <lzhan@vmware.com>
Luther Monson <luther.monson@gmail.com>
Madanagopal Arunachalam <marunachalam@vmware.com>
makelarisjr <8687447+makelarisjr@users.noreply.github.com>
@ -157,10 +171,10 @@ Matt Clay <matt@mystile.com>
Matt Moore <mattmoor@vmware.com>
Matt Moriarity <matt@mattmoriarity.com>
Matthew Cosgrove <matthew.cosgrove@dell.com>
Mayank Bhatt <bmayank@vmware.com>
mbhadale <mbhadale@vmware.com>
Merlijn Sebrechts <merlijn.sebrechts@gmail.com>
Mevan Samaratunga <mevansam@gmail.com>
Michael Gasch <15986659+embano1@users.noreply.github.com>
Michael Gasch <mgasch@vmware.com>
Michal Jankowski <mjankowski@vmware.com>
Mike Schinkel <mike@newclarity.net>
@ -188,6 +202,7 @@ rconde01 <rconde01@hotmail.com>
rHermes <teodor_spaeren@riseup.net>
Rianto Wahyudi <rwahyudi@gmail.com>
Ricardo Katz <rkatz@vmware.com>
rmcqueen <rmcqueen@vmware.com>
Robin Watkins <robwatkins@gmail.com>
Rowan Jacobs <rojacobs@pivotal.io>
Roy Ling <royling0024@gmail.com>
@ -200,6 +215,7 @@ Saad Malik <saad@spectrocloud.com>
Sam Zhu <zhusa@zhusa-a02.vmware.com>
samzhu333 <45263849+samzhu333@users.noreply.github.com>
Sandeep Pissay Srinivasa Rao <ssrinivas@vmware.com>
schmikei <keith.schmitt@bluemedora.com>
Scott Holden <scott@nullops.io>
Sergey Ignatov <sergey.ignatov@jetbrains.com>
serokles <timbo.alexander@gmail.com>
@ -233,7 +249,9 @@ tshihad <tshihad9@gmail.com>
Ueli Banholzer <ueli@whatwedo.ch>
Uwe Bessle <Uwe.Bessle@iteratec.de>
Vadim Egorov <vegorov@vmware.com>
Vamshik Shetty <svamshik@vmware.com>
Vikram Krishnamurthy <vikramkrishnamu@vmware.com>
Vipul Kotkar <vkotkar@vmware.com>
volanja <volaaanja@gmail.com>
Volodymyr Bobyr <pupsua@gmail.com>
Waldek Maleska <w.maleska@gmail.com>

View file

@ -392,9 +392,10 @@ func init() {
type CnsBlockBackingDetails struct {
CnsBackingObjectDetails
BackingDiskId string `xml:"backingDiskId,omitempty"`
BackingDiskUrlPath string `xml:"backingDiskUrlPath,omitempty"`
BackingDiskObjectId string `xml:"backingDiskObjectId,omitempty"`
BackingDiskId string `xml:"backingDiskId,omitempty"`
BackingDiskUrlPath string `xml:"backingDiskUrlPath,omitempty"`
BackingDiskObjectId string `xml:"backingDiskObjectId,omitempty"`
AggregatedSnapshotCapacityInMb int64 `xml:"aggregatedSnapshotCapacityInMb,omitempty"`
}
func init() {

View file

@ -835,7 +835,7 @@ func (f *Finder) networkByID(ctx context.Context, path string) (object.NetworkRe
}
defer v.Destroy(ctx)
filter := property.Filter{
filter := property.Match{
"config.logicalSwitchUuid": path,
"config.segmentId": path,
}

View file

@ -21,5 +21,5 @@ const (
ClientName = "govmomi"
// ClientVersion is the version of this SDK
ClientVersion = "0.34.2"
ClientVersion = "0.35.0"
)

View file

@ -1,11 +1,11 @@
/*
Copyright (c) 2015 VMware, Inc. All Rights Reserved.
Copyright (c) 2015-2024 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@ -18,6 +18,7 @@ package object
import (
"context"
"fmt"
"github.com/vmware/govmomi/property"
"github.com/vmware/govmomi/task"
@ -43,18 +44,53 @@ func NewTask(c *vim25.Client, ref types.ManagedObjectReference) *Task {
return &t
}
// Wait waits for the task to complete and returns any error raised while it
// ran. It discards the task result; use WaitForResult to obtain it.
//
// Deprecated: Please use WaitEx instead.
func (t *Task) Wait(ctx context.Context) error {
	_, err := t.WaitForResult(ctx, nil)
	return err
}
func (t *Task) WaitForResult(ctx context.Context, s ...progress.Sinker) (*types.TaskInfo, error) {
// Deprecated: Please use WaitForResultEx instead.
func (t *Task) WaitForResult(ctx context.Context, s ...progress.Sinker) (taskInfo *types.TaskInfo, result error) {
var pr progress.Sinker
if len(s) == 1 {
pr = s[0]
}
p, err := property.DefaultCollector(t.c).Create(ctx)
if err != nil {
return nil, err
}
// Attempt to destroy the collector using the background context, as the
// specified context may have timed out or have been canceled.
defer func() {
if err := p.Destroy(context.Background()); err != nil {
if result == nil {
result = err
} else {
result = fmt.Errorf(
"destroy property collector failed with %s after failing to wait for updates: %w",
err,
result)
}
}
}()
return task.WaitEx(ctx, t.Reference(), p, pr)
}
func (t *Task) WaitEx(ctx context.Context) error {
_, err := t.WaitForResultEx(ctx, nil)
return err
}
func (t *Task) WaitForResultEx(ctx context.Context, s ...progress.Sinker) (*types.TaskInfo, error) {
var pr progress.Sinker
if len(s) == 1 {
pr = s[0]
}
p := property.DefaultCollector(t.c)
return task.Wait(ctx, t.Reference(), p, pr)
return task.WaitEx(ctx, t.Reference(), p, pr)
}
func (t *Task) Cancel(ctx context.Context) error {

View file

@ -1,5 +1,5 @@
/*
Copyright (c) 2015-2023 VMware, Inc. All Rights Reserved.
Copyright (c) 2015-2024 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -19,6 +19,8 @@ package property
import (
"context"
"errors"
"fmt"
"sync"
"github.com/vmware/govmomi/vim25"
"github.com/vmware/govmomi/vim25/methods"
@ -27,11 +29,19 @@ import (
"github.com/vmware/govmomi/vim25/types"
)
// ErrConcurrentCollector is returned from WaitForUpdates, WaitForUpdatesEx,
// or CheckForUpdates if any of those calls are unable to obtain an exclusive
// lock for the property collector.
var ErrConcurrentCollector = fmt.Errorf(
"only one goroutine may invoke WaitForUpdates, WaitForUpdatesEx, " +
"or CheckForUpdates on a given PropertyCollector")
// Collector models the PropertyCollector managed object.
//
// For more information, see:
// http://pubs.vmware.com/vsphere-60/index.jsp?topic=%2Fcom.vmware.wssdk.apiref.doc%2Fvmodl.query.PropertyCollector.html
type Collector struct {
mu sync.Mutex
roundTripper soap.RoundTripper
reference types.ManagedObjectReference
}
@ -46,7 +56,7 @@ func DefaultCollector(c *vim25.Client) *Collector {
return &p
}
func (p Collector) Reference() types.ManagedObjectReference {
func (p *Collector) Reference() types.ManagedObjectReference {
return p.reference
}
@ -85,18 +95,28 @@ func (p *Collector) Destroy(ctx context.Context) error {
return nil
}
func (p *Collector) CreateFilter(ctx context.Context, req types.CreateFilter) error {
func (p *Collector) CreateFilter(ctx context.Context, req types.CreateFilter) (*Filter, error) {
req.This = p.Reference()
_, err := methods.CreateFilter(ctx, p.roundTripper, &req)
resp, err := methods.CreateFilter(ctx, p.roundTripper, &req)
if err != nil {
return err
return nil, err
}
return nil
return &Filter{roundTripper: p.roundTripper, reference: resp.Returnval}, nil
}
func (p *Collector) WaitForUpdates(ctx context.Context, version string, opts ...*types.WaitOptions) (*types.UpdateSet, error) {
// Deprecated: Please use WaitForUpdatesEx instead.
func (p *Collector) WaitForUpdates(
ctx context.Context,
version string,
opts ...*types.WaitOptions) (*types.UpdateSet, error) {
if !p.mu.TryLock() {
return nil, ErrConcurrentCollector
}
defer p.mu.Unlock()
req := types.WaitForUpdatesEx{
This: p.Reference(),
Version: version,
@ -187,8 +207,15 @@ func (p *Collector) Retrieve(ctx context.Context, objs []types.ManagedObjectRefe
return mo.LoadObjectContent(res.Returnval, dst)
}
// RetrieveWithFilter populates dst as Retrieve does, but only for entities matching the given filter.
func (p *Collector) RetrieveWithFilter(ctx context.Context, objs []types.ManagedObjectReference, ps []string, dst interface{}, filter Filter) error {
// RetrieveWithFilter populates dst as Retrieve does, but only for entities
// that match the specified filter.
func (p *Collector) RetrieveWithFilter(
ctx context.Context,
objs []types.ManagedObjectReference,
ps []string,
dst interface{},
filter Match) error {
if len(filter) == 0 {
return p.Retrieve(ctx, objs, ps, dst)
}
@ -200,7 +227,7 @@ func (p *Collector) RetrieveWithFilter(ctx context.Context, objs []types.Managed
return err
}
objs = filter.MatchObjectContent(content)
objs = filter.ObjectContent(content)
if len(objs) == 0 {
return nil
@ -214,3 +241,71 @@ func (p *Collector) RetrieveOne(ctx context.Context, obj types.ManagedObjectRefe
var objs = []types.ManagedObjectReference{obj}
return p.Retrieve(ctx, objs, ps, dst)
}
// WaitForUpdatesEx waits for any of the specified properties of the specified
// managed object to change. It calls the specified function for every update it
// receives. If this function returns false, it continues waiting for
// subsequent updates. If this function returns true, it stops waiting and
// returns.
//
// If the Context is canceled, a call to CancelWaitForUpdates() is made and its
// error value is returned.
//
// By default, ObjectUpdate.MissingSet faults are not propagated to the returned
// error, set WaitFilter.PropagateMissing=true to enable MissingSet fault
// propagation.
func (p *Collector) WaitForUpdatesEx(
ctx context.Context,
opts WaitOptions,
onUpdatesFn func([]types.ObjectUpdate) bool) error {
if !p.mu.TryLock() {
return ErrConcurrentCollector
}
defer p.mu.Unlock()
req := types.WaitForUpdatesEx{
This: p.Reference(),
Options: opts.Options,
}
for {
res, err := methods.WaitForUpdatesEx(ctx, p.roundTripper, &req)
if err != nil {
if ctx.Err() == context.Canceled {
return p.CancelWaitForUpdates(context.Background())
}
return err
}
set := res.Returnval
if set == nil {
if req.Options != nil && req.Options.MaxWaitSeconds != nil {
return nil // WaitOptions.MaxWaitSeconds exceeded
}
// Retry if the result came back empty
continue
}
req.Version = set.Version
opts.Truncated = false
if set.Truncated != nil {
opts.Truncated = *set.Truncated
}
for _, fs := range set.FilterSet {
if opts.PropagateMissing {
for i := range fs.ObjectSet {
for _, p := range fs.ObjectSet[i].MissingSet {
// Same behavior as mo.ObjectContentToType()
return soap.WrapVimFault(p.Fault.Fault)
}
}
}
if onUpdatesFn(fs.ObjectSet) {
return nil
}
}
}
}

View file

@ -1,5 +1,5 @@
/*
Copyright (c) 2017 VMware, Inc. All Rights Reserved.
Copyright (c) 2017-2024 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -17,152 +17,38 @@ limitations under the License.
package property
import (
"fmt"
"path"
"reflect"
"strconv"
"strings"
"context"
"github.com/vmware/govmomi/vim25/methods"
"github.com/vmware/govmomi/vim25/soap"
"github.com/vmware/govmomi/vim25/types"
)
// Filter provides methods for matching against types.DynamicProperty
type Filter map[string]types.AnyType
// Keys returns the Filter map keys as a []string
func (f Filter) Keys() []string {
keys := make([]string, 0, len(f))
for key := range f {
keys = append(keys, key)
}
return keys
// Filter models the Filter managed object.
//
// For more information, see:
// https://vdc-download.vmware.com/vmwb-repository/dcr-public/184bb3ba-6fa8-4574-a767-d0c96e2a38f4/ba9422ef-405c-47dd-8553-e11b619185b2/SDK/vsphere-ws/docs/ReferenceGuide/vmodl.query.PropertyCollector.Filter.html.
type Filter struct {
roundTripper soap.RoundTripper
reference types.ManagedObjectReference
}
// MatchProperty returns true if a Filter entry matches the given prop.
func (f Filter) MatchProperty(prop types.DynamicProperty) bool {
if prop.Val == nil {
return false
}
match, ok := f[prop.Name]
if !ok {
return false
}
if match == prop.Val {
return true
}
ptype := reflect.TypeOf(prop.Val)
if strings.HasPrefix(ptype.Name(), "ArrayOf") {
pval := reflect.ValueOf(prop.Val).Field(0)
for i := 0; i < pval.Len(); i++ {
prop.Val = pval.Index(i).Interface()
if f.MatchProperty(prop) {
return true
}
}
return false
}
if reflect.TypeOf(match) != ptype {
s, ok := match.(string)
if !ok {
return false
}
// convert if we can
switch val := prop.Val.(type) {
case bool:
match, _ = strconv.ParseBool(s)
case int16:
x, _ := strconv.ParseInt(s, 10, 16)
match = int16(x)
case int32:
x, _ := strconv.ParseInt(s, 10, 32)
match = int32(x)
case int64:
match, _ = strconv.ParseInt(s, 10, 64)
case float32:
x, _ := strconv.ParseFloat(s, 32)
match = float32(x)
case float64:
match, _ = strconv.ParseFloat(s, 64)
case fmt.Stringer:
prop.Val = val.String()
case *types.CustomFieldStringValue:
prop.Val = fmt.Sprintf("%d:%s", val.Key, val.Value)
default:
if ptype.Kind() != reflect.String {
return false
}
// An enum type we can convert to a string type
prop.Val = reflect.ValueOf(prop.Val).String()
}
}
switch pval := prop.Val.(type) {
case string:
s := match.(string)
if s == "*" {
return true // TODO: path.Match fails if s contains a '/'
}
m, _ := path.Match(s, pval)
return m
default:
return reflect.DeepEqual(match, pval)
}
func (f Filter) Reference() types.ManagedObjectReference {
return f.reference
}
// MatchPropertyList returns true if all given props match the Filter.
func (f Filter) MatchPropertyList(props []types.DynamicProperty) bool {
for _, p := range props {
if !f.MatchProperty(p) {
return false
}
// Destroy destroys this filter.
//
// This operation can be called explicitly, or it can take place implicitly when
// the session that created the filter is closed.
func (f *Filter) Destroy(ctx context.Context) error {
if _, err := methods.DestroyPropertyFilter(
ctx,
f.roundTripper,
&types.DestroyPropertyFilter{This: f.Reference()}); err != nil {
return err
}
return len(f) == len(props) // false if a property such as VM "guest" is unset
}
// MatchObjectContent returns a list of ObjectContent.Obj where the ObjectContent.PropSet matches all properties the Filter.
func (f Filter) MatchObjectContent(objects []types.ObjectContent) []types.ManagedObjectReference {
var refs []types.ManagedObjectReference
for _, o := range objects {
if f.MatchPropertyList(o.PropSet) {
refs = append(refs, o.Obj)
}
}
return refs
}
// MatchAnyPropertyList returns true if any given props match the Filter.
func (f Filter) MatchAnyPropertyList(props []types.DynamicProperty) bool {
for _, p := range props {
if f.MatchProperty(p) {
return true
}
}
return false
}
// MatchAnyObjectContent returns a list of ObjectContent.Obj where the ObjectContent.PropSet matches any property in the Filter.
func (f Filter) MatchAnyObjectContent(objects []types.ObjectContent) []types.ManagedObjectReference {
var refs []types.ManagedObjectReference
for _, o := range objects {
if f.MatchAnyPropertyList(o.PropSet) {
refs = append(refs, o.Obj)
}
}
return refs
f.reference = types.ManagedObjectReference{}
return nil
}

170
vendor/github.com/vmware/govmomi/property/match.go generated vendored Normal file
View file

@ -0,0 +1,170 @@
/*
Copyright (c) 2017-2024 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package property
import (
"fmt"
"path"
"reflect"
"strconv"
"strings"
"github.com/vmware/govmomi/vim25/types"
)
// Match provides methods for matching against types.DynamicProperty
type Match map[string]types.AnyType
// Keys returns the Match map keys as a []string
func (m Match) Keys() []string {
keys := make([]string, 0, len(m))
for key := range m {
keys = append(keys, key)
}
return keys
}
// Property returns true if an entry matches the given prop.
func (m Match) Property(prop types.DynamicProperty) bool {
if prop.Val == nil {
return false
}
match, ok := m[prop.Name]
if !ok {
return false
}
if match == prop.Val {
return true
}
ptype := reflect.TypeOf(prop.Val)
if strings.HasPrefix(ptype.Name(), "ArrayOf") {
pval := reflect.ValueOf(prop.Val).Field(0)
for i := 0; i < pval.Len(); i++ {
prop.Val = pval.Index(i).Interface()
if m.Property(prop) {
return true
}
}
return false
}
if reflect.TypeOf(match) != ptype {
s, ok := match.(string)
if !ok {
return false
}
// convert if we can
switch val := prop.Val.(type) {
case bool:
match, _ = strconv.ParseBool(s)
case int16:
x, _ := strconv.ParseInt(s, 10, 16)
match = int16(x)
case int32:
x, _ := strconv.ParseInt(s, 10, 32)
match = int32(x)
case int64:
match, _ = strconv.ParseInt(s, 10, 64)
case float32:
x, _ := strconv.ParseFloat(s, 32)
match = float32(x)
case float64:
match, _ = strconv.ParseFloat(s, 64)
case fmt.Stringer:
prop.Val = val.String()
case *types.CustomFieldStringValue:
prop.Val = fmt.Sprintf("%d:%s", val.Key, val.Value)
default:
if ptype.Kind() != reflect.String {
return false
}
// An enum type we can convert to a string type
prop.Val = reflect.ValueOf(prop.Val).String()
}
}
switch pval := prop.Val.(type) {
case string:
s := match.(string)
if s == "*" {
return true // TODO: path.Match fails if s contains a '/'
}
m, _ := path.Match(s, pval)
return m
default:
return reflect.DeepEqual(match, pval)
}
}
// List returns true if all given props match.
func (m Match) List(props []types.DynamicProperty) bool {
for _, p := range props {
if !m.Property(p) {
return false
}
}
return len(m) == len(props) // false if a property such as VM "guest" is unset
}
// ObjectContent returns a list of ObjectContent.Obj where the
// ObjectContent.PropSet matches all properties the Filter.
func (m Match) ObjectContent(objects []types.ObjectContent) []types.ManagedObjectReference {
var refs []types.ManagedObjectReference
for _, o := range objects {
if m.List(o.PropSet) {
refs = append(refs, o.Obj)
}
}
return refs
}
// AnyList returns true if any given props match.
func (m Match) AnyList(props []types.DynamicProperty) bool {
for _, p := range props {
if m.Property(p) {
return true
}
}
return false
}
// AnyObjectContent returns a list of ObjectContent.Obj where the
// ObjectContent.PropSet matches any property.
func (m Match) AnyObjectContent(objects []types.ObjectContent) []types.ManagedObjectReference {
var refs []types.ManagedObjectReference
for _, o := range objects {
if m.AnyList(o.PropSet) {
refs = append(refs, o.Obj)
}
}
return refs
}

View file

@ -1,5 +1,5 @@
/*
Copyright (c) 2015-2017 VMware, Inc. All Rights Reserved.
Copyright (c) 2015-2024 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -18,18 +18,23 @@ package property
import (
"context"
"fmt"
"github.com/vmware/govmomi/vim25/methods"
"github.com/vmware/govmomi/vim25/soap"
"github.com/vmware/govmomi/vim25/types"
)
// WaitOptions defines options for a property collector's WaitForUpdatesEx
// method.
type WaitOptions struct {
Options *types.WaitOptions
PropagateMissing bool
Truncated bool
}
// WaitFilter provides helpers to construct a types.CreateFilter for use with property.Wait
type WaitFilter struct {
types.CreateFilter
Options *types.WaitOptions
PropagateMissing bool
Truncated bool
WaitOptions
}
// Add a new ObjectSpec and PropertySpec to the WaitFilter
@ -70,8 +75,8 @@ func Wait(ctx context.Context, c *Collector, obj types.ManagedObjectReference, p
})
}
// WaitForUpdates waits for any of the specified properties of the specified managed
// object to change. It calls the specified function for every update it
// WaitForUpdates waits for any of the specified properties of the specified
// managed object to change. It calls the specified function for every update it
// receives. If this function returns false, it continues waiting for
// subsequent updates. If this function returns true, it stops waiting and
// returns.
@ -80,14 +85,24 @@ func Wait(ctx context.Context, c *Collector, obj types.ManagedObjectReference, p
// creates a new property collector and calls CreateFilter. A new property
// collector is required because filters can only be added, not removed.
//
// If the Context is canceled, a call to CancelWaitForUpdates() is made and its error value is returned.
// The newly created collector is destroyed before this function returns (both
// in case of success or error).
// If the Context is canceled, a call to CancelWaitForUpdates() is made and its
// error value is returned. The newly created collector is destroyed before this
// function returns (both in case of success or error).
//
// By default, ObjectUpdate.MissingSet faults are not propagated to the returned error,
// set WaitFilter.PropagateMissing=true to enable MissingSet fault propagation.
func WaitForUpdates(ctx context.Context, c *Collector, filter *WaitFilter, f func([]types.ObjectUpdate) bool) error {
p, err := c.Create(ctx)
// By default, ObjectUpdate.MissingSet faults are not propagated to the returned
// error, set WaitFilter.PropagateMissing=true to enable MissingSet fault
// propagation.
//
// Deprecated: Please consider using WaitForUpdatesEx instead, as it does not
// create a new property collector, instead it destroys the property filter
// after the expected update is received.
func WaitForUpdates(
ctx context.Context,
c *Collector,
filter *WaitFilter,
onUpdatesFn func([]types.ObjectUpdate) bool) (result error) {
pc, err := c.Create(ctx)
if err != nil {
return err
}
@ -95,57 +110,65 @@ func WaitForUpdates(ctx context.Context, c *Collector, filter *WaitFilter, f fun
// Attempt to destroy the collector using the background context, as the
// specified context may have timed out or have been canceled.
defer func() {
_ = p.Destroy(context.Background())
if err := pc.Destroy(context.Background()); err != nil {
if result == nil {
result = err
} else {
result = fmt.Errorf(
"destroy property collector failed with %s after failing to wait for updates: %w",
err,
result)
}
}
}()
err = p.CreateFilter(ctx, filter.CreateFilter)
// Create a property filter for the property collector.
if _, err := pc.CreateFilter(ctx, filter.CreateFilter); err != nil {
return err
}
return pc.WaitForUpdatesEx(ctx, filter.WaitOptions, onUpdatesFn)
}
// WaitForUpdates waits for any of the specified properties of the specified
// managed object to change. It calls the specified function for every update it
// receives. If this function returns false, it continues waiting for
// subsequent updates. If this function returns true, it stops waiting and
// returns.
//
// If the Context is canceled, a call to CancelWaitForUpdates() is made and its
// error value is returned.
//
// By default, ObjectUpdate.MissingSet faults are not propagated to the returned
// error, set WaitFilter.PropagateMissing=true to enable MissingSet fault
// propagation.
func WaitForUpdatesEx(
ctx context.Context,
pc *Collector,
filter *WaitFilter,
onUpdatesFn func([]types.ObjectUpdate) bool) (result error) {
// Create a property filter for the property collector.
pf, err := pc.CreateFilter(ctx, filter.CreateFilter)
if err != nil {
return err
}
req := types.WaitForUpdatesEx{
This: p.Reference(),
Options: filter.Options,
}
for {
res, err := methods.WaitForUpdatesEx(ctx, p.roundTripper, &req)
if err != nil {
if ctx.Err() == context.Canceled {
werr := p.CancelWaitForUpdates(context.Background())
return werr
}
return err
}
set := res.Returnval
if set == nil {
if req.Options != nil && req.Options.MaxWaitSeconds != nil {
return nil // WaitOptions.MaxWaitSeconds exceeded
}
// Retry if the result came back empty
continue
}
req.Version = set.Version
filter.Truncated = false
if set.Truncated != nil {
filter.Truncated = *set.Truncated
}
for _, fs := range set.FilterSet {
if filter.PropagateMissing {
for i := range fs.ObjectSet {
for _, p := range fs.ObjectSet[i].MissingSet {
// Same behavior as mo.ObjectContentToType()
return soap.WrapVimFault(p.Fault.Fault)
}
}
}
if f(fs.ObjectSet) {
return nil
// Destroy the filter using the background context, as the specified context
// may have timed out or have been canceled.
defer func() {
if err := pf.Destroy(context.Background()); err != nil {
if result == nil {
result = err
} else {
result = fmt.Errorf(
"destroy property filter failed with %s after failing to wait for updates: %w",
err,
result)
}
}
}
}()
return pc.WaitForUpdatesEx(ctx, filter.WaitOptions, onUpdatesFn)
}

View file

@ -1,5 +1,5 @@
/*
Copyright (c) 2015-2023 VMware, Inc. All Rights Reserved.
Copyright (c) 2015-2024 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -99,7 +99,7 @@ func (t *taskCallback) fn(pc []types.PropertyChange) bool {
}
}
// Wait waits for a task to finish with either success or failure. It does so
// WaitEx waits for a task to finish with either success or failure. It does so
// by waiting for the "info" property of task managed object to change. The
// function returns when it finds the task in the "success" or "error" state.
// In the former case, the return value is nil. In the latter case the return
@ -113,7 +113,12 @@ func (t *taskCallback) fn(pc []types.PropertyChange) bool {
// The detail for the progress update is set to an empty string. If the task
// finishes in the error state, the error instance is passed through as well.
// Note that this error is the same error that is returned by this function.
func Wait(ctx context.Context, ref types.ManagedObjectReference, pc *property.Collector, s progress.Sinker) (*types.TaskInfo, error) {
func WaitEx(
ctx context.Context,
ref types.ManagedObjectReference,
pc *property.Collector,
s progress.Sinker) (*types.TaskInfo, error) {
cb := &taskCallback{}
// Include progress sink if specified
@ -122,19 +127,29 @@ func Wait(ctx context.Context, ref types.ManagedObjectReference, pc *property.Co
defer close(cb.ch)
}
filter := &property.WaitFilter{PropagateMissing: true}
filter := &property.WaitFilter{
WaitOptions: property.WaitOptions{
PropagateMissing: true,
},
}
filter.Add(ref, ref.Type, []string{"info"})
err := property.WaitForUpdates(ctx, pc, filter, func(updates []types.ObjectUpdate) bool {
for _, update := range updates {
if cb.fn(update.ChangeSet) {
return true
if err := property.WaitForUpdatesEx(
ctx,
pc,
filter,
func(updates []types.ObjectUpdate) bool {
for _, update := range updates {
// Only look at updates for the expected task object.
if update.Obj == ref {
if cb.fn(update.ChangeSet) {
return true
}
}
}
}
return false
}); err != nil {
return false
})
if err != nil {
return nil, err
}

View file

@ -91,7 +91,7 @@ func (v ContainerView) Retrieve(ctx context.Context, kind []string, ps []string,
}
// RetrieveWithFilter populates dst as Retrieve does, but only for entities matching the given filter.
func (v ContainerView) RetrieveWithFilter(ctx context.Context, kind []string, ps []string, dst interface{}, filter property.Filter) error {
func (v ContainerView) RetrieveWithFilter(ctx context.Context, kind []string, ps []string, dst interface{}, filter property.Match) error {
if len(filter) == 0 {
return v.Retrieve(ctx, kind, ps, dst)
}
@ -103,7 +103,7 @@ func (v ContainerView) RetrieveWithFilter(ctx context.Context, kind []string, ps
return err
}
objs := filter.MatchObjectContent(content)
objs := filter.ObjectContent(content)
pc := property.DefaultCollector(v.Client())
@ -111,10 +111,10 @@ func (v ContainerView) RetrieveWithFilter(ctx context.Context, kind []string, ps
}
// Find returns object references for entities of type kind, matching the given filter.
func (v ContainerView) Find(ctx context.Context, kind []string, filter property.Filter) ([]types.ManagedObjectReference, error) {
func (v ContainerView) Find(ctx context.Context, kind []string, filter property.Match) ([]types.ManagedObjectReference, error) {
if len(filter) == 0 {
// Ensure we have at least 1 filter to avoid retrieving all properties.
filter = property.Filter{"name": "*"}
filter = property.Match{"name": "*"}
}
var content []types.ObjectContent
@ -124,14 +124,14 @@ func (v ContainerView) Find(ctx context.Context, kind []string, filter property.
return nil, err
}
return filter.MatchObjectContent(content), nil
return filter.ObjectContent(content), nil
}
// FindAny returns object references for entities of type kind, matching any property the given filter.
func (v ContainerView) FindAny(ctx context.Context, kind []string, filter property.Filter) ([]types.ManagedObjectReference, error) {
func (v ContainerView) FindAny(ctx context.Context, kind []string, filter property.Match) ([]types.ManagedObjectReference, error) {
if len(filter) == 0 {
// Ensure we have at least 1 filter to avoid retrieving all properties.
filter = property.Filter{"name": "*"}
filter = property.Match{"name": "*"}
}
var content []types.ObjectContent
@ -141,5 +141,5 @@ func (v ContainerView) FindAny(ctx context.Context, kind []string, filter proper
return nil, err
}
return filter.MatchAnyObjectContent(content), nil
return filter.AnyObjectContent(content), nil
}

View file

@ -34,7 +34,7 @@ func init() {
}
type AlarmManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
DefaultExpression []types.BaseAlarmExpression `json:"defaultExpression"`
Description types.AlarmDescription `json:"description"`
@ -49,7 +49,7 @@ func init() {
}
type AuthorizationManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
PrivilegeList []types.AuthorizationPrivilege `json:"privilegeList"`
RoleList []types.AuthorizationRole `json:"roleList"`
@ -65,7 +65,7 @@ func init() {
}
type CertificateManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m CertificateManager) Reference() types.ManagedObjectReference {
@ -153,7 +153,7 @@ func init() {
}
type CryptoManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
Enabled bool `json:"enabled"`
}
@ -193,7 +193,7 @@ func init() {
}
type CustomFieldsManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
Field []types.CustomFieldDef `json:"field"`
}
@ -207,7 +207,7 @@ func init() {
}
type CustomizationSpecManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
Info []types.CustomizationSpecInfo `json:"info"`
EncryptionKey []byte `json:"encryptionKey"`
@ -262,7 +262,7 @@ func init() {
}
type DatastoreNamespaceManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m DatastoreNamespaceManager) Reference() types.ManagedObjectReference {
@ -274,7 +274,7 @@ func init() {
}
type DiagnosticManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m DiagnosticManager) Reference() types.ManagedObjectReference {
@ -318,7 +318,7 @@ func init() {
}
type DistributedVirtualSwitchManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m DistributedVirtualSwitchManager) Reference() types.ManagedObjectReference {
@ -330,7 +330,7 @@ func init() {
}
type EnvironmentBrowser struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
DatastoreBrowser *types.ManagedObjectReference `json:"datastoreBrowser"`
}
@ -354,7 +354,7 @@ func init() {
}
type EventManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
Description types.EventDescription `json:"description"`
LatestEvent types.BaseEvent `json:"latestEvent"`
@ -370,7 +370,7 @@ func init() {
}
type ExtensibleManagedObject struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
Value []types.BaseCustomFieldValue `json:"value"`
AvailableField []types.CustomFieldDef `json:"availableField"`
@ -385,7 +385,7 @@ func init() {
}
type ExtensionManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
ExtensionList []types.Extension `json:"extensionList"`
}
@ -399,7 +399,7 @@ func init() {
}
type FailoverClusterConfigurator struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
DisabledConfigureMethod []string `json:"disabledConfigureMethod"`
}
@ -413,7 +413,7 @@ func init() {
}
type FailoverClusterManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
DisabledClusterMethod []string `json:"disabledClusterMethod"`
}
@ -427,7 +427,7 @@ func init() {
}
type FileManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m FileManager) Reference() types.ManagedObjectReference {
@ -455,7 +455,7 @@ func init() {
}
type GuestAliasManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m GuestAliasManager) Reference() types.ManagedObjectReference {
@ -467,7 +467,7 @@ func init() {
}
type GuestAuthManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m GuestAuthManager) Reference() types.ManagedObjectReference {
@ -479,7 +479,7 @@ func init() {
}
type GuestFileManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m GuestFileManager) Reference() types.ManagedObjectReference {
@ -491,7 +491,7 @@ func init() {
}
type GuestOperationsManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
AuthManager *types.ManagedObjectReference `json:"authManager"`
FileManager *types.ManagedObjectReference `json:"fileManager"`
@ -509,7 +509,7 @@ func init() {
}
type GuestProcessManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m GuestProcessManager) Reference() types.ManagedObjectReference {
@ -521,7 +521,7 @@ func init() {
}
type GuestWindowsRegistryManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m GuestWindowsRegistryManager) Reference() types.ManagedObjectReference {
@ -533,7 +533,7 @@ func init() {
}
type HealthUpdateManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m HealthUpdateManager) Reference() types.ManagedObjectReference {
@ -545,7 +545,7 @@ func init() {
}
type HistoryCollector struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
Filter types.AnyType `json:"filter"`
}
@ -559,7 +559,7 @@ func init() {
}
type HostAccessManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
LockdownMode types.HostLockdownMode `json:"lockdownMode"`
}
@ -581,7 +581,7 @@ func init() {
}
type HostAssignableHardwareManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
Binding []types.HostAssignableHardwareBinding `json:"binding"`
Config types.HostAssignableHardwareConfig `json:"config"`
@ -596,7 +596,7 @@ func init() {
}
type HostAuthenticationManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
Info types.HostAuthenticationManagerInfo `json:"info"`
SupportedStore []types.ManagedObjectReference `json:"supportedStore"`
@ -611,7 +611,7 @@ func init() {
}
type HostAuthenticationStore struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
Info types.BaseHostAuthenticationStoreInfo `json:"info"`
}
@ -625,7 +625,7 @@ func init() {
}
type HostAutoStartManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
Config types.HostAutoStartManagerConfig `json:"config"`
}
@ -639,7 +639,7 @@ func init() {
}
type HostBootDeviceSystem struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m HostBootDeviceSystem) Reference() types.ManagedObjectReference {
@ -651,7 +651,7 @@ func init() {
}
type HostCacheConfigurationManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
CacheConfigurationInfo []types.HostCacheConfigurationInfo `json:"cacheConfigurationInfo"`
}
@ -665,7 +665,7 @@ func init() {
}
type HostCertificateManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
CertificateInfo types.HostCertificateManagerCertificateInfo `json:"certificateInfo"`
}
@ -689,7 +689,7 @@ func init() {
}
type HostDatastoreBrowser struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
Datastore []types.ManagedObjectReference `json:"datastore"`
SupportedType []types.BaseFileQuery `json:"supportedType"`
@ -704,7 +704,7 @@ func init() {
}
type HostDatastoreSystem struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
Datastore []types.ManagedObjectReference `json:"datastore"`
Capabilities types.HostDatastoreSystemCapabilities `json:"capabilities"`
@ -719,7 +719,7 @@ func init() {
}
type HostDateTimeSystem struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
DateTimeInfo types.HostDateTimeInfo `json:"dateTimeInfo"`
}
@ -733,7 +733,7 @@ func init() {
}
type HostDiagnosticSystem struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
ActivePartition *types.HostDiagnosticPartition `json:"activePartition"`
}
@ -755,7 +755,7 @@ func init() {
}
type HostEsxAgentHostManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
ConfigInfo types.HostEsxAgentHostManagerConfigInfo `json:"configInfo"`
}
@ -779,7 +779,7 @@ func init() {
}
type HostFirmwareSystem struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m HostFirmwareSystem) Reference() types.ManagedObjectReference {
@ -804,7 +804,7 @@ func init() {
}
type HostHealthStatusSystem struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
Runtime types.HealthSystemRuntime `json:"runtime"`
}
@ -818,7 +818,7 @@ func init() {
}
type HostImageConfigManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m HostImageConfigManager) Reference() types.ManagedObjectReference {
@ -830,7 +830,7 @@ func init() {
}
type HostKernelModuleSystem struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m HostKernelModuleSystem) Reference() types.ManagedObjectReference {
@ -842,7 +842,7 @@ func init() {
}
type HostLocalAccountManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m HostLocalAccountManager) Reference() types.ManagedObjectReference {
@ -889,7 +889,7 @@ func init() {
}
type HostNvdimmSystem struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
NvdimmSystemInfo types.NvdimmSystemInfo `json:"nvdimmSystemInfo"`
}
@ -903,7 +903,7 @@ func init() {
}
type HostPatchManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m HostPatchManager) Reference() types.ManagedObjectReference {
@ -926,7 +926,7 @@ func init() {
}
type HostPowerSystem struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
Capability types.PowerSystemCapability `json:"capability"`
Info types.PowerSystemInfo `json:"info"`
@ -972,7 +972,7 @@ func init() {
}
type HostSnmpSystem struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
Configuration types.HostSnmpConfigSpec `json:"configuration"`
Limits types.HostSnmpSystemAgentLimits `json:"limits"`
@ -987,7 +987,7 @@ func init() {
}
type HostSpecificationManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m HostSpecificationManager) Reference() types.ManagedObjectReference {
@ -1044,7 +1044,7 @@ func init() {
}
type HostVFlashManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
VFlashConfigInfo *types.HostVFlashManagerVFlashConfigInfo `json:"vFlashConfigInfo"`
}
@ -1087,7 +1087,7 @@ func init() {
}
type HostVsanInternalSystem struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m HostVsanInternalSystem) Reference() types.ManagedObjectReference {
@ -1099,7 +1099,7 @@ func init() {
}
type HostVsanSystem struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
Config types.VsanHostConfigInfo `json:"config"`
}
@ -1113,7 +1113,7 @@ func init() {
}
type HttpNfcLease struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
InitializeProgress int32 `json:"initializeProgress"`
TransferProgress int32 `json:"transferProgress"`
@ -1141,7 +1141,7 @@ func init() {
}
type IoFilterManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m IoFilterManager) Reference() types.ManagedObjectReference {
@ -1153,7 +1153,7 @@ func init() {
}
type IpPoolManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m IpPoolManager) Reference() types.ManagedObjectReference {
@ -1165,7 +1165,7 @@ func init() {
}
type IscsiManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m IscsiManager) Reference() types.ManagedObjectReference {
@ -1177,7 +1177,7 @@ func init() {
}
type LicenseAssignmentManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m LicenseAssignmentManager) Reference() types.ManagedObjectReference {
@ -1189,7 +1189,7 @@ func init() {
}
type LicenseManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
Source types.BaseLicenseSource `json:"source"`
SourceAvailable bool `json:"sourceAvailable"`
@ -1218,7 +1218,7 @@ func init() {
}
type LocalizationManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
Catalog []types.LocalizationManagerMessageCatalog `json:"catalog"`
}
@ -1255,7 +1255,7 @@ func init() {
}
type ManagedObjectView struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
View []types.ManagedObjectReference `json:"view"`
}
@ -1269,7 +1269,7 @@ func init() {
}
type MessageBusProxy struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m MessageBusProxy) Reference() types.ManagedObjectReference {
@ -1309,7 +1309,7 @@ func init() {
}
type OptionManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
SupportedOption []types.OptionDef `json:"supportedOption"`
Setting []types.BaseOptionValue `json:"setting"`
@ -1324,7 +1324,7 @@ func init() {
}
type OverheadMemoryManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m OverheadMemoryManager) Reference() types.ManagedObjectReference {
@ -1336,7 +1336,7 @@ func init() {
}
type OvfManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
OvfImportOption []types.OvfOptionInfo `json:"ovfImportOption"`
OvfExportOption []types.OvfOptionInfo `json:"ovfExportOption"`
@ -1351,7 +1351,7 @@ func init() {
}
type PerformanceManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
Description types.PerformanceDescription `json:"description"`
HistoricalInterval []types.PerfInterval `json:"historicalInterval"`
@ -1367,7 +1367,7 @@ func init() {
}
type Profile struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
Config types.BaseProfileConfigInfo `json:"config"`
Description *types.ProfileDescription `json:"description"`
@ -1387,7 +1387,7 @@ func init() {
}
type ProfileComplianceManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m ProfileComplianceManager) Reference() types.ManagedObjectReference {
@ -1399,7 +1399,7 @@ func init() {
}
type ProfileManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
Profile []types.ManagedObjectReference `json:"profile"`
}
@ -1413,7 +1413,7 @@ func init() {
}
type PropertyCollector struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
Filter []types.ManagedObjectReference `json:"filter"`
}
@ -1427,7 +1427,7 @@ func init() {
}
type PropertyFilter struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
Spec types.PropertyFilterSpec `json:"spec"`
PartialUpdates bool `json:"partialUpdates"`
@ -1442,7 +1442,7 @@ func init() {
}
type ResourcePlanningManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m ResourcePlanningManager) Reference() types.ManagedObjectReference {
@ -1485,7 +1485,7 @@ func init() {
}
type ScheduledTaskManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
ScheduledTask []types.ManagedObjectReference `json:"scheduledTask"`
Description types.ScheduledTaskDescription `json:"description"`
@ -1500,7 +1500,7 @@ func init() {
}
type SearchIndex struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m SearchIndex) Reference() types.ManagedObjectReference {
@ -1512,7 +1512,7 @@ func init() {
}
type ServiceInstance struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
ServerClock time.Time `json:"serverClock"`
Capability types.Capability `json:"capability"`
@ -1528,7 +1528,7 @@ func init() {
}
type ServiceManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
Service []types.ServiceManagerServiceInfo `json:"service"`
}
@ -1542,7 +1542,7 @@ func init() {
}
type SessionManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
SessionList []types.UserSession `json:"sessionList"`
CurrentSession *types.UserSession `json:"currentSession"`
@ -1561,7 +1561,7 @@ func init() {
}
type SimpleCommand struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
EncodingType types.SimpleCommandEncoding `json:"encodingType"`
Entity types.ServiceManagerServiceInfo `json:"entity"`
@ -1576,7 +1576,7 @@ func init() {
}
type SiteInfoManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m SiteInfoManager) Reference() types.ManagedObjectReference {
@ -1599,7 +1599,7 @@ func init() {
}
type StorageQueryManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m StorageQueryManager) Reference() types.ManagedObjectReference {
@ -1611,7 +1611,7 @@ func init() {
}
type StorageResourceManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m StorageResourceManager) Reference() types.ManagedObjectReference {
@ -1643,7 +1643,7 @@ func init() {
}
type TaskManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
RecentTask []types.ManagedObjectReference `json:"recentTask"`
Description types.TaskDescription `json:"description"`
@ -1659,7 +1659,7 @@ func init() {
}
type TenantTenantManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m TenantTenantManager) Reference() types.ManagedObjectReference {
@ -1671,7 +1671,7 @@ func init() {
}
type UserDirectory struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
DomainList []string `json:"domainList"`
}
@ -1685,7 +1685,7 @@ func init() {
}
type VStorageObjectManagerBase struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m VStorageObjectManagerBase) Reference() types.ManagedObjectReference {
@ -1705,7 +1705,7 @@ func init() {
}
type View struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m View) Reference() types.ManagedObjectReference {
@ -1717,7 +1717,7 @@ func init() {
}
type ViewManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
ViewList []types.ManagedObjectReference `json:"viewList"`
}
@ -1746,7 +1746,7 @@ func init() {
}
type VirtualDiskManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m VirtualDiskManager) Reference() types.ManagedObjectReference {
@ -1788,7 +1788,7 @@ func init() {
}
type VirtualMachineCompatibilityChecker struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m VirtualMachineCompatibilityChecker) Reference() types.ManagedObjectReference {
@ -1800,7 +1800,7 @@ func init() {
}
type VirtualMachineGuestCustomizationManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m VirtualMachineGuestCustomizationManager) Reference() types.ManagedObjectReference {
@ -1812,7 +1812,7 @@ func init() {
}
type VirtualMachineProvisioningChecker struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m VirtualMachineProvisioningChecker) Reference() types.ManagedObjectReference {
@ -1836,7 +1836,7 @@ func init() {
}
type VirtualizationManager struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m VirtualizationManager) Reference() types.ManagedObjectReference {
@ -1856,7 +1856,7 @@ func init() {
}
type VsanUpgradeSystem struct {
Self types.ManagedObjectReference
Self types.ManagedObjectReference `json:"self"`
}
func (m VsanUpgradeSystem) Reference() types.ManagedObjectReference {

View file

@ -77,7 +77,11 @@ func ApplyPropertyChange(obj Reference, changes []types.PropertyChange) {
for _, p := range changes {
rv, ok := t.props[p.Name]
if !ok {
panic(p.Name + " not found")
// For now, skip unknown properties allowing PC updates to be triggered
// for partial updates (e.g. extensionList["my.extension"]).
// Ultimately we should support partial updates by assigning the value
// reflectively in assignValue.
continue
}
assignValue(v, rv, reflect.ValueOf(p.Val))

Some files were not shown because too many files have changed in this diff Show more