deps: update osbuild/images to 157e798fdf8d

Update the osbuild/images dependency from 246b718310ea to 157e798fdf8d.
Author: Achilleas Koutsou, 2023-08-01 12:42:59 +02:00; committed by: Tomáš Hozza
parent 4c7b3dd25a
commit a4798ea64d
55 changed files with 42304 additions and 41796 deletions
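
For context, a bump like this is normally produced with the Go module tooling rather than by hand-editing vendored sources. A minimal sketch of the likely workflow, assuming the repository vendors its dependencies (as the vendor changes below suggest):

    go get github.com/osbuild/images@157e798fdf8d
    go mod tidy
    go mod vendor

go get resolves the short commit hash to the pseudo-version recorded in go.mod, and go mod vendor refreshes the vendored tree, which is where most of the 55 changed files come from.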

go.mod

@ -5,14 +5,14 @@ go 1.19
exclude github.com/mattn/go-sqlite3 v2.0.3+incompatible
require (
cloud.google.com/go/compute v1.22.0
cloud.google.com/go/compute v1.23.0
cloud.google.com/go/storage v1.31.0
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0
github.com/Azure/go-autorest/autorest v0.11.29
github.com/Azure/go-autorest/autorest/azure/auth v0.5.12
github.com/BurntSushi/toml v1.3.2
github.com/aws/aws-sdk-go v1.44.304
github.com/aws/aws-sdk-go v1.44.313
github.com/coreos/go-semver v0.3.1
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
github.com/deepmap/oapi-codegen v1.8.2
@ -31,7 +31,7 @@ require (
github.com/labstack/gommon v0.4.0
github.com/openshift-online/ocm-sdk-go v0.1.315
github.com/oracle/oci-go-sdk/v54 v54.0.0
github.com/osbuild/images v0.0.0-20230720095604-246b718310ea
github.com/osbuild/images v0.0.0-20230801094908-157e798fdf8d
github.com/prometheus/client_golang v1.16.0
github.com/segmentio/ksuid v1.0.4
github.com/sirupsen/logrus v1.9.3
@ -43,7 +43,7 @@ require (
golang.org/x/oauth2 v0.10.0
golang.org/x/sync v0.3.0
golang.org/x/sys v0.10.0
google.golang.org/api v0.132.0
google.golang.org/api v0.134.0
)
require (
@ -173,7 +173,7 @@ require (
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771 // indirect
google.golang.org/grpc v1.56.2 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
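
Note on the version string above: Go records untagged commits as pseudo-versions, so v0.0.0-20230801094908-157e798fdf8d breaks down as the base version v0.0.0, the upstream commit timestamp 20230801094908 (2023-08-01 09:49:08 UTC), and the 12-character commit prefix 157e798fdf8d referenced in the commit title.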

go.sum

@ -21,8 +21,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute v1.22.0 h1:cB8R6FtUtT1TYGl5R3xuxnW6OUIc/DrT2aiR16TTG7Y=
cloud.google.com/go/compute v1.22.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
@ -100,8 +100,8 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/aws/aws-sdk-go v1.44.304 h1:crcJBVeewWcVAXDQChzJWZYGFq9i1TYLycAtQ6Xpi4c=
github.com/aws/aws-sdk-go v1.44.304/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.44.313 h1:u6EuNQqgAmi09GEZ5g/XGHLF0XV31WcdU5rnHyIBHBc=
github.com/aws/aws-sdk-go v1.44.313/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@ -580,8 +580,8 @@ github.com/openshift-online/ocm-sdk-go v0.1.315 h1:e4kDMkrWGyl90zF1dTD+GzcQlO5E8
github.com/openshift-online/ocm-sdk-go v0.1.315/go.mod h1:KYOw8kAKAHyPrJcQoVR82CneQ4ofC02Na4cXXaTq4Nw=
github.com/oracle/oci-go-sdk/v54 v54.0.0 h1:CDLjeSejv2aDpElAJrhKpi6zvT/zhZCZuXchUUZ+LS4=
github.com/oracle/oci-go-sdk/v54 v54.0.0/go.mod h1:+t+yvcFGVp+3ZnztnyxqXfQDsMlq8U25faBLa+mqCMc=
github.com/osbuild/images v0.0.0-20230720095604-246b718310ea h1:dNyzPyEiOPEg5EtunpoqIPd2FzAOdCdtcHgXHk7qmxo=
github.com/osbuild/images v0.0.0-20230720095604-246b718310ea/go.mod h1:ESol1XpXjKSTBzv1eyQHTUrsYdMUgyEWp/4CS+C20EU=
github.com/osbuild/images v0.0.0-20230801094908-157e798fdf8d h1:tRM7/UW2AK1u2mRL0anFtBZ0m+2U3h9J1FK7Y08yCsQ=
github.com/osbuild/images v0.0.0-20230801094908-157e798fdf8d/go.mod h1:l0EGnNOVGwW4HdwZ/LwuT1yot0ITyz3cjezc01w7Rmo=
github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@ -1058,8 +1058,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.132.0 h1:8t2/+qZ26kAOGSmOiHwVycqVaDg7q3JDILrNi/Z6rvc=
google.golang.org/api v0.132.0/go.mod h1:AeTBC6GpJnJSRJjktDcPX0QwtS8pGYZOV6MSuSCusw0=
google.golang.org/api v0.134.0 h1:ktL4Goua+UBgoP1eL1/60LwZJqa1sIzkLmvoR3hR6Gw=
google.golang.org/api v0.134.0/go.mod h1:sjRL3UnjTx5UqNQS9EWr9N8p7xbHpy1k0XGRLCf3Spk=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -1102,8 +1102,8 @@ google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 h1:Au6te5hbKUV8pIY
google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y=
google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 h1:XVeBY8d/FaK4848myy41HBqnDwvxeV3zMZhwN1TvAMU=
google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771 h1:Z8qdAF9GFsmcUuWQ5KVYIpP3PCKydn/YKORnghIalu4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=

File diff suppressed because it is too large.

@ -3317,6 +3317,11 @@
"List"
]
},
"Patch": {
"methods": [
"Patch"
]
},
"SetIamPolicy": {
"methods": [
"SetIamPolicy"


@ -155,7 +155,7 @@ func (c *RegionSecurityPoliciesClient) List(ctx context.Context, req *computepb.
return c.internalClient.List(ctx, req, opts...)
}
// Patch patches the specified policy with the data included in the request. To clear fields in the rule, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.
// Patch patches the specified policy with the data included in the request. To clear fields in the policy, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.
func (c *RegionSecurityPoliciesClient) Patch(ctx context.Context, req *computepb.PatchRegionSecurityPolicyRequest, opts ...gax.CallOption) (*Operation, error) {
return c.internalClient.Patch(ctx, req, opts...)
}
@ -540,7 +540,7 @@ func (c *regionSecurityPoliciesRESTClient) List(ctx context.Context, req *comput
return it
}
// Patch patches the specified policy with the data included in the request. To clear fields in the rule, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.
// Patch patches the specified policy with the data included in the request. To clear fields in the policy, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.
func (c *regionSecurityPoliciesRESTClient) Patch(ctx context.Context, req *computepb.PatchRegionSecurityPolicyRequest, opts ...gax.CallOption) (*Operation, error) {
m := protojson.MarshalOptions{AllowPartial: true}
body := req.GetSecurityPolicyResource()


@ -50,6 +50,7 @@ type ResourcePoliciesCallOptions struct {
GetIamPolicy []gax.CallOption
Insert []gax.CallOption
List []gax.CallOption
Patch []gax.CallOption
SetIamPolicy []gax.CallOption
TestIamPermissions []gax.CallOption
}
@ -110,6 +111,9 @@ func defaultResourcePoliciesRESTCallOptions() *ResourcePoliciesCallOptions {
http.StatusServiceUnavailable)
}),
},
Patch: []gax.CallOption{
gax.WithTimeout(600000 * time.Millisecond),
},
SetIamPolicy: []gax.CallOption{
gax.WithTimeout(600000 * time.Millisecond),
},
@ -130,6 +134,7 @@ type internalResourcePoliciesClient interface {
GetIamPolicy(context.Context, *computepb.GetIamPolicyResourcePolicyRequest, ...gax.CallOption) (*computepb.Policy, error)
Insert(context.Context, *computepb.InsertResourcePolicyRequest, ...gax.CallOption) (*Operation, error)
List(context.Context, *computepb.ListResourcePoliciesRequest, ...gax.CallOption) *ResourcePolicyIterator
Patch(context.Context, *computepb.PatchResourcePolicyRequest, ...gax.CallOption) (*Operation, error)
SetIamPolicy(context.Context, *computepb.SetIamPolicyResourcePolicyRequest, ...gax.CallOption) (*computepb.Policy, error)
TestIamPermissions(context.Context, *computepb.TestIamPermissionsResourcePolicyRequest, ...gax.CallOption) (*computepb.TestPermissionsResponse, error)
}
@ -199,6 +204,11 @@ func (c *ResourcePoliciesClient) List(ctx context.Context, req *computepb.ListRe
return c.internalClient.List(ctx, req, opts...)
}
// Patch modify the specified resource policy.
func (c *ResourcePoliciesClient) Patch(ctx context.Context, req *computepb.PatchResourcePolicyRequest, opts ...gax.CallOption) (*Operation, error) {
return c.internalClient.Patch(ctx, req, opts...)
}
// SetIamPolicy sets the access control policy on the specified resource. Replaces any existing policy.
func (c *ResourcePoliciesClient) SetIamPolicy(ctx context.Context, req *computepb.SetIamPolicyResourcePolicyRequest, opts ...gax.CallOption) (*computepb.Policy, error) {
return c.internalClient.SetIamPolicy(ctx, req, opts...)
@ -752,6 +762,84 @@ func (c *resourcePoliciesRESTClient) List(ctx context.Context, req *computepb.Li
return it
}
// Patch modify the specified resource policy.
func (c *resourcePoliciesRESTClient) Patch(ctx context.Context, req *computepb.PatchResourcePolicyRequest, opts ...gax.CallOption) (*Operation, error) {
m := protojson.MarshalOptions{AllowPartial: true}
body := req.GetResourcePolicyResource()
jsonReq, err := m.Marshal(body)
if err != nil {
return nil, err
}
baseUrl, err := url.Parse(c.endpoint)
if err != nil {
return nil, err
}
baseUrl.Path += fmt.Sprintf("/compute/v1/projects/%v/regions/%v/resourcePolicies/%v", req.GetProject(), req.GetRegion(), req.GetResourcePolicy())
params := url.Values{}
if req != nil && req.RequestId != nil {
params.Add("requestId", fmt.Sprintf("%v", req.GetRequestId()))
}
if req != nil && req.UpdateMask != nil {
params.Add("updateMask", fmt.Sprintf("%v", req.GetUpdateMask()))
}
baseUrl.RawQuery = params.Encode()
// Build HTTP headers from client and context metadata.
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v&%s=%v&%s=%v", "project", url.QueryEscape(req.GetProject()), "region", url.QueryEscape(req.GetRegion()), "resource_policy", url.QueryEscape(req.GetResourcePolicy())))
headers := buildHeaders(ctx, c.xGoogMetadata, md, metadata.Pairs("Content-Type", "application/json"))
opts = append((*c.CallOptions).Patch[0:len((*c.CallOptions).Patch):len((*c.CallOptions).Patch)], opts...)
unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
resp := &computepb.Operation{}
e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
if settings.Path != "" {
baseUrl.Path = settings.Path
}
httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq))
if err != nil {
return err
}
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
httpRsp, err := c.httpClient.Do(httpReq)
if err != nil {
return err
}
defer httpRsp.Body.Close()
if err = googleapi.CheckResponse(httpRsp); err != nil {
return err
}
buf, err := io.ReadAll(httpRsp.Body)
if err != nil {
return err
}
if err := unm.Unmarshal(buf, resp); err != nil {
return err
}
return nil
}, opts...)
if e != nil {
return nil, e
}
op := &Operation{
&regionOperationsHandle{
c: c.operationClient,
proto: resp,
project: req.GetProject(),
region: req.GetRegion(),
},
}
return op, nil
}
// SetIamPolicy sets the access control policy on the specified resource. Replaces any existing policy.
func (c *resourcePoliciesRESTClient) SetIamPolicy(ctx context.Context, req *computepb.SetIamPolicyResourcePolicyRequest, opts ...gax.CallOption) (*computepb.Policy, error) {
m := protojson.MarshalOptions{AllowPartial: true}
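
For illustration only, a hypothetical caller of the newly generated ResourcePolicies Patch method could look like the sketch below; the project, region, and policy names are placeholders, update-mask handling is omitted, and the import layout assumes the current cloud.google.com/go/compute/apiv1 packaging:

package example

import (
	"context"

	compute "cloud.google.com/go/compute/apiv1"
	computepb "cloud.google.com/go/compute/apiv1/computepb"
	"google.golang.org/protobuf/proto"
)

// patchResourcePolicy patches the description of an existing resource policy
// via the Patch RPC added in this vendored client (illustrative sketch, not part of the diff).
func patchResourcePolicy(ctx context.Context) error {
	c, err := compute.NewResourcePoliciesRESTClient(ctx)
	if err != nil {
		return err
	}
	defer c.Close()

	req := &computepb.PatchResourcePolicyRequest{
		Project:        "my-project",  // placeholder
		Region:         "us-central1", // placeholder
		ResourcePolicy: "my-policy",   // placeholder
		ResourcePolicyResource: &computepb.ResourcePolicy{
			Description: proto.String("updated via Patch"),
		},
	}
	op, err := c.Patch(ctx, req)
	if err != nil {
		return err
	}
	// Patch returns a regional operation; wait for it to complete.
	return op.Wait(ctx)
}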


@ -238,12 +238,12 @@ func (c *SecurityPoliciesClient) ListPreconfiguredExpressionSets(ctx context.Con
return c.internalClient.ListPreconfiguredExpressionSets(ctx, req, opts...)
}
// Patch patches the specified policy with the data included in the request. To clear fields in the rule, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.
// Patch patches the specified policy with the data included in the request. To clear fields in the policy, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.
func (c *SecurityPoliciesClient) Patch(ctx context.Context, req *computepb.PatchSecurityPolicyRequest, opts ...gax.CallOption) (*Operation, error) {
return c.internalClient.Patch(ctx, req, opts...)
}
// PatchRule patches a rule at the specified priority.
// PatchRule patches a rule at the specified priority. To clear fields in the rule, leave the fields empty and specify them in the updateMask.
func (c *SecurityPoliciesClient) PatchRule(ctx context.Context, req *computepb.PatchRuleSecurityPolicyRequest, opts ...gax.CallOption) (*Operation, error) {
return c.internalClient.PatchRule(ctx, req, opts...)
}
@ -948,7 +948,7 @@ func (c *securityPoliciesRESTClient) ListPreconfiguredExpressionSets(ctx context
return resp, nil
}
// Patch patches the specified policy with the data included in the request. To clear fields in the rule, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.
// Patch patches the specified policy with the data included in the request. To clear fields in the policy, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.
func (c *securityPoliciesRESTClient) Patch(ctx context.Context, req *computepb.PatchSecurityPolicyRequest, opts ...gax.CallOption) (*Operation, error) {
m := protojson.MarshalOptions{AllowPartial: true}
body := req.GetSecurityPolicyResource()
@ -1022,7 +1022,7 @@ func (c *securityPoliciesRESTClient) Patch(ctx context.Context, req *computepb.P
return op, nil
}
// PatchRule patches a rule at the specified priority.
// PatchRule patches a rule at the specified priority. To clear fields in the rule, leave the fields empty and specify them in the updateMask.
func (c *securityPoliciesRESTClient) PatchRule(ctx context.Context, req *computepb.PatchRuleSecurityPolicyRequest, opts ...gax.CallOption) (*Operation, error) {
m := protojson.MarshalOptions{AllowPartial: true}
body := req.GetSecurityPolicyRuleResource()


@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
const Version = "1.22.0"
const Version = "1.23.0"


@ -2660,6 +2660,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-south-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@ -2675,12 +2678,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-2",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-south-2",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@ -2690,6 +2699,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@ -8566,6 +8578,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-south-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@ -8575,18 +8590,27 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
Region: "ap-southeast-4",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-2",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-south-2",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@ -8596,6 +8620,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@ -10856,6 +10883,9 @@ var awsPartition = partition{
},
"emr-containers": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
}: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
@ -12805,6 +12835,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-south-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@ -12814,6 +12847,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
Region: "ap-southeast-4",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@ -13926,6 +13962,12 @@ var awsPartition = partition{
}: endpoint{
Hostname: "internetmonitor.ca-central-1.api.aws",
},
endpointKey{
Region: "ca-central-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "internetmonitor-fips.ca-central-1.api.aws",
},
endpointKey{
Region: "eu-central-1",
}: endpoint{
@ -13986,21 +14028,45 @@ var awsPartition = partition{
}: endpoint{
Hostname: "internetmonitor.us-east-1.api.aws",
},
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "internetmonitor-fips.us-east-1.api.aws",
},
endpointKey{
Region: "us-east-2",
}: endpoint{
Hostname: "internetmonitor.us-east-2.api.aws",
},
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "internetmonitor-fips.us-east-2.api.aws",
},
endpointKey{
Region: "us-west-1",
}: endpoint{
Hostname: "internetmonitor.us-west-1.api.aws",
},
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "internetmonitor-fips.us-west-1.api.aws",
},
endpointKey{
Region: "us-west-2",
}: endpoint{
Hostname: "internetmonitor.us-west-2.api.aws",
},
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "internetmonitor-fips.us-west-2.api.aws",
},
},
},
"iot": service{
@ -25178,7 +25244,7 @@ var awsPartition = partition{
Region: "af-south-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.af-south-1.amazonaws.com",
Hostname: "servicediscovery.af-south-1.api.aws",
},
endpointKey{
Region: "ap-east-1",
@ -25187,7 +25253,7 @@ var awsPartition = partition{
Region: "ap-east-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.ap-east-1.amazonaws.com",
Hostname: "servicediscovery.ap-east-1.api.aws",
},
endpointKey{
Region: "ap-northeast-1",
@ -25196,7 +25262,7 @@ var awsPartition = partition{
Region: "ap-northeast-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.ap-northeast-1.amazonaws.com",
Hostname: "servicediscovery.ap-northeast-1.api.aws",
},
endpointKey{
Region: "ap-northeast-2",
@ -25205,7 +25271,7 @@ var awsPartition = partition{
Region: "ap-northeast-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.ap-northeast-2.amazonaws.com",
Hostname: "servicediscovery.ap-northeast-2.api.aws",
},
endpointKey{
Region: "ap-northeast-3",
@ -25214,7 +25280,7 @@ var awsPartition = partition{
Region: "ap-northeast-3",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.ap-northeast-3.amazonaws.com",
Hostname: "servicediscovery.ap-northeast-3.api.aws",
},
endpointKey{
Region: "ap-south-1",
@ -25223,7 +25289,7 @@ var awsPartition = partition{
Region: "ap-south-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.ap-south-1.amazonaws.com",
Hostname: "servicediscovery.ap-south-1.api.aws",
},
endpointKey{
Region: "ap-south-2",
@ -25232,7 +25298,7 @@ var awsPartition = partition{
Region: "ap-south-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.ap-south-2.amazonaws.com",
Hostname: "servicediscovery.ap-south-2.api.aws",
},
endpointKey{
Region: "ap-southeast-1",
@ -25241,7 +25307,7 @@ var awsPartition = partition{
Region: "ap-southeast-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.ap-southeast-1.amazonaws.com",
Hostname: "servicediscovery.ap-southeast-1.api.aws",
},
endpointKey{
Region: "ap-southeast-2",
@ -25250,7 +25316,7 @@ var awsPartition = partition{
Region: "ap-southeast-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.ap-southeast-2.amazonaws.com",
Hostname: "servicediscovery.ap-southeast-2.api.aws",
},
endpointKey{
Region: "ap-southeast-3",
@ -25259,7 +25325,7 @@ var awsPartition = partition{
Region: "ap-southeast-3",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.ap-southeast-3.amazonaws.com",
Hostname: "servicediscovery.ap-southeast-3.api.aws",
},
endpointKey{
Region: "ap-southeast-4",
@ -25268,7 +25334,7 @@ var awsPartition = partition{
Region: "ap-southeast-4",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.ap-southeast-4.amazonaws.com",
Hostname: "servicediscovery.ap-southeast-4.api.aws",
},
endpointKey{
Region: "ca-central-1",
@ -25277,7 +25343,7 @@ var awsPartition = partition{
Region: "ca-central-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.ca-central-1.amazonaws.com",
Hostname: "servicediscovery.ca-central-1.api.aws",
},
endpointKey{
Region: "ca-central-1",
@ -25285,6 +25351,12 @@ var awsPartition = partition{
}: endpoint{
Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com",
},
endpointKey{
Region: "ca-central-1",
Variant: fipsVariant | dualStackVariant,
}: endpoint{
Hostname: "servicediscovery-fips.ca-central-1.api.aws",
},
endpointKey{
Region: "ca-central-1-fips",
}: endpoint{
@ -25301,7 +25373,7 @@ var awsPartition = partition{
Region: "eu-central-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.eu-central-1.amazonaws.com",
Hostname: "servicediscovery.eu-central-1.api.aws",
},
endpointKey{
Region: "eu-central-2",
@ -25310,7 +25382,7 @@ var awsPartition = partition{
Region: "eu-central-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.eu-central-2.amazonaws.com",
Hostname: "servicediscovery.eu-central-2.api.aws",
},
endpointKey{
Region: "eu-north-1",
@ -25319,7 +25391,7 @@ var awsPartition = partition{
Region: "eu-north-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.eu-north-1.amazonaws.com",
Hostname: "servicediscovery.eu-north-1.api.aws",
},
endpointKey{
Region: "eu-south-1",
@ -25328,7 +25400,7 @@ var awsPartition = partition{
Region: "eu-south-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.eu-south-1.amazonaws.com",
Hostname: "servicediscovery.eu-south-1.api.aws",
},
endpointKey{
Region: "eu-south-2",
@ -25337,7 +25409,7 @@ var awsPartition = partition{
Region: "eu-south-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.eu-south-2.amazonaws.com",
Hostname: "servicediscovery.eu-south-2.api.aws",
},
endpointKey{
Region: "eu-west-1",
@ -25346,7 +25418,7 @@ var awsPartition = partition{
Region: "eu-west-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.eu-west-1.amazonaws.com",
Hostname: "servicediscovery.eu-west-1.api.aws",
},
endpointKey{
Region: "eu-west-2",
@ -25355,7 +25427,7 @@ var awsPartition = partition{
Region: "eu-west-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.eu-west-2.amazonaws.com",
Hostname: "servicediscovery.eu-west-2.api.aws",
},
endpointKey{
Region: "eu-west-3",
@ -25364,7 +25436,7 @@ var awsPartition = partition{
Region: "eu-west-3",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.eu-west-3.amazonaws.com",
Hostname: "servicediscovery.eu-west-3.api.aws",
},
endpointKey{
Region: "me-central-1",
@ -25373,7 +25445,7 @@ var awsPartition = partition{
Region: "me-central-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.me-central-1.amazonaws.com",
Hostname: "servicediscovery.me-central-1.api.aws",
},
endpointKey{
Region: "me-south-1",
@ -25382,7 +25454,7 @@ var awsPartition = partition{
Region: "me-south-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.me-south-1.amazonaws.com",
Hostname: "servicediscovery.me-south-1.api.aws",
},
endpointKey{
Region: "sa-east-1",
@ -25391,7 +25463,7 @@ var awsPartition = partition{
Region: "sa-east-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.sa-east-1.amazonaws.com",
Hostname: "servicediscovery.sa-east-1.api.aws",
},
endpointKey{
Region: "us-east-1",
@ -25400,7 +25472,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.us-east-1.amazonaws.com",
Hostname: "servicediscovery.us-east-1.api.aws",
},
endpointKey{
Region: "us-east-1",
@ -25408,6 +25480,12 @@ var awsPartition = partition{
}: endpoint{
Hostname: "servicediscovery-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-1",
Variant: fipsVariant | dualStackVariant,
}: endpoint{
Hostname: "servicediscovery-fips.us-east-1.api.aws",
},
endpointKey{
Region: "us-east-1-fips",
}: endpoint{
@ -25424,7 +25502,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.us-east-2.amazonaws.com",
Hostname: "servicediscovery.us-east-2.api.aws",
},
endpointKey{
Region: "us-east-2",
@ -25432,6 +25510,12 @@ var awsPartition = partition{
}: endpoint{
Hostname: "servicediscovery-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
Variant: fipsVariant | dualStackVariant,
}: endpoint{
Hostname: "servicediscovery-fips.us-east-2.api.aws",
},
endpointKey{
Region: "us-east-2-fips",
}: endpoint{
@ -25448,7 +25532,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.us-west-1.amazonaws.com",
Hostname: "servicediscovery.us-west-1.api.aws",
},
endpointKey{
Region: "us-west-1",
@ -25456,6 +25540,12 @@ var awsPartition = partition{
}: endpoint{
Hostname: "servicediscovery-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
Variant: fipsVariant | dualStackVariant,
}: endpoint{
Hostname: "servicediscovery-fips.us-west-1.api.aws",
},
endpointKey{
Region: "us-west-1-fips",
}: endpoint{
@ -25472,7 +25562,7 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.us-west-2.amazonaws.com",
Hostname: "servicediscovery.us-west-2.api.aws",
},
endpointKey{
Region: "us-west-2",
@ -25480,6 +25570,12 @@ var awsPartition = partition{
}: endpoint{
Hostname: "servicediscovery-fips.us-west-2.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
Variant: fipsVariant | dualStackVariant,
}: endpoint{
Hostname: "servicediscovery-fips.us-west-2.api.aws",
},
endpointKey{
Region: "us-west-2-fips",
}: endpoint{
@ -25855,75 +25951,6 @@ var awsPartition = partition{
},
"sms": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
}: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
Hostname: "sms-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
Hostname: "sms-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
Hostname: "sms-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
@ -25933,39 +25960,6 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "me-south-1",
}: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "sms-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
}: endpoint{},
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "sms-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
}: endpoint{},
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "sms-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
}: endpoint{},
@ -32598,11 +32592,18 @@ var awscnPartition = partition{
},
},
"savingsplans": service{
PartitionEndpoint: "aws-cn",
IsRegionalized: boxedFalse,
IsRegionalized: boxedTrue,
Endpoints: serviceEndpoints{
endpointKey{
Region: "aws-cn",
Region: "cn-north-1",
}: endpoint{
Hostname: "savingsplans.cn-north-1.amazonaws.com.cn",
CredentialScope: credentialScope{
Region: "cn-north-1",
},
},
endpointKey{
Region: "cn-northwest-1",
}: endpoint{
Hostname: "savingsplans.cn-northwest-1.amazonaws.com.cn",
CredentialScope: credentialScope{
@ -32669,7 +32670,7 @@ var awscnPartition = partition{
Region: "cn-north-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.cn-north-1.amazonaws.com.cn",
Hostname: "servicediscovery.cn-north-1.api.amazonwebservices.com.cn",
},
endpointKey{
Region: "cn-northwest-1",
@ -32678,7 +32679,7 @@ var awscnPartition = partition{
Region: "cn-northwest-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.cn-northwest-1.amazonaws.com.cn",
Hostname: "servicediscovery.cn-northwest-1.api.amazonwebservices.com.cn",
},
},
},
@ -32712,9 +32713,6 @@ var awscnPartition = partition{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
},
},
"snowball": service{
@ -38081,6 +38079,12 @@ var awsusgovPartition = partition{
}: endpoint{
Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com",
},
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant | dualStackVariant,
}: endpoint{
Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com",
},
endpointKey{
Region: "us-gov-east-1-fips",
}: endpoint{
@ -38105,6 +38109,12 @@ var awsusgovPartition = partition{
}: endpoint{
Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com",
},
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant | dualStackVariant,
}: endpoint{
Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com",
},
endpointKey{
Region: "us-gov-west-1-fips",
}: endpoint{
@ -38179,15 +38189,6 @@ var awsusgovPartition = partition{
},
"sms": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "fips-us-gov-east-1",
}: endpoint{
Hostname: "sms-fips.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-gov-west-1",
}: endpoint{
@ -38197,15 +38198,6 @@ var awsusgovPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "sms-fips.us-gov-east-1.amazonaws.com",
},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
@ -39775,6 +39767,15 @@ var awsisoPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-iso-west-1",
}: endpoint{
Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov",
CredentialScope: credentialScope{
Region: "us-iso-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
@ -39784,6 +39785,15 @@ var awsisoPartition = partition{
}: endpoint{
Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov",
},
endpointKey{
Region: "us-iso-west-1",
}: endpoint{},
endpointKey{
Region: "us-iso-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov",
},
},
},
"rds": service{


@ -191,7 +191,10 @@ func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers req
if err != nil {
return nil, err
}
mySession := Must(NewSession())
// create oidcClient with AnonymousCredentials to avoid recursively resolving credentials
mySession := Must(NewSession(&aws.Config{
Credentials: credentials.AnonymousCredentials,
}))
oidcClient := ssooidc.New(mySession, cfgCopy)
tokenProvider := ssocreds.NewSSOTokenProvider(oidcClient, cachedPath)
optFns = append(optFns, func(p *ssocreds.Provider) {


@ -8,7 +8,7 @@
// Generally using the signer outside of the SDK should not require any additional
// logic when using Go v1.5 or higher. The signer does this by taking advantage
// of the URL.EscapedPath method. If your request URI requires additional escaping
// you many need to use the URL.Opaque to define what the raw URI should be sent
// you may need to use the URL.Opaque to define what the raw URI should be sent
// to the service as.
//
// The signer will first check the URL.Opaque field, and use its value if set.


@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.44.304"
const SDKVersion = "1.44.313"


@ -769,7 +769,7 @@ func (c *AutoScaling) CompleteLifecycleActionRequest(input *CompleteLifecycleAct
// If you finish before the timeout period ends, send a callback by using the
// CompleteLifecycleAction API call.
//
// For more information, see Amazon EC2 Auto Scaling lifecycle hooks (https://docs.aws.amazon.com/autoscaling/ec2/userguide/lifecycle-hooks.html)
// For more information, see Complete a lifecycle action (https://docs.aws.amazon.com/autoscaling/ec2/userguide/completing-lifecycle-hooks.html)
// in the Amazon EC2 Auto Scaling User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@ -4167,6 +4167,12 @@ func (c *AutoScaling) DescribeWarmPoolRequest(input *DescribeWarmPoolInput) (req
Name: opDescribeWarmPool,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
LimitToken: "MaxRecords",
TruncationToken: "",
},
}
if input == nil {
@ -4229,6 +4235,57 @@ func (c *AutoScaling) DescribeWarmPoolWithContext(ctx aws.Context, input *Descri
return out, req.Send()
}
// DescribeWarmPoolPages iterates over the pages of a DescribeWarmPool operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See DescribeWarmPool method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a DescribeWarmPool operation.
// pageNum := 0
// err := client.DescribeWarmPoolPages(params,
// func(page *autoscaling.DescribeWarmPoolOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
func (c *AutoScaling) DescribeWarmPoolPages(input *DescribeWarmPoolInput, fn func(*DescribeWarmPoolOutput, bool) bool) error {
return c.DescribeWarmPoolPagesWithContext(aws.BackgroundContext(), input, fn)
}
// DescribeWarmPoolPagesWithContext same as DescribeWarmPoolPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *AutoScaling) DescribeWarmPoolPagesWithContext(ctx aws.Context, input *DescribeWarmPoolInput, fn func(*DescribeWarmPoolOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *DescribeWarmPoolInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.DescribeWarmPoolRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
for p.Next() {
if !fn(p.Page().(*DescribeWarmPoolOutput), !p.HasNextPage()) {
break
}
}
return p.Err()
}
const opDetachInstances = "DetachInstances"
// DetachInstancesRequest generates a "aws/request.Request" representing the
@ -4560,7 +4617,7 @@ func (c *AutoScaling) DetachTrafficSourcesRequest(input *DetachTrafficSourcesInp
//
// Detaches one or more traffic sources from the specified Auto Scaling group.
//
// When you detach a taffic, it enters the Removing state while deregistering
// When you detach a traffic source, it enters the Removing state while deregistering
// the instances in the group. When all instances are deregistered, then you
// can no longer describe the traffic source using the DescribeTrafficSources
// API call. The instances continue to run.
@ -6981,6 +7038,38 @@ func (s *Alarm) SetAlarmName(v string) *Alarm {
return s
}
// Specifies the CloudWatch alarm specification to use in an instance refresh.
type AlarmSpecification struct {
_ struct{} `type:"structure"`
// The names of one or more CloudWatch alarms to monitor for the instance refresh.
Alarms []*string `type:"list"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s AlarmSpecification) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s AlarmSpecification) GoString() string {
return s.String()
}
// SetAlarms sets the Alarms field's value.
func (s *AlarmSpecification) SetAlarms(v []*string) *AlarmSpecification {
s.Alarms = v
return s
}
type AttachInstancesInput struct {
_ struct{} `type:"structure"`
@ -18014,7 +18103,7 @@ type PutScalingPolicyInput struct {
// The amount by which to scale, based on the specified adjustment type. A positive
// value adds to the current capacity while a negative number removes from the
// current capacity. For exact capacity, you must specify a positive value.
// current capacity. For exact capacity, you must specify a non-negative value.
//
// Required if the policy type is SimpleScaling. (Not used with any other policy
// type.)
@ -18687,8 +18776,13 @@ func (s RecordLifecycleActionHeartbeatOutput) GoString() string {
type RefreshPreferences struct {
_ struct{} `type:"structure"`
// (Optional) The CloudWatch alarm specification. CloudWatch alarms can be used
// to identify any issues and fail the operation if an alarm threshold is met.
AlarmSpecification *AlarmSpecification `type:"structure"`
// (Optional) Indicates whether to roll back the Auto Scaling group to its previous
// configuration if the instance refresh fails. The default is false.
// configuration if the instance refresh fails or a CloudWatch alarm threshold
// is met. The default is false.
//
// A rollback is not supported in the following situations:
//
@ -18700,6 +18794,9 @@ type RefreshPreferences struct {
//
// * The Auto Scaling group uses the launch template's $Latest or $Default
// version.
//
// For more information, see Undo changes with a rollback (https://docs.aws.amazon.com/autoscaling/ec2/userguide/instance-refresh-rollback.html)
// in the Amazon EC2 Auto Scaling User Guide.
AutoRollback *bool `type:"boolean"`
// (Optional) The amount of time, in seconds, to wait after a checkpoint before
@ -18812,6 +18909,12 @@ func (s RefreshPreferences) GoString() string {
return s.String()
}
// SetAlarmSpecification sets the AlarmSpecification field's value.
func (s *RefreshPreferences) SetAlarmSpecification(v *AlarmSpecification) *RefreshPreferences {
s.AlarmSpecification = v
return s
}
// SetAutoRollback sets the AutoRollback field's value.
func (s *RefreshPreferences) SetAutoRollback(v bool) *RefreshPreferences {
s.AutoRollback = &v
@ -18957,7 +19060,9 @@ type RollbackInstanceRefreshInput struct {
_ struct{} `type:"structure"`
// The name of the Auto Scaling group.
AutoScalingGroupName *string `min:"1" type:"string"`
//
// AutoScalingGroupName is a required field
AutoScalingGroupName *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation.
@ -18981,6 +19086,9 @@ func (s RollbackInstanceRefreshInput) GoString() string {
// Validate inspects the fields of the type to determine if they are valid.
func (s *RollbackInstanceRefreshInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "RollbackInstanceRefreshInput"}
if s.AutoScalingGroupName == nil {
invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName"))
}
if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1))
}
@ -19899,6 +20007,8 @@ type StartInstanceRefreshInput struct {
//
// * Checkpoints
//
// * CloudWatch alarms
//
// * Skip matching
Preferences *RefreshPreferences `type:"structure"`
@ -20052,12 +20162,7 @@ type StepAdjustment struct {
// The amount by which to scale, based on the specified adjustment type. A positive
// value adds to the current capacity while a negative number removes from the
// current capacity.
//
// The amount by which to scale. The adjustment is based on the value that you
// specified in the AdjustmentType property (either an absolute number or a
// percentage). A positive value adds to the current capacity and a negative
// number subtracts from the current capacity.
// current capacity. For exact capacity, you must specify a non-negative value.
//
// ScalingAdjustment is a required field
ScalingAdjustment *int64 `type:"integer" required:"true"`
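
As a usage illustration for the new AlarmSpecification field, here is a hedged sketch of starting an instance refresh that can roll back when a CloudWatch alarm fires; the group and alarm names are placeholders and error handling is minimal:

package example

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

// startRefreshWithAlarm starts an instance refresh that auto-rolls-back when
// the named CloudWatch alarm goes into alarm (illustrative sketch).
func startRefreshWithAlarm() {
	sess := session.Must(session.NewSession())
	svc := autoscaling.New(sess)

	out, err := svc.StartInstanceRefresh(&autoscaling.StartInstanceRefreshInput{
		AutoScalingGroupName: aws.String("my-asg"), // placeholder
		Preferences: &autoscaling.RefreshPreferences{
			AutoRollback: aws.Bool(true),
			AlarmSpecification: &autoscaling.AlarmSpecification{
				Alarms: []*string{aws.String("my-refresh-alarm")}, // placeholder
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("instance refresh started:", aws.StringValue(out.InstanceRefreshId))
}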


@ -120713,6 +120713,9 @@ type GetEbsEncryptionByDefaultOutput struct {
// Indicates whether encryption by default is enabled.
EbsEncryptionByDefault *bool `locationName:"ebsEncryptionByDefault" type:"boolean"`
// Reserved for future use.
SseType *string `locationName:"sseType" type:"string" enum:"SSEType"`
}
// String returns the string representation.
@ -120739,6 +120742,12 @@ func (s *GetEbsEncryptionByDefaultOutput) SetEbsEncryptionByDefault(v bool) *Get
return s
}
// SetSseType sets the SseType field's value.
func (s *GetEbsEncryptionByDefaultOutput) SetSseType(v string) *GetEbsEncryptionByDefaultOutput {
s.SseType = &v
return s
}
type GetFlowLogsIntegrationTemplateInput struct {
_ struct{} `type:"structure"`
@ -128525,6 +128534,10 @@ type InferenceAcceleratorInfo struct {
// Describes the Inference accelerators for the instance type.
Accelerators []*InferenceDeviceInfo `locationName:"accelerators" type:"list"`
// The total size of the memory for the inference accelerators for the instance
// type, in MiB.
TotalInferenceMemoryInMiB *int64 `locationName:"totalInferenceMemoryInMiB" type:"integer"`
}
// String returns the string representation.
@ -128551,6 +128564,12 @@ func (s *InferenceAcceleratorInfo) SetAccelerators(v []*InferenceDeviceInfo) *In
return s
}
// SetTotalInferenceMemoryInMiB sets the TotalInferenceMemoryInMiB field's value.
func (s *InferenceAcceleratorInfo) SetTotalInferenceMemoryInMiB(v int64) *InferenceAcceleratorInfo {
s.TotalInferenceMemoryInMiB = &v
return s
}
// Describes the Inference accelerators for the instance type.
type InferenceDeviceInfo struct {
_ struct{} `type:"structure"`
@ -128561,6 +128580,9 @@ type InferenceDeviceInfo struct {
// The manufacturer of the Inference accelerator.
Manufacturer *string `locationName:"manufacturer" type:"string"`
// Describes the memory available to the inference accelerator.
MemoryInfo *InferenceDeviceMemoryInfo `locationName:"memoryInfo" type:"structure"`
// The name of the Inference accelerator.
Name *string `locationName:"name" type:"string"`
}
@ -128595,12 +128617,50 @@ func (s *InferenceDeviceInfo) SetManufacturer(v string) *InferenceDeviceInfo {
return s
}
// SetMemoryInfo sets the MemoryInfo field's value.
func (s *InferenceDeviceInfo) SetMemoryInfo(v *InferenceDeviceMemoryInfo) *InferenceDeviceInfo {
s.MemoryInfo = v
return s
}
// SetName sets the Name field's value.
func (s *InferenceDeviceInfo) SetName(v string) *InferenceDeviceInfo {
s.Name = &v
return s
}
// Describes the memory available to the inference accelerator.
type InferenceDeviceMemoryInfo struct {
_ struct{} `type:"structure"`
// The size of the memory available to the inference accelerator, in MiB.
SizeInMiB *int64 `locationName:"sizeInMiB" type:"integer"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s InferenceDeviceMemoryInfo) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s InferenceDeviceMemoryInfo) GoString() string {
return s.String()
}
// SetSizeInMiB sets the SizeInMiB field's value.
func (s *InferenceDeviceMemoryInfo) SetSizeInMiB(v int64) *InferenceDeviceMemoryInfo {
s.SizeInMiB = &v
return s
}
// Describes an instance.
type Instance struct {
_ struct{} `type:"structure"`
@ -150493,6 +150553,9 @@ func (s *NetworkBandwidthGbpsRequest) SetMin(v float64) *NetworkBandwidthGbpsReq
type NetworkCardInfo struct {
_ struct{} `type:"structure"`
// The baseline network performance of the network card, in Gbps.
BaselineBandwidthInGbps *float64 `locationName:"baselineBandwidthInGbps" type:"double"`
// The maximum number of network interfaces for the network card.
MaximumNetworkInterfaces *int64 `locationName:"maximumNetworkInterfaces" type:"integer"`
@ -150501,6 +150564,9 @@ type NetworkCardInfo struct {
// The network performance of the network card.
NetworkPerformance *string `locationName:"networkPerformance" type:"string"`
// The peak (burst) network performance of the network card, in Gbps.
PeakBandwidthInGbps *float64 `locationName:"peakBandwidthInGbps" type:"double"`
}
// String returns the string representation.
@ -150521,6 +150587,12 @@ func (s NetworkCardInfo) GoString() string {
return s.String()
}
// SetBaselineBandwidthInGbps sets the BaselineBandwidthInGbps field's value.
func (s *NetworkCardInfo) SetBaselineBandwidthInGbps(v float64) *NetworkCardInfo {
s.BaselineBandwidthInGbps = &v
return s
}
// SetMaximumNetworkInterfaces sets the MaximumNetworkInterfaces field's value.
func (s *NetworkCardInfo) SetMaximumNetworkInterfaces(v int64) *NetworkCardInfo {
s.MaximumNetworkInterfaces = &v
@ -150539,6 +150611,12 @@ func (s *NetworkCardInfo) SetNetworkPerformance(v string) *NetworkCardInfo {
return s
}
// SetPeakBandwidthInGbps sets the PeakBandwidthInGbps field's value.
func (s *NetworkCardInfo) SetPeakBandwidthInGbps(v float64) *NetworkCardInfo {
s.PeakBandwidthInGbps = &v
return s
}
// Describes the networking features of the instance type.
type NetworkInfo struct {
_ struct{} `type:"structure"`
@ -162638,6 +162716,9 @@ type RestoreSnapshotFromRecycleBinOutput struct {
// The ID of the snapshot.
SnapshotId *string `locationName:"snapshotId" type:"string"`
// Reserved for future use.
SseType *string `locationName:"sseType" type:"string" enum:"SSEType"`
// The time stamp when the snapshot was initiated.
StartTime *time.Time `locationName:"startTime" type:"timestamp"`
@ -162705,6 +162786,12 @@ func (s *RestoreSnapshotFromRecycleBinOutput) SetSnapshotId(v string) *RestoreSn
return s
}
// SetSseType sets the SseType field's value.
func (s *RestoreSnapshotFromRecycleBinOutput) SetSseType(v string) *RestoreSnapshotFromRecycleBinOutput {
s.SseType = &v
return s
}
// SetStartTime sets the StartTime field's value.
func (s *RestoreSnapshotFromRecycleBinOutput) SetStartTime(v time.Time) *RestoreSnapshotFromRecycleBinOutput {
s.StartTime = &v
@ -167454,6 +167541,9 @@ type Snapshot struct {
// is created.
SnapshotId *string `locationName:"snapshotId" type:"string"`
// Reserved for future use.
SseType *string `locationName:"sseType" type:"string" enum:"SSEType"`
// The time stamp when the snapshot was initiated.
StartTime *time.Time `locationName:"startTime" type:"timestamp"`
@ -167563,6 +167653,12 @@ func (s *Snapshot) SetSnapshotId(v string) *Snapshot {
return s
}
// SetSseType sets the SseType field's value.
func (s *Snapshot) SetSseType(v string) *Snapshot {
s.SseType = &v
return s
}
// SetStartTime sets the StartTime field's value.
func (s *Snapshot) SetStartTime(v time.Time) *Snapshot {
s.StartTime = &v
@ -167813,6 +167909,9 @@ type SnapshotInfo struct {
// Snapshot id that can be used to describe this snapshot.
SnapshotId *string `locationName:"snapshotId" type:"string"`
// Reserved for future use.
SseType *string `locationName:"sseType" type:"string" enum:"SSEType"`
// Time this snapshot was started. This is the same for all snapshots initiated
// by the same request.
StartTime *time.Time `locationName:"startTime" type:"timestamp"`
@ -167884,6 +167983,12 @@ func (s *SnapshotInfo) SetSnapshotId(v string) *SnapshotInfo {
return s
}
// SetSseType sets the SseType field's value.
func (s *SnapshotInfo) SetSseType(v string) *SnapshotInfo {
s.SseType = &v
return s
}
// SetStartTime sets the StartTime field's value.
func (s *SnapshotInfo) SetStartTime(v time.Time) *SnapshotInfo {
s.StartTime = &v
@ -179154,6 +179259,9 @@ type Volume struct {
// The snapshot from which the volume was created, if applicable.
SnapshotId *string `locationName:"snapshotId" type:"string"`
// Reserved for future use.
SseType *string `locationName:"sseType" type:"string" enum:"SSEType"`
// The volume state.
State *string `locationName:"status" type:"string" enum:"VolumeState"`
@ -179254,6 +179362,12 @@ func (s *Volume) SetSnapshotId(v string) *Volume {
return s
}
// SetSseType sets the SseType field's value.
func (s *Volume) SetSseType(v string) *Volume {
s.SseType = &v
return s
}
// SetState sets the State field's value.
func (s *Volume) SetState(v string) *Volume {
s.State = &v
@ -189848,6 +189962,26 @@ func RuleAction_Values() []string {
}
}
const (
// SSETypeSseEbs is a SSEType enum value
SSETypeSseEbs = "sse-ebs"
// SSETypeSseKms is a SSEType enum value
SSETypeSseKms = "sse-kms"
// SSETypeNone is a SSEType enum value
SSETypeNone = "none"
)
// SSEType_Values returns all elements of the SSEType enum
func SSEType_Values() []string {
return []string{
SSETypeSseEbs,
SSETypeSseKms,
SSETypeNone,
}
}
const (
// ScopeAvailabilityZone is a Scope enum value
ScopeAvailabilityZone = "Availability Zone"
@ -190067,6 +190201,9 @@ const (
// SpotInstanceStateFailed is a SpotInstanceState enum value
SpotInstanceStateFailed = "failed"
// SpotInstanceStateDisabled is a SpotInstanceState enum value
SpotInstanceStateDisabled = "disabled"
)
// SpotInstanceState_Values returns all elements of the SpotInstanceState enum
@ -190077,6 +190214,7 @@ func SpotInstanceState_Values() []string {
SpotInstanceStateClosed,
SpotInstanceStateCancelled,
SpotInstanceStateFailed,
SpotInstanceStateDisabled,
}
}


@ -1283,6 +1283,62 @@ func (c *EC2) WaitUntilSpotInstanceRequestFulfilledWithContext(ctx aws.Context,
return w.WaitWithContext(ctx)
}
// WaitUntilStoreImageTaskComplete uses the Amazon EC2 API operation
// DescribeStoreImageTasks to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *EC2) WaitUntilStoreImageTaskComplete(input *DescribeStoreImageTasksInput) error {
return c.WaitUntilStoreImageTaskCompleteWithContext(aws.BackgroundContext(), input)
}
// WaitUntilStoreImageTaskCompleteWithContext is an extended version of WaitUntilStoreImageTaskComplete.
// With the support for passing in a context and options to configure the
// Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *EC2) WaitUntilStoreImageTaskCompleteWithContext(ctx aws.Context, input *DescribeStoreImageTasksInput, opts ...request.WaiterOption) error {
w := request.Waiter{
Name: "WaitUntilStoreImageTaskComplete",
MaxAttempts: 40,
Delay: request.ConstantWaiterDelay(5 * time.Second),
Acceptors: []request.WaiterAcceptor{
{
State: request.SuccessWaiterState,
Matcher: request.PathAllWaiterMatch, Argument: "StoreImageTaskResults[].StoreTaskState",
Expected: "Completed",
},
{
State: request.FailureWaiterState,
Matcher: request.PathAnyWaiterMatch, Argument: "StoreImageTaskResults[].StoreTaskState",
Expected: "Failed",
},
{
State: request.RetryWaiterState,
Matcher: request.PathAnyWaiterMatch, Argument: "StoreImageTaskResults[].StoreTaskState",
Expected: "InProgress",
},
},
Logger: c.Config.Logger,
NewRequest: func(opts []request.Option) (*request.Request, error) {
var inCpy *DescribeStoreImageTasksInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.DescribeStoreImageTasksRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
w.ApplyOptions(opts...)
return w.WaitWithContext(ctx)
}
// WaitUntilSubnetAvailable uses the Amazon EC2 API operation
// DescribeSubnets to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will

View file

@ -10709,6 +10709,7 @@ func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObject
}
var _ awserr.Error
var _ time.Time
// SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent.
//

View file

@ -1460,6 +1460,9 @@ type AssumeRoleInput struct {
// in the IAM User Guide.
PolicyArns []*PolicyDescriptorType `type:"list"`
// Reserved for future use.
ProvidedContexts []*ProvidedContext `type:"list"`
// The Amazon Resource Name (ARN) of the role to assume.
//
// RoleArn is a required field
@ -1633,6 +1636,16 @@ func (s *AssumeRoleInput) Validate() error {
}
}
}
if s.ProvidedContexts != nil {
for i, v := range s.ProvidedContexts {
if v == nil {
continue
}
if err := v.Validate(); err != nil {
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ProvidedContexts", i), err.(request.ErrInvalidParams))
}
}
}
if s.Tags != nil {
for i, v := range s.Tags {
if v == nil {
@ -1674,6 +1687,12 @@ func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleIn
return s
}
// SetProvidedContexts sets the ProvidedContexts field's value.
func (s *AssumeRoleInput) SetProvidedContexts(v []*ProvidedContext) *AssumeRoleInput {
s.ProvidedContexts = v
return s
}
// SetRoleArn sets the RoleArn field's value.
func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput {
s.RoleArn = &v
@ -2266,7 +2285,8 @@ type AssumeRoleWithWebIdentityInput struct {
// The OAuth 2.0 access token or OpenID Connect ID token that is provided by
// the identity provider. Your application must get this token by authenticating
// the user who is using your application with a web identity provider before
// the application makes an AssumeRoleWithWebIdentity call.
// the application makes an AssumeRoleWithWebIdentity call. Only tokens with
// RSA algorithms (RS256) are supported.
//
// WebIdentityToken is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by AssumeRoleWithWebIdentityInput's
@ -3385,6 +3405,63 @@ func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType {
return s
}
// Reserved for future use.
type ProvidedContext struct {
_ struct{} `type:"structure"`
// Reserved for future use.
ContextAssertion *string `min:"4" type:"string"`
// Reserved for future use.
ProviderArn *string `min:"20" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ProvidedContext) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ProvidedContext) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ProvidedContext) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ProvidedContext"}
if s.ContextAssertion != nil && len(*s.ContextAssertion) < 4 {
invalidParams.Add(request.NewErrParamMinLen("ContextAssertion", 4))
}
if s.ProviderArn != nil && len(*s.ProviderArn) < 20 {
invalidParams.Add(request.NewErrParamMinLen("ProviderArn", 20))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetContextAssertion sets the ContextAssertion field's value.
func (s *ProvidedContext) SetContextAssertion(v string) *ProvidedContext {
s.ContextAssertion = &v
return s
}
// SetProviderArn sets the ProviderArn field's value.
func (s *ProvidedContext) SetProviderArn(v string) *ProvidedContext {
s.ProviderArn = &v
return s
}
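A minimal sketch of how a caller could populate the new field through the setters above (written as if inside package sts; all ARNs, the session name, and the assertion string are placeholder values):
// Sketch only: build and validate an AssumeRoleInput carrying a
// ProvidedContext. Placeholder values throughout.
func exampleAssumeRoleInputWithProvidedContext() (*AssumeRoleInput, error) {
	input := (&AssumeRoleInput{}).
		SetRoleArn("arn:aws:iam::123456789012:role/example-role").
		SetRoleSessionName("example-session").
		SetProvidedContexts([]*ProvidedContext{
			(&ProvidedContext{}).
				SetProviderArn("arn:aws:iam::aws:contextProvider/example").
				SetContextAssertion("example-assertion"),
		})
	// Validate enforces the minimum lengths introduced in this change:
	// at least 20 characters for ProviderArn, at least 4 for ContextAssertion.
	if err := input.Validate(); err != nil {
		return nil, err
	}
	return input, nil
}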
// You can pass custom key-value pair attributes when you assume a role or federate
// a user. These are called session tags. You can then use the session tags
// to control access to resources. For more information, see Tagging Amazon

View file

@ -1,175 +0,0 @@
package common
import (
"encoding/json"
"io"
"github.com/labstack/gommon/log"
"github.com/sirupsen/logrus"
)
// EchoLogrusLogger extend logrus.Logger
type EchoLogrusLogger struct {
*logrus.Logger
}
var commonLogger = &EchoLogrusLogger{
Logger: logrus.StandardLogger(),
}
func Logger() *EchoLogrusLogger {
return commonLogger
}
func toEchoLevel(level logrus.Level) log.Lvl {
switch level {
case logrus.DebugLevel:
return log.DEBUG
case logrus.InfoLevel:
return log.INFO
case logrus.WarnLevel:
return log.WARN
case logrus.ErrorLevel:
return log.ERROR
}
return log.OFF
}
func (l *EchoLogrusLogger) Output() io.Writer {
return l.Out
}
func (l *EchoLogrusLogger) SetOutput(w io.Writer) {
// disable operations that would change behavior of global logrus logger.
}
func (l *EchoLogrusLogger) Level() log.Lvl {
return toEchoLevel(l.Logger.Level)
}
func (l *EchoLogrusLogger) SetLevel(v log.Lvl) {
// disable operations that would change behavior of global logrus logger.
}
func (l *EchoLogrusLogger) SetHeader(h string) {
}
func (l *EchoLogrusLogger) Prefix() string {
return ""
}
func (l *EchoLogrusLogger) SetPrefix(p string) {
}
func (l *EchoLogrusLogger) Print(i ...interface{}) {
l.Logger.Print(i...)
}
func (l *EchoLogrusLogger) Printf(format string, args ...interface{}) {
l.Logger.Printf(format, args...)
}
func (l *EchoLogrusLogger) Printj(j log.JSON) {
b, err := json.Marshal(j)
if err != nil {
panic(err)
}
l.Logger.Println(string(b))
}
func (l *EchoLogrusLogger) Debug(i ...interface{}) {
l.Logger.Debug(i...)
}
func (l *EchoLogrusLogger) Debugf(format string, args ...interface{}) {
l.Logger.Debugf(format, args...)
}
func (l *EchoLogrusLogger) Debugj(j log.JSON) {
b, err := json.Marshal(j)
if err != nil {
panic(err)
}
l.Logger.Debugln(string(b))
}
func (l *EchoLogrusLogger) Info(i ...interface{}) {
l.Logger.Info(i...)
}
func (l *EchoLogrusLogger) Infof(format string, args ...interface{}) {
l.Logger.Infof(format, args...)
}
func (l *EchoLogrusLogger) Infoj(j log.JSON) {
b, err := json.Marshal(j)
if err != nil {
panic(err)
}
l.Logger.Infoln(string(b))
}
func (l *EchoLogrusLogger) Warn(i ...interface{}) {
l.Logger.Warn(i...)
}
func (l *EchoLogrusLogger) Warnf(format string, args ...interface{}) {
l.Logger.Warnf(format, args...)
}
func (l *EchoLogrusLogger) Warnj(j log.JSON) {
b, err := json.Marshal(j)
if err != nil {
panic(err)
}
l.Logger.Warnln(string(b))
}
func (l *EchoLogrusLogger) Error(i ...interface{}) {
l.Logger.Error(i...)
}
func (l *EchoLogrusLogger) Errorf(format string, args ...interface{}) {
l.Logger.Errorf(format, args...)
}
func (l *EchoLogrusLogger) Errorj(j log.JSON) {
b, err := json.Marshal(j)
if err != nil {
panic(err)
}
l.Logger.Errorln(string(b))
}
func (l *EchoLogrusLogger) Fatal(i ...interface{}) {
l.Logger.Fatal(i...)
}
func (l *EchoLogrusLogger) Fatalf(format string, args ...interface{}) {
l.Logger.Fatalf(format, args...)
}
func (l *EchoLogrusLogger) Fatalj(j log.JSON) {
b, err := json.Marshal(j)
if err != nil {
panic(err)
}
l.Logger.Fatalln(string(b))
}
func (l *EchoLogrusLogger) Panic(i ...interface{}) {
l.Logger.Panic(i...)
}
func (l *EchoLogrusLogger) Panicf(format string, args ...interface{}) {
l.Logger.Panicf(format, args...)
}
func (l *EchoLogrusLogger) Panicj(j log.JSON) {
b, err := json.Marshal(j)
if err != nil {
panic(err)
}
l.Logger.Panicln(string(b))
}

View file

@ -1,22 +0,0 @@
package common
import (
"github.com/labstack/echo/v4"
"github.com/segmentio/ksuid"
)
const OperationIDKey string = "operationID"
// Adds a time-sortable globally unique identifier to an echo.Context if not already set
func OperationIDMiddleware(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
if c.Get(OperationIDKey) == nil {
c.Set(OperationIDKey, GenerateOperationID())
}
return next(c)
}
}
func GenerateOperationID() string {
return ksuid.New().String()
}

View file

@ -358,8 +358,9 @@ func (s *Solver) makeDepsolveRequest(pkgSets []rpmmd.PackageSet) (*Request, map[
transactions := make([]transactionArgs, len(pkgSets))
for dsIdx, pkgSet := range pkgSets {
transactions[dsIdx] = transactionArgs{
PackageSpecs: pkgSet.Include,
ExcludeSpecs: pkgSet.Exclude,
PackageSpecs: pkgSet.Include,
ExcludeSpecs: pkgSet.Exclude,
InstallWeakDeps: pkgSet.InstallWeakDeps,
}
for _, jobRepo := range pkgSet.Repositories {
@ -538,6 +539,9 @@ type transactionArgs struct {
// IDs of repositories to use for this depsolve
RepoIDs []string `json:"repo-ids"`
// If we want weak deps for this depsolve
InstallWeakDeps bool `json:"install_weak_deps"`
}
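For illustration, a minimal sketch of a package set that opts out of weak dependencies (written as if inside this package, which already imports rpmmd; package names are placeholders):
// Sketch only: makeDepsolveRequest (above) copies InstallWeakDeps from the
// package set into transactionArgs, where it is serialized for dnf-json as
// "install_weak_deps".
func exampleNoWeakDepsSet() rpmmd.PackageSet {
	return rpmmd.PackageSet{
		Include:         []string{"glibc", "systemd"},
		Exclude:         []string{"dracut-config-rescue"},
		InstallWeakDeps: false,
	}
}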
type packageSpecs []PackageSpec

View file

@ -24,6 +24,7 @@ var CustomDirectoriesPolicies = NewPathPolicies(map[string]PathPolicy{
var CustomFilesPolicies = NewPathPolicies(map[string]PathPolicy{
"/": {Deny: true},
"/etc": {},
"/root": {},
"/etc/fstab": {Deny: true},
"/etc/shadow": {Deny: true},
"/etc/passwd": {Deny: true},

View file

@ -1,15 +1,6 @@
// Package blueprint contains primitives for representing weldr blueprints
package blueprint
import (
"encoding/json"
"fmt"
"github.com/osbuild/images/pkg/crypt"
"github.com/coreos/go-semver/semver"
)
// A Blueprint is a high-level description of an image.
type Blueprint struct {
Name string `json:"name" toml:"name"`
@ -49,71 +40,6 @@ type Container struct {
TLSVerify *bool `json:"tls-verify,omitempty" toml:"tls-verify,omitempty"`
}
// DeepCopy returns a deep copy of the blueprint
// This uses json.Marshal and Unmarshal which are not very efficient
func (b *Blueprint) DeepCopy() Blueprint {
bpJSON, err := json.Marshal(b)
if err != nil {
panic(err)
}
var bp Blueprint
err = json.Unmarshal(bpJSON, &bp)
if err != nil {
panic(err)
}
return bp
}
// Initialize ensures that the blueprint has sane defaults for any missing fields
func (b *Blueprint) Initialize() error {
if len(b.Name) == 0 {
return fmt.Errorf("empty blueprint name not allowed")
}
if b.Packages == nil {
b.Packages = []Package{}
}
if b.Modules == nil {
b.Modules = []Package{}
}
if b.Groups == nil {
b.Groups = []Group{}
}
if b.Containers == nil {
b.Containers = []Container{}
}
if b.Version == "" {
b.Version = "0.0.0"
}
// Return an error if the version is not valid
_, err := semver.NewVersion(b.Version)
if err != nil {
return fmt.Errorf("Invalid 'version', must use Semantic Versioning: %s", err.Error())
}
err = b.CryptPasswords()
if err != nil {
return fmt.Errorf("Error hashing passwords: %s", err.Error())
}
return nil
}
// BumpVersion increments the previous blueprint's version
// If the old version string is not valid semver it will use the new version as-is
// This assumes that the new blueprint's version has already been validated via Initialize
func (b *Blueprint) BumpVersion(old string) {
var ver *semver.Version
ver, err := semver.NewVersion(old)
if err != nil {
return
}
ver.BumpPatch()
b.Version = ver.String()
}
// packages, modules, and groups all resolve to rpm packages right now. This
// function returns a combined list of "name-version" strings.
func (b *Blueprint) GetPackages() []string {
@ -149,36 +75,3 @@ func (p Package) ToNameVersion() string {
return p.Name + "-" + p.Version
}
// CryptPasswords ensures that all blueprint passwords are hashed
func (b *Blueprint) CryptPasswords() error {
if b.Customizations == nil {
return nil
}
// Any passwords for users?
for i := range b.Customizations.User {
// Missing or empty password
if b.Customizations.User[i].Password == nil {
continue
}
// Prevent empty password from being hashed
if len(*b.Customizations.User[i].Password) == 0 {
b.Customizations.User[i].Password = nil
continue
}
if !crypt.PasswordIsCrypted(*b.Customizations.User[i].Password) {
pw, err := crypt.CryptSHA512(*b.Customizations.User[i].Password)
if err != nil {
return err
}
// Replace the password with the hashed value
b.Customizations.User[i].Password = &pw
}
}
return nil
}

View file

@ -3,6 +3,7 @@ package container
import (
"context"
"fmt"
"sort"
"strings"
)
@ -79,5 +80,8 @@ func (r *Resolver) Finish() ([]Spec, error) {
return specs, fmt.Errorf("failed to resolve container: %s", detail)
}
// Return a stable result, sorted by Digest
sort.Slice(specs, func(i, j int) bool { return specs[i].Digest < specs[j].Digest })
return specs, nil
}
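The sort makes the return value of Finish deterministic regardless of the order in which resolutions complete. A self-contained illustration of the ordering, using a stand-in struct with only the field relevant to the sort (digests are placeholders):
// Stand-alone sketch: sorting by digest string yields the same order no
// matter how the inputs were produced.
package main
import (
	"fmt"
	"sort"
)
type spec struct{ Digest string }
func main() {
	specs := []spec{
		{Digest: "sha256:bbbb"},
		{Digest: "sha256:aaaa"},
	}
	sort.Slice(specs, func(i, j int) bool { return specs[i].Digest < specs[j].Digest })
	fmt.Println(specs) // always prints [{sha256:aaaa} {sha256:bbbb}]
}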

View file

@ -46,7 +46,6 @@ var (
iotServices = []string{
"NetworkManager.service",
"firewalld.service",
"rngd.service",
"sshd.service",
"zezere_ignition.timer",
"zezere_ignition_banner.service",
@ -202,7 +201,7 @@ var (
kernelOptions: defaultKernelOptions,
bootable: true,
defaultSize: 5 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "qcow2"},
exports: []string{"qcow2"},
@ -230,7 +229,7 @@ var (
kernelOptions: defaultKernelOptions,
bootable: true,
defaultSize: 2 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vpc"},
exports: []string{"vpc"},
@ -259,7 +258,7 @@ var (
kernelOptions: defaultKernelOptions,
bootable: true,
defaultSize: 2 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vmdk"},
exports: []string{"vmdk"},
@ -277,7 +276,7 @@ var (
kernelOptions: defaultKernelOptions,
bootable: true,
defaultSize: 2 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vmdk", "ovf", "archive"},
exports: []string{"archive"},
@ -341,7 +340,7 @@ var (
kernelOptions: defaultKernelOptions,
bootable: true,
defaultSize: 2 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},

View file

@ -200,7 +200,7 @@ func osCustomizations(
// IMAGES
func liveImage(workload workload.Workload,
func diskImage(workload workload.Workload,
t *imageType,
customizations *blueprint.Customizations,
options distro.ImageOptions,
@ -208,7 +208,7 @@ func liveImage(workload workload.Workload,
containers []container.SourceSpec,
rng *rand.Rand) (image.ImageKind, error) {
img := image.NewLiveImage()
img := image.NewDiskImage()
img.Platform = t.platform
img.OSCustomizations = osCustomizations(t, packageSets[osPkgsKey], containers, customizations)
img.Environment = t.environment
@ -255,17 +255,6 @@ func liveInstallerImage(workload workload.Workload,
img := image.NewAnacondaLiveInstaller()
distro := t.Arch().Distro()
// If the live installer is generated for Fedora 39 or higher then we enable the web ui
// kernel options. This is a temporary thing as the check for this should really lie with
// anaconda and their `liveinst` script to determine which frontend to start.
if common.VersionLessThan(distro.Releasever(), "39") {
img.AdditionalKernelOpts = []string{}
} else {
img.AdditionalKernelOpts = []string{"inst.webui"}
}
img.Platform = t.platform
img.Workload = workload
img.ExtraBasePackages = packageSets[installerPkgsKey]
@ -345,6 +334,7 @@ func iotCommitImage(workload workload.Workload,
img.OSTreeParent = parentCommit
img.OSVersion = t.arch.distro.osVersion
img.Filename = t.Filename()
img.InstallWeakDeps = false
return img, nil
}

View file

@ -30,7 +30,7 @@ func qcow2CommonPackageSet(t *imageType) rpmmd.PackageSet {
}
func vhdCommonPackageSet(t *imageType) rpmmd.PackageSet {
ps := rpmmd.PackageSet{
return rpmmd.PackageSet{
Include: []string{
"@core",
"chrony",
@ -48,12 +48,10 @@ func vhdCommonPackageSet(t *imageType) rpmmd.PackageSet {
"zram-generator-defaults",
},
}
return ps
}
func vmdkCommonPackageSet(t *imageType) rpmmd.PackageSet {
ps := rpmmd.PackageSet{
return rpmmd.PackageSet{
Include: []string{
"@Fedora Cloud Server",
"chrony",
@ -74,139 +72,144 @@ func vmdkCommonPackageSet(t *imageType) rpmmd.PackageSet {
"extlinux-bootloader",
},
}
return ps
}
// fedora iot commit OS package set
func iotCommitPackageSet(t *imageType) rpmmd.PackageSet {
ps := rpmmd.PackageSet{
Include: []string{
"fedora-release-iot",
"glibc",
"glibc-minimal-langpack",
"nss-altfiles",
"sssd-client",
"libsss_sudo",
"shadow-utils",
"dracut-network",
"polkit",
"lvm2",
"cryptsetup",
"pinentry",
"keyutils",
"cracklib-dicts",
"e2fsprogs",
"xfsprogs",
"dosfstools",
"gnupg2",
"basesystem",
"python3",
"bash",
"xz",
"gzip",
"coreutils",
"which",
"curl",
"firewalld",
"iptables",
"NetworkManager",
"NetworkManager-wifi",
"NetworkManager-wwan",
"wpa_supplicant",
"iwd",
"tpm2-pkcs11",
"dnsmasq",
"traceroute",
"hostname",
"iproute",
"iputils",
"openssh-clients",
"openssh-server",
"passwd",
"policycoreutils",
"procps-ng",
"rootfiles",
"rpm",
"smartmontools-selinux",
"setup",
"shadow-utils",
"sudo",
"systemd",
"util-linux",
"vim-minimal",
"less",
"tar",
"fwupd",
"usbguard",
"greenboot",
"ignition",
"zezere-ignition",
"rsync",
"aardvark-dns",
"atheros-firmware",
"attr",
"ima-evm-utils",
"authselect",
"basesystem",
"bash",
"bash-completion",
"tmux",
"screen",
"policycoreutils-python-utils",
"setools-console",
"audit",
"rng-tools",
"brcmfmac-firmware",
"chrony",
"bluez",
"bluez-libs",
"bluez-mesh",
"kernel-tools",
"libgpiod-utils",
"podman",
"container-selinux",
"skopeo",
"criu",
"slirp4netns",
"fuse-overlayfs",
"clevis",
"clevis-dracut",
"clevis-luks",
"clevis-pin-tpm2",
"parsec",
"container-selinux",
"containernetworking-plugins",
"coreutils",
"cracklib-dicts",
"criu",
"cryptsetup",
"curl",
"dbus-parsec",
"iwl7260-firmware",
"iwlax2xx-firmware",
"dnsmasq",
"dosfstools",
"dracut-config-generic",
"dracut-network",
"e2fsprogs",
"efibootmgr",
"fedora-release-iot",
"firewalld",
"fwupd",
"fwupd-efi",
"fwupd-plugin-modem-manager",
"fwupd-plugin-uefi-capsule-data",
"glibc",
"glibc-minimal-langpack",
"gnupg2",
"greenboot",
"greenboot-default-health-checks",
"gzip",
"hostname",
"ignition",
"ima-evm-utils",
"iproute",
"iputils",
"iwd",
"iwlwifi-mvm-firmware",
"kernel-tools",
"keyutils",
"less",
"libsss_sudo",
"linux-firmware",
"lvm2",
"netavark",
"NetworkManager",
"NetworkManager-wifi",
"NetworkManager-wwan",
"nss-altfiles",
"openssl",
"openssh-clients",
"openssh-server",
"parsec",
"passwd",
"pinentry",
"podman",
"podman-plugins",
"policycoreutils",
"policycoreutils-python-utils",
"polkit",
"procps-ng",
"realtek-firmware",
"rootfiles",
"rpm",
"screen",
"selinux-policy-targeted",
"setools-console",
"setup",
"shadow-utils",
"skopeo",
"slirp4netns",
"sssd-client",
"sudo",
"systemd",
"systemd-resolved",
"tar",
"tmux",
"tpm2-pkcs11",
"traceroute",
"usbguard",
"util-linux",
"vim-minimal",
"wpa_supplicant",
"wireless-regdb",
"xfsprogs",
"xz",
"zezere-ignition",
"zram-generator",
},
}
return ps
if !common.VersionLessThan(t.arch.distro.osVersion, "38") {
ps = ps.Append(rpmmd.PackageSet{
Include: []string{
"fdo-client", // added in F38
},
})
}
return ps
}
// INSTALLER PACKAGE SET
func installerPackageSet(t *imageType) rpmmd.PackageSet {
ps := rpmmd.PackageSet{
return rpmmd.PackageSet{
Include: []string{
"anaconda-dracut",
"atheros-firmware",
"brcmfmac-firmware",
"curl",
"dracut-config-generic",
"dracut-network",
"hostname",
"iwl100-firmware",
"iwl1000-firmware",
"iwl105-firmware",
"iwl135-firmware",
"iwl2000-firmware",
"iwl2030-firmware",
"iwl3160-firmware",
"iwl5000-firmware",
"iwl5150-firmware",
"iwl6050-firmware",
"iwl7260-firmware",
"iwlwifi-dvm-firmware",
"iwlwifi-mvm-firmware",
"kernel",
"linux-firmware",
"less",
"nfs-utils",
"openssh-clients",
"ostree",
"plymouth",
"realtek-firmware",
"rng-tools",
"rpcbind",
"selinux-policy-targeted",
@ -216,8 +219,6 @@ func installerPackageSet(t *imageType) rpmmd.PackageSet {
"xz",
},
}
return ps
}
func anacondaPackageSet(t *imageType) rpmmd.PackageSet {
@ -235,9 +236,11 @@ func anacondaPackageSet(t *imageType) rpmmd.PackageSet {
"anaconda-dracut",
"anaconda-install-env-deps",
"anaconda-widgets",
"atheros-firmware",
"audit",
"bind-utils",
"bitmap-fangsongti-fonts",
"brcmfmac-firmware",
"bzip2",
"cryptsetup",
"curl",
@ -268,19 +271,8 @@ func anacondaPackageSet(t *imageType) rpmmd.PackageSet {
"hostname",
"initscripts",
"ipmitool",
"iwl1000-firmware",
"iwl100-firmware",
"iwl105-firmware",
"iwl135-firmware",
"iwl2000-firmware",
"iwl2030-firmware",
"iwl3160-firmware",
"iwl5000-firmware",
"iwl5150-firmware",
"iwl6000g2a-firmware",
"iwl6000g2b-firmware",
"iwl6050-firmware",
"iwl7260-firmware",
"iwlwifi-dvm-firmware",
"iwlwifi-mvm-firmware",
"jomolhari-fonts",
"kacst-farsi-fonts",
"kacst-qurn-fonts",
@ -325,6 +317,7 @@ func anacondaPackageSet(t *imageType) rpmmd.PackageSet {
"plymouth",
"python3-pyatspi",
"rdma-core",
"realtek-firmware",
"rit-meera-new-fonts",
"rng-tools",
"rpcbind",
@ -486,7 +479,6 @@ func containerPackageSet(t *imageType) rpmmd.PackageSet {
"dnf-yum",
"dnf",
"fedora-release-container",
"fedora-repos-modular",
"glibc-minimal-langpack",
"rootfiles",
"rpm",
@ -529,6 +521,14 @@ func containerPackageSet(t *imageType) rpmmd.PackageSet {
},
}
if common.VersionLessThan(t.arch.distro.osVersion, "39") {
ps = ps.Append(rpmmd.PackageSet{
Include: []string{
"fedora-repos-modular",
},
})
}
return ps
}

View file

@ -25,7 +25,7 @@ var azureRhuiImgType = imageType{
kernelOptions: "ro crashkernel=auto console=tty1 console=ttyS0 earlyprintk=ttyS0 rootdelay=300 scsi_mod.use_blk_mq=y",
bootable: true,
defaultSize: 64 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vpc", "xz"},
exports: []string{"xz"},

View file

@ -216,7 +216,7 @@ func osCustomizations(
return osc
}
func liveImage(workload workload.Workload,
func diskImage(workload workload.Workload,
t *imageType,
customizations *blueprint.Customizations,
options distro.ImageOptions,
@ -224,7 +224,7 @@ func liveImage(workload workload.Workload,
containers []container.SourceSpec,
rng *rand.Rand) (image.ImageKind, error) {
img := image.NewLiveImage()
img := image.NewDiskImage()
img.Platform = t.platform
img.OSCustomizations = osCustomizations(t, packageSets[osPkgsKey], options, containers, customizations)
img.Environment = t.environment

View file

@ -19,7 +19,7 @@ var qcow2ImgType = imageType{
defaultImageConfig: qcow2DefaultImgConfig,
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "qcow2"},
exports: []string{"qcow2"},

View file

@ -20,7 +20,7 @@ func amiImgTypeX86_64(rd distribution) imageType {
kernelOptions: "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 crashkernel=auto",
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image"},
exports: []string{"image"},
@ -49,7 +49,7 @@ func ec2ImgTypeX86_64(rd distribution) imageType {
kernelOptions: "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 crashkernel=auto",
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},
@ -77,7 +77,7 @@ func ec2HaImgTypeX86_64(rd distribution) imageType {
kernelOptions: "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 crashkernel=auto",
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},
@ -98,7 +98,7 @@ func amiImgTypeAarch64(rd distribution) imageType {
kernelOptions: "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 iommu.strict=0 crashkernel=auto",
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image"},
exports: []string{"image"},
@ -126,7 +126,7 @@ func ec2ImgTypeAarch64(rd distribution) imageType {
kernelOptions: "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 iommu.strict=0 crashkernel=auto",
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},
@ -154,7 +154,7 @@ func ec2SapImgTypeX86_64(rd distribution) imageType {
kernelOptions: "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 crashkernel=auto processor.max_cstate=1 intel_idle.max_cstate=1",
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},

View file

@ -26,7 +26,7 @@ func azureRhuiImgType() imageType {
kernelOptions: defaultAzureKernelOptions,
bootable: true,
defaultSize: 64 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vpc", "xz"},
exports: []string{"xz"},
@ -47,7 +47,7 @@ func azureSapRhuiImgType(rd distribution) imageType {
kernelOptions: defaultAzureKernelOptions,
bootable: true,
defaultSize: 64 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vpc", "xz"},
exports: []string{"xz"},
@ -67,7 +67,7 @@ func azureByosImgType() imageType {
kernelOptions: defaultAzureKernelOptions,
bootable: true,
defaultSize: 4 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vpc"},
exports: []string{"vpc"},
@ -88,7 +88,7 @@ func azureImgType() imageType {
kernelOptions: defaultAzureKernelOptions,
bootable: true,
defaultSize: 4 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vpc"},
exports: []string{"vpc"},
@ -110,7 +110,7 @@ func azureEap7RhuiImgType() imageType {
kernelOptions: defaultAzureKernelOptions,
bootable: true,
defaultSize: 64 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vpc", "xz"},
exports: []string{"xz"},

View file

@ -125,7 +125,7 @@ func (d *distribution) getDefaultImageConfig() *distro.ImageConfig {
// New creates a new distro object, defining the supported architectures and image types
func New() distro.Distro {
// default minor: create default minor version (current GA) and rename it
d := newDistro("rhel", 7)
d := newDistro("rhel", 8)
d.name = "rhel-8"
return d

View file

@ -152,7 +152,7 @@ func minimalRawImgType(rd distribution) imageType {
kernelOptions: "ro no_timer_check console=ttyS0,115200n8 biosdevname=0 net.ifnames=0",
bootable: true,
defaultSize: 2 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},

View file

@ -22,7 +22,7 @@ func gceImgType(rd distribution) imageType {
kernelOptions: gceKernelOptions,
bootable: true,
defaultSize: 20 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "archive"},
exports: []string{"archive"},
@ -43,7 +43,7 @@ func gceRhuiImgType(rd distribution) imageType {
kernelOptions: gceKernelOptions,
bootable: true,
defaultSize: 20 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "archive"},
exports: []string{"archive"},

View file

@ -227,7 +227,7 @@ func osCustomizations(
return osc
}
func liveImage(workload workload.Workload,
func diskImage(workload workload.Workload,
t *imageType,
customizations *blueprint.Customizations,
options distro.ImageOptions,
@ -235,7 +235,7 @@ func liveImage(workload workload.Workload,
containers []container.SourceSpec,
rng *rand.Rand) (image.ImageKind, error) {
img := image.NewLiveImage()
img := image.NewDiskImage()
img.Platform = t.platform
img.OSCustomizations = osCustomizations(t, packageSets[osPkgsKey], options, containers, customizations)
img.Environment = t.environment

View file

@ -22,7 +22,7 @@ func qcow2ImgType(rd distribution) imageType {
},
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "qcow2"},
exports: []string{"qcow2"},
@ -58,7 +58,7 @@ func openstackImgType() imageType {
kernelOptions: "ro net.ifnames=0",
bootable: true,
defaultSize: 4 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "qcow2"},
exports: []string{"qcow2"},

View file

@ -18,7 +18,7 @@ func vmdkImgType() imageType {
kernelOptions: vmdkKernelOptions,
bootable: true,
defaultSize: 4 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vmdk"},
exports: []string{"vmdk"},
@ -37,7 +37,7 @@ func ovaImgType() imageType {
kernelOptions: vmdkKernelOptions,
bootable: true,
defaultSize: 4 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vmdk", "ovf", "archive"},
exports: []string{"archive"},

View file

@ -21,7 +21,7 @@ var (
kernelOptions: amiKernelOptions,
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image"},
exports: []string{"image"},
@ -39,7 +39,7 @@ var (
kernelOptions: amiKernelOptions,
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},
@ -58,7 +58,7 @@ var (
kernelOptions: amiKernelOptions,
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},
@ -76,7 +76,7 @@ var (
kernelOptions: "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 iommu.strict=0",
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image"},
exports: []string{"image"},
@ -95,7 +95,7 @@ var (
kernelOptions: "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 iommu.strict=0",
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},
@ -114,7 +114,7 @@ var (
kernelOptions: "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 processor.max_cstate=1 intel_idle.max_cstate=1",
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},

View file

@ -23,7 +23,7 @@ var (
kernelOptions: defaultAzureKernelOptions,
bootable: true,
defaultSize: 4 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vpc"},
exports: []string{"vpc"},
@ -42,7 +42,7 @@ var (
kernelOptions: defaultAzureKernelOptions,
bootable: true,
defaultSize: 4 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vpc"},
exports: []string{"vpc"},
@ -62,7 +62,7 @@ var (
kernelOptions: defaultAzureKernelOptions,
bootable: true,
defaultSize: 64 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vpc", "xz"},
exports: []string{"xz"},

View file

@ -126,7 +126,7 @@ func (d *distribution) getDefaultImageConfig() *distro.ImageConfig {
func New() distro.Distro {
// default minor: create default minor version (current GA) and rename it
d := newDistro("rhel", 1)
d := newDistro("rhel", 2)
d.name = "rhel-9"
return d
}

View file

@ -184,7 +184,7 @@ var (
kernelOptions: "ro no_timer_check console=ttyS0,115200n8 biosdevname=0 net.ifnames=0",
bootable: true,
defaultSize: 2 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},

View file

@ -21,7 +21,7 @@ var (
kernelOptions: gceKernelOptions,
bootable: true,
defaultSize: 20 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "archive"},
exports: []string{"archive"},
@ -39,7 +39,7 @@ var (
kernelOptions: gceKernelOptions,
bootable: true,
defaultSize: 20 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "archive"},
exports: []string{"archive"},

View file

@ -224,7 +224,7 @@ func osCustomizations(
return osc
}
func liveImage(workload workload.Workload,
func diskImage(workload workload.Workload,
t *imageType,
customizations *blueprint.Customizations,
options distro.ImageOptions,
@ -232,7 +232,7 @@ func liveImage(workload workload.Workload,
containers []container.SourceSpec,
rng *rand.Rand) (image.ImageKind, error) {
img := image.NewLiveImage()
img := image.NewDiskImage()
img.Platform = t.platform
img.OSCustomizations = osCustomizations(t, packageSets[osPkgsKey], options, containers, customizations)
img.Environment = t.environment

View file

@ -23,7 +23,7 @@ var (
kernelOptions: "ro net.ifnames=0",
bootable: true,
defaultSize: 4 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "qcow2"},
exports: []string{"qcow2"},
@ -162,7 +162,7 @@ func mkQcow2ImgType(d distribution) imageType {
},
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "qcow2"},
exports: []string{"qcow2"},

View file

@ -22,7 +22,7 @@ var vmdkImgType = imageType{
kernelOptions: vmdkKernelOptions,
bootable: true,
defaultSize: 4 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vmdk"},
exports: []string{"vmdk"},
@ -42,7 +42,7 @@ var ovaImgType = imageType{
kernelOptions: vmdkKernelOptions,
bootable: true,
defaultSize: 4 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vmdk", "ovf", "archive"},
exports: []string{"archive"},

View file

@ -16,7 +16,7 @@ import (
"github.com/osbuild/images/pkg/runner"
)
type LiveImage struct {
type DiskImage struct {
Base
Platform platform.Platform
PartitionTable *disk.PartitionTable
@ -34,14 +34,14 @@ type LiveImage struct {
OSNick string
}
func NewLiveImage() *LiveImage {
return &LiveImage{
Base: NewBase("live-image"),
func NewDiskImage() *DiskImage {
return &DiskImage{
Base: NewBase("disk"),
PartTool: osbuild.PTSfdisk,
}
}
func (img *LiveImage) InstantiateManifest(m *manifest.Manifest,
func (img *DiskImage) InstantiateManifest(m *manifest.Manifest,
repos []rpmmd.RepoConfig,
runner runner.Runner,
rng *rand.Rand) (*artifact.Artifact, error) {

View file

@ -29,12 +29,15 @@ type OSTreeArchive struct {
OSVersion string
Filename string
InstallWeakDeps bool
}
func NewOSTreeArchive(ref string) *OSTreeArchive {
return &OSTreeArchive{
Base: NewBase("ostree-archive"),
OSTreeRef: ref,
Base: NewBase("ostree-archive"),
OSTreeRef: ref,
InstallWeakDeps: true,
}
}
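A short sketch of how an image definition can override the new default (mirroring the Fedora IoT commit change earlier in this diff; the ostree ref is a placeholder):
// Sketch only: NewOSTreeArchive now defaults InstallWeakDeps to true, and a
// caller such as iotCommitImage can switch it off.
func exampleCommitArchiveWithoutWeakDeps() *OSTreeArchive {
	img := NewOSTreeArchive("example/x86_64/iot") // placeholder ref
	img.InstallWeakDeps = false
	return img
}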
@ -51,6 +54,7 @@ func (img *OSTreeArchive) InstantiateManifest(m *manifest.Manifest,
osPipeline.Workload = img.Workload
osPipeline.OSTreeParent = img.OSTreeParent
osPipeline.OSTreeRef = img.OSTreeRef
osPipeline.InstallWeakDeps = img.InstallWeakDeps
ostreeCommitPipeline := manifest.NewOSTreeCommit(m, buildPipeline, osPipeline, img.OSTreeRef)
ostreeCommitPipeline.OSVersion = img.OSVersion

View file

@ -145,8 +145,9 @@ func (p *AnacondaInstaller) getPackageSetChain(Distro) []rpmmd.PackageSet {
}
return []rpmmd.PackageSet{
{
Include: append(packages, p.ExtraPackages...),
Repositories: append(p.repos, p.ExtraRepos...),
Include: append(packages, p.ExtraPackages...),
Repositories: append(p.repos, p.ExtraRepos...),
InstallWeakDeps: true,
},
}
}

View file

@ -59,8 +59,9 @@ func (p *Build) getPackageSetChain(distro Distro) []rpmmd.PackageSet {
return []rpmmd.PackageSet{
{
Include: packages,
Repositories: p.repos,
Include: packages,
Repositories: p.repos,
InstallWeakDeps: true,
},
}
}

View file

@ -66,8 +66,9 @@ func (p *OSTreeCommitServer) getPackageSetChain(Distro) []rpmmd.PackageSet {
packages := []string{"nginx"}
return []rpmmd.PackageSet{
{
Include: append(packages, p.ExtraPackages...),
Repositories: append(p.repos, p.ExtraRepos...),
Include: append(packages, p.ExtraPackages...),
Repositories: append(p.repos, p.ExtraRepos...),
InstallWeakDeps: true,
},
}
}

View file

@ -113,8 +113,9 @@ func (p *CoreOSInstaller) getPackageSetChain(Distro) []rpmmd.PackageSet {
packages := p.getBootPackages()
return []rpmmd.PackageSet{
{
Include: append(packages, p.ExtraPackages...),
Repositories: append(p.repos, p.ExtraRepos...),
Include: append(packages, p.ExtraPackages...),
Repositories: append(p.repos, p.ExtraRepos...),
InstallWeakDeps: true,
},
}
}

View file

@ -164,6 +164,8 @@ type OS struct {
OSProduct string
OSVersion string
OSNick string
InstallWeakDeps bool
}
// NewOS creates a new OS pipeline. build is the build pipeline to use for
@ -175,9 +177,10 @@ func NewOS(m *Manifest,
repos []rpmmd.RepoConfig) *OS {
name := "os"
p := &OS{
Base: NewBase(m, name, buildPipeline),
repos: filterRepos(repos, name),
platform: platform,
Base: NewBase(m, name, buildPipeline),
repos: filterRepos(repos, name),
platform: platform,
InstallWeakDeps: true,
}
buildPipeline.addDependent(p)
m.addPipeline(p)
@ -227,11 +230,13 @@ func (p *OS) getPackageSetChain(Distro) []rpmmd.PackageSet {
}
osRepos := append(p.repos, p.ExtraBaseRepos...)
chain := []rpmmd.PackageSet{
{
Include: append(packages, p.ExtraBasePackages...),
Exclude: p.ExcludeBasePackages,
Repositories: osRepos,
Include: append(packages, p.ExtraBasePackages...),
Exclude: p.ExcludeBasePackages,
Repositories: osRepos,
InstallWeakDeps: p.InstallWeakDeps,
},
}

View file

@ -123,9 +123,10 @@ func (pkg Package) ToPackageInfo() PackageInfo {
// to exclude. The Repositories are used when depsolving this package set in
// addition to the base repositories.
type PackageSet struct {
Include []string
Exclude []string
Repositories []RepoConfig
Include []string
Exclude []string
Repositories []RepoConfig
InstallWeakDeps bool
}
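A minimal sketch of the new field in use, combined with Append as the distro code earlier in this diff does (written as if inside package rpmmd; package names are placeholders):
// Sketch only: a base set without weak dependencies, extended with a
// version-specific package via Append, which merges the Include and
// Exclude lists of another set.
func examplePackageSet() PackageSet {
	base := PackageSet{
		Include:         []string{"coreutils", "systemd"},
		InstallWeakDeps: false,
	}
	return base.Append(PackageSet{Include: []string{"fdo-client"}})
}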
// Append the Include and Exclude package list from another PackageSet and

View file

@ -5,4 +5,4 @@
package internal
// Version is the current tagged release of the library.
const Version = "0.132.0"
const Version = "0.134.0"

12
vendor/modules.txt vendored
View file

@ -4,7 +4,7 @@ cloud.google.com/go/internal
cloud.google.com/go/internal/optional
cloud.google.com/go/internal/trace
cloud.google.com/go/internal/version
# cloud.google.com/go/compute v1.22.0
# cloud.google.com/go/compute v1.23.0
## explicit; go 1.19
cloud.google.com/go/compute/apiv1
cloud.google.com/go/compute/apiv1/computepb
@ -119,7 +119,7 @@ github.com/acarl005/stripansi
# github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2
## explicit; go 1.13
github.com/asaskevich/govalidator
# github.com/aws/aws-sdk-go v1.44.304
# github.com/aws/aws-sdk-go v1.44.313
## explicit; go 1.11
github.com/aws/aws-sdk-go/aws
github.com/aws/aws-sdk-go/aws/arn
@ -644,8 +644,8 @@ github.com/oracle/oci-go-sdk/v54/identity
github.com/oracle/oci-go-sdk/v54/objectstorage
github.com/oracle/oci-go-sdk/v54/objectstorage/transfer
github.com/oracle/oci-go-sdk/v54/workrequests
# github.com/osbuild/images v0.0.0-20230720095604-246b718310ea
## explicit; go 1.18
# github.com/osbuild/images v0.0.0-20230801094908-157e798fdf8d
## explicit; go 1.19
github.com/osbuild/images/internal/common
github.com/osbuild/images/internal/dnfjson
github.com/osbuild/images/internal/environment
@ -972,7 +972,7 @@ golang.org/x/tools/internal/typeparams
## explicit; go 1.17
golang.org/x/xerrors
golang.org/x/xerrors/internal
# google.golang.org/api v0.132.0
# google.golang.org/api v0.134.0
## explicit; go 1.19
google.golang.org/api/googleapi
google.golang.org/api/googleapi/transport
@ -1014,7 +1014,7 @@ google.golang.org/genproto/internal
## explicit; go 1.19
google.golang.org/genproto/googleapis/api
google.golang.org/genproto/googleapis/api/annotations
# google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98
# google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771
## explicit; go 1.19
google.golang.org/genproto/googleapis/rpc/code
google.golang.org/genproto/googleapis/rpc/errdetails