deps: update osbuild/images to 157e798fdf8d

Update the osbuild/images dependency from 246b718310ea to 157e798fdf8d.
This commit is contained in:
Achilleas Koutsou 2023-08-01 12:42:59 +02:00 committed by Tomáš Hozza
parent 4c7b3dd25a
commit a4798ea64d
55 changed files with 42304 additions and 41796 deletions

View file

@ -2660,6 +2660,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-south-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@ -2675,12 +2678,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-2",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-south-2",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@ -2690,6 +2699,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@ -8566,6 +8578,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-south-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@ -8575,18 +8590,27 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
Region: "ap-southeast-4",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-2",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-south-2",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@ -8596,6 +8620,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@ -10856,6 +10883,9 @@ var awsPartition = partition{
},
"emr-containers": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
}: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
@ -12805,6 +12835,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-south-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@ -12814,6 +12847,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
Region: "ap-southeast-4",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@ -13926,6 +13962,12 @@ var awsPartition = partition{
}: endpoint{
Hostname: "internetmonitor.ca-central-1.api.aws",
},
endpointKey{
Region: "ca-central-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "internetmonitor-fips.ca-central-1.api.aws",
},
endpointKey{
Region: "eu-central-1",
}: endpoint{
@ -13986,21 +14028,45 @@ var awsPartition = partition{
}: endpoint{
Hostname: "internetmonitor.us-east-1.api.aws",
},
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "internetmonitor-fips.us-east-1.api.aws",
},
endpointKey{
Region: "us-east-2",
}: endpoint{
Hostname: "internetmonitor.us-east-2.api.aws",
},
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "internetmonitor-fips.us-east-2.api.aws",
},
endpointKey{
Region: "us-west-1",
}: endpoint{
Hostname: "internetmonitor.us-west-1.api.aws",
},
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "internetmonitor-fips.us-west-1.api.aws",
},
endpointKey{
Region: "us-west-2",
}: endpoint{
Hostname: "internetmonitor.us-west-2.api.aws",
},
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "internetmonitor-fips.us-west-2.api.aws",
},
},
},
"iot": service{
@ -25178,7 +25244,7 @@ var awsPartition = partition{
Region: "af-south-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.af-south-1.amazonaws.com",
Hostname: "servicediscovery.af-south-1.api.aws",
},
endpointKey{
Region: "ap-east-1",
@ -25187,7 +25253,7 @@ var awsPartition = partition{
Region: "ap-east-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.ap-east-1.amazonaws.com",
Hostname: "servicediscovery.ap-east-1.api.aws",
},
endpointKey{
Region: "ap-northeast-1",
@ -25196,7 +25262,7 @@ var awsPartition = partition{
Region: "ap-northeast-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.ap-northeast-1.amazonaws.com",
Hostname: "servicediscovery.ap-northeast-1.api.aws",
},
endpointKey{
Region: "ap-northeast-2",
@ -25205,7 +25271,7 @@ var awsPartition = partition{
Region: "ap-northeast-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.ap-northeast-2.amazonaws.com",
Hostname: "servicediscovery.ap-northeast-2.api.aws",
},
endpointKey{
Region: "ap-northeast-3",
@ -25214,7 +25280,7 @@ var awsPartition = partition{
Region: "ap-northeast-3",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.ap-northeast-3.amazonaws.com",
Hostname: "servicediscovery.ap-northeast-3.api.aws",
},
endpointKey{
Region: "ap-south-1",
@ -25223,7 +25289,7 @@ var awsPartition = partition{
Region: "ap-south-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.ap-south-1.amazonaws.com",
Hostname: "servicediscovery.ap-south-1.api.aws",
},
endpointKey{
Region: "ap-south-2",
@ -25232,7 +25298,7 @@ var awsPartition = partition{
Region: "ap-south-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.ap-south-2.amazonaws.com",
Hostname: "servicediscovery.ap-south-2.api.aws",
},
endpointKey{
Region: "ap-southeast-1",
@ -25241,7 +25307,7 @@ var awsPartition = partition{
Region: "ap-southeast-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.ap-southeast-1.amazonaws.com",
Hostname: "servicediscovery.ap-southeast-1.api.aws",
},
endpointKey{
Region: "ap-southeast-2",
@ -25250,7 +25316,7 @@ var awsPartition = partition{
Region: "ap-southeast-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.ap-southeast-2.amazonaws.com",
Hostname: "servicediscovery.ap-southeast-2.api.aws",
},
endpointKey{
Region: "ap-southeast-3",
@ -25259,7 +25325,7 @@ var awsPartition = partition{
Region: "ap-southeast-3",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.ap-southeast-3.amazonaws.com",
Hostname: "servicediscovery.ap-southeast-3.api.aws",
},
endpointKey{
Region: "ap-southeast-4",
@ -25268,7 +25334,7 @@ var awsPartition = partition{
Region: "ap-southeast-4",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.ap-southeast-4.amazonaws.com",
Hostname: "servicediscovery.ap-southeast-4.api.aws",
},
endpointKey{
Region: "ca-central-1",
@ -25277,7 +25343,7 @@ var awsPartition = partition{
Region: "ca-central-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.ca-central-1.amazonaws.com",
Hostname: "servicediscovery.ca-central-1.api.aws",
},
endpointKey{
Region: "ca-central-1",
@ -25285,6 +25351,12 @@ var awsPartition = partition{
}: endpoint{
Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com",
},
endpointKey{
Region: "ca-central-1",
Variant: fipsVariant | dualStackVariant,
}: endpoint{
Hostname: "servicediscovery-fips.ca-central-1.api.aws",
},
endpointKey{
Region: "ca-central-1-fips",
}: endpoint{
@ -25301,7 +25373,7 @@ var awsPartition = partition{
Region: "eu-central-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.eu-central-1.amazonaws.com",
Hostname: "servicediscovery.eu-central-1.api.aws",
},
endpointKey{
Region: "eu-central-2",
@ -25310,7 +25382,7 @@ var awsPartition = partition{
Region: "eu-central-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.eu-central-2.amazonaws.com",
Hostname: "servicediscovery.eu-central-2.api.aws",
},
endpointKey{
Region: "eu-north-1",
@ -25319,7 +25391,7 @@ var awsPartition = partition{
Region: "eu-north-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.eu-north-1.amazonaws.com",
Hostname: "servicediscovery.eu-north-1.api.aws",
},
endpointKey{
Region: "eu-south-1",
@ -25328,7 +25400,7 @@ var awsPartition = partition{
Region: "eu-south-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.eu-south-1.amazonaws.com",
Hostname: "servicediscovery.eu-south-1.api.aws",
},
endpointKey{
Region: "eu-south-2",
@ -25337,7 +25409,7 @@ var awsPartition = partition{
Region: "eu-south-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.eu-south-2.amazonaws.com",
Hostname: "servicediscovery.eu-south-2.api.aws",
},
endpointKey{
Region: "eu-west-1",
@ -25346,7 +25418,7 @@ var awsPartition = partition{
Region: "eu-west-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.eu-west-1.amazonaws.com",
Hostname: "servicediscovery.eu-west-1.api.aws",
},
endpointKey{
Region: "eu-west-2",
@ -25355,7 +25427,7 @@ var awsPartition = partition{
Region: "eu-west-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.eu-west-2.amazonaws.com",
Hostname: "servicediscovery.eu-west-2.api.aws",
},
endpointKey{
Region: "eu-west-3",
@ -25364,7 +25436,7 @@ var awsPartition = partition{
Region: "eu-west-3",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.eu-west-3.amazonaws.com",
Hostname: "servicediscovery.eu-west-3.api.aws",
},
endpointKey{
Region: "me-central-1",
@ -25373,7 +25445,7 @@ var awsPartition = partition{
Region: "me-central-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.me-central-1.amazonaws.com",
Hostname: "servicediscovery.me-central-1.api.aws",
},
endpointKey{
Region: "me-south-1",
@ -25382,7 +25454,7 @@ var awsPartition = partition{
Region: "me-south-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.me-south-1.amazonaws.com",
Hostname: "servicediscovery.me-south-1.api.aws",
},
endpointKey{
Region: "sa-east-1",
@ -25391,7 +25463,7 @@ var awsPartition = partition{
Region: "sa-east-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.sa-east-1.amazonaws.com",
Hostname: "servicediscovery.sa-east-1.api.aws",
},
endpointKey{
Region: "us-east-1",
@ -25400,7 +25472,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.us-east-1.amazonaws.com",
Hostname: "servicediscovery.us-east-1.api.aws",
},
endpointKey{
Region: "us-east-1",
@ -25408,6 +25480,12 @@ var awsPartition = partition{
}: endpoint{
Hostname: "servicediscovery-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-1",
Variant: fipsVariant | dualStackVariant,
}: endpoint{
Hostname: "servicediscovery-fips.us-east-1.api.aws",
},
endpointKey{
Region: "us-east-1-fips",
}: endpoint{
@ -25424,7 +25502,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.us-east-2.amazonaws.com",
Hostname: "servicediscovery.us-east-2.api.aws",
},
endpointKey{
Region: "us-east-2",
@ -25432,6 +25510,12 @@ var awsPartition = partition{
}: endpoint{
Hostname: "servicediscovery-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
Variant: fipsVariant | dualStackVariant,
}: endpoint{
Hostname: "servicediscovery-fips.us-east-2.api.aws",
},
endpointKey{
Region: "us-east-2-fips",
}: endpoint{
@ -25448,7 +25532,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.us-west-1.amazonaws.com",
Hostname: "servicediscovery.us-west-1.api.aws",
},
endpointKey{
Region: "us-west-1",
@ -25456,6 +25540,12 @@ var awsPartition = partition{
}: endpoint{
Hostname: "servicediscovery-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
Variant: fipsVariant | dualStackVariant,
}: endpoint{
Hostname: "servicediscovery-fips.us-west-1.api.aws",
},
endpointKey{
Region: "us-west-1-fips",
}: endpoint{
@ -25472,7 +25562,7 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.us-west-2.amazonaws.com",
Hostname: "servicediscovery.us-west-2.api.aws",
},
endpointKey{
Region: "us-west-2",
@ -25480,6 +25570,12 @@ var awsPartition = partition{
}: endpoint{
Hostname: "servicediscovery-fips.us-west-2.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
Variant: fipsVariant | dualStackVariant,
}: endpoint{
Hostname: "servicediscovery-fips.us-west-2.api.aws",
},
endpointKey{
Region: "us-west-2-fips",
}: endpoint{
@ -25855,75 +25951,6 @@ var awsPartition = partition{
},
"sms": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
}: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
Hostname: "sms-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
Hostname: "sms-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
Hostname: "sms-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
@ -25933,39 +25960,6 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "me-south-1",
}: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "sms-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
}: endpoint{},
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "sms-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
}: endpoint{},
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "sms-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
}: endpoint{},
@ -32598,11 +32592,18 @@ var awscnPartition = partition{
},
},
"savingsplans": service{
PartitionEndpoint: "aws-cn",
IsRegionalized: boxedFalse,
IsRegionalized: boxedTrue,
Endpoints: serviceEndpoints{
endpointKey{
Region: "aws-cn",
Region: "cn-north-1",
}: endpoint{
Hostname: "savingsplans.cn-north-1.amazonaws.com.cn",
CredentialScope: credentialScope{
Region: "cn-north-1",
},
},
endpointKey{
Region: "cn-northwest-1",
}: endpoint{
Hostname: "savingsplans.cn-northwest-1.amazonaws.com.cn",
CredentialScope: credentialScope{
@ -32669,7 +32670,7 @@ var awscnPartition = partition{
Region: "cn-north-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.cn-north-1.amazonaws.com.cn",
Hostname: "servicediscovery.cn-north-1.api.amazonwebservices.com.cn",
},
endpointKey{
Region: "cn-northwest-1",
@ -32678,7 +32679,7 @@ var awscnPartition = partition{
Region: "cn-northwest-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "servicediscovery.cn-northwest-1.amazonaws.com.cn",
Hostname: "servicediscovery.cn-northwest-1.api.amazonwebservices.com.cn",
},
},
},
@ -32712,9 +32713,6 @@ var awscnPartition = partition{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
},
},
"snowball": service{
@ -38081,6 +38079,12 @@ var awsusgovPartition = partition{
}: endpoint{
Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com",
},
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant | dualStackVariant,
}: endpoint{
Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com",
},
endpointKey{
Region: "us-gov-east-1-fips",
}: endpoint{
@ -38105,6 +38109,12 @@ var awsusgovPartition = partition{
}: endpoint{
Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com",
},
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant | dualStackVariant,
}: endpoint{
Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com",
},
endpointKey{
Region: "us-gov-west-1-fips",
}: endpoint{
@ -38179,15 +38189,6 @@ var awsusgovPartition = partition{
},
"sms": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "fips-us-gov-east-1",
}: endpoint{
Hostname: "sms-fips.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-gov-west-1",
}: endpoint{
@ -38197,15 +38198,6 @@ var awsusgovPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "sms-fips.us-gov-east-1.amazonaws.com",
},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
@ -39775,6 +39767,15 @@ var awsisoPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-iso-west-1",
}: endpoint{
Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov",
CredentialScope: credentialScope{
Region: "us-iso-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
@ -39784,6 +39785,15 @@ var awsisoPartition = partition{
}: endpoint{
Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov",
},
endpointKey{
Region: "us-iso-west-1",
}: endpoint{},
endpointKey{
Region: "us-iso-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov",
},
},
},
"rds": service{

View file

@ -191,7 +191,10 @@ func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers req
if err != nil {
return nil, err
}
mySession := Must(NewSession())
// create oidcClient with AnonymousCredentials to avoid recursively resolving credentials
mySession := Must(NewSession(&aws.Config{
Credentials: credentials.AnonymousCredentials,
}))
oidcClient := ssooidc.New(mySession, cfgCopy)
tokenProvider := ssocreds.NewSSOTokenProvider(oidcClient, cachedPath)
optFns = append(optFns, func(p *ssocreds.Provider) {

View file

@ -8,7 +8,7 @@
// Generally using the signer outside of the SDK should not require any additional
// logic when using Go v1.5 or higher. The signer does this by taking advantage
// of the URL.EscapedPath method. If your request URI requires additional escaping
// you many need to use the URL.Opaque to define what the raw URI should be sent
// you may need to use the URL.Opaque to define what the raw URI should be sent
// to the service as.
//
// The signer will first check the URL.Opaque field, and use its value if set.

View file

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.44.304"
const SDKVersion = "1.44.313"

View file

@ -769,7 +769,7 @@ func (c *AutoScaling) CompleteLifecycleActionRequest(input *CompleteLifecycleAct
// If you finish before the timeout period ends, send a callback by using the
// CompleteLifecycleAction API call.
//
// For more information, see Amazon EC2 Auto Scaling lifecycle hooks (https://docs.aws.amazon.com/autoscaling/ec2/userguide/lifecycle-hooks.html)
// For more information, see Complete a lifecycle action (https://docs.aws.amazon.com/autoscaling/ec2/userguide/completing-lifecycle-hooks.html)
// in the Amazon EC2 Auto Scaling User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@ -4167,6 +4167,12 @@ func (c *AutoScaling) DescribeWarmPoolRequest(input *DescribeWarmPoolInput) (req
Name: opDescribeWarmPool,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
LimitToken: "MaxRecords",
TruncationToken: "",
},
}
if input == nil {
@ -4229,6 +4235,57 @@ func (c *AutoScaling) DescribeWarmPoolWithContext(ctx aws.Context, input *Descri
return out, req.Send()
}
// DescribeWarmPoolPages iterates over the pages of a DescribeWarmPool operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See DescribeWarmPool method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
//	// Example iterating over at most 3 pages of a DescribeWarmPool operation.
//	pageNum := 0
//	err := client.DescribeWarmPoolPages(params,
//	    func(page *autoscaling.DescribeWarmPoolOutput, lastPage bool) bool {
//	        pageNum++
//	        fmt.Println(page)
//	        return pageNum <= 3
//	    })
//
// DescribeWarmPoolPages runs with a background context; use
// DescribeWarmPoolPagesWithContext when the caller needs cancellation or
// per-request options.
func (c *AutoScaling) DescribeWarmPoolPages(input *DescribeWarmPoolInput, fn func(*DescribeWarmPoolOutput, bool) bool) error {
	return c.DescribeWarmPoolPagesWithContext(aws.BackgroundContext(), input, fn)
}
// DescribeWarmPoolPagesWithContext same as DescribeWarmPoolPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *AutoScaling) DescribeWarmPoolPagesWithContext(ctx aws.Context, input *DescribeWarmPoolInput, fn func(*DescribeWarmPoolOutput, bool) bool, opts ...request.Option) error {
	p := request.Pagination{
		NewRequest: func() (*request.Request, error) {
			// Shallow-copy the caller's input so the pagination machinery
			// operates on its own copy rather than mutating the argument.
			var inCpy *DescribeWarmPoolInput
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			req, _ := c.DescribeWarmPoolRequest(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}

	// Invoke fn once per page; fn returning false stops iteration early.
	// The second argument tells fn whether this is the last page.
	for p.Next() {
		if !fn(p.Page().(*DescribeWarmPoolOutput), !p.HasNextPage()) {
			break
		}
	}

	return p.Err()
}
const opDetachInstances = "DetachInstances"
// DetachInstancesRequest generates a "aws/request.Request" representing the
@ -4560,7 +4617,7 @@ func (c *AutoScaling) DetachTrafficSourcesRequest(input *DetachTrafficSourcesInp
//
// Detaches one or more traffic sources from the specified Auto Scaling group.
//
// When you detach a taffic, it enters the Removing state while deregistering
// When you detach a traffic source, it enters the Removing state while deregistering
// the instances in the group. When all instances are deregistered, then you
// can no longer describe the traffic source using the DescribeTrafficSources
// API call. The instances continue to run.
@ -6981,6 +7038,38 @@ func (s *Alarm) SetAlarmName(v string) *Alarm {
return s
}
// Specifies the CloudWatch alarm specification to use in an instance refresh.
type AlarmSpecification struct {
	_ struct{} `type:"structure"`

	// The names of one or more CloudWatch alarms to monitor for the instance refresh.
	Alarms []*string `type:"list"`
}

// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s AlarmSpecification) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s AlarmSpecification) GoString() string {
	return s.String()
}

// SetAlarms sets the Alarms field's value.
// It returns the receiver so that setter calls can be chained.
func (s *AlarmSpecification) SetAlarms(v []*string) *AlarmSpecification {
	s.Alarms = v
	return s
}
type AttachInstancesInput struct {
_ struct{} `type:"structure"`
@ -18014,7 +18103,7 @@ type PutScalingPolicyInput struct {
// The amount by which to scale, based on the specified adjustment type. A positive
// value adds to the current capacity while a negative number removes from the
// current capacity. For exact capacity, you must specify a positive value.
// current capacity. For exact capacity, you must specify a non-negative value.
//
// Required if the policy type is SimpleScaling. (Not used with any other policy
// type.)
@ -18687,8 +18776,13 @@ func (s RecordLifecycleActionHeartbeatOutput) GoString() string {
type RefreshPreferences struct {
_ struct{} `type:"structure"`
// (Optional) The CloudWatch alarm specification. CloudWatch alarms can be used
// to identify any issues and fail the operation if an alarm threshold is met.
AlarmSpecification *AlarmSpecification `type:"structure"`
// (Optional) Indicates whether to roll back the Auto Scaling group to its previous
// configuration if the instance refresh fails. The default is false.
// configuration if the instance refresh fails or a CloudWatch alarm threshold
// is met. The default is false.
//
// A rollback is not supported in the following situations:
//
@ -18700,6 +18794,9 @@ type RefreshPreferences struct {
//
// * The Auto Scaling group uses the launch template's $Latest or $Default
// version.
//
// For more information, see Undo changes with a rollback (https://docs.aws.amazon.com/autoscaling/ec2/userguide/instance-refresh-rollback.html)
// in the Amazon EC2 Auto Scaling User Guide.
AutoRollback *bool `type:"boolean"`
// (Optional) The amount of time, in seconds, to wait after a checkpoint before
@ -18812,6 +18909,12 @@ func (s RefreshPreferences) GoString() string {
return s.String()
}
// SetAlarmSpecification sets the AlarmSpecification field's value.
func (s *RefreshPreferences) SetAlarmSpecification(v *AlarmSpecification) *RefreshPreferences {
s.AlarmSpecification = v
return s
}
// SetAutoRollback sets the AutoRollback field's value.
func (s *RefreshPreferences) SetAutoRollback(v bool) *RefreshPreferences {
s.AutoRollback = &v
@ -18957,7 +19060,9 @@ type RollbackInstanceRefreshInput struct {
_ struct{} `type:"structure"`
// The name of the Auto Scaling group.
AutoScalingGroupName *string `min:"1" type:"string"`
//
// AutoScalingGroupName is a required field
AutoScalingGroupName *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation.
@ -18981,6 +19086,9 @@ func (s RollbackInstanceRefreshInput) GoString() string {
// Validate inspects the fields of the type to determine if they are valid.
func (s *RollbackInstanceRefreshInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "RollbackInstanceRefreshInput"}
if s.AutoScalingGroupName == nil {
invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName"))
}
if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1))
}
@ -19899,6 +20007,8 @@ type StartInstanceRefreshInput struct {
//
// * Checkpoints
//
// * CloudWatch alarms
//
// * Skip matching
Preferences *RefreshPreferences `type:"structure"`
@ -20052,12 +20162,7 @@ type StepAdjustment struct {
// The amount by which to scale, based on the specified adjustment type. A positive
// value adds to the current capacity while a negative number removes from the
// current capacity.
//
// The amount by which to scale. The adjustment is based on the value that you
// specified in the AdjustmentType property (either an absolute number or a
// percentage). A positive value adds to the current capacity and a negative
// number subtracts from the current capacity.
// current capacity. For exact capacity, you must specify a non-negative value.
//
// ScalingAdjustment is a required field
ScalingAdjustment *int64 `type:"integer" required:"true"`

View file

@ -120713,6 +120713,9 @@ type GetEbsEncryptionByDefaultOutput struct {
// Indicates whether encryption by default is enabled.
EbsEncryptionByDefault *bool `locationName:"ebsEncryptionByDefault" type:"boolean"`
// Reserved for future use.
SseType *string `locationName:"sseType" type:"string" enum:"SSEType"`
}
// String returns the string representation.
@ -120739,6 +120742,12 @@ func (s *GetEbsEncryptionByDefaultOutput) SetEbsEncryptionByDefault(v bool) *Get
return s
}
// SetSseType sets the SseType field's value.
// The argument is copied and its address stored; the receiver is returned
// so that setter calls can be chained.
func (s *GetEbsEncryptionByDefaultOutput) SetSseType(v string) *GetEbsEncryptionByDefaultOutput {
	s.SseType = &v
	return s
}
type GetFlowLogsIntegrationTemplateInput struct {
_ struct{} `type:"structure"`
@ -128525,6 +128534,10 @@ type InferenceAcceleratorInfo struct {
// Describes the Inference accelerators for the instance type.
Accelerators []*InferenceDeviceInfo `locationName:"accelerators" type:"list"`
// The total size of the memory for the inference accelerators for the instance
// type, in MiB.
TotalInferenceMemoryInMiB *int64 `locationName:"totalInferenceMemoryInMiB" type:"integer"`
}
// String returns the string representation.
@ -128551,6 +128564,12 @@ func (s *InferenceAcceleratorInfo) SetAccelerators(v []*InferenceDeviceInfo) *In
return s
}
// SetTotalInferenceMemoryInMiB sets the TotalInferenceMemoryInMiB field's value.
// The argument is copied and its address stored; the receiver is returned
// so that setter calls can be chained.
func (s *InferenceAcceleratorInfo) SetTotalInferenceMemoryInMiB(v int64) *InferenceAcceleratorInfo {
	s.TotalInferenceMemoryInMiB = &v
	return s
}
// Describes the Inference accelerators for the instance type.
type InferenceDeviceInfo struct {
_ struct{} `type:"structure"`
@ -128561,6 +128580,9 @@ type InferenceDeviceInfo struct {
// The manufacturer of the Inference accelerator.
Manufacturer *string `locationName:"manufacturer" type:"string"`
// Describes the memory available to the inference accelerator.
MemoryInfo *InferenceDeviceMemoryInfo `locationName:"memoryInfo" type:"structure"`
// The name of the Inference accelerator.
Name *string `locationName:"name" type:"string"`
}
@ -128595,12 +128617,50 @@ func (s *InferenceDeviceInfo) SetManufacturer(v string) *InferenceDeviceInfo {
return s
}
// SetMemoryInfo sets the MemoryInfo field's value.
// It returns the receiver so that setter calls can be chained.
func (s *InferenceDeviceInfo) SetMemoryInfo(v *InferenceDeviceMemoryInfo) *InferenceDeviceInfo {
	s.MemoryInfo = v
	return s
}
// SetName sets the Name field's value.
// The argument is copied and its address stored; the receiver is returned
// so that setter calls can be chained.
func (s *InferenceDeviceInfo) SetName(v string) *InferenceDeviceInfo {
	s.Name = &v
	return s
}
// Describes the memory available to the inference accelerator.
type InferenceDeviceMemoryInfo struct {
	_ struct{} `type:"structure"`

	// The size of the memory available to the inference accelerator, in MiB.
	SizeInMiB *int64 `locationName:"sizeInMiB" type:"integer"`
}

// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s InferenceDeviceMemoryInfo) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s InferenceDeviceMemoryInfo) GoString() string {
	return s.String()
}

// SetSizeInMiB sets the SizeInMiB field's value.
// The argument is copied and its address stored; the receiver is returned
// so that setter calls can be chained.
func (s *InferenceDeviceMemoryInfo) SetSizeInMiB(v int64) *InferenceDeviceMemoryInfo {
	s.SizeInMiB = &v
	return s
}
// Describes an instance.
type Instance struct {
_ struct{} `type:"structure"`
@ -150493,6 +150553,9 @@ func (s *NetworkBandwidthGbpsRequest) SetMin(v float64) *NetworkBandwidthGbpsReq
type NetworkCardInfo struct {
_ struct{} `type:"structure"`
// The baseline network performance of the network card, in Gbps.
BaselineBandwidthInGbps *float64 `locationName:"baselineBandwidthInGbps" type:"double"`
// The maximum number of network interfaces for the network card.
MaximumNetworkInterfaces *int64 `locationName:"maximumNetworkInterfaces" type:"integer"`
@ -150501,6 +150564,9 @@ type NetworkCardInfo struct {
// The network performance of the network card.
NetworkPerformance *string `locationName:"networkPerformance" type:"string"`
// The peak (burst) network performance of the network card, in Gbps.
PeakBandwidthInGbps *float64 `locationName:"peakBandwidthInGbps" type:"double"`
}
// String returns the string representation.
@ -150521,6 +150587,12 @@ func (s NetworkCardInfo) GoString() string {
return s.String()
}
// SetBaselineBandwidthInGbps sets the BaselineBandwidthInGbps field's value.
func (s *NetworkCardInfo) SetBaselineBandwidthInGbps(v float64) *NetworkCardInfo {
	s.BaselineBandwidthInGbps = &v
	// Return the receiver so setter calls can be chained.
	return s
}
// SetMaximumNetworkInterfaces sets the MaximumNetworkInterfaces field's value.
func (s *NetworkCardInfo) SetMaximumNetworkInterfaces(v int64) *NetworkCardInfo {
s.MaximumNetworkInterfaces = &v
@ -150539,6 +150611,12 @@ func (s *NetworkCardInfo) SetNetworkPerformance(v string) *NetworkCardInfo {
return s
}
// SetPeakBandwidthInGbps sets the PeakBandwidthInGbps field's value.
func (s *NetworkCardInfo) SetPeakBandwidthInGbps(v float64) *NetworkCardInfo {
	s.PeakBandwidthInGbps = &v
	// Return the receiver so setter calls can be chained.
	return s
}
// Describes the networking features of the instance type.
type NetworkInfo struct {
_ struct{} `type:"structure"`
@ -162638,6 +162716,9 @@ type RestoreSnapshotFromRecycleBinOutput struct {
// The ID of the snapshot.
SnapshotId *string `locationName:"snapshotId" type:"string"`
// Reserved for future use.
SseType *string `locationName:"sseType" type:"string" enum:"SSEType"`
// The time stamp when the snapshot was initiated.
StartTime *time.Time `locationName:"startTime" type:"timestamp"`
@ -162705,6 +162786,12 @@ func (s *RestoreSnapshotFromRecycleBinOutput) SetSnapshotId(v string) *RestoreSn
return s
}
// SetSseType sets the SseType field's value.
func (s *RestoreSnapshotFromRecycleBinOutput) SetSseType(v string) *RestoreSnapshotFromRecycleBinOutput {
	// v is expected to be one of the SSEType enum values.
	s.SseType = &v
	return s
}
// SetStartTime sets the StartTime field's value.
func (s *RestoreSnapshotFromRecycleBinOutput) SetStartTime(v time.Time) *RestoreSnapshotFromRecycleBinOutput {
s.StartTime = &v
@ -167454,6 +167541,9 @@ type Snapshot struct {
// is created.
SnapshotId *string `locationName:"snapshotId" type:"string"`
// Reserved for future use.
SseType *string `locationName:"sseType" type:"string" enum:"SSEType"`
// The time stamp when the snapshot was initiated.
StartTime *time.Time `locationName:"startTime" type:"timestamp"`
@ -167563,6 +167653,12 @@ func (s *Snapshot) SetSnapshotId(v string) *Snapshot {
return s
}
// SetSseType sets the SseType field's value.
func (s *Snapshot) SetSseType(v string) *Snapshot {
	// v is expected to be one of the SSEType enum values.
	s.SseType = &v
	return s
}
// SetStartTime sets the StartTime field's value.
func (s *Snapshot) SetStartTime(v time.Time) *Snapshot {
s.StartTime = &v
@ -167813,6 +167909,9 @@ type SnapshotInfo struct {
// Snapshot id that can be used to describe this snapshot.
SnapshotId *string `locationName:"snapshotId" type:"string"`
// Reserved for future use.
SseType *string `locationName:"sseType" type:"string" enum:"SSEType"`
// Time this snapshot was started. This is the same for all snapshots initiated
// by the same request.
StartTime *time.Time `locationName:"startTime" type:"timestamp"`
@ -167884,6 +167983,12 @@ func (s *SnapshotInfo) SetSnapshotId(v string) *SnapshotInfo {
return s
}
// SetSseType sets the SseType field's value.
func (s *SnapshotInfo) SetSseType(v string) *SnapshotInfo {
	// v is expected to be one of the SSEType enum values.
	s.SseType = &v
	return s
}
// SetStartTime sets the StartTime field's value.
func (s *SnapshotInfo) SetStartTime(v time.Time) *SnapshotInfo {
s.StartTime = &v
@ -179154,6 +179259,9 @@ type Volume struct {
// The snapshot from which the volume was created, if applicable.
SnapshotId *string `locationName:"snapshotId" type:"string"`
// Reserved for future use.
SseType *string `locationName:"sseType" type:"string" enum:"SSEType"`
// The volume state.
State *string `locationName:"status" type:"string" enum:"VolumeState"`
@ -179254,6 +179362,12 @@ func (s *Volume) SetSnapshotId(v string) *Volume {
return s
}
// SetSseType sets the SseType field's value.
func (s *Volume) SetSseType(v string) *Volume {
	// v is expected to be one of the SSEType enum values.
	s.SseType = &v
	return s
}
// SetState sets the State field's value.
func (s *Volume) SetState(v string) *Volume {
s.State = &v
@ -189848,6 +189962,26 @@ func RuleAction_Values() []string {
}
}
// Valid values for the SSEType enum (server-side encryption type).
const (
	// SSETypeSseEbs is a SSEType enum value
	SSETypeSseEbs = "sse-ebs"

	// SSETypeSseKms is a SSEType enum value
	SSETypeSseKms = "sse-kms"

	// SSETypeNone is a SSEType enum value
	SSETypeNone = "none"
)
// SSEType_Values returns all elements of the SSEType enum
// in declaration order. Note the API may add values not yet
// present in this list.
func SSEType_Values() []string {
	return []string{
		SSETypeSseEbs,
		SSETypeSseKms,
		SSETypeNone,
	}
}
const (
// ScopeAvailabilityZone is a Scope enum value
ScopeAvailabilityZone = "Availability Zone"
@ -190067,6 +190201,9 @@ const (
// SpotInstanceStateFailed is a SpotInstanceState enum value
SpotInstanceStateFailed = "failed"
// SpotInstanceStateDisabled is a SpotInstanceState enum value
SpotInstanceStateDisabled = "disabled"
)
// SpotInstanceState_Values returns all elements of the SpotInstanceState enum
@ -190077,6 +190214,7 @@ func SpotInstanceState_Values() []string {
SpotInstanceStateClosed,
SpotInstanceStateCancelled,
SpotInstanceStateFailed,
SpotInstanceStateDisabled,
}
}

View file

@ -1283,6 +1283,62 @@ func (c *EC2) WaitUntilSpotInstanceRequestFulfilledWithContext(ctx aws.Context,
return w.WaitWithContext(ctx)
}
// WaitUntilStoreImageTaskComplete uses the Amazon EC2 API operation
// DescribeStoreImageTasks to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *EC2) WaitUntilStoreImageTaskComplete(input *DescribeStoreImageTasksInput) error {
	// Convenience wrapper: delegates to the context-aware variant with a
	// background context and default waiter options.
	return c.WaitUntilStoreImageTaskCompleteWithContext(aws.BackgroundContext(), input)
}
// WaitUntilStoreImageTaskCompleteWithContext is an extended version of WaitUntilStoreImageTaskComplete.
// With the support for passing in a context and options to configure the
// Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *EC2) WaitUntilStoreImageTaskCompleteWithContext(ctx aws.Context, input *DescribeStoreImageTasksInput, opts ...request.WaiterOption) error {
	w := request.Waiter{
		Name: "WaitUntilStoreImageTaskComplete",
		// Polls every 5 seconds, for up to 40 attempts.
		MaxAttempts: 40,
		Delay:       request.ConstantWaiterDelay(5 * time.Second),
		Acceptors: []request.WaiterAcceptor{
			{
				// Succeed when every task reports state "Completed".
				State:   request.SuccessWaiterState,
				Matcher: request.PathAllWaiterMatch, Argument: "StoreImageTaskResults[].StoreTaskState",
				Expected: "Completed",
			},
			{
				// Fail as soon as any task reports state "Failed".
				State:   request.FailureWaiterState,
				Matcher: request.PathAnyWaiterMatch, Argument: "StoreImageTaskResults[].StoreTaskState",
				Expected: "Failed",
			},
			{
				// Keep retrying while any task is still "InProgress".
				State:   request.RetryWaiterState,
				Matcher: request.PathAnyWaiterMatch, Argument: "StoreImageTaskResults[].StoreTaskState",
				Expected: "InProgress",
			},
		},
		Logger: c.Config.Logger,
		NewRequest: func(opts []request.Option) (*request.Request, error) {
			// Copy the input so each poll uses an unmodified request payload.
			var inCpy *DescribeStoreImageTasksInput
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			req, _ := c.DescribeStoreImageTasksRequest(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}
	w.ApplyOptions(opts...)

	return w.WaitWithContext(ctx)
}
// WaitUntilSubnetAvailable uses the Amazon EC2 API operation
// DescribeSubnets to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will

View file

@ -10709,6 +10709,7 @@ func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObject
}
var _ awserr.Error
var _ time.Time
// SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent.
//

View file

@ -1460,6 +1460,9 @@ type AssumeRoleInput struct {
// in the IAM User Guide.
PolicyArns []*PolicyDescriptorType `type:"list"`
// Reserved for future use.
ProvidedContexts []*ProvidedContext `type:"list"`
// The Amazon Resource Name (ARN) of the role to assume.
//
// RoleArn is a required field
@ -1633,6 +1636,16 @@ func (s *AssumeRoleInput) Validate() error {
}
}
}
if s.ProvidedContexts != nil {
for i, v := range s.ProvidedContexts {
if v == nil {
continue
}
if err := v.Validate(); err != nil {
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ProvidedContexts", i), err.(request.ErrInvalidParams))
}
}
}
if s.Tags != nil {
for i, v := range s.Tags {
if v == nil {
@ -1674,6 +1687,12 @@ func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleIn
return s
}
// SetProvidedContexts sets the ProvidedContexts field's value.
func (s *AssumeRoleInput) SetProvidedContexts(v []*ProvidedContext) *AssumeRoleInput {
	// The slice is stored as-is (not copied); callers should not mutate it
	// after passing it in.
	s.ProvidedContexts = v
	return s
}
// SetRoleArn sets the RoleArn field's value.
func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput {
s.RoleArn = &v
@ -2266,7 +2285,8 @@ type AssumeRoleWithWebIdentityInput struct {
// The OAuth 2.0 access token or OpenID Connect ID token that is provided by
// the identity provider. Your application must get this token by authenticating
// the user who is using your application with a web identity provider before
// the application makes an AssumeRoleWithWebIdentity call.
// the application makes an AssumeRoleWithWebIdentity call. Only tokens with
// RSA algorithms (RS256) are supported.
//
// WebIdentityToken is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by AssumeRoleWithWebIdentityInput's
@ -3385,6 +3405,63 @@ func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType {
return s
}
// Reserved for future use.
type ProvidedContext struct {
	_ struct{} `type:"structure"`

	// Reserved for future use.
	// Minimum length 4, enforced by Validate.
	ContextAssertion *string `min:"4" type:"string"`

	// Reserved for future use.
	// Minimum length 20, enforced by Validate.
	ProviderArn *string `min:"20" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ProvidedContext) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ProvidedContext) GoString() string {
	// Delegates to String; both representations are identical for this type.
	return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
// It returns an ErrInvalidParams aggregating every violation, or nil when
// all set fields satisfy their constraints. Nil fields are not validated.
func (s *ProvidedContext) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ProvidedContext"}
	// Enforce the min:"4" constraint declared on the struct tag.
	if s.ContextAssertion != nil && len(*s.ContextAssertion) < 4 {
		invalidParams.Add(request.NewErrParamMinLen("ContextAssertion", 4))
	}
	// Enforce the min:"20" constraint declared on the struct tag.
	if s.ProviderArn != nil && len(*s.ProviderArn) < 20 {
		invalidParams.Add(request.NewErrParamMinLen("ProviderArn", 20))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
// SetContextAssertion sets the ContextAssertion field's value.
func (s *ProvidedContext) SetContextAssertion(v string) *ProvidedContext {
	s.ContextAssertion = &v
	// Return the receiver so setter calls can be chained.
	return s
}
// SetProviderArn sets the ProviderArn field's value.
func (s *ProvidedContext) SetProviderArn(v string) *ProvidedContext {
	s.ProviderArn = &v
	// Return the receiver so setter calls can be chained.
	return s
}
// You can pass custom key-value pair attributes when you assume a role or federate
// a user. These are called session tags. You can then use the session tags
// to control access to resources. For more information, see Tagging Amazon

View file

@ -1,175 +0,0 @@
package common
import (
"encoding/json"
"io"
"github.com/labstack/gommon/log"
"github.com/sirupsen/logrus"
)
// EchoLogrusLogger extends logrus.Logger by embedding it and adding the
// methods below (Output, Level, Printj, Debugj, ...), adapting it to the
// interface expected by echo's gommon/log package.
type EchoLogrusLogger struct {
	*logrus.Logger
}
// commonLogger is the package-wide shared logger, backed by the global
// logrus standard logger.
var commonLogger = &EchoLogrusLogger{
	Logger: logrus.StandardLogger(),
}
// Logger returns the shared package-wide EchoLogrusLogger instance.
func Logger() *EchoLogrusLogger {
	return commonLogger
}
// toEchoLevel translates a logrus level into the corresponding echo
// (gommon/log) level. Levels with no direct equivalent map to log.OFF.
func toEchoLevel(level logrus.Level) log.Lvl {
	switch level {
	case logrus.ErrorLevel:
		return log.ERROR
	case logrus.WarnLevel:
		return log.WARN
	case logrus.InfoLevel:
		return log.INFO
	case logrus.DebugLevel:
		return log.DEBUG
	default:
		return log.OFF
	}
}
// Output returns the output writer of the underlying logrus logger.
func (l *EchoLogrusLogger) Output() io.Writer {
	return l.Out
}

// SetOutput is intentionally a no-op.
func (l *EchoLogrusLogger) SetOutput(w io.Writer) {
	// disable operations that would change behavior of global logrus logger.
}

// Level reports the current logrus level translated to an echo level.
func (l *EchoLogrusLogger) Level() log.Lvl {
	return toEchoLevel(l.Logger.Level)
}

// SetLevel is intentionally a no-op.
func (l *EchoLogrusLogger) SetLevel(v log.Lvl) {
	// disable operations that would change behavior of global logrus logger.
}

// SetHeader is intentionally a no-op; logrus controls its own formatting.
func (l *EchoLogrusLogger) SetHeader(h string) {
}

// Prefix always returns the empty string; prefixes are not supported.
func (l *EchoLogrusLogger) Prefix() string {
	return ""
}

// SetPrefix is intentionally a no-op; prefixes are not supported.
func (l *EchoLogrusLogger) SetPrefix(p string) {
}
// The methods below satisfy echo's logging interface by delegating to the
// embedded logrus logger. The *j variants marshal a log.JSON payload and
// log it as a single line; they panic if the payload cannot be marshalled.

// Print delegates to logrus Print.
func (l *EchoLogrusLogger) Print(i ...interface{}) {
	l.Logger.Print(i...)
}

// Printf delegates to logrus Printf.
func (l *EchoLogrusLogger) Printf(format string, args ...interface{}) {
	l.Logger.Printf(format, args...)
}

// Printj logs j as a JSON-encoded line. Panics on marshal failure.
func (l *EchoLogrusLogger) Printj(j log.JSON) {
	b, err := json.Marshal(j)
	if err != nil {
		panic(err)
	}
	l.Logger.Println(string(b))
}

// Debug delegates to logrus Debug.
func (l *EchoLogrusLogger) Debug(i ...interface{}) {
	l.Logger.Debug(i...)
}

// Debugf delegates to logrus Debugf.
func (l *EchoLogrusLogger) Debugf(format string, args ...interface{}) {
	l.Logger.Debugf(format, args...)
}

// Debugj logs j as a JSON-encoded line at debug level. Panics on marshal failure.
func (l *EchoLogrusLogger) Debugj(j log.JSON) {
	b, err := json.Marshal(j)
	if err != nil {
		panic(err)
	}
	l.Logger.Debugln(string(b))
}

// Info delegates to logrus Info.
func (l *EchoLogrusLogger) Info(i ...interface{}) {
	l.Logger.Info(i...)
}

// Infof delegates to logrus Infof.
func (l *EchoLogrusLogger) Infof(format string, args ...interface{}) {
	l.Logger.Infof(format, args...)
}

// Infoj logs j as a JSON-encoded line at info level. Panics on marshal failure.
func (l *EchoLogrusLogger) Infoj(j log.JSON) {
	b, err := json.Marshal(j)
	if err != nil {
		panic(err)
	}
	l.Logger.Infoln(string(b))
}

// Warn delegates to logrus Warn.
func (l *EchoLogrusLogger) Warn(i ...interface{}) {
	l.Logger.Warn(i...)
}

// Warnf delegates to logrus Warnf.
func (l *EchoLogrusLogger) Warnf(format string, args ...interface{}) {
	l.Logger.Warnf(format, args...)
}

// Warnj logs j as a JSON-encoded line at warn level. Panics on marshal failure.
func (l *EchoLogrusLogger) Warnj(j log.JSON) {
	b, err := json.Marshal(j)
	if err != nil {
		panic(err)
	}
	l.Logger.Warnln(string(b))
}

// Error delegates to logrus Error.
func (l *EchoLogrusLogger) Error(i ...interface{}) {
	l.Logger.Error(i...)
}

// Errorf delegates to logrus Errorf.
func (l *EchoLogrusLogger) Errorf(format string, args ...interface{}) {
	l.Logger.Errorf(format, args...)
}

// Errorj logs j as a JSON-encoded line at error level. Panics on marshal failure.
func (l *EchoLogrusLogger) Errorj(j log.JSON) {
	b, err := json.Marshal(j)
	if err != nil {
		panic(err)
	}
	l.Logger.Errorln(string(b))
}

// Fatal delegates to logrus Fatal.
func (l *EchoLogrusLogger) Fatal(i ...interface{}) {
	l.Logger.Fatal(i...)
}

// Fatalf delegates to logrus Fatalf.
func (l *EchoLogrusLogger) Fatalf(format string, args ...interface{}) {
	l.Logger.Fatalf(format, args...)
}

// Fatalj logs j as a JSON-encoded line, then exits. Panics on marshal failure.
func (l *EchoLogrusLogger) Fatalj(j log.JSON) {
	b, err := json.Marshal(j)
	if err != nil {
		panic(err)
	}
	l.Logger.Fatalln(string(b))
}

// Panic delegates to logrus Panic.
func (l *EchoLogrusLogger) Panic(i ...interface{}) {
	l.Logger.Panic(i...)
}

// Panicf delegates to logrus Panicf.
func (l *EchoLogrusLogger) Panicf(format string, args ...interface{}) {
	l.Logger.Panicf(format, args...)
}

// Panicj logs j as a JSON-encoded line, then panics. Also panics on marshal failure.
func (l *EchoLogrusLogger) Panicj(j log.JSON) {
	b, err := json.Marshal(j)
	if err != nil {
		panic(err)
	}
	l.Logger.Panicln(string(b))
}

View file

@ -1,22 +0,0 @@
package common
import (
"github.com/labstack/echo/v4"
"github.com/segmentio/ksuid"
)
const OperationIDKey string = "operationID"

// OperationIDMiddleware ensures every request carries a time-sortable,
// globally unique operation ID on its echo.Context. An ID already present
// (e.g. set by an earlier middleware) is left untouched.
func OperationIDMiddleware(next echo.HandlerFunc) echo.HandlerFunc {
	return func(ctx echo.Context) error {
		if existing := ctx.Get(OperationIDKey); existing == nil {
			ctx.Set(OperationIDKey, GenerateOperationID())
		}
		return next(ctx)
	}
}
// GenerateOperationID produces a fresh KSUID — a time-sortable, globally
// unique identifier — in its canonical string form.
func GenerateOperationID() string {
	id := ksuid.New()
	return id.String()
}

View file

@ -358,8 +358,9 @@ func (s *Solver) makeDepsolveRequest(pkgSets []rpmmd.PackageSet) (*Request, map[
transactions := make([]transactionArgs, len(pkgSets))
for dsIdx, pkgSet := range pkgSets {
transactions[dsIdx] = transactionArgs{
PackageSpecs: pkgSet.Include,
ExcludeSpecs: pkgSet.Exclude,
PackageSpecs: pkgSet.Include,
ExcludeSpecs: pkgSet.Exclude,
InstallWeakDeps: pkgSet.InstallWeakDeps,
}
for _, jobRepo := range pkgSet.Repositories {
@ -538,6 +539,9 @@ type transactionArgs struct {
// IDs of repositories to use for this depsolve
RepoIDs []string `json:"repo-ids"`
// If we want weak deps for this depsolve
InstallWeakDeps bool `json:"install_weak_deps"`
}
type packageSpecs []PackageSpec

View file

@ -24,6 +24,7 @@ var CustomDirectoriesPolicies = NewPathPolicies(map[string]PathPolicy{
var CustomFilesPolicies = NewPathPolicies(map[string]PathPolicy{
"/": {Deny: true},
"/etc": {},
"/root": {},
"/etc/fstab": {Deny: true},
"/etc/shadow": {Deny: true},
"/etc/passwd": {Deny: true},

View file

@ -1,15 +1,6 @@
// Package blueprint contains primitives for representing weldr blueprints
package blueprint
import (
"encoding/json"
"fmt"
"github.com/osbuild/images/pkg/crypt"
"github.com/coreos/go-semver/semver"
)
// A Blueprint is a high-level description of an image.
type Blueprint struct {
Name string `json:"name" toml:"name"`
@ -49,71 +40,6 @@ type Container struct {
TLSVerify *bool `json:"tls-verify,omitempty" toml:"tls-verify,omitempty"`
}
// DeepCopy returns a deep copy of the blueprint.
// It round-trips the blueprint through JSON, which is simple but not very
// efficient; it panics if either marshalling step fails.
func (b *Blueprint) DeepCopy() Blueprint {
	raw, err := json.Marshal(b)
	if err != nil {
		panic(err)
	}

	var clone Blueprint
	if err := json.Unmarshal(raw, &clone); err != nil {
		panic(err)
	}
	return clone
}
// Initialize ensures that the blueprint has sane defaults for any missing
// fields: nil slices become empty, and a missing version becomes "0.0.0".
// It returns an error when the name is empty, the version is not valid
// semver, or password hashing fails.
func (b *Blueprint) Initialize() error {
	if b.Name == "" {
		return fmt.Errorf("empty blueprint name not allowed")
	}

	if b.Packages == nil {
		b.Packages = []Package{}
	}
	if b.Modules == nil {
		b.Modules = []Package{}
	}
	if b.Groups == nil {
		b.Groups = []Group{}
	}
	if b.Containers == nil {
		b.Containers = []Container{}
	}
	if b.Version == "" {
		b.Version = "0.0.0"
	}

	// Reject versions that are not valid Semantic Versioning.
	if _, err := semver.NewVersion(b.Version); err != nil {
		return fmt.Errorf("Invalid 'version', must use Semantic Versioning: %s", err.Error())
	}

	if err := b.CryptPasswords(); err != nil {
		return fmt.Errorf("Error hashing passwords: %s", err.Error())
	}

	return nil
}
// BumpVersion increments the previous blueprint's patch version.
// If the old version string is not valid semver the blueprint keeps the new
// version as-is. This assumes the new blueprint's version has already been
// validated via Initialize.
func (b *Blueprint) BumpVersion(old string) {
	parsed, err := semver.NewVersion(old)
	if err != nil {
		// Old version is not valid semver; nothing to bump.
		return
	}

	parsed.BumpPatch()
	b.Version = parsed.String()
}
// packages, modules, and groups all resolve to rpm packages right now. This
// function returns a combined list of "name-version" strings.
func (b *Blueprint) GetPackages() []string {
@ -149,36 +75,3 @@ func (p Package) ToNameVersion() string {
return p.Name + "-" + p.Version
}
// CryptPasswords ensures that all blueprint user passwords are stored
// hashed. Empty passwords are cleared (set to nil) rather than hashed, and
// passwords that are already crypted are left untouched. It returns an
// error only if hashing fails.
func (b *Blueprint) CryptPasswords() error {
	if b.Customizations == nil {
		return nil
	}

	users := b.Customizations.User
	for i := range users {
		pass := users[i].Password
		// Skip users with no password set.
		if pass == nil {
			continue
		}
		// Prevent an empty password from being hashed.
		if *pass == "" {
			users[i].Password = nil
			continue
		}
		if crypt.PasswordIsCrypted(*pass) {
			continue
		}
		hashed, err := crypt.CryptSHA512(*pass)
		if err != nil {
			return err
		}
		// Replace the plaintext password with its hash.
		users[i].Password = &hashed
	}

	return nil
}

View file

@ -3,6 +3,7 @@ package container
import (
"context"
"fmt"
"sort"
"strings"
)
@ -79,5 +80,8 @@ func (r *Resolver) Finish() ([]Spec, error) {
return specs, fmt.Errorf("failed to resolve container: %s", detail)
}
// Return a stable result, sorted by Digest
sort.Slice(specs, func(i, j int) bool { return specs[i].Digest < specs[j].Digest })
return specs, nil
}

View file

@ -46,7 +46,6 @@ var (
iotServices = []string{
"NetworkManager.service",
"firewalld.service",
"rngd.service",
"sshd.service",
"zezere_ignition.timer",
"zezere_ignition_banner.service",
@ -202,7 +201,7 @@ var (
kernelOptions: defaultKernelOptions,
bootable: true,
defaultSize: 5 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "qcow2"},
exports: []string{"qcow2"},
@ -230,7 +229,7 @@ var (
kernelOptions: defaultKernelOptions,
bootable: true,
defaultSize: 2 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vpc"},
exports: []string{"vpc"},
@ -259,7 +258,7 @@ var (
kernelOptions: defaultKernelOptions,
bootable: true,
defaultSize: 2 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vmdk"},
exports: []string{"vmdk"},
@ -277,7 +276,7 @@ var (
kernelOptions: defaultKernelOptions,
bootable: true,
defaultSize: 2 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vmdk", "ovf", "archive"},
exports: []string{"archive"},
@ -341,7 +340,7 @@ var (
kernelOptions: defaultKernelOptions,
bootable: true,
defaultSize: 2 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},

View file

@ -200,7 +200,7 @@ func osCustomizations(
// IMAGES
func liveImage(workload workload.Workload,
func diskImage(workload workload.Workload,
t *imageType,
customizations *blueprint.Customizations,
options distro.ImageOptions,
@ -208,7 +208,7 @@ func liveImage(workload workload.Workload,
containers []container.SourceSpec,
rng *rand.Rand) (image.ImageKind, error) {
img := image.NewLiveImage()
img := image.NewDiskImage()
img.Platform = t.platform
img.OSCustomizations = osCustomizations(t, packageSets[osPkgsKey], containers, customizations)
img.Environment = t.environment
@ -255,17 +255,6 @@ func liveInstallerImage(workload workload.Workload,
img := image.NewAnacondaLiveInstaller()
distro := t.Arch().Distro()
// If the live installer is generated for Fedora 39 or higher then we enable the web ui
// kernel options. This is a temporary thing as the check for this should really lie with
// anaconda and their `liveinst` script to determine which frontend to start.
if common.VersionLessThan(distro.Releasever(), "39") {
img.AdditionalKernelOpts = []string{}
} else {
img.AdditionalKernelOpts = []string{"inst.webui"}
}
img.Platform = t.platform
img.Workload = workload
img.ExtraBasePackages = packageSets[installerPkgsKey]
@ -345,6 +334,7 @@ func iotCommitImage(workload workload.Workload,
img.OSTreeParent = parentCommit
img.OSVersion = t.arch.distro.osVersion
img.Filename = t.Filename()
img.InstallWeakDeps = false
return img, nil
}

View file

@ -30,7 +30,7 @@ func qcow2CommonPackageSet(t *imageType) rpmmd.PackageSet {
}
func vhdCommonPackageSet(t *imageType) rpmmd.PackageSet {
ps := rpmmd.PackageSet{
return rpmmd.PackageSet{
Include: []string{
"@core",
"chrony",
@ -48,12 +48,10 @@ func vhdCommonPackageSet(t *imageType) rpmmd.PackageSet {
"zram-generator-defaults",
},
}
return ps
}
func vmdkCommonPackageSet(t *imageType) rpmmd.PackageSet {
ps := rpmmd.PackageSet{
return rpmmd.PackageSet{
Include: []string{
"@Fedora Cloud Server",
"chrony",
@ -74,139 +72,144 @@ func vmdkCommonPackageSet(t *imageType) rpmmd.PackageSet {
"extlinux-bootloader",
},
}
return ps
}
// fedora iot commit OS package set
func iotCommitPackageSet(t *imageType) rpmmd.PackageSet {
ps := rpmmd.PackageSet{
Include: []string{
"fedora-release-iot",
"glibc",
"glibc-minimal-langpack",
"nss-altfiles",
"sssd-client",
"libsss_sudo",
"shadow-utils",
"dracut-network",
"polkit",
"lvm2",
"cryptsetup",
"pinentry",
"keyutils",
"cracklib-dicts",
"e2fsprogs",
"xfsprogs",
"dosfstools",
"gnupg2",
"basesystem",
"python3",
"bash",
"xz",
"gzip",
"coreutils",
"which",
"curl",
"firewalld",
"iptables",
"NetworkManager",
"NetworkManager-wifi",
"NetworkManager-wwan",
"wpa_supplicant",
"iwd",
"tpm2-pkcs11",
"dnsmasq",
"traceroute",
"hostname",
"iproute",
"iputils",
"openssh-clients",
"openssh-server",
"passwd",
"policycoreutils",
"procps-ng",
"rootfiles",
"rpm",
"smartmontools-selinux",
"setup",
"shadow-utils",
"sudo",
"systemd",
"util-linux",
"vim-minimal",
"less",
"tar",
"fwupd",
"usbguard",
"greenboot",
"ignition",
"zezere-ignition",
"rsync",
"aardvark-dns",
"atheros-firmware",
"attr",
"ima-evm-utils",
"authselect",
"basesystem",
"bash",
"bash-completion",
"tmux",
"screen",
"policycoreutils-python-utils",
"setools-console",
"audit",
"rng-tools",
"brcmfmac-firmware",
"chrony",
"bluez",
"bluez-libs",
"bluez-mesh",
"kernel-tools",
"libgpiod-utils",
"podman",
"container-selinux",
"skopeo",
"criu",
"slirp4netns",
"fuse-overlayfs",
"clevis",
"clevis-dracut",
"clevis-luks",
"clevis-pin-tpm2",
"parsec",
"container-selinux",
"containernetworking-plugins",
"coreutils",
"cracklib-dicts",
"criu",
"cryptsetup",
"curl",
"dbus-parsec",
"iwl7260-firmware",
"iwlax2xx-firmware",
"dnsmasq",
"dosfstools",
"dracut-config-generic",
"dracut-network",
"e2fsprogs",
"efibootmgr",
"fedora-release-iot",
"firewalld",
"fwupd",
"fwupd-efi",
"fwupd-plugin-modem-manager",
"fwupd-plugin-uefi-capsule-data",
"glibc",
"glibc-minimal-langpack",
"gnupg2",
"greenboot",
"greenboot-default-health-checks",
"gzip",
"hostname",
"ignition",
"ima-evm-utils",
"iproute",
"iputils",
"iwd",
"iwlwifi-mvm-firmware",
"kernel-tools",
"keyutils",
"less",
"libsss_sudo",
"linux-firmware",
"lvm2",
"netavark",
"NetworkManager",
"NetworkManager-wifi",
"NetworkManager-wwan",
"nss-altfiles",
"openssl",
"openssh-clients",
"openssh-server",
"parsec",
"passwd",
"pinentry",
"podman",
"podman-plugins",
"policycoreutils",
"policycoreutils-python-utils",
"polkit",
"procps-ng",
"realtek-firmware",
"rootfiles",
"rpm",
"screen",
"selinux-policy-targeted",
"setools-console",
"setup",
"shadow-utils",
"skopeo",
"slirp4netns",
"sssd-client",
"sudo",
"systemd",
"systemd-resolved",
"tar",
"tmux",
"tpm2-pkcs11",
"traceroute",
"usbguard",
"util-linux",
"vim-minimal",
"wpa_supplicant",
"wireless-regdb",
"xfsprogs",
"xz",
"zezere-ignition",
"zram-generator",
},
}
return ps
if !common.VersionLessThan(t.arch.distro.osVersion, "38") {
ps = ps.Append(rpmmd.PackageSet{
Include: []string{
"fdo-client", // added in F38
},
})
}
return ps
}
// INSTALLER PACKAGE SET
func installerPackageSet(t *imageType) rpmmd.PackageSet {
ps := rpmmd.PackageSet{
return rpmmd.PackageSet{
Include: []string{
"anaconda-dracut",
"atheros-firmware",
"brcmfmac-firmware",
"curl",
"dracut-config-generic",
"dracut-network",
"hostname",
"iwl100-firmware",
"iwl1000-firmware",
"iwl105-firmware",
"iwl135-firmware",
"iwl2000-firmware",
"iwl2030-firmware",
"iwl3160-firmware",
"iwl5000-firmware",
"iwl5150-firmware",
"iwl6050-firmware",
"iwl7260-firmware",
"iwlwifi-dvm-firmware",
"iwlwifi-mvm-firmware",
"kernel",
"linux-firmware",
"less",
"nfs-utils",
"openssh-clients",
"ostree",
"plymouth",
"realtek-firmware",
"rng-tools",
"rpcbind",
"selinux-policy-targeted",
@ -216,8 +219,6 @@ func installerPackageSet(t *imageType) rpmmd.PackageSet {
"xz",
},
}
return ps
}
func anacondaPackageSet(t *imageType) rpmmd.PackageSet {
@ -235,9 +236,11 @@ func anacondaPackageSet(t *imageType) rpmmd.PackageSet {
"anaconda-dracut",
"anaconda-install-env-deps",
"anaconda-widgets",
"atheros-firmware",
"audit",
"bind-utils",
"bitmap-fangsongti-fonts",
"brcmfmac-firmware",
"bzip2",
"cryptsetup",
"curl",
@ -268,19 +271,8 @@ func anacondaPackageSet(t *imageType) rpmmd.PackageSet {
"hostname",
"initscripts",
"ipmitool",
"iwl1000-firmware",
"iwl100-firmware",
"iwl105-firmware",
"iwl135-firmware",
"iwl2000-firmware",
"iwl2030-firmware",
"iwl3160-firmware",
"iwl5000-firmware",
"iwl5150-firmware",
"iwl6000g2a-firmware",
"iwl6000g2b-firmware",
"iwl6050-firmware",
"iwl7260-firmware",
"iwlwifi-dvm-firmware",
"iwlwifi-mvm-firmware",
"jomolhari-fonts",
"kacst-farsi-fonts",
"kacst-qurn-fonts",
@ -325,6 +317,7 @@ func anacondaPackageSet(t *imageType) rpmmd.PackageSet {
"plymouth",
"python3-pyatspi",
"rdma-core",
"realtek-firmware",
"rit-meera-new-fonts",
"rng-tools",
"rpcbind",
@ -486,7 +479,6 @@ func containerPackageSet(t *imageType) rpmmd.PackageSet {
"dnf-yum",
"dnf",
"fedora-release-container",
"fedora-repos-modular",
"glibc-minimal-langpack",
"rootfiles",
"rpm",
@ -529,6 +521,14 @@ func containerPackageSet(t *imageType) rpmmd.PackageSet {
},
}
if common.VersionLessThan(t.arch.distro.osVersion, "39") {
ps = ps.Append(rpmmd.PackageSet{
Include: []string{
"fedora-repos-modular",
},
})
}
return ps
}

View file

@ -25,7 +25,7 @@ var azureRhuiImgType = imageType{
kernelOptions: "ro crashkernel=auto console=tty1 console=ttyS0 earlyprintk=ttyS0 rootdelay=300 scsi_mod.use_blk_mq=y",
bootable: true,
defaultSize: 64 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vpc", "xz"},
exports: []string{"xz"},

View file

@ -216,7 +216,7 @@ func osCustomizations(
return osc
}
func liveImage(workload workload.Workload,
func diskImage(workload workload.Workload,
t *imageType,
customizations *blueprint.Customizations,
options distro.ImageOptions,
@ -224,7 +224,7 @@ func liveImage(workload workload.Workload,
containers []container.SourceSpec,
rng *rand.Rand) (image.ImageKind, error) {
img := image.NewLiveImage()
img := image.NewDiskImage()
img.Platform = t.platform
img.OSCustomizations = osCustomizations(t, packageSets[osPkgsKey], options, containers, customizations)
img.Environment = t.environment

View file

@ -19,7 +19,7 @@ var qcow2ImgType = imageType{
defaultImageConfig: qcow2DefaultImgConfig,
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "qcow2"},
exports: []string{"qcow2"},

View file

@ -20,7 +20,7 @@ func amiImgTypeX86_64(rd distribution) imageType {
kernelOptions: "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 crashkernel=auto",
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image"},
exports: []string{"image"},
@ -49,7 +49,7 @@ func ec2ImgTypeX86_64(rd distribution) imageType {
kernelOptions: "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 crashkernel=auto",
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},
@ -77,7 +77,7 @@ func ec2HaImgTypeX86_64(rd distribution) imageType {
kernelOptions: "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 crashkernel=auto",
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},
@ -98,7 +98,7 @@ func amiImgTypeAarch64(rd distribution) imageType {
kernelOptions: "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 iommu.strict=0 crashkernel=auto",
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image"},
exports: []string{"image"},
@ -126,7 +126,7 @@ func ec2ImgTypeAarch64(rd distribution) imageType {
kernelOptions: "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 iommu.strict=0 crashkernel=auto",
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},
@ -154,7 +154,7 @@ func ec2SapImgTypeX86_64(rd distribution) imageType {
kernelOptions: "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 crashkernel=auto processor.max_cstate=1 intel_idle.max_cstate=1",
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},

View file

@ -26,7 +26,7 @@ func azureRhuiImgType() imageType {
kernelOptions: defaultAzureKernelOptions,
bootable: true,
defaultSize: 64 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vpc", "xz"},
exports: []string{"xz"},
@ -47,7 +47,7 @@ func azureSapRhuiImgType(rd distribution) imageType {
kernelOptions: defaultAzureKernelOptions,
bootable: true,
defaultSize: 64 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vpc", "xz"},
exports: []string{"xz"},
@ -67,7 +67,7 @@ func azureByosImgType() imageType {
kernelOptions: defaultAzureKernelOptions,
bootable: true,
defaultSize: 4 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vpc"},
exports: []string{"vpc"},
@ -88,7 +88,7 @@ func azureImgType() imageType {
kernelOptions: defaultAzureKernelOptions,
bootable: true,
defaultSize: 4 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vpc"},
exports: []string{"vpc"},
@ -110,7 +110,7 @@ func azureEap7RhuiImgType() imageType {
kernelOptions: defaultAzureKernelOptions,
bootable: true,
defaultSize: 64 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vpc", "xz"},
exports: []string{"xz"},

View file

@ -125,7 +125,7 @@ func (d *distribution) getDefaultImageConfig() *distro.ImageConfig {
// New creates a new distro object, defining the supported architectures and image types
func New() distro.Distro {
// default minor: create default minor version (current GA) and rename it
d := newDistro("rhel", 7)
d := newDistro("rhel", 8)
d.name = "rhel-8"
return d

View file

@ -152,7 +152,7 @@ func minimalRawImgType(rd distribution) imageType {
kernelOptions: "ro no_timer_check console=ttyS0,115200n8 biosdevname=0 net.ifnames=0",
bootable: true,
defaultSize: 2 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},

View file

@ -22,7 +22,7 @@ func gceImgType(rd distribution) imageType {
kernelOptions: gceKernelOptions,
bootable: true,
defaultSize: 20 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "archive"},
exports: []string{"archive"},
@ -43,7 +43,7 @@ func gceRhuiImgType(rd distribution) imageType {
kernelOptions: gceKernelOptions,
bootable: true,
defaultSize: 20 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "archive"},
exports: []string{"archive"},

View file

@ -227,7 +227,7 @@ func osCustomizations(
return osc
}
func liveImage(workload workload.Workload,
func diskImage(workload workload.Workload,
t *imageType,
customizations *blueprint.Customizations,
options distro.ImageOptions,
@ -235,7 +235,7 @@ func liveImage(workload workload.Workload,
containers []container.SourceSpec,
rng *rand.Rand) (image.ImageKind, error) {
img := image.NewLiveImage()
img := image.NewDiskImage()
img.Platform = t.platform
img.OSCustomizations = osCustomizations(t, packageSets[osPkgsKey], options, containers, customizations)
img.Environment = t.environment

View file

@ -22,7 +22,7 @@ func qcow2ImgType(rd distribution) imageType {
},
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "qcow2"},
exports: []string{"qcow2"},
@ -58,7 +58,7 @@ func openstackImgType() imageType {
kernelOptions: "ro net.ifnames=0",
bootable: true,
defaultSize: 4 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "qcow2"},
exports: []string{"qcow2"},

View file

@ -18,7 +18,7 @@ func vmdkImgType() imageType {
kernelOptions: vmdkKernelOptions,
bootable: true,
defaultSize: 4 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vmdk"},
exports: []string{"vmdk"},
@ -37,7 +37,7 @@ func ovaImgType() imageType {
kernelOptions: vmdkKernelOptions,
bootable: true,
defaultSize: 4 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vmdk", "ovf", "archive"},
exports: []string{"archive"},

View file

@ -21,7 +21,7 @@ var (
kernelOptions: amiKernelOptions,
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image"},
exports: []string{"image"},
@ -39,7 +39,7 @@ var (
kernelOptions: amiKernelOptions,
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},
@ -58,7 +58,7 @@ var (
kernelOptions: amiKernelOptions,
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},
@ -76,7 +76,7 @@ var (
kernelOptions: "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 iommu.strict=0",
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image"},
exports: []string{"image"},
@ -95,7 +95,7 @@ var (
kernelOptions: "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 iommu.strict=0",
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},
@ -114,7 +114,7 @@ var (
kernelOptions: "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 processor.max_cstate=1 intel_idle.max_cstate=1",
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},

View file

@ -23,7 +23,7 @@ var (
kernelOptions: defaultAzureKernelOptions,
bootable: true,
defaultSize: 4 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vpc"},
exports: []string{"vpc"},
@ -42,7 +42,7 @@ var (
kernelOptions: defaultAzureKernelOptions,
bootable: true,
defaultSize: 4 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vpc"},
exports: []string{"vpc"},
@ -62,7 +62,7 @@ var (
kernelOptions: defaultAzureKernelOptions,
bootable: true,
defaultSize: 64 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vpc", "xz"},
exports: []string{"xz"},

View file

@ -126,7 +126,7 @@ func (d *distribution) getDefaultImageConfig() *distro.ImageConfig {
func New() distro.Distro {
// default minor: create default minor version (current GA) and rename it
d := newDistro("rhel", 1)
d := newDistro("rhel", 2)
d.name = "rhel-9"
return d
}

View file

@ -184,7 +184,7 @@ var (
kernelOptions: "ro no_timer_check console=ttyS0,115200n8 biosdevname=0 net.ifnames=0",
bootable: true,
defaultSize: 2 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},

View file

@ -21,7 +21,7 @@ var (
kernelOptions: gceKernelOptions,
bootable: true,
defaultSize: 20 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "archive"},
exports: []string{"archive"},
@ -39,7 +39,7 @@ var (
kernelOptions: gceKernelOptions,
bootable: true,
defaultSize: 20 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "archive"},
exports: []string{"archive"},

View file

@ -224,7 +224,7 @@ func osCustomizations(
return osc
}
func liveImage(workload workload.Workload,
func diskImage(workload workload.Workload,
t *imageType,
customizations *blueprint.Customizations,
options distro.ImageOptions,
@ -232,7 +232,7 @@ func liveImage(workload workload.Workload,
containers []container.SourceSpec,
rng *rand.Rand) (image.ImageKind, error) {
img := image.NewLiveImage()
img := image.NewDiskImage()
img.Platform = t.platform
img.OSCustomizations = osCustomizations(t, packageSets[osPkgsKey], options, containers, customizations)
img.Environment = t.environment

View file

@ -23,7 +23,7 @@ var (
kernelOptions: "ro net.ifnames=0",
bootable: true,
defaultSize: 4 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "qcow2"},
exports: []string{"qcow2"},
@ -162,7 +162,7 @@ func mkQcow2ImgType(d distribution) imageType {
},
bootable: true,
defaultSize: 10 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "qcow2"},
exports: []string{"qcow2"},

View file

@ -22,7 +22,7 @@ var vmdkImgType = imageType{
kernelOptions: vmdkKernelOptions,
bootable: true,
defaultSize: 4 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vmdk"},
exports: []string{"vmdk"},
@ -42,7 +42,7 @@ var ovaImgType = imageType{
kernelOptions: vmdkKernelOptions,
bootable: true,
defaultSize: 4 * common.GibiByte,
image: liveImage,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vmdk", "ovf", "archive"},
exports: []string{"archive"},

View file

@ -16,7 +16,7 @@ import (
"github.com/osbuild/images/pkg/runner"
)
type LiveImage struct {
type DiskImage struct {
Base
Platform platform.Platform
PartitionTable *disk.PartitionTable
@ -34,14 +34,14 @@ type LiveImage struct {
OSNick string
}
func NewLiveImage() *LiveImage {
return &LiveImage{
Base: NewBase("live-image"),
func NewDiskImage() *DiskImage {
return &DiskImage{
Base: NewBase("disk"),
PartTool: osbuild.PTSfdisk,
}
}
func (img *LiveImage) InstantiateManifest(m *manifest.Manifest,
func (img *DiskImage) InstantiateManifest(m *manifest.Manifest,
repos []rpmmd.RepoConfig,
runner runner.Runner,
rng *rand.Rand) (*artifact.Artifact, error) {

View file

@ -29,12 +29,15 @@ type OSTreeArchive struct {
OSVersion string
Filename string
InstallWeakDeps bool
}
func NewOSTreeArchive(ref string) *OSTreeArchive {
return &OSTreeArchive{
Base: NewBase("ostree-archive"),
OSTreeRef: ref,
Base: NewBase("ostree-archive"),
OSTreeRef: ref,
InstallWeakDeps: true,
}
}
@ -51,6 +54,7 @@ func (img *OSTreeArchive) InstantiateManifest(m *manifest.Manifest,
osPipeline.Workload = img.Workload
osPipeline.OSTreeParent = img.OSTreeParent
osPipeline.OSTreeRef = img.OSTreeRef
osPipeline.InstallWeakDeps = img.InstallWeakDeps
ostreeCommitPipeline := manifest.NewOSTreeCommit(m, buildPipeline, osPipeline, img.OSTreeRef)
ostreeCommitPipeline.OSVersion = img.OSVersion

View file

@ -145,8 +145,9 @@ func (p *AnacondaInstaller) getPackageSetChain(Distro) []rpmmd.PackageSet {
}
return []rpmmd.PackageSet{
{
Include: append(packages, p.ExtraPackages...),
Repositories: append(p.repos, p.ExtraRepos...),
Include: append(packages, p.ExtraPackages...),
Repositories: append(p.repos, p.ExtraRepos...),
InstallWeakDeps: true,
},
}
}

View file

@ -59,8 +59,9 @@ func (p *Build) getPackageSetChain(distro Distro) []rpmmd.PackageSet {
return []rpmmd.PackageSet{
{
Include: packages,
Repositories: p.repos,
Include: packages,
Repositories: p.repos,
InstallWeakDeps: true,
},
}
}

View file

@ -66,8 +66,9 @@ func (p *OSTreeCommitServer) getPackageSetChain(Distro) []rpmmd.PackageSet {
packages := []string{"nginx"}
return []rpmmd.PackageSet{
{
Include: append(packages, p.ExtraPackages...),
Repositories: append(p.repos, p.ExtraRepos...),
Include: append(packages, p.ExtraPackages...),
Repositories: append(p.repos, p.ExtraRepos...),
InstallWeakDeps: true,
},
}
}

View file

@ -113,8 +113,9 @@ func (p *CoreOSInstaller) getPackageSetChain(Distro) []rpmmd.PackageSet {
packages := p.getBootPackages()
return []rpmmd.PackageSet{
{
Include: append(packages, p.ExtraPackages...),
Repositories: append(p.repos, p.ExtraRepos...),
Include: append(packages, p.ExtraPackages...),
Repositories: append(p.repos, p.ExtraRepos...),
InstallWeakDeps: true,
},
}
}

View file

@ -164,6 +164,8 @@ type OS struct {
OSProduct string
OSVersion string
OSNick string
InstallWeakDeps bool
}
// NewOS creates a new OS pipeline. build is the build pipeline to use for
@ -175,9 +177,10 @@ func NewOS(m *Manifest,
repos []rpmmd.RepoConfig) *OS {
name := "os"
p := &OS{
Base: NewBase(m, name, buildPipeline),
repos: filterRepos(repos, name),
platform: platform,
Base: NewBase(m, name, buildPipeline),
repos: filterRepos(repos, name),
platform: platform,
InstallWeakDeps: true,
}
buildPipeline.addDependent(p)
m.addPipeline(p)
@ -227,11 +230,13 @@ func (p *OS) getPackageSetChain(Distro) []rpmmd.PackageSet {
}
osRepos := append(p.repos, p.ExtraBaseRepos...)
chain := []rpmmd.PackageSet{
{
Include: append(packages, p.ExtraBasePackages...),
Exclude: p.ExcludeBasePackages,
Repositories: osRepos,
Include: append(packages, p.ExtraBasePackages...),
Exclude: p.ExcludeBasePackages,
Repositories: osRepos,
InstallWeakDeps: p.InstallWeakDeps,
},
}

View file

@ -123,9 +123,10 @@ func (pkg Package) ToPackageInfo() PackageInfo {
// to exclude. The Repositories are used when depsolving this package set in
// addition to the base repositories.
type PackageSet struct {
Include []string
Exclude []string
Repositories []RepoConfig
Include []string
Exclude []string
Repositories []RepoConfig
InstallWeakDeps bool
}
// Append the Include and Exclude package list from another PackageSet and