build(deps): bump the go-deps group with 5 updates

Bumps the go-deps group with 5 updates:

| Package | From | To |
| --- | --- | --- |
| [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) | `1.48.1` | `1.48.13` |
| [github.com/gophercloud/gophercloud](https://github.com/gophercloud/gophercloud) | `1.7.0` | `1.8.0` |
| [github.com/openshift-online/ocm-sdk-go](https://github.com/openshift-online/ocm-sdk-go) | `0.1.385` | `0.1.388` |
| [github.com/osbuild/images](https://github.com/osbuild/images) | `0.18.0` | `0.21.0` |
| [google.golang.org/api](https://github.com/googleapis/google-api-go-client) | `0.151.0` | `0.153.0` |


Updates `github.com/aws/aws-sdk-go` from 1.48.1 to 1.48.13
- [Release notes](https://github.com/aws/aws-sdk-go/releases)
- [Commits](https://github.com/aws/aws-sdk-go/compare/v1.48.1...v1.48.13)

Updates `github.com/gophercloud/gophercloud` from 1.7.0 to 1.8.0
- [Release notes](https://github.com/gophercloud/gophercloud/releases)
- [Changelog](https://github.com/gophercloud/gophercloud/blob/v1.8.0/CHANGELOG.md)
- [Commits](https://github.com/gophercloud/gophercloud/compare/v1.7.0...v1.8.0)

Updates `github.com/openshift-online/ocm-sdk-go` from 0.1.385 to 0.1.388
- [Release notes](https://github.com/openshift-online/ocm-sdk-go/releases)
- [Changelog](https://github.com/openshift-online/ocm-sdk-go/blob/main/CHANGES.md)
- [Commits](https://github.com/openshift-online/ocm-sdk-go/compare/v0.1.385...v0.1.388)

Updates `github.com/osbuild/images` from 0.18.0 to 0.21.0
- [Release notes](https://github.com/osbuild/images/releases)
- [Commits](https://github.com/osbuild/images/compare/v0.18.0...v0.21.0)

Updates `google.golang.org/api` from 0.151.0 to 0.153.0
- [Release notes](https://github.com/googleapis/google-api-go-client/releases)
- [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md)
- [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.151.0...v0.153.0)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: go-deps
- dependency-name: github.com/gophercloud/gophercloud
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: go-deps
- dependency-name: github.com/openshift-online/ocm-sdk-go
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: go-deps
- dependency-name: github.com/osbuild/images
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: go-deps
- dependency-name: google.golang.org/api
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: go-deps
...

Signed-off-by: dependabot[bot] <support@github.com>
This commit is contained in:
dependabot[bot] 2023-12-06 05:11:45 +00:00 committed by Tomáš Hozza
parent d3dd83aceb
commit 016051a4b8
105 changed files with 7698 additions and 2795 deletions

32
go.mod
View file

@ -12,7 +12,7 @@ require (
github.com/Azure/go-autorest/autorest v0.11.29
github.com/Azure/go-autorest/autorest/azure/auth v0.5.12
github.com/BurntSushi/toml v1.3.2
github.com/aws/aws-sdk-go v1.48.1
github.com/aws/aws-sdk-go v1.48.13
github.com/coreos/go-semver v0.3.1
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
github.com/deepmap/oapi-codegen v1.8.2
@ -21,7 +21,7 @@ require (
github.com/golang-jwt/jwt/v4 v4.5.0
github.com/google/go-cmp v0.6.0
github.com/google/uuid v1.4.0
github.com/gophercloud/gophercloud v1.7.0
github.com/gophercloud/gophercloud v1.8.0
github.com/hashicorp/go-retryablehttp v0.7.5
github.com/jackc/pgtype v1.14.0
github.com/jackc/pgx/v4 v4.18.1
@ -29,9 +29,9 @@ require (
github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b
github.com/labstack/echo/v4 v4.11.3
github.com/labstack/gommon v0.4.1
github.com/openshift-online/ocm-sdk-go v0.1.385
github.com/openshift-online/ocm-sdk-go v0.1.388
github.com/oracle/oci-go-sdk/v54 v54.0.0
github.com/osbuild/images v0.18.0
github.com/osbuild/images v0.21.0
github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20231117174845-e969a9dc3cd1
github.com/osbuild/pulp-client v0.1.0
github.com/prometheus/client_golang v1.17.0
@ -42,16 +42,16 @@ require (
github.com/ubccr/kerby v0.0.0-20170626144437-201a958fc453
github.com/vmware/govmomi v0.33.1
golang.org/x/exp v0.0.0-20231006140011-7918f672742d
golang.org/x/oauth2 v0.14.0
golang.org/x/oauth2 v0.15.0
golang.org/x/sync v0.5.0
golang.org/x/sys v0.14.0
google.golang.org/api v0.151.0
golang.org/x/sys v0.15.0
google.golang.org/api v0.153.0
)
require (
cloud.google.com/go v0.110.8 // indirect
cloud.google.com/go v0.110.10 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/iam v1.1.3 // indirect
cloud.google.com/go/iam v1.1.5 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.2 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
@ -167,18 +167,18 @@ require (
go.mongodb.org/mongo-driver v1.11.3 // indirect
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
go.opencensus.io v0.24.0 // indirect
golang.org/x/crypto v0.15.0 // indirect
golang.org/x/crypto v0.16.0 // indirect
golang.org/x/mod v0.13.0 // indirect
golang.org/x/net v0.18.0 // indirect
golang.org/x/term v0.14.0 // indirect
golang.org/x/net v0.19.0 // indirect
golang.org/x/term v0.15.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/time v0.5.0 // indirect
golang.org/x/tools v0.14.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 // indirect
google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect
google.golang.org/grpc v1.59.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect

64
go.sum
View file

@ -1,12 +1,12 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME=
cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk=
cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y=
cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic=
cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/iam v1.1.3 h1:18tKG7DzydKWUnLjonWcJO6wjSCAtzh4GcRKlH/Hrzc=
cloud.google.com/go/iam v1.1.3/go.mod h1:3khUlaBXfPKKe7huYgEpDn6FtgRyMEqbkvBxrQyY5SE=
cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI=
cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8=
cloud.google.com/go/storage v1.35.1 h1:B59ahL//eDfx2IIKFBeT5Atm9wnNmj3+8xG/W4WB//w=
cloud.google.com/go/storage v1.35.1/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
@ -61,8 +61,8 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat6
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/aws/aws-sdk-go v1.48.1 h1:OXPUVL4cLdsDsqkVIuhwY+D389tjI7e1xu0lsDYyeMk=
github.com/aws/aws-sdk-go v1.48.1/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.48.13 h1:6N4GTme6MpxfCisWf5pql8k3TBORiKTmbeutZCDXlG8=
github.com/aws/aws-sdk-go v1.48.13/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@ -266,8 +266,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
github.com/gophercloud/gophercloud v1.7.0 h1:fyJGKh0LBvIZKLvBWvQdIgkaV5yTM3Jh9EYUh+UNCAs=
github.com/gophercloud/gophercloud v1.7.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gophercloud/gophercloud v1.8.0 h1:TM3Jawprb2NrdOnvcHhWJalmKmAmOGgfZElM/3oBYCk=
github.com/gophercloud/gophercloud v1.8.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY=
github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
@ -449,12 +449,12 @@ github.com/opencontainers/runc v1.1.10 h1:EaL5WeO9lv9wmS6SASjszOeQdSctvpbu0DdBQB
github.com/opencontainers/runc v1.1.10/go.mod h1:+/R6+KmDlh+hOO8NkjmgkG9Qzvypzk0yXxAPYYR65+M=
github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg=
github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/openshift-online/ocm-sdk-go v0.1.385 h1:EZs7CDfxtJEwywCERdNX6rApyFaJ+kB1W6nk3jROPwM=
github.com/openshift-online/ocm-sdk-go v0.1.385/go.mod h1:/+VFIw1iW2H0jEkFH4GnbL/liWareyzsL0w7mDIudB4=
github.com/openshift-online/ocm-sdk-go v0.1.388 h1:c8yPCUQwJm3QhcVmnyMPFpeDtxPBaPeYh5hLv1vg9YQ=
github.com/openshift-online/ocm-sdk-go v0.1.388/go.mod h1:/+VFIw1iW2H0jEkFH4GnbL/liWareyzsL0w7mDIudB4=
github.com/oracle/oci-go-sdk/v54 v54.0.0 h1:CDLjeSejv2aDpElAJrhKpi6zvT/zhZCZuXchUUZ+LS4=
github.com/oracle/oci-go-sdk/v54 v54.0.0/go.mod h1:+t+yvcFGVp+3ZnztnyxqXfQDsMlq8U25faBLa+mqCMc=
github.com/osbuild/images v0.18.0 h1:I/tOO7DCECciJptrXVq+oykJI5dP1rwkzJqmf2rKuqw=
github.com/osbuild/images v0.18.0/go.mod h1:Zr+AkaX/Rpxyff6Zxh8kkwGKFtJsSukGo1Vv/j9HsxA=
github.com/osbuild/images v0.21.0 h1:xqW7Y6F+ihoL8x2J+S3nGDRXIqZPq//c0Q8ny3afdpo=
github.com/osbuild/images v0.21.0/go.mod h1:HtKiCjR4gQcqcd8E7i37orlFqhsjZmFCvyM89E3aeos=
github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20231117174845-e969a9dc3cd1 h1:UFEJIcPa46W8gtWgOYzriRKYyy1t6SWL0BI7fPTuVvc=
github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20231117174845-e969a9dc3cd1/go.mod h1:z+WA+dX6qMwc7fqY5jCzESDIlg4WR2sBQezxsoXv9Ik=
github.com/osbuild/pulp-client v0.1.0 h1:L0C4ezBJGTamN3BKdv+rKLuq/WxXJbsFwz/Hj7aEmJ8=
@ -612,8 +612,8 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA=
golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
@ -642,11 +642,11 @@ golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0=
golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM=
golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ=
golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -685,14 +685,14 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8=
golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@ -706,8 +706,8 @@ golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@ -736,8 +736,8 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/api v0.151.0 h1:FhfXLO/NFdJIzQtCqjpysWwqKk8AzGWBUhMIx67cVDU=
google.golang.org/api v0.151.0/go.mod h1:ccy+MJ6nrYFgE3WgRx/AMXOxOmU8Q4hSa+jjibzhxcg=
google.golang.org/api v0.153.0 h1:N1AwGhielyKFaUqH07/ZSIQR3uNPcV7NVw0vj+j4iR4=
google.golang.org/api v0.153.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
@ -745,12 +745,12 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b h1:+YaDE2r2OG8t/z5qmsh7Y+XXwCbvadxxZ0YY6mTdrVA=
google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI=
google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b h1:CIC2YMXmIhYw6evmhPxBKJ4fmLbOFtXQN/GV3XOZR8k=
google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 h1:AB/lmRny7e2pLhFEYIbl5qkDAUt2h0ZRO4wGPhZf+ik=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE=
google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ=
google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY=
google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo=
google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=

View file

@ -1,6 +1,20 @@
# Changes
## [1.1.5](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.4...iam/v1.1.5) (2023-11-01)
### Bug Fixes
* **iam:** Bump google.golang.org/api to v0.149.0 ([8d2ab9f](https://github.com/googleapis/google-cloud-go/commit/8d2ab9f320a86c1c0fab90513fc05861561d0880))
## [1.1.4](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.3...iam/v1.1.4) (2023-10-26)
### Bug Fixes
* **iam:** Update grpc-go to v1.59.0 ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7))
## [1.1.3](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.2...iam/v1.1.3) (2023-10-12)

View file

@ -89,6 +89,16 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/alloydb/connectors/apiv1": {
"api_shortname": "connectors",
"distribution_name": "cloud.google.com/go/alloydb/connectors/apiv1",
"description": "AlloyDB connectors",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/connectors/apiv1",
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/alloydb/connectors/apiv1alpha": {
"api_shortname": "connectors",
"distribution_name": "cloud.google.com/go/alloydb/connectors/apiv1alpha",
@ -99,6 +109,16 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/alloydb/connectors/apiv1beta": {
"api_shortname": "connectors",
"distribution_name": "cloud.google.com/go/alloydb/connectors/apiv1beta",
"description": "AlloyDB connectors",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/connectors/apiv1beta",
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/analytics/admin/apiv1alpha": {
"api_shortname": "analyticsadmin",
"distribution_name": "cloud.google.com/go/analytics/admin/apiv1alpha",
@ -136,7 +156,7 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigeeregistry/latest/apiv1",
"release_level": "preview",
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/apikeys/apiv2": {
@ -356,7 +376,7 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/biglake/apiv1",
"release_level": "preview",
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/bigquery/biglake/apiv1alpha1": {
@ -715,7 +735,7 @@
"description": "Container Analysis API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/containeranalysis/apiv1beta1",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/containeranalysis/latest/apiv1beta1",
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
@ -952,7 +972,7 @@
"cloud.google.com/go/dlp/apiv2": {
"api_shortname": "dlp",
"distribution_name": "cloud.google.com/go/dlp/apiv2",
"description": "Cloud Data Loss Prevention (DLP) API",
"description": "Cloud Data Loss Prevention (DLP)",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dlp/latest/apiv2",
@ -1286,7 +1306,7 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv2",
"release_level": "preview",
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/lifesciences/apiv2beta": {
@ -1349,6 +1369,26 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/maps/fleetengine/apiv1": {
"api_shortname": "fleetengine",
"distribution_name": "cloud.google.com/go/maps/fleetengine/apiv1",
"description": "Local Rides and Deliveries API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/fleetengine/apiv1",
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/maps/fleetengine/delivery/apiv1": {
"api_shortname": "fleetengine",
"distribution_name": "cloud.google.com/go/maps/fleetengine/delivery/apiv1",
"description": "Last Mile Fleet Solution Delivery API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/fleetengine/delivery/apiv1",
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha": {
"api_shortname": "mapsplatformdatasets",
"distribution_name": "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha",
@ -1556,7 +1596,7 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/notebooks/latest/apiv2",
"release_level": "preview",
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/optimization/apiv1": {
@ -1676,7 +1716,7 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/policysimulator/latest/apiv1",
"release_level": "preview",
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/policytroubleshooter/apiv1": {
@ -1696,7 +1736,7 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/policytroubleshooter/latest/iam/apiv3",
"release_level": "preview",
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/privatecatalog/apiv1beta1": {
@ -1839,6 +1879,16 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/redis/cluster/apiv1": {
"api_shortname": "redis",
"distribution_name": "cloud.google.com/go/redis/cluster/apiv1",
"description": "Google Cloud Memorystore for Redis API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/cluster/apiv1",
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/resourcemanager/apiv2": {
"api_shortname": "cloudresourcemanager",
"distribution_name": "cloud.google.com/go/resourcemanager/apiv2",
@ -1949,6 +1999,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/securesourcemanager/apiv1": {
"api_shortname": "securesourcemanager",
"distribution_name": "cloud.google.com/go/securesourcemanager/apiv1",
"description": "Secure Source Manager API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securesourcemanager/latest/apiv1",
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/security/privateca/apiv1": {
"api_shortname": "privateca",
"distribution_name": "cloud.google.com/go/security/privateca/apiv1",
@ -2069,6 +2129,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/shopping/merchant/inventories/apiv1beta": {
"api_shortname": "merchantapi",
"distribution_name": "cloud.google.com/go/shopping/merchant/inventories/apiv1beta",
"description": "Merchant API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/inventories/apiv1beta",
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/spanner": {
"api_shortname": "spanner",
"distribution_name": "cloud.google.com/go/spanner",
@ -2275,8 +2345,8 @@
"description": "Video Stitcher API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/video/stitcher/apiv1",
"release_level": "preview",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/stitcher/apiv1",
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/video/transcoder/apiv1": {

View file

@ -755,6 +755,13 @@ var awsPartition = partition{
},
},
},
"agreement-marketplace": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-east-1",
}: endpoint{},
},
},
"airflow": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -7743,6 +7750,18 @@ var awsPartition = partition{
},
},
},
"cost-optimization-hub": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-east-1",
}: endpoint{
Hostname: "cost-optimization-hub.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
},
},
},
"cur": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -22476,6 +22495,161 @@ var awsPartition = partition{
}: endpoint{},
},
},
"qbusiness": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
DNSSuffix: "api.aws",
},
defaultKey{
Variant: fipsVariant,
}: endpoint{
Hostname: "{service}-fips.{region}.{dnsSuffix}",
DNSSuffix: "api.aws",
},
},
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
}: endpoint{
Hostname: "qbusiness.af-south-1.api.aws",
},
endpointKey{
Region: "ap-east-1",
}: endpoint{
Hostname: "qbusiness.ap-east-1.api.aws",
},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{
Hostname: "qbusiness.ap-northeast-1.api.aws",
},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{
Hostname: "qbusiness.ap-northeast-2.api.aws",
},
endpointKey{
Region: "ap-northeast-3",
}: endpoint{
Hostname: "qbusiness.ap-northeast-3.api.aws",
},
endpointKey{
Region: "ap-south-1",
}: endpoint{
Hostname: "qbusiness.ap-south-1.api.aws",
},
endpointKey{
Region: "ap-south-2",
}: endpoint{
Hostname: "qbusiness.ap-south-2.api.aws",
},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{
Hostname: "qbusiness.ap-southeast-1.api.aws",
},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{
Hostname: "qbusiness.ap-southeast-2.api.aws",
},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{
Hostname: "qbusiness.ap-southeast-3.api.aws",
},
endpointKey{
Region: "ap-southeast-4",
}: endpoint{
Hostname: "qbusiness.ap-southeast-4.api.aws",
},
endpointKey{
Region: "ca-central-1",
}: endpoint{
Hostname: "qbusiness.ca-central-1.api.aws",
},
endpointKey{
Region: "eu-central-1",
}: endpoint{
Hostname: "qbusiness.eu-central-1.api.aws",
},
endpointKey{
Region: "eu-central-2",
}: endpoint{
Hostname: "qbusiness.eu-central-2.api.aws",
},
endpointKey{
Region: "eu-north-1",
}: endpoint{
Hostname: "qbusiness.eu-north-1.api.aws",
},
endpointKey{
Region: "eu-south-1",
}: endpoint{
Hostname: "qbusiness.eu-south-1.api.aws",
},
endpointKey{
Region: "eu-south-2",
}: endpoint{
Hostname: "qbusiness.eu-south-2.api.aws",
},
endpointKey{
Region: "eu-west-1",
}: endpoint{
Hostname: "qbusiness.eu-west-1.api.aws",
},
endpointKey{
Region: "eu-west-2",
}: endpoint{
Hostname: "qbusiness.eu-west-2.api.aws",
},
endpointKey{
Region: "eu-west-3",
}: endpoint{
Hostname: "qbusiness.eu-west-3.api.aws",
},
endpointKey{
Region: "il-central-1",
}: endpoint{
Hostname: "qbusiness.il-central-1.api.aws",
},
endpointKey{
Region: "me-central-1",
}: endpoint{
Hostname: "qbusiness.me-central-1.api.aws",
},
endpointKey{
Region: "me-south-1",
}: endpoint{
Hostname: "qbusiness.me-south-1.api.aws",
},
endpointKey{
Region: "sa-east-1",
}: endpoint{
Hostname: "qbusiness.sa-east-1.api.aws",
},
endpointKey{
Region: "us-east-1",
}: endpoint{
Hostname: "qbusiness.us-east-1.api.aws",
},
endpointKey{
Region: "us-east-2",
}: endpoint{
Hostname: "qbusiness.us-east-2.api.aws",
},
endpointKey{
Region: "us-west-1",
}: endpoint{
Hostname: "qbusiness.us-west-1.api.aws",
},
endpointKey{
Region: "us-west-2",
}: endpoint{
Hostname: "qbusiness.us-west-2.api.aws",
},
},
},
"qldb": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -27438,6 +27612,38 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-verification-us-east-1",
}: endpoint{
Hostname: "verification.signer-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
},
endpointKey{
Region: "fips-verification-us-east-2",
}: endpoint{
Hostname: "verification.signer-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
},
endpointKey{
Region: "fips-verification-us-west-1",
}: endpoint{
Hostname: "verification.signer-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
},
endpointKey{
Region: "fips-verification-us-west-2",
}: endpoint{
Hostname: "verification.signer-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@ -27480,6 +27686,166 @@ var awsPartition = partition{
}: endpoint{
Hostname: "signer-fips.us-west-2.amazonaws.com",
},
endpointKey{
Region: "verification-af-south-1",
}: endpoint{
Hostname: "verification.signer.af-south-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "af-south-1",
},
},
endpointKey{
Region: "verification-ap-east-1",
}: endpoint{
Hostname: "verification.signer.ap-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-east-1",
},
},
endpointKey{
Region: "verification-ap-northeast-1",
}: endpoint{
Hostname: "verification.signer.ap-northeast-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-northeast-1",
},
},
endpointKey{
Region: "verification-ap-northeast-2",
}: endpoint{
Hostname: "verification.signer.ap-northeast-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-northeast-2",
},
},
endpointKey{
Region: "verification-ap-south-1",
}: endpoint{
Hostname: "verification.signer.ap-south-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-south-1",
},
},
endpointKey{
Region: "verification-ap-southeast-1",
}: endpoint{
Hostname: "verification.signer.ap-southeast-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-southeast-1",
},
},
endpointKey{
Region: "verification-ap-southeast-2",
}: endpoint{
Hostname: "verification.signer.ap-southeast-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-southeast-2",
},
},
endpointKey{
Region: "verification-ca-central-1",
}: endpoint{
Hostname: "verification.signer.ca-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ca-central-1",
},
},
endpointKey{
Region: "verification-eu-central-1",
}: endpoint{
Hostname: "verification.signer.eu-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "eu-central-1",
},
},
endpointKey{
Region: "verification-eu-north-1",
}: endpoint{
Hostname: "verification.signer.eu-north-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "eu-north-1",
},
},
endpointKey{
Region: "verification-eu-south-1",
}: endpoint{
Hostname: "verification.signer.eu-south-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "eu-south-1",
},
},
endpointKey{
Region: "verification-eu-west-1",
}: endpoint{
Hostname: "verification.signer.eu-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "eu-west-1",
},
},
endpointKey{
Region: "verification-eu-west-2",
}: endpoint{
Hostname: "verification.signer.eu-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "eu-west-2",
},
},
endpointKey{
Region: "verification-eu-west-3",
}: endpoint{
Hostname: "verification.signer.eu-west-3.amazonaws.com",
CredentialScope: credentialScope{
Region: "eu-west-3",
},
},
endpointKey{
Region: "verification-me-south-1",
}: endpoint{
Hostname: "verification.signer.me-south-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "me-south-1",
},
},
endpointKey{
Region: "verification-sa-east-1",
}: endpoint{
Hostname: "verification.signer.sa-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "sa-east-1",
},
},
endpointKey{
Region: "verification-us-east-1",
}: endpoint{
Hostname: "verification.signer.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
},
endpointKey{
Region: "verification-us-east-2",
}: endpoint{
Hostname: "verification.signer.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
},
endpointKey{
Region: "verification-us-west-1",
}: endpoint{
Hostname: "verification.signer.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
},
endpointKey{
Region: "verification-us-west-2",
}: endpoint{
Hostname: "verification.signer.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
},
},
},
"simspaceweaver": service{
@ -29955,6 +30321,31 @@ var awsPartition = partition{
},
},
},
"thinclient": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
},
},
"tnb": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -32590,6 +32981,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "il-central-1",
}: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
@ -34186,6 +34580,31 @@ var awscnPartition = partition{
},
},
},
"qbusiness": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
DNSSuffix: "api.amazonwebservices.com.cn",
},
defaultKey{
Variant: fipsVariant,
}: endpoint{
Hostname: "{service}-fips.{region}.{dnsSuffix}",
DNSSuffix: "api.amazonwebservices.com.cn",
},
},
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
}: endpoint{
Hostname: "qbusiness.cn-north-1.api.amazonwebservices.com.cn",
},
endpointKey{
Region: "cn-northwest-1",
}: endpoint{
Hostname: "qbusiness.cn-northwest-1.api.amazonwebservices.com.cn",
},
},
},
"ram": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -34226,6 +34645,13 @@ var awscnPartition = partition{
}: endpoint{},
},
},
"redshift-serverless": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
},
},
"resource-explorer-2": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@ -34527,6 +34953,22 @@ var awscnPartition = partition{
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
endpointKey{
Region: "verification-cn-north-1",
}: endpoint{
Hostname: "verification.signer.cn-north-1.amazonaws.com.cn",
CredentialScope: credentialScope{
Region: "cn-north-1",
},
},
endpointKey{
Region: "verification-cn-northwest-1",
}: endpoint{
Hostname: "verification.signer.cn-northwest-1.amazonaws.com.cn",
CredentialScope: credentialScope{
Region: "cn-northwest-1",
},
},
},
},
"sms": service{
@ -35590,6 +36032,16 @@ var awsusgovPartition = partition{
},
},
},
"arc-zonal-shift": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
},
},
"athena": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -36710,6 +37162,46 @@ var awsusgovPartition = partition{
},
},
},
"drs": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "fips-us-gov-east-1",
}: endpoint{
Hostname: "drs-fips.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-gov-west-1",
}: endpoint{
Hostname: "drs-fips.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "drs-fips.us-gov-east-1.amazonaws.com",
},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "drs-fips.us-gov-west-1.amazonaws.com",
},
},
},
"ds": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -39240,6 +39732,31 @@ var awsusgovPartition = partition{
},
},
},
"qbusiness": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
DNSSuffix: "api.aws",
},
defaultKey{
Variant: fipsVariant,
}: endpoint{
Hostname: "{service}-fips.{region}.{dnsSuffix}",
DNSSuffix: "api.aws",
},
},
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{
Hostname: "qbusiness.us-gov-east-1.api.aws",
},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
Hostname: "qbusiness.us-gov-west-1.api.aws",
},
},
},
"quicksight": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -41523,6 +42040,28 @@ var awsisoPartition = partition{
}: endpoint{},
},
},
"datasync": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "fips-us-iso-west-1",
}: endpoint{
Hostname: "datasync-fips.us-iso-west-1.c2s.ic.gov",
CredentialScope: credentialScope{
Region: "us-iso-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-iso-west-1",
}: endpoint{},
endpointKey{
Region: "us-iso-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "datasync-fips.us-iso-west-1.c2s.ic.gov",
},
},
},
"directconnect": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -42039,22 +42578,136 @@ var awsisoPartition = partition{
},
"rds": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "rds-fips.us-iso-east-1",
}: endpoint{
Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov",
CredentialScope: credentialScope{
Region: "us-iso-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "rds-fips.us-iso-west-1",
}: endpoint{
Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov",
CredentialScope: credentialScope{
Region: "us-iso-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "rds.us-iso-east-1",
}: endpoint{
CredentialScope: credentialScope{
Region: "us-iso-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "rds.us-iso-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov",
CredentialScope: credentialScope{
Region: "us-iso-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "rds.us-iso-west-1",
}: endpoint{
CredentialScope: credentialScope{
Region: "us-iso-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "rds.us-iso-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov",
CredentialScope: credentialScope{
Region: "us-iso-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
endpointKey{
Region: "us-iso-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov",
},
endpointKey{
Region: "us-iso-east-1-fips",
}: endpoint{
Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov",
CredentialScope: credentialScope{
Region: "us-iso-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-iso-west-1",
}: endpoint{},
endpointKey{
Region: "us-iso-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov",
},
endpointKey{
Region: "us-iso-west-1-fips",
}: endpoint{
Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov",
CredentialScope: credentialScope{
Region: "us-iso-west-1",
},
Deprecated: boxedTrue,
},
},
},
"redshift": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "fips-us-iso-east-1",
}: endpoint{
Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov",
CredentialScope: credentialScope{
Region: "us-iso-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-iso-west-1",
}: endpoint{
Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov",
CredentialScope: credentialScope{
Region: "us-iso-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
endpointKey{
Region: "us-iso-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov",
},
endpointKey{
Region: "us-iso-west-1",
}: endpoint{},
endpointKey{
Region: "us-iso-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov",
},
},
},
"resource-groups": service{
@ -42774,16 +43427,73 @@ var awsisobPartition = partition{
},
"rds": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "rds-fips.us-isob-east-1",
}: endpoint{
Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov",
CredentialScope: credentialScope{
Region: "us-isob-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "rds.us-isob-east-1",
}: endpoint{
CredentialScope: credentialScope{
Region: "us-isob-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "rds.us-isob-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov",
CredentialScope: credentialScope{
Region: "us-isob-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-isob-east-1",
}: endpoint{},
endpointKey{
Region: "us-isob-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov",
},
endpointKey{
Region: "us-isob-east-1-fips",
}: endpoint{
Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov",
CredentialScope: credentialScope{
Region: "us-isob-east-1",
},
Deprecated: boxedTrue,
},
},
},
"redshift": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "fips-us-isob-east-1",
}: endpoint{
Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov",
CredentialScope: credentialScope{
Region: "us-isob-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-isob-east-1",
}: endpoint{},
endpointKey{
Region: "us-isob-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov",
},
},
},
"resource-groups": service{

View file

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.48.1"
const SDKVersion = "1.48.13"

View file

@ -81066,15 +81066,15 @@ type CreateVolumeInput struct {
//
// The following are the supported values for each volume type:
//
// * gp3: 3,000-16,000 IOPS
// * gp3: 3,000 - 16,000 IOPS
//
// * io1: 100-64,000 IOPS
// * io1: 100 - 64,000 IOPS
//
// * io2: 100-64,000 IOPS
// * io2: 100 - 256,000 IOPS
//
// io1 and io2 volumes support up to 64,000 IOPS only on Instances built on
// For io2 volumes, you can achieve up to 256,000 IOPS on instances built on
// the Nitro System (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances).
// Other instance families support performance up to 32,000 IOPS.
// On other instances, you can achieve performance up to 32,000 IOPS.
//
// This parameter is required for io1 and io2 volumes. The default for gp3 volumes
// is 3,000 IOPS. This parameter is not supported for gp2, st1, sc1, or standard
@ -81118,13 +81118,15 @@ type CreateVolumeInput struct {
//
// The following are the supported volumes sizes for each volume type:
//
// * gp2 and gp3: 1-16,384
// * gp2 and gp3: 1 - 16,384 GiB
//
// * io1 and io2: 4-16,384
// * io1: 4 - 16,384 GiB
//
// * st1 and sc1: 125-16,384
// * io2: 4 - 65,536 GiB
//
// * standard: 1-1,024
// * st1 and sc1: 125 - 16,384 GiB
//
// * standard: 1 - 1024 GiB
Size *int64 `type:"integer"`
// The snapshot from which to create the volume. You must specify either a snapshot
@ -106400,8 +106402,8 @@ type DescribeSpotInstanceRequestsInput struct {
// in GiB.
//
// * launch.block-device-mapping.volume-type - The type of EBS volume: gp2
// for General Purpose SSD, io1 or io2 for Provisioned IOPS SSD, st1 for
// Throughput Optimized HDD, sc1for Cold HDD, or standard for Magnetic.
// or gp3 for General Purpose SSD, io1 or io2 for Provisioned IOPS SSD, st1
// for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic.
//
// * launch.group-id - The ID of the security group for the instance.
//
@ -116785,19 +116787,18 @@ type EbsBlockDevice struct {
//
// The following are the supported values for each volume type:
//
// * gp3: 3,000-16,000 IOPS
// * gp3: 3,000 - 16,000 IOPS
//
// * io1: 100-64,000 IOPS
// * io1: 100 - 64,000 IOPS
//
// * io2: 100-64,000 IOPS
// * io2: 100 - 256,000 IOPS
//
// For io1 and io2 volumes, we guarantee 64,000 IOPS only for Instances built
// on the Nitro System (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances).
// Other instance families guarantee performance up to 32,000 IOPS.
// For io2 volumes, you can achieve up to 256,000 IOPS on instances built on
// the Nitro System (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances).
// On other instances, you can achieve performance up to 32,000 IOPS.
//
// This parameter is required for io1 and io2 volumes. The default for gp3 volumes
// is 3,000 IOPS. This parameter is not supported for gp2, st1, sc1, or standard
// volumes.
// is 3,000 IOPS.
Iops *int64 `locationName:"iops" type:"integer"`
// Identifier (key ID, key alias, ID ARN, or alias ARN) for a customer managed
@ -116829,20 +116830,21 @@ type EbsBlockDevice struct {
// You can specify a volume size that is equal to or larger than the snapshot
// size.
//
// The following are the supported volumes sizes for each volume type:
// The following are the supported sizes for each volume type:
//
// * gp2 and gp3:1-16,384
// * gp2 and gp3: 1 - 16,384 GiB
//
// * io1 and io2: 4-16,384
// * io1: 4 - 16,384 GiB
//
// * st1 and sc1: 125-16,384
// * io2: 4 - 65,536 GiB
//
// * standard: 1-1,024
// * st1 and sc1: 125 - 16,384 GiB
//
// * standard: 1 - 1024 GiB
VolumeSize *int64 `locationName:"volumeSize" type:"integer"`
// The volume type. For more information, see Amazon EBS volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)
// in the Amazon EC2 User Guide. If the volume type is io1 or io2, you must
// specify the IOPS that the volume supports.
// in the Amazon EC2 User Guide.
VolumeType *string `locationName:"volumeType" type:"string" enum:"VolumeType"`
}
@ -136121,11 +136123,13 @@ type InstanceRequirements struct {
// Indicates whether instance types must have accelerators by specific manufacturers.
//
// * For instance types with NVIDIA devices, specify nvidia.
// * For instance types with Amazon Web Services devices, specify amazon-web-services.
//
// * For instance types with AMD devices, specify amd.
//
// * For instance types with Amazon Web Services devices, specify amazon-web-services.
// * For instance types with Habana devices, specify habana.
//
// * For instance types with NVIDIA devices, specify nvidia.
//
// * For instance types with Xilinx devices, specify xilinx.
//
@ -136134,25 +136138,31 @@ type InstanceRequirements struct {
// The accelerators that must be on the instance type.
//
// * For instance types with NVIDIA A10G GPUs, specify a10g.
//
// * For instance types with NVIDIA A100 GPUs, specify a100.
//
// * For instance types with NVIDIA V100 GPUs, specify v100.
//
// * For instance types with NVIDIA K80 GPUs, specify k80.
//
// * For instance types with NVIDIA T4 GPUs, specify t4.
//
// * For instance types with NVIDIA M60 GPUs, specify m60.
//
// * For instance types with AMD Radeon Pro V520 GPUs, specify radeon-pro-v520.
//
// * For instance types with Xilinx VU9P FPGAs, specify vu9p.
// * For instance types with NVIDIA H100 GPUs, specify h100.
//
// * For instance types with Amazon Web Services Inferentia chips, specify
// inferentia.
//
// * For instance types with NVIDIA GRID K520 GPUs, specify k520.
//
// * For instance types with NVIDIA K80 GPUs, specify k80.
//
// * For instance types with NVIDIA M60 GPUs, specify m60.
//
// * For instance types with AMD Radeon Pro V520 GPUs, specify radeon-pro-v520.
//
// * For instance types with NVIDIA T4 GPUs, specify t4.
//
// * For instance types with NVIDIA T4G GPUs, specify t4g.
//
// * For instance types with Xilinx VU9P FPGAs, specify vu9p.
//
// * For instance types with NVIDIA V100 GPUs, specify v100.
//
// Default: Any accelerator
AcceleratorNames []*string `locationName:"acceleratorNameSet" locationNameList:"item" type:"list" enum:"AcceleratorName"`
@ -136565,11 +136575,13 @@ type InstanceRequirementsRequest struct {
// Indicates whether instance types must have accelerators by specific manufacturers.
//
// * For instance types with NVIDIA devices, specify nvidia.
// * For instance types with Amazon Web Services devices, specify amazon-web-services.
//
// * For instance types with AMD devices, specify amd.
//
// * For instance types with Amazon Web Services devices, specify amazon-web-services.
// * For instance types with Habana devices, specify habana.
//
// * For instance types with NVIDIA devices, specify nvidia.
//
// * For instance types with Xilinx devices, specify xilinx.
//
@ -136578,25 +136590,31 @@ type InstanceRequirementsRequest struct {
// The accelerators that must be on the instance type.
//
// * For instance types with NVIDIA A10G GPUs, specify a10g.
//
// * For instance types with NVIDIA A100 GPUs, specify a100.
//
// * For instance types with NVIDIA V100 GPUs, specify v100.
//
// * For instance types with NVIDIA K80 GPUs, specify k80.
//
// * For instance types with NVIDIA T4 GPUs, specify t4.
//
// * For instance types with NVIDIA M60 GPUs, specify m60.
//
// * For instance types with AMD Radeon Pro V520 GPUs, specify radeon-pro-v520.
//
// * For instance types with Xilinx VU9P FPGAs, specify vu9p.
// * For instance types with NVIDIA H100 GPUs, specify h100.
//
// * For instance types with Amazon Web Services Inferentia chips, specify
// inferentia.
//
// * For instance types with NVIDIA GRID K520 GPUs, specify k520.
//
// * For instance types with NVIDIA K80 GPUs, specify k80.
//
// * For instance types with NVIDIA M60 GPUs, specify m60.
//
// * For instance types with AMD Radeon Pro V520 GPUs, specify radeon-pro-v520.
//
// * For instance types with NVIDIA T4 GPUs, specify t4.
//
// * For instance types with NVIDIA T4G GPUs, specify t4g.
//
// * For instance types with Xilinx VU9P FPGAs, specify vu9p.
//
// * For instance types with NVIDIA V100 GPUs, specify v100.
//
// Default: Any accelerator
AcceleratorNames []*string `locationName:"AcceleratorName" locationNameList:"item" type:"list" enum:"AcceleratorName"`
@ -142055,18 +142073,17 @@ type LaunchTemplateEbsBlockDeviceRequest struct {
//
// The following are the supported values for each volume type:
//
// * gp3: 3,000-16,000 IOPS
// * gp3: 3,000 - 16,000 IOPS
//
// * io1: 100-64,000 IOPS
// * io1: 100 - 64,000 IOPS
//
// * io2: 100-64,000 IOPS
// * io2: 100 - 256,000 IOPS
//
// For io1 and io2 volumes, we guarantee 64,000 IOPS only for Instances built
// on the Nitro System (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances).
// Other instance families guarantee performance up to 32,000 IOPS.
// For io2 volumes, you can achieve up to 256,000 IOPS on instances built on
// the Nitro System (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances).
// On other instances, you can achieve performance up to 32,000 IOPS.
//
// This parameter is supported for io1, io2, and gp3 volumes only. This parameter
// is not supported for gp2, st1, sc1, or standard volumes.
// This parameter is supported for io1, io2, and gp3 volumes only.
Iops *int64 `type:"integer"`
// The ARN of the symmetric Key Management Service (KMS) CMK used for encryption.
@ -142084,13 +142101,15 @@ type LaunchTemplateEbsBlockDeviceRequest struct {
// a volume size. The following are the supported volumes sizes for each volume
// type:
//
// * gp2 and gp3: 1-16,384
// * gp2 and gp3: 1 - 16,384 GiB
//
// * io1 and io2: 4-16,384
// * io1: 4 - 16,384 GiB
//
// * st1 and sc1: 125-16,384
// * io2: 4 - 65,536 GiB
//
// * standard: 1-1,024
// * st1 and sc1: 125 - 16,384 GiB
//
// * standard: 1 - 1024 GiB
VolumeSize *int64 `type:"integer"`
// The volume type. For more information, see Amazon EBS volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)
@ -153779,11 +153798,15 @@ type ModifyVolumeInput struct {
//
// The following are the supported values for each volume type:
//
// * gp3: 3,000-16,000 IOPS
// * gp3: 3,000 - 16,000 IOPS
//
// * io1: 100-64,000 IOPS
// * io1: 100 - 64,000 IOPS
//
// * io2: 100-64,000 IOPS
// * io2: 100 - 256,000 IOPS
//
// For io2 volumes, you can achieve up to 256,000 IOPS on instances built on
// the Nitro System (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances).
// On other instances, you can achieve performance up to 32,000 IOPS.
//
// Default: The existing value is retained if you keep the same volume type.
// If you change the volume type to io1, io2, or gp3, the default is 3,000.
@ -153801,13 +153824,15 @@ type ModifyVolumeInput struct {
//
// The following are the supported volumes sizes for each volume type:
//
// * gp2 and gp3: 1-16,384
// * gp2 and gp3: 1 - 16,384 GiB
//
// * io1 and io2: 4-16,384
// * io1: 4 - 16,384 GiB
//
// * st1 and sc1: 125-16,384
// * io2: 4 - 65,536 GiB
//
// * standard: 1-1,024
// * st1 and sc1: 125 - 16,384 GiB
//
// * standard: 1 - 1024 GiB
//
// Default: The existing size is retained.
Size *int64 `type:"integer"`
@ -171882,16 +171907,8 @@ type ScheduledInstancesEbs struct {
// only to instances that support them.
Encrypted *bool `type:"boolean"`
// The number of I/O operations per second (IOPS) to provision for an io1 or
// io2 volume, with a maximum ratio of 50 IOPS/GiB for io1, and 500 IOPS/GiB
// for io2. Range is 100 to 64,000 IOPS for volumes in most Regions. Maximum
// IOPS of 64,000 is guaranteed only on instances built on the Nitro System
// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances).
// Other instance families guarantee performance up to 32,000 IOPS. For more
// information, see Amazon EBS volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)
// in the Amazon EC2 User Guide.
//
// This parameter is valid only for Provisioned IOPS SSD (io1 and io2) volumes.
// The number of I/O operations per second (IOPS) to provision for a gp3, io1,
// or io2 volume.
Iops *int64 `type:"integer"`
// The ID of the snapshot.
@ -171903,9 +171920,7 @@ type ScheduledInstancesEbs struct {
// a volume size, the default is the snapshot size.
VolumeSize *int64 `type:"integer"`
// The volume type. gp2 for General Purpose SSD, io1 or io2 for Provisioned
// IOPS SSD, Throughput Optimized HDD for st1, Cold HDD for sc1, or standard
// for Magnetic.
// The volume type.
//
// Default: gp2
VolumeType *string `type:"string"`
@ -189035,6 +189050,9 @@ const (
// AcceleratorManufacturerXilinx is a AcceleratorManufacturer enum value
AcceleratorManufacturerXilinx = "xilinx"
// AcceleratorManufacturerHabana is a AcceleratorManufacturer enum value
AcceleratorManufacturerHabana = "habana"
)
// AcceleratorManufacturer_Values returns all elements of the AcceleratorManufacturer enum
@ -189044,6 +189062,7 @@ func AcceleratorManufacturer_Values() []string {
AcceleratorManufacturerAmd,
AcceleratorManufacturerNvidia,
AcceleratorManufacturerXilinx,
AcceleratorManufacturerHabana,
}
}
@ -189074,6 +189093,15 @@ const (
// AcceleratorNameV100 is a AcceleratorName enum value
AcceleratorNameV100 = "v100"
// AcceleratorNameA10g is a AcceleratorName enum value
AcceleratorNameA10g = "a10g"
// AcceleratorNameH100 is a AcceleratorName enum value
AcceleratorNameH100 = "h100"
// AcceleratorNameT4g is a AcceleratorName enum value
AcceleratorNameT4g = "t4g"
)
// AcceleratorName_Values returns all elements of the AcceleratorName enum
@ -189088,6 +189116,9 @@ func AcceleratorName_Values() []string {
AcceleratorNameT4,
AcceleratorNameVu9p,
AcceleratorNameV100,
AcceleratorNameA10g,
AcceleratorNameH100,
AcceleratorNameT4g,
}
}

File diff suppressed because it is too large Load diff

View file

@ -25,6 +25,15 @@ const (
// "InvalidObjectState".
//
// Object is archived and inaccessible until restored.
//
// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval
// storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering
// Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier,
// before you can retrieve the object you must first restore a copy using RestoreObject
// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html).
// Otherwise, this operation returns an InvalidObjectState error. For information
// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html)
// in the Amazon S3 User Guide.
ErrCodeInvalidObjectState = "InvalidObjectState"
// ErrCodeNoSuchBucket for service response error code

View file

@ -80,6 +80,10 @@ type S3API interface {
CreateMultipartUploadWithContext(aws.Context, *s3.CreateMultipartUploadInput, ...request.Option) (*s3.CreateMultipartUploadOutput, error)
CreateMultipartUploadRequest(*s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput)
CreateSession(*s3.CreateSessionInput) (*s3.CreateSessionOutput, error)
CreateSessionWithContext(aws.Context, *s3.CreateSessionInput, ...request.Option) (*s3.CreateSessionOutput, error)
CreateSessionRequest(*s3.CreateSessionInput) (*request.Request, *s3.CreateSessionOutput)
DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error)
DeleteBucketWithContext(aws.Context, *s3.DeleteBucketInput, ...request.Option) (*s3.DeleteBucketOutput, error)
DeleteBucketRequest(*s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput)
@ -300,6 +304,13 @@ type S3API interface {
ListBucketsWithContext(aws.Context, *s3.ListBucketsInput, ...request.Option) (*s3.ListBucketsOutput, error)
ListBucketsRequest(*s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput)
ListDirectoryBuckets(*s3.ListDirectoryBucketsInput) (*s3.ListDirectoryBucketsOutput, error)
ListDirectoryBucketsWithContext(aws.Context, *s3.ListDirectoryBucketsInput, ...request.Option) (*s3.ListDirectoryBucketsOutput, error)
ListDirectoryBucketsRequest(*s3.ListDirectoryBucketsInput) (*request.Request, *s3.ListDirectoryBucketsOutput)
ListDirectoryBucketsPages(*s3.ListDirectoryBucketsInput, func(*s3.ListDirectoryBucketsOutput, bool) bool) error
ListDirectoryBucketsPagesWithContext(aws.Context, *s3.ListDirectoryBucketsInput, func(*s3.ListDirectoryBucketsOutput, bool) bool, ...request.Option) error
ListMultipartUploads(*s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error)
ListMultipartUploadsWithContext(aws.Context, *s3.ListMultipartUploadsInput, ...request.Option) (*s3.ListMultipartUploadsOutput, error)
ListMultipartUploadsRequest(*s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput)

View file

@ -23,9 +23,32 @@ type UploadInput struct {
_ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"`
// The canned ACL to apply to the object. For more information, see Canned ACL
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL)
// in the Amazon S3 User Guide.
//
// This action is not supported by Amazon S3 on Outposts.
// When adding a new object, you can use headers to grant ACL-based permissions
// to individual Amazon Web Services accounts or to predefined groups defined
// by Amazon S3. These permissions are then added to the ACL on the object.
// By default, all objects are private. Only the owner has full access control.
// For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html)
// in the Amazon S3 User Guide.
//
// If the bucket that you're uploading objects to uses the bucket owner enforced
// setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions.
// Buckets that use this setting only accept PUT requests that don't specify
// an ACL or PUT requests that specify bucket owner full control ACLs, such
// as the bucket-owner-full-control canned ACL or an equivalent form of this
// ACL expressed in the XML format. PUT requests that contain other ACLs (for
// example, custom grants to certain Amazon Web Services accounts) fail and
// return a 400 error with the error code AccessControlListNotSupported. For
// more information, see Controlling ownership of objects and disabling ACLs
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
// in the Amazon S3 User Guide.
//
// * This functionality is not supported for directory buckets.
//
// * This functionality is not supported for Amazon S3 on Outposts.
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
// The readable body payload to send to S3.
@ -33,19 +56,33 @@ type UploadInput struct {
// The bucket name to which the PUT action was initiated.
//
// When using this action with an access point, you must direct requests to
// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// Directory buckets - When you use this operation with a directory bucket,
// you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
// Path-style requests are not supported. Directory bucket names must be unique
// in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
// (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about
// bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide.
//
// Access points - When you use this action with an access point, you must provide
// the alias of the access point in place of the bucket name or specify the
// access point ARN. When using the access point ARN, you must direct requests
// to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
// When you use this action with Amazon S3 on Outposts, you must direct requests
// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
// you use this action with S3 on Outposts through the Amazon Web Services SDKs,
// you provide the Outposts access point ARN in place of the bucket name. For
// more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// Access points and Object Lambda access points are not supported by directory
// buckets.
//
// S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
// must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
// takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
// When you use this action with S3 on Outposts through the Amazon Web Services
// SDKs, you provide the Outposts access point ARN in place of the bucket name.
// For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
@ -58,6 +95,8 @@ type UploadInput struct {
//
// Specifying this header with a PUT action doesn't affect bucket-level settings
// for S3 Bucket Key.
//
// This functionality is not supported for directory buckets.
BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
// Can be used to specify caching behavior along the request/reply chain. For
@ -65,16 +104,33 @@ type UploadInput struct {
// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9).
CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
// Indicates the algorithm used to create the checksum for the object when using
// the SDK. This header will not provide any additional functionality if not
// using the SDK. When sending this header, there must be a corresponding x-amz-checksum
// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
// the HTTP status code 400 Bad Request. For more information, see Checking
// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// Indicates the algorithm used to create the checksum for the object when you
// use the SDK. This header will not provide any additional functionality if
// you don't use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon
// S3 fails the request with the HTTP status code 400 Bad Request.
//
// For the x-amz-checksum-algorithm header, replace algorithm with the supported
// algorithm from the following list:
//
// * CRC32
//
// * CRC32C
//
// * SHA1
//
// * SHA256
//
// For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
// parameter.
// If the individual checksum value you provide through x-amz-checksum-algorithm
// doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm,
// Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum
// algorithm that matches the provided value in x-amz-checksum-algorithm .
//
// For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the
// default checksum algorithm that's used for performance.
//
// The AWS SDK for Go v1 does not support automatic computing request payload
// checksum. This feature is available in the AWS SDK for Go v2. If a value
@ -130,6 +186,13 @@ type UploadInput struct {
// integrity check. For more information about REST request authentication,
// see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
//
// The Content-MD5 header is required for any request to upload an object with
// a retention period configured using Amazon S3 Object Lock. For more information
// about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)
// in the Amazon S3 User Guide.
//
// This functionality is not supported for directory buckets.
//
// If the ContentMD5 is provided for a multipart upload, it will be ignored.
// Objects that will be uploaded in a single part, the ContentMD5 will be used.
ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
@ -138,9 +201,9 @@ type UploadInput struct {
// see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type).
ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the
// HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The date and time at which the object is no longer cacheable. For more information,
@ -149,22 +212,30 @@ type UploadInput struct {
// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
//
// This action is not supported by Amazon S3 on Outposts.
// * This functionality is not supported for directory buckets.
//
// * This functionality is not supported for Amazon S3 on Outposts.
GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
// Allows grantee to read the object data and its metadata.
//
// This action is not supported by Amazon S3 on Outposts.
// * This functionality is not supported for directory buckets.
//
// * This functionality is not supported for Amazon S3 on Outposts.
GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
// Allows grantee to read the object ACL.
//
// This action is not supported by Amazon S3 on Outposts.
// * This functionality is not supported for directory buckets.
//
// * This functionality is not supported for Amazon S3 on Outposts.
GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
// Allows grantee to write the ACL for the applicable object.
//
// This action is not supported by Amazon S3 on Outposts.
// * This functionality is not supported for directory buckets.
//
// * This functionality is not supported for Amazon S3 on Outposts.
GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
// Object key for which the PUT action was initiated.
@ -176,27 +247,37 @@ type UploadInput struct {
Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
// Specifies whether a legal hold will be applied to this object. For more information
// about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
// about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
// in the Amazon S3 User Guide.
//
// This functionality is not supported for directory buckets.
ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
// The Object Lock mode that you want to apply to this object.
//
// This functionality is not supported for directory buckets.
ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
// The date and time when you want this object's Object Lock to expire. Must
// be formatted as a timestamp parameter.
//
// This functionality is not supported for directory buckets.
ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. If either
// the source or destination Amazon S3 bucket has Requester Pays enabled, the
// requester will pay for corresponding charges to copy the object. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// the source or destination S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
//
// This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// Specifies the algorithm to use to when encrypting the object (for example,
// AES256).
// Specifies the algorithm to use when encrypting the object (for example, AES256).
//
// This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
@ -204,18 +285,25 @@ type UploadInput struct {
// S3 does not store the encryption key. The key must be appropriate for use
// with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
// header.
//
// This functionality is not supported for directory buckets.
SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
// encryption key was transmitted without error.
//
// This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
// Specifies the Amazon Web Services KMS Encryption Context to use for object
// encryption. The value of this header is a base64-encoded UTF-8 string holding
// JSON with the encryption context key-value pairs. This value is stored as
// object metadata and automatically gets passed on to Amazon Web Services KMS
// for future GetObject or CopyObject operations on this object.
// for future GetObject or CopyObject operations on this object. This value
// must be explicitly added during CopyObject operations.
//
// This functionality is not supported for directory buckets.
SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
// If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse,
@ -226,28 +314,51 @@ type UploadInput struct {
// uses the Amazon Web Services managed key (aws/s3) to protect the data. If
// the KMS key does not exist in the same account that's issuing the command,
// you must use the full ARN and not just the ID.
//
// This functionality is not supported for directory buckets.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
// The server-side encryption algorithm used when storing this object in Amazon
// S3 (for example, AES256, aws:kms, aws:kms:dsse).
// The server-side encryption algorithm that was used when you store this object
// in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).
//
// General purpose buckets - You have four mutually exclusive options to protect
// data using server-side encryption in Amazon S3, depending on how you choose
// to manage the encryption keys. Specifically, the encryption key options are
// Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or
// DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with
// server-side encryption by using Amazon S3 managed keys (SSE-S3) by default.
// You can optionally tell Amazon S3 to encrypt data at rest by using server-side
// encryption with other key options. For more information, see Using Server-Side
// Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html)
// in the Amazon S3 User Guide.
//
// Directory buckets - For directory buckets, only the server-side encryption
// with Amazon S3 managed keys (SSE-S3) (AES256) value is supported.
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
// By default, Amazon S3 uses the STANDARD Storage Class to store newly created
// objects. The STANDARD storage class provides high durability and high availability.
// Depending on performance needs, you can specify a different Storage Class.
// Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
// see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
// For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
// in the Amazon S3 User Guide.
//
// * For directory buckets, only the S3 Express One Zone storage class is
// supported to store newly created objects.
//
// * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class.
StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
// The tag-set for the object. The tag-set must be encoded as URL Query parameters.
// (For example, "Key1=Value1")
//
// This functionality is not supported for directory buckets.
Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
// If the bucket is configured as a website, redirects requests for this object
// to another object in the same bucket or to an external URL. Amazon S3 stores
// the value of this header in the object metadata. For information about object
// metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html).
// metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html)
// in the Amazon S3 User Guide.
//
// In the following example, the request header sets the redirect to an object
// (anotherPage.html) in the same bucket:
@ -261,6 +372,9 @@ type UploadInput struct {
//
// For more information about website hosting in Amazon S3, see Hosting Websites
// on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html)
// and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).
// and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html)
// in the Amazon S3 User Guide.
//
// This functionality is not supported for directory buckets.
WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
}

View file

@ -1468,7 +1468,7 @@ type AssumeRoleInput struct {
// trusted context assertion and the ARN of the context provider from which
// the trusted context assertion was generated.
//
// [{"ProviderArn":"arn:aws:iam::aws:contextProvider/identitycenter","ContextAssertion":"trusted-context-assertion"}]
// [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}]
ProvidedContexts []*ProvidedContext `type:"list"`
// The Amazon Resource Name (ARN) of the role to assume.

View file

@ -1,3 +1,19 @@
## v1.8.0 (2023-11-30)
New features and improvements:
* [GH-2800](https://github.com/gophercloud/gophercloud/pull/2800) [v1] Fix options initialization in ServiceClient.Request (fixes #2798)
* [GH-2823](https://github.com/gophercloud/gophercloud/pull/2823) [v1] Add more godoc to GuestFormat
* [GH-2826](https://github.com/gophercloud/gophercloud/pull/2826) Allow objects.CreateTempURL with names containing /v1/
CI changes:
* [GH-2802](https://github.com/gophercloud/gophercloud/pull/2802) [v1] Add job for bobcat stable/2023.2
* [GH-2819](https://github.com/gophercloud/gophercloud/pull/2819) [v1] Test files alongside code
* [GH-2814](https://github.com/gophercloud/gophercloud/pull/2814) Make fixtures part of tests
* [GH-2796](https://github.com/gophercloud/gophercloud/pull/2796) [v1] ci/unit: switch to coverallsapp/github-action
* [GH-2840](https://github.com/gophercloud/gophercloud/pull/2840) unit tests: Fix the installation of tools
## v1.7.0 (2023-09-22)
New features and improvements:

View file

@ -14,7 +14,7 @@ import (
// DefaultUserAgent is the default User-Agent string set in the request header.
const (
DefaultUserAgent = "gophercloud/v1.7.0"
DefaultUserAgent = "gophercloud/v1.8.0"
DefaultMaxBackoffRetries = 60
)

View file

@ -47,7 +47,7 @@ func (client *ServiceClient) ServiceURL(parts ...string) string {
return client.ResourceBaseURL() + strings.Join(parts, "/")
}
func (client *ServiceClient) initReqOpts(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) {
func (client *ServiceClient) initReqOpts(JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) {
if v, ok := (JSONBody).(io.Reader); ok {
opts.RawBody = v
} else if JSONBody != nil {
@ -57,14 +57,6 @@ func (client *ServiceClient) initReqOpts(url string, JSONBody interface{}, JSONR
if JSONResponse != nil {
opts.JSONResponse = JSONResponse
}
if opts.MoreHeaders == nil {
opts.MoreHeaders = make(map[string]string)
}
if client.Microversion != "" {
client.setMicroversionHeader(opts)
}
}
// Get calls `Request` with the "GET" HTTP verb.
@ -72,7 +64,7 @@ func (client *ServiceClient) Get(url string, JSONResponse interface{}, opts *Req
if opts == nil {
opts = new(RequestOpts)
}
client.initReqOpts(url, nil, JSONResponse, opts)
client.initReqOpts(nil, JSONResponse, opts)
return client.Request("GET", url, opts)
}
@ -81,7 +73,7 @@ func (client *ServiceClient) Post(url string, JSONBody interface{}, JSONResponse
if opts == nil {
opts = new(RequestOpts)
}
client.initReqOpts(url, JSONBody, JSONResponse, opts)
client.initReqOpts(JSONBody, JSONResponse, opts)
return client.Request("POST", url, opts)
}
@ -90,7 +82,7 @@ func (client *ServiceClient) Put(url string, JSONBody interface{}, JSONResponse
if opts == nil {
opts = new(RequestOpts)
}
client.initReqOpts(url, JSONBody, JSONResponse, opts)
client.initReqOpts(JSONBody, JSONResponse, opts)
return client.Request("PUT", url, opts)
}
@ -99,7 +91,7 @@ func (client *ServiceClient) Patch(url string, JSONBody interface{}, JSONRespons
if opts == nil {
opts = new(RequestOpts)
}
client.initReqOpts(url, JSONBody, JSONResponse, opts)
client.initReqOpts(JSONBody, JSONResponse, opts)
return client.Request("PATCH", url, opts)
}
@ -108,7 +100,7 @@ func (client *ServiceClient) Delete(url string, opts *RequestOpts) (*http.Respon
if opts == nil {
opts = new(RequestOpts)
}
client.initReqOpts(url, nil, nil, opts)
client.initReqOpts(nil, nil, opts)
return client.Request("DELETE", url, opts)
}
@ -117,7 +109,7 @@ func (client *ServiceClient) Head(url string, opts *RequestOpts) (*http.Response
if opts == nil {
opts = new(RequestOpts)
}
client.initReqOpts(url, nil, nil, opts)
client.initReqOpts(nil, nil, opts)
return client.Request("HEAD", url, opts)
}
@ -142,10 +134,19 @@ func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) {
// Request carries out the HTTP operation for the service client
func (client *ServiceClient) Request(method, url string, options *RequestOpts) (*http.Response, error) {
if options.MoreHeaders == nil {
options.MoreHeaders = make(map[string]string)
}
if client.Microversion != "" {
client.setMicroversionHeader(options)
}
if len(client.MoreHeaders) > 0 {
if options == nil {
options = new(RequestOpts)
}
for k, v := range client.MoreHeaders {
options.MoreHeaders[k] = v
}

View file

@ -4,28 +4,11 @@ import (
"fmt"
"io"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
)
var RuntimeGOARCH = runtime.GOARCH
func CurrentArch() string {
if RuntimeGOARCH == "amd64" {
return "x86_64"
} else if RuntimeGOARCH == "arm64" {
return "aarch64"
} else if RuntimeGOARCH == "ppc64le" {
return "ppc64le"
} else if RuntimeGOARCH == "s390x" {
return "s390x"
} else {
panic("unsupported architecture")
}
}
func PanicOnError(err error) {
if err != nil {
panic(err)

81
vendor/github.com/osbuild/images/pkg/arch/arch.go generated vendored Normal file
View file

@ -0,0 +1,81 @@
package arch
import (
"runtime"
)
// Arch enumerates the CPU architectures supported by image builds.
type Arch uint64

// Supported architectures. The zero value is ARCH_AARCH64.
const ( // architecture enum
	ARCH_AARCH64 Arch = iota
	ARCH_PPC64LE
	ARCH_S390X
	ARCH_X86_64
)

// archNames maps each Arch to its canonical (RPM-style) name.
var archNames = map[Arch]string{
	ARCH_AARCH64: "aarch64",
	ARCH_PPC64LE: "ppc64le",
	ARCH_S390X:   "s390x",
	ARCH_X86_64:  "x86_64",
}

// String returns the canonical name of the architecture. It panics with
// "invalid architecture" for any value outside the defined enum.
func (a Arch) String() string {
	name, ok := archNames[a]
	if !ok {
		panic("invalid architecture")
	}
	return name
}

// FromString converts either a canonical architecture name ("x86_64",
// "aarch64", ...) or the corresponding Go GOARCH name ("amd64", "arm64")
// to an Arch. It panics with "unsupported architecture" for any other
// input.
func FromString(a string) Arch {
	switch a {
	case "amd64", "x86_64":
		return ARCH_X86_64
	case "arm64", "aarch64":
		return ARCH_AARCH64
	case "s390x":
		return ARCH_S390X
	case "ppc64le":
		return ARCH_PPC64LE
	default:
		panic("unsupported architecture")
	}
}

// runtimeGOARCH holds runtime.GOARCH; a package-level variable so it can
// be overridden (e.g. in tests).
var runtimeGOARCH = runtime.GOARCH

// Current returns the Arch of the running process. It panics with
// "unsupported architecture" on any GOARCH outside the supported set.
func Current() Arch {
	switch runtimeGOARCH {
	case "amd64":
		return ARCH_X86_64
	case "arm64":
		return ARCH_AARCH64
	case "ppc64le":
		return ARCH_PPC64LE
	case "s390x":
		return ARCH_S390X
	default:
		panic("unsupported architecture")
	}
}

// IsX86_64 reports whether the current architecture is x86_64.
func IsX86_64() bool {
	return Current() == ARCH_X86_64
}

// IsAarch64 reports whether the current architecture is aarch64.
func IsAarch64() bool {
	return Current() == ARCH_AARCH64
}

// IsPPC reports whether the current architecture is ppc64le.
func IsPPC() bool {
	return Current() == ARCH_PPC64LE
}

// IsS390x reports whether the current architecture is s390x.
func IsS390x() bool {
	return Current() == ARCH_S390X
}

View file

@ -385,7 +385,7 @@ func (pt *PartitionTable) applyCustomization(mountpoints []blueprint.FilesystemC
// Dynamically calculate and update the start point for each of the existing
// partitions. Adjusts the overall size of image to either the supplied
// value in `size` or to the sum of all partitions if that is lager.
// value in `size` or to the sum of all partitions if that is larger.
// Will grow the root partition if there is any empty space.
// Returns the updated start point.
func (pt *PartitionTable) relayout(size uint64) uint64 {
@ -406,6 +406,8 @@ func (pt *PartitionTable) relayout(size uint64) uint64 {
for idx := range pt.Partitions {
partition := &pt.Partitions[idx]
if len(entityPath(partition, "/")) != 0 {
// keep the root partition index to handle after all the other
// partitions have been moved and resized
rootIdx = idx
continue
}
@ -586,6 +588,13 @@ func resizeEntityBranch(path []Entity, size uint64) {
break
}
}
// If containerSize is 0, it means it doesn't have any direct sizeable
// children (e.g., a LUKS container with a VG child). In that case,
// set the containerSize to the desired size for the branch before
// adding any metadata.
if containerSize == 0 {
containerSize = size
}
if vc, ok := element.(VolumeContainer); ok {
containerSize += vc.MetadataSize()
}
@ -642,14 +651,17 @@ func (pt *PartitionTable) ensureLVM() error {
Description: "created via lvm2 and osbuild",
}
// create root logical volume on the new volume group with the same
// size and filesystem as the previous root partition
_, err := vg.CreateLogicalVolume("root", part.Size, filesystem)
if err != nil {
panic(fmt.Sprintf("Could not create LV: %v", err))
}
// replace the top-level partition payload with the new volume group
part.Payload = vg
// reset it so it will be grown later
// reset the vg partition size - it will be grown later
part.Size = 0
if pt.Type == "gpt" {

View file

@ -10,6 +10,7 @@ import (
"github.com/osbuild/images/internal/environment"
"github.com/osbuild/images/internal/fsnode"
"github.com/osbuild/images/internal/oscap"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
@ -532,23 +533,23 @@ func newDistro(version int) distro.Distro {
// Architecture definitions
x86_64 := architecture{
name: platform.ARCH_X86_64.String(),
name: arch.ARCH_X86_64.String(),
distro: &rd,
}
aarch64 := architecture{
name: platform.ARCH_AARCH64.String(),
name: arch.ARCH_AARCH64.String(),
distro: &rd,
}
ppc64le := architecture{
distro: &rd,
name: platform.ARCH_PPC64LE.String(),
name: arch.ARCH_PPC64LE.String(),
}
s390x := architecture{
distro: &rd,
name: platform.ARCH_S390X.String(),
name: arch.ARCH_S390X.String(),
}
ociImgType := qcow2ImgType

View file

@ -491,7 +491,7 @@ func iotImage(workload workload.Workload,
if err != nil {
return nil, fmt.Errorf("%s: %s", t.Name(), err.Error())
}
img := image.NewOSTreeDiskImage(commit)
img := image.NewOSTreeDiskImageFromCommit(commit)
distro := t.Arch().Distro()
@ -522,9 +522,10 @@ func iotImage(workload workload.Workload,
Name: "fedora-iot",
}
img.OSName = "fedora-iot"
img.LockRoot = true
if !common.VersionLessThan(distro.Releasever(), "38") {
img.Ignition = true
img.KernelOptionsAppend = append(img.KernelOptionsAppend, "coreos.no_persist_ip")
switch img.Platform.GetImageFormat() {
case platform.FORMAT_RAW:
img.IgnitionPlatform = "metal"
@ -565,7 +566,7 @@ func iotSimplifiedInstallerImage(workload workload.Workload,
if err != nil {
return nil, fmt.Errorf("%s: %s", t.Name(), err.Error())
}
rawImg := image.NewOSTreeDiskImage(commit)
rawImg := image.NewOSTreeDiskImageFromCommit(commit)
customizations := bp.Customizations
rawImg.Users = users.UsersFromBP(customizations.GetUsers())
@ -585,10 +586,11 @@ func iotSimplifiedInstallerImage(workload workload.Workload,
Name: "fedora-iot",
}
rawImg.OSName = "fedora"
rawImg.LockRoot = true
if !common.VersionLessThan(t.arch.distro.osVersion, "38") {
rawImg.Ignition = true
rawImg.IgnitionPlatform = "metal"
rawImg.KernelOptionsAppend = append(rawImg.KernelOptionsAppend, "coreos.no_persist_ip")
if bpIgnition := customizations.GetIgnition(); bpIgnition != nil && bpIgnition.FirstBoot != nil && bpIgnition.FirstBoot.ProvisioningURL != "" {
rawImg.KernelOptionsAppend = append(rawImg.KernelOptionsAppend, "ignition.config.url="+bpIgnition.FirstBoot.ProvisioningURL)
}

View file

@ -7,7 +7,7 @@ import (
"strconv"
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/platform"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/rpmmd"
)
@ -364,7 +364,7 @@ func anacondaPackageSet(t *imageType) rpmmd.PackageSet {
}
switch t.Arch().Name() {
case platform.ARCH_X86_64.String():
case arch.ARCH_X86_64.String():
ps = ps.Append(rpmmd.PackageSet{
Include: []string{
"biosdevname",
@ -374,7 +374,7 @@ func anacondaPackageSet(t *imageType) rpmmd.PackageSet {
},
})
case platform.ARCH_AARCH64.String():
case arch.ARCH_AARCH64.String():
ps = ps.Append(rpmmd.PackageSet{
Include: []string{
"dmidecode",

View file

@ -2,13 +2,13 @@ package fedora
import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/platform"
)
var defaultBasePartitionTables = distro.BasePartitionTableMap{
platform.ARCH_X86_64.String(): disk.PartitionTable{
arch.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Partitions: []disk.Partition{
@ -60,7 +60,7 @@ var defaultBasePartitionTables = distro.BasePartitionTableMap{
},
},
},
platform.ARCH_AARCH64.String(): disk.PartitionTable{
arch.ARCH_AARCH64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Partitions: []disk.Partition{
@ -106,7 +106,7 @@ var defaultBasePartitionTables = distro.BasePartitionTableMap{
},
},
},
platform.ARCH_PPC64LE.String(): disk.PartitionTable{
arch.ARCH_PPC64LE.String(): disk.PartitionTable{
UUID: "0x14fc63d2",
Type: "dos",
Partitions: []disk.Partition{
@ -139,7 +139,7 @@ var defaultBasePartitionTables = distro.BasePartitionTableMap{
},
},
platform.ARCH_S390X.String(): disk.PartitionTable{
arch.ARCH_S390X.String(): disk.PartitionTable{
UUID: "0x14fc63d2",
Type: "dos",
Partitions: []disk.Partition{
@ -170,7 +170,7 @@ var defaultBasePartitionTables = distro.BasePartitionTableMap{
}
var minimalrawPartitionTables = distro.BasePartitionTableMap{
platform.ARCH_X86_64.String(): disk.PartitionTable{
arch.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
StartOffset: 8 * common.MebiByte,
@ -217,7 +217,7 @@ var minimalrawPartitionTables = distro.BasePartitionTableMap{
},
},
},
platform.ARCH_AARCH64.String(): disk.PartitionTable{
arch.ARCH_AARCH64.String(): disk.PartitionTable{
UUID: "0xc1748067",
Type: "dos",
StartOffset: 8 * common.MebiByte,
@ -265,7 +265,7 @@ var minimalrawPartitionTables = distro.BasePartitionTableMap{
}
var iotBasePartitionTables = distro.BasePartitionTableMap{
platform.ARCH_X86_64.String(): disk.PartitionTable{
arch.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Partitions: []disk.Partition{
@ -311,7 +311,7 @@ var iotBasePartitionTables = distro.BasePartitionTableMap{
},
},
},
platform.ARCH_AARCH64.String(): disk.PartitionTable{
arch.ARCH_AARCH64.String(): disk.PartitionTable{
UUID: "0xc1748067",
Type: "dos",
Partitions: []disk.Partition{
@ -358,7 +358,7 @@ var iotBasePartitionTables = distro.BasePartitionTableMap{
}
var iotSimplifiedInstallerPartitionTables = distro.BasePartitionTableMap{
platform.ARCH_X86_64.String(): disk.PartitionTable{
arch.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Partitions: []disk.Partition{
@ -428,7 +428,7 @@ var iotSimplifiedInstallerPartitionTables = distro.BasePartitionTableMap{
},
},
},
platform.ARCH_AARCH64.String(): disk.PartitionTable{
arch.ARCH_AARCH64.String(): disk.PartitionTable{
UUID: "0xc1748067",
Type: "dos",
Partitions: []disk.Partition{

View file

@ -2,10 +2,10 @@ package rhel7
import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
"github.com/osbuild/images/pkg/rpmmd"
"github.com/osbuild/images/pkg/subscription"
)
@ -273,7 +273,7 @@ func azureRhuiCommonPackageSet(t *imageType) rpmmd.PackageSet {
}
var azureRhuiBasePartitionTables = distro.BasePartitionTableMap{
platform.ARCH_X86_64.String(): disk.PartitionTable{
arch.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Size: 64 * common.GibiByte,

View file

@ -7,6 +7,7 @@ import (
"strings"
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
@ -200,7 +201,7 @@ func newDistro(distroName string) distro.Distro {
// Architecture definitions
x86_64 := architecture{
name: platform.ARCH_X86_64.String(),
name: arch.ARCH_X86_64.String(),
distro: &rd,
}

View file

@ -7,13 +7,13 @@ import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/internal/users"
"github.com/osbuild/images/internal/workload"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/blueprint"
"github.com/osbuild/images/pkg/container"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/image"
"github.com/osbuild/images/pkg/manifest"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
"github.com/osbuild/images/pkg/rpmmd"
)
@ -40,7 +40,7 @@ func osCustomizations(
kernelOptions = append(kernelOptions, bpKernel.Append)
}
osc.KernelOptionsAppend = kernelOptions
if t.platform.GetArch() != platform.ARCH_S390X {
if t.platform.GetArch() != arch.ARCH_S390X {
osc.KernelOptionsBootloader = true
}
}

View file

@ -2,13 +2,13 @@ package rhel7
import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/platform"
)
var defaultBasePartitionTables = distro.BasePartitionTableMap{
platform.ARCH_X86_64.String(): disk.PartitionTable{
arch.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Partitions: []disk.Partition{

View file

@ -3,10 +3,10 @@ package rhel8
import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/internal/shell"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
"github.com/osbuild/images/pkg/rpmmd"
"github.com/osbuild/images/pkg/subscription"
)
@ -261,7 +261,7 @@ func azureEapPackageSet(t *imageType) rpmmd.PackageSet {
// PARTITION TABLES
var azureRhuiBasePartitionTables = distro.BasePartitionTableMap{
platform.ARCH_X86_64.String(): disk.PartitionTable{
arch.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Size: 64 * common.GibiByte,
@ -369,7 +369,7 @@ var azureRhuiBasePartitionTables = distro.BasePartitionTableMap{
},
},
},
platform.ARCH_AARCH64.String(): disk.PartitionTable{
arch.ARCH_AARCH64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Size: 64 * common.GibiByte,

View file

@ -3,7 +3,7 @@ package rhel8
import (
"fmt"
"github.com/osbuild/images/pkg/platform"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/rpmmd"
)
@ -146,7 +146,7 @@ func installerPackageSet(t *imageType) rpmmd.PackageSet {
}
switch t.arch.Name() {
case platform.ARCH_X86_64.String():
case arch.ARCH_X86_64.String():
ps = ps.Append(rpmmd.PackageSet{
Include: []string{
"biosdevname",
@ -278,7 +278,7 @@ func anacondaPackageSet(t *imageType) rpmmd.PackageSet {
ps = ps.Append(anacondaBootPackageSet(t))
switch t.arch.Name() {
case platform.ARCH_X86_64.String():
case arch.ARCH_X86_64.String():
ps = ps.Append(rpmmd.PackageSet{
Include: []string{
"biosdevname",
@ -287,7 +287,7 @@ func anacondaPackageSet(t *imageType) rpmmd.PackageSet {
},
})
case platform.ARCH_AARCH64.String():
case arch.ARCH_AARCH64.String():
ps = ps.Append(rpmmd.PackageSet{
Include: []string{
"dmidecode",

View file

@ -8,6 +8,7 @@ import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/internal/oscap"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
@ -198,22 +199,22 @@ func newDistro(name string, minor int) *distribution {
// Architecture definitions
x86_64 := architecture{
name: platform.ARCH_X86_64.String(),
name: arch.ARCH_X86_64.String(),
distro: &rd,
}
aarch64 := architecture{
name: platform.ARCH_AARCH64.String(),
name: arch.ARCH_AARCH64.String(),
distro: &rd,
}
ppc64le := architecture{
distro: &rd,
name: platform.ARCH_PPC64LE.String(),
name: arch.ARCH_PPC64LE.String(),
}
s390x := architecture{
distro: &rd,
name: platform.ARCH_S390X.String(),
name: arch.ARCH_S390X.String(),
}
ociImgType := qcow2ImgType(rd)

View file

@ -5,8 +5,8 @@ import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/internal/fsnode"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/platform"
"github.com/osbuild/images/pkg/rpmmd"
)
@ -249,10 +249,10 @@ func edgeCommitPackageSet(t *imageType) rpmmd.PackageSet {
}
switch t.arch.Name() {
case platform.ARCH_X86_64.String():
case arch.ARCH_X86_64.String():
ps = ps.Append(x8664EdgeCommitPackageSet(t))
case platform.ARCH_AARCH64.String():
case arch.ARCH_AARCH64.String():
ps = ps.Append(aarch64EdgeCommitPackageSet(t))
}
@ -370,9 +370,9 @@ func edgeSimplifiedInstallerPackageSet(t *imageType) rpmmd.PackageSet {
switch t.arch.Name() {
case platform.ARCH_X86_64.String():
case arch.ARCH_X86_64.String():
ps = ps.Append(x8664EdgeCommitPackageSet(t))
case platform.ARCH_AARCH64.String():
case arch.ARCH_AARCH64.String():
ps = ps.Append(aarch64EdgeCommitPackageSet(t))
default:

View file

@ -10,6 +10,7 @@ import (
"github.com/osbuild/images/internal/oscap"
"github.com/osbuild/images/internal/users"
"github.com/osbuild/images/internal/workload"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/blueprint"
"github.com/osbuild/images/pkg/container"
"github.com/osbuild/images/pkg/distro"
@ -17,7 +18,6 @@ import (
"github.com/osbuild/images/pkg/manifest"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/ostree"
"github.com/osbuild/images/pkg/platform"
"github.com/osbuild/images/pkg/rpmmd"
)
@ -44,7 +44,7 @@ func osCustomizations(
kernelOptions = append(kernelOptions, bpKernel.Append)
}
osc.KernelOptionsAppend = kernelOptions
if t.platform.GetArch() != platform.ARCH_S390X {
if t.platform.GetArch() != arch.ARCH_S390X {
osc.KernelOptionsBootloader = true
}
}
@ -443,7 +443,7 @@ func edgeRawImage(workload workload.Workload,
return nil, fmt.Errorf("%s: %s", t.Name(), err.Error())
}
img := image.NewOSTreeDiskImage(commit)
img := image.NewOSTreeDiskImageFromCommit(commit)
img.Users = users.UsersFromBP(customizations.GetUsers())
img.Groups = users.GroupsFromBP(customizations.GetGroups())
@ -461,6 +461,7 @@ func edgeRawImage(workload workload.Workload,
ContentURL: options.OSTree.ContentURL,
}
img.OSName = "redhat"
img.LockRoot = true
// TODO: move generation into LiveImage
pt, err := t.getPartitionTable(customizations.GetFilesystems(), options, rng)
@ -488,7 +489,7 @@ func edgeSimplifiedInstallerImage(workload workload.Workload,
return nil, fmt.Errorf("%s: %s", t.Name(), err.Error())
}
rawImg := image.NewOSTreeDiskImage(commit)
rawImg := image.NewOSTreeDiskImageFromCommit(commit)
rawImg.Users = users.UsersFromBP(customizations.GetUsers())
rawImg.Groups = users.GroupsFromBP(customizations.GetGroups())
@ -505,6 +506,7 @@ func edgeSimplifiedInstallerImage(workload workload.Workload,
ContentURL: options.OSTree.ContentURL,
}
rawImg.OSName = "redhat"
rawImg.LockRoot = true
// TODO: move generation into LiveImage
pt, err := t.getPartitionTable(customizations.GetFilesystems(), options, rng)

View file

@ -5,7 +5,7 @@ package rhel8
import (
"fmt"
"github.com/osbuild/images/pkg/platform"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/rpmmd"
)
@ -30,7 +30,7 @@ func anacondaBootPackageSet(t *imageType) rpmmd.PackageSet {
}
switch t.arch.Name() {
case platform.ARCH_X86_64.String():
case arch.ARCH_X86_64.String():
ps = ps.Append(grubCommon)
ps = ps.Append(efiCommon)
ps = ps.Append(rpmmd.PackageSet{
@ -46,7 +46,7 @@ func anacondaBootPackageSet(t *imageType) rpmmd.PackageSet {
"syslinux-nonlinux",
},
})
case platform.ARCH_AARCH64.String():
case arch.ARCH_AARCH64.String():
ps = ps.Append(grubCommon)
ps = ps.Append(efiCommon)
ps = ps.Append(rpmmd.PackageSet{

View file

@ -2,13 +2,13 @@ package rhel8
import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/platform"
)
var defaultBasePartitionTables = distro.BasePartitionTableMap{
platform.ARCH_X86_64.String(): disk.PartitionTable{
arch.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Partitions: []disk.Partition{
@ -46,7 +46,7 @@ var defaultBasePartitionTables = distro.BasePartitionTableMap{
},
},
},
platform.ARCH_AARCH64.String(): disk.PartitionTable{
arch.ARCH_AARCH64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Partitions: []disk.Partition{
@ -78,7 +78,7 @@ var defaultBasePartitionTables = distro.BasePartitionTableMap{
},
},
},
platform.ARCH_PPC64LE.String(): disk.PartitionTable{
arch.ARCH_PPC64LE.String(): disk.PartitionTable{
UUID: "0x14fc63d2",
Type: "dos",
Partitions: []disk.Partition{
@ -99,7 +99,7 @@ var defaultBasePartitionTables = distro.BasePartitionTableMap{
},
},
},
platform.ARCH_S390X.String(): disk.PartitionTable{
arch.ARCH_S390X.String(): disk.PartitionTable{
UUID: "0x14fc63d2",
Type: "dos",
Partitions: []disk.Partition{
@ -119,7 +119,7 @@ var defaultBasePartitionTables = distro.BasePartitionTableMap{
}
var ec2BasePartitionTables = distro.BasePartitionTableMap{
platform.ARCH_X86_64.String(): disk.PartitionTable{
arch.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Partitions: []disk.Partition{
@ -171,7 +171,7 @@ var ec2BasePartitionTables = distro.BasePartitionTableMap{
},
},
},
platform.ARCH_AARCH64.String(): disk.PartitionTable{
arch.ARCH_AARCH64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Partitions: []disk.Partition{
@ -222,7 +222,7 @@ var ec2BasePartitionTables = distro.BasePartitionTableMap{
// ec2LegacyBasePartitionTables is the partition table layout for RHEL EC2
// images prior to 8.9. It is used for backwards compatibility.
var ec2LegacyBasePartitionTables = distro.BasePartitionTableMap{
platform.ARCH_X86_64.String(): disk.PartitionTable{
arch.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Partitions: []disk.Partition{
@ -247,7 +247,7 @@ var ec2LegacyBasePartitionTables = distro.BasePartitionTableMap{
},
},
},
platform.ARCH_AARCH64.String(): disk.PartitionTable{
arch.ARCH_AARCH64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Partitions: []disk.Partition{
@ -294,7 +294,7 @@ var ec2LegacyBasePartitionTables = distro.BasePartitionTableMap{
}
var edgeBasePartitionTables = distro.BasePartitionTableMap{
platform.ARCH_X86_64.String(): disk.PartitionTable{
arch.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Partitions: []disk.Partition{
@ -361,7 +361,7 @@ var edgeBasePartitionTables = distro.BasePartitionTableMap{
},
},
},
platform.ARCH_AARCH64.String(): disk.PartitionTable{
arch.ARCH_AARCH64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Partitions: []disk.Partition{

View file

@ -155,14 +155,18 @@ func openstackCommonPackageSet(t *imageType) rpmmd.PackageSet {
return rpmmd.PackageSet{
Include: []string{
// Defaults
"@Core", "langpacks-en",
"@Core",
"langpacks-en",
// From the lorax kickstart
"selinux-policy-targeted", "cloud-init", "qemu-guest-agent",
"selinux-policy-targeted",
"cloud-init",
"qemu-guest-agent",
"spice-vdagent",
},
Exclude: []string{
"dracut-config-rescue", "rng-tools",
"dracut-config-rescue",
"rng-tools",
},
}
}

View file

@ -300,6 +300,7 @@ func ec2BuildPackageSet(t *imageType) rpmmd.PackageSet {
func ec2CommonPackageSet(t *imageType) rpmmd.PackageSet {
return rpmmd.PackageSet{
Include: []string{
"@core",
"authselect-compat",
"chrony",
"cloud-init",
@ -314,6 +315,7 @@ func ec2CommonPackageSet(t *imageType) rpmmd.PackageSet {
"redhat-release",
"redhat-release-eula",
"rsync",
"tuned",
"tar",
},
Exclude: []string{
@ -321,16 +323,35 @@ func ec2CommonPackageSet(t *imageType) rpmmd.PackageSet {
"alsa-firmware",
"alsa-tools-firmware",
"biosdevname",
"firewalld",
"iprutils",
"ivtv-firmware",
"iwl1000-firmware",
"iwl100-firmware",
"iwl105-firmware",
"iwl135-firmware",
"iwl2000-firmware",
"iwl2030-firmware",
"iwl3160-firmware",
"iwl3945-firmware",
"iwl4965-firmware",
"iwl5000-firmware",
"iwl5150-firmware",
"iwl6000-firmware",
"iwl6000g2a-firmware",
"iwl6000g2b-firmware",
"iwl6050-firmware",
"iwl7260-firmware",
"libertas-sd8686-firmware",
"libertas-sd8787-firmware",
"libertas-usb8388-firmware",
"plymouth",
// RHBZ#2064087
"dracut-config-rescue",
// RHBZ#2075815
"qemu-guest-agent",
},
}.Append(coreOsCommonPackageSet(t)).Append(distroSpecificPackageSet(t))
}.Append(distroSpecificPackageSet(t))
}
// common rhel ec2 RHUI image package set

View file

@ -2,10 +2,10 @@ package rhel9
import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
"github.com/osbuild/images/pkg/rpmmd"
"github.com/osbuild/images/pkg/subscription"
)
@ -149,14 +149,6 @@ func azureCommonPackageSet(t *imageType) rpmmd.PackageSet {
},
}.Append(distroSpecificPackageSet(t))
if t.arch.distro.isRHEL() {
ps.Append(rpmmd.PackageSet{
Include: []string{
"rhc",
},
})
}
return ps
}
@ -184,7 +176,7 @@ func azureRhuiBasePartitionTables(t *imageType) (disk.PartitionTable, bool) {
}
switch t.platform.GetArch() {
case platform.ARCH_X86_64:
case arch.ARCH_X86_64:
return disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
@ -293,7 +285,7 @@ func azureRhuiBasePartitionTables(t *imageType) (disk.PartitionTable, bool) {
},
},
}, true
case platform.ARCH_AARCH64:
case arch.ARCH_AARCH64:
return disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",

View file

@ -3,7 +3,7 @@ package rhel9
import (
"fmt"
"github.com/osbuild/images/pkg/platform"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/rpmmd"
)
@ -47,6 +47,7 @@ var (
func bareMetalPackageSet(t *imageType) rpmmd.PackageSet {
ps := rpmmd.PackageSet{
Include: []string{
"@core",
"authselect-compat",
"chrony",
"cockpit-system",
@ -82,8 +83,12 @@ func bareMetalPackageSet(t *imageType) rpmmd.PackageSet {
"rsync",
"tar",
"tcpdump",
"tuned",
},
}.Append(coreOsCommonPackageSet(t)).Append(distroBuildPackageSet(t))
Exclude: []string{
"dracut-config-rescue",
},
}.Append(distroBuildPackageSet(t))
// Ensure to not pull in subscription-manager on non-RHEL distro
if t.arch.distro.isRHEL() {
@ -133,8 +138,22 @@ func installerPackageSet(t *imageType) rpmmd.PackageSet {
},
}
ps = ps.Append(rpmmd.PackageSet{
// Extra packages that are required by the dracut stage of all installers.
// These are weak deps of other packages in the list above, but lets be
// explicit about what we need and not rely on the weak deps. Relying
// on hard-dependencies for other modules is OK for now.
//
// TODO: add these dynamically based on the modules enabled by each
// pipeline.
Include: []string{
"mdadm",
"nss-softokn",
},
})
switch t.arch.Name() {
case platform.ARCH_X86_64.String():
case arch.ARCH_X86_64.String():
ps = ps.Append(rpmmd.PackageSet{
Include: []string{
"biosdevname",
@ -292,7 +311,7 @@ func anacondaPackageSet(t *imageType) rpmmd.PackageSet {
ps = ps.Append(anacondaBootPackageSet(t))
switch t.arch.Name() {
case platform.ARCH_X86_64.String():
case arch.ARCH_X86_64.String():
ps = ps.Append(rpmmd.PackageSet{
Include: []string{
"biosdevname",
@ -302,7 +321,7 @@ func anacondaPackageSet(t *imageType) rpmmd.PackageSet {
},
})
case platform.ARCH_AARCH64.String():
case arch.ARCH_AARCH64.String():
ps = ps.Append(rpmmd.PackageSet{
Include: []string{
"dmidecode",

View file

@ -8,6 +8,7 @@ import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/internal/oscap"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
@ -190,23 +191,23 @@ func newDistro(name string, minor int) *distribution {
// Architecture definitions
x86_64 := architecture{
name: platform.ARCH_X86_64.String(),
name: arch.ARCH_X86_64.String(),
distro: &rd,
}
aarch64 := architecture{
name: platform.ARCH_AARCH64.String(),
name: arch.ARCH_AARCH64.String(),
distro: &rd,
}
ppc64le := architecture{
distro: &rd,
name: platform.ARCH_PPC64LE.String(),
name: arch.ARCH_PPC64LE.String(),
}
s390x := architecture{
distro: &rd,
name: platform.ARCH_S390X.String(),
name: arch.ARCH_S390X.String(),
}
qcow2ImgType := mkQcow2ImgType(rd)

View file

@ -6,10 +6,10 @@ import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/internal/environment"
"github.com/osbuild/images/internal/fsnode"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
"github.com/osbuild/images/pkg/rpmmd"
)
@ -237,7 +237,7 @@ func minimalrawPartitionTables(t *imageType) (disk.PartitionTable, bool) {
}
switch t.platform.GetArch() {
case platform.ARCH_X86_64:
case arch.ARCH_X86_64:
return disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
@ -285,7 +285,7 @@ func minimalrawPartitionTables(t *imageType) (disk.PartitionTable, bool) {
},
},
}, true
case platform.ARCH_AARCH64:
case arch.ARCH_AARCH64:
return disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
@ -340,7 +340,7 @@ func minimalrawPartitionTables(t *imageType) (disk.PartitionTable, bool) {
func edgeBasePartitionTables(t *imageType) (disk.PartitionTable, bool) {
switch t.platform.GetArch() {
case platform.ARCH_X86_64:
case arch.ARCH_X86_64:
return disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
@ -417,7 +417,7 @@ func edgeBasePartitionTables(t *imageType) (disk.PartitionTable, bool) {
},
},
}, true
case platform.ARCH_AARCH64:
case arch.ARCH_AARCH64:
return disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
@ -582,10 +582,10 @@ func edgeCommitPackageSet(t *imageType) rpmmd.PackageSet {
}
switch t.arch.Name() {
case platform.ARCH_X86_64.String():
case arch.ARCH_X86_64.String():
ps = ps.Append(x8664EdgeCommitPackageSet(t))
case platform.ARCH_AARCH64.String():
case arch.ARCH_AARCH64.String():
ps = ps.Append(aarch64EdgeCommitPackageSet(t))
}
@ -682,9 +682,9 @@ func edgeSimplifiedInstallerPackageSet(t *imageType) rpmmd.PackageSet {
switch t.arch.Name() {
case platform.ARCH_X86_64.String():
case arch.ARCH_X86_64.String():
ps = ps.Append(x8664EdgeCommitPackageSet(t))
case platform.ARCH_AARCH64.String():
case arch.ARCH_AARCH64.String():
ps = ps.Append(aarch64EdgeCommitPackageSet(t))
default:

View file

@ -214,6 +214,7 @@ func defaultGceRhuiImageConfig(rhsm bool) *distro.ImageConfig {
func gceCommonPackageSet(t *imageType) rpmmd.PackageSet {
ps := rpmmd.PackageSet{
Include: []string{
"@core",
"langpacks-en", // not in Google's KS
"acpid",
"dhcp-client",
@ -237,12 +238,14 @@ func gceCommonPackageSet(t *imageType) rpmmd.PackageSet {
// EFI
"grub2-tools",
"grub2-tools-minimal",
"firewalld", // not pulled in any more as on RHEL-8
// Performance tuning
"tuned",
},
Exclude: []string{
"alsa-utils",
"b43-fwcutter",
"dmraid",
"dracut-config-rescue",
"eject",
"gpm",
"irqbalance",
@ -256,7 +259,12 @@ func gceCommonPackageSet(t *imageType) rpmmd.PackageSet {
"ipw2200-firmware",
"ivtv-firmware",
"iwl100-firmware",
"iwl105-firmware",
"iwl135-firmware",
"iwl1000-firmware",
"iwl2000-firmware",
"iwl2030-firmware",
"iwl3160-firmware",
"iwl3945-firmware",
"iwl4965-firmware",
"iwl5000-firmware",
@ -264,6 +272,7 @@ func gceCommonPackageSet(t *imageType) rpmmd.PackageSet {
"iwl6000-firmware",
"iwl6000g2a-firmware",
"iwl6050-firmware",
"iwl7260-firmware",
"kernel-firmware",
"libertas-usb8388-firmware",
"ql2100-firmware",
@ -278,12 +287,9 @@ func gceCommonPackageSet(t *imageType) rpmmd.PackageSet {
// RHBZ#2075815
"qemu-guest-agent",
},
}.Append(coreOsCommonPackageSet(t)).Append(distroSpecificPackageSet(t))
}.Append(distroSpecificPackageSet(t))
// Some excluded packages are part of the @core group package set returned
// by coreOsCommonPackageSet(). Ensure that the conflicting packages are
// returned from the list of `Include` packages.
return ps.ResolveConflictsExclude()
return ps
}
// GCE BYOS image

View file

@ -401,7 +401,7 @@ func edgeRawImage(workload workload.Workload,
if err != nil {
return nil, fmt.Errorf("%s: %s", t.Name(), err.Error())
}
img := image.NewOSTreeDiskImage(commit)
img := image.NewOSTreeDiskImageFromCommit(commit)
img.Users = users.UsersFromBP(customizations.GetUsers())
img.Groups = users.GroupsFromBP(customizations.GetGroups())
@ -421,8 +421,8 @@ func edgeRawImage(workload workload.Workload,
}
if !common.VersionLessThan(t.arch.distro.osVersion, "9.2") || !t.arch.distro.isRHEL() {
img.Ignition = true
img.IgnitionPlatform = "metal"
img.KernelOptionsAppend = append(img.KernelOptionsAppend, "coreos.no_persist_ip")
if bpIgnition := customizations.GetIgnition(); bpIgnition != nil && bpIgnition.FirstBoot != nil && bpIgnition.FirstBoot.ProvisioningURL != "" {
img.KernelOptionsAppend = append(img.KernelOptionsAppend, "ignition.config.url="+bpIgnition.FirstBoot.ProvisioningURL)
}
@ -436,6 +436,7 @@ func edgeRawImage(workload workload.Workload,
ContentURL: options.OSTree.ContentURL,
}
img.OSName = "redhat"
img.LockRoot = true
if kopts := customizations.GetKernel(); kopts != nil && kopts.Append != "" {
img.KernelOptionsAppend = append(img.KernelOptionsAppend, kopts.Append)
@ -466,7 +467,7 @@ func edgeSimplifiedInstallerImage(workload workload.Workload,
if err != nil {
return nil, fmt.Errorf("%s: %s", t.Name(), err.Error())
}
rawImg := image.NewOSTreeDiskImage(commit)
rawImg := image.NewOSTreeDiskImageFromCommit(commit)
rawImg.Users = users.UsersFromBP(customizations.GetUsers())
rawImg.Groups = users.GroupsFromBP(customizations.GetGroups())
@ -488,10 +489,11 @@ func edgeSimplifiedInstallerImage(workload workload.Workload,
ContentURL: options.OSTree.ContentURL,
}
rawImg.OSName = "redhat"
rawImg.LockRoot = true
if !common.VersionLessThan(t.arch.distro.osVersion, "9.2") || !t.arch.distro.isRHEL() {
rawImg.Ignition = true
rawImg.IgnitionPlatform = "metal"
rawImg.KernelOptionsAppend = append(rawImg.KernelOptionsAppend, "coreos.no_persist_ip")
if bpIgnition := customizations.GetIgnition(); bpIgnition != nil && bpIgnition.FirstBoot != nil && bpIgnition.FirstBoot.ProvisioningURL != "" {
rawImg.KernelOptionsAppend = append(rawImg.KernelOptionsAppend, "ignition.config.url="+bpIgnition.FirstBoot.ProvisioningURL)
}

View file

@ -5,7 +5,7 @@ package rhel9
import (
"fmt"
"github.com/osbuild/images/pkg/platform"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/rpmmd"
)
@ -35,10 +35,10 @@ func distroBuildPackageSet(t *imageType) rpmmd.PackageSet {
switch t.arch.Name() {
case platform.ARCH_X86_64.String():
case arch.ARCH_X86_64.String():
ps = ps.Append(x8664BuildPackageSet(t))
case platform.ARCH_PPC64LE.String():
case arch.ARCH_PPC64LE.String():
ps = ps.Append(ppc64leBuildPackageSet(t))
}
@ -84,7 +84,7 @@ func anacondaBootPackageSet(t *imageType) rpmmd.PackageSet {
}
switch t.arch.Name() {
case platform.ARCH_X86_64.String():
case arch.ARCH_X86_64.String():
ps = ps.Append(grubCommon)
ps = ps.Append(efiCommon)
ps = ps.Append(rpmmd.PackageSet{
@ -98,7 +98,7 @@ func anacondaBootPackageSet(t *imageType) rpmmd.PackageSet {
"syslinux-nonlinux",
},
})
case platform.ARCH_AARCH64.String():
case arch.ARCH_AARCH64.String():
ps = ps.Append(grubCommon)
ps = ps.Append(efiCommon)
ps = ps.Append(rpmmd.PackageSet{
@ -118,122 +118,6 @@ func anacondaBootPackageSet(t *imageType) rpmmd.PackageSet {
// OS package sets
// Replacement of the previously used @core package group
func coreOsCommonPackageSet(t *imageType) rpmmd.PackageSet {
ps := rpmmd.PackageSet{
Include: []string{
"audit",
"basesystem",
"bash",
"coreutils",
"cronie",
"crypto-policies",
"crypto-policies-scripts",
"curl",
"dnf",
"yum",
"e2fsprogs",
"filesystem",
"glibc",
"grubby",
"hostname",
"iproute",
"iproute-tc",
"iputils",
"kbd",
"kexec-tools",
"less",
"logrotate",
"man-db",
"ncurses",
"openssh-clients",
"openssh-server",
"p11-kit",
"parted",
"passwd",
"policycoreutils",
"procps-ng",
"rootfiles",
"rpm",
"rpm-plugin-audit",
"rsyslog",
"selinux-policy-targeted",
"setup",
"shadow-utils",
"sssd-common",
"sssd-kcm",
"sudo",
"systemd",
"tuned",
"util-linux",
"vim-minimal",
"xfsprogs",
"authselect",
"prefixdevname",
"dnf-plugins-core",
"NetworkManager",
"NetworkManager-team",
"NetworkManager-tui",
"libsysfs",
"linux-firmware",
"lshw",
"lsscsi",
"kernel-tools",
"sg3_utils",
"sg3_utils-libs",
"python3-libselinux",
},
}
// Do not include this in the distroSpecificPackageSet for now,
// because it includes 'insights-client' which is not installed
// by default on all RHEL images (although it would probably make sense).
if t.arch.distro.isRHEL() {
ps = ps.Append(rpmmd.PackageSet{
Include: []string{
"subscription-manager",
},
})
}
switch t.arch.Name() {
case platform.ARCH_X86_64.String():
ps = ps.Append(rpmmd.PackageSet{
Include: []string{
"irqbalance",
"microcode_ctl",
},
})
case platform.ARCH_AARCH64.String():
ps = ps.Append(rpmmd.PackageSet{
Include: []string{
"irqbalance",
},
})
case platform.ARCH_PPC64LE.String():
ps = ps.Append(rpmmd.PackageSet{
Include: []string{
"irqbalance",
"opal-prd",
"ppc64-diag-rtas",
"powerpc-utils-core",
"lsvpd",
},
})
case platform.ARCH_S390X.String():
ps = ps.Append(rpmmd.PackageSet{
Include: []string{
"s390utils-core",
},
})
}
return ps
}
// packages that are only in some (sub)-distributions
func distroSpecificPackageSet(t *imageType) rpmmd.PackageSet {
if t.arch.distro.isRHEL() {

View file

@ -2,8 +2,8 @@ package rhel9
import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/platform"
)
func defaultBasePartitionTables(t *imageType) (disk.PartitionTable, bool) {
@ -14,7 +14,7 @@ func defaultBasePartitionTables(t *imageType) (disk.PartitionTable, bool) {
}
switch t.platform.GetArch() {
case platform.ARCH_X86_64:
case arch.ARCH_X86_64:
return disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
@ -67,7 +67,7 @@ func defaultBasePartitionTables(t *imageType) (disk.PartitionTable, bool) {
},
},
}, true
case platform.ARCH_AARCH64:
case arch.ARCH_AARCH64:
return disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
@ -114,7 +114,7 @@ func defaultBasePartitionTables(t *imageType) (disk.PartitionTable, bool) {
},
},
}, true
case platform.ARCH_PPC64LE:
case arch.ARCH_PPC64LE:
return disk.PartitionTable{
UUID: "0x14fc63d2",
Type: "dos",
@ -148,7 +148,7 @@ func defaultBasePartitionTables(t *imageType) (disk.PartitionTable, bool) {
},
}, true
case platform.ARCH_S390X:
case arch.ARCH_S390X:
return disk.PartitionTable{
UUID: "0x14fc63d2",
Type: "dos",

View file

@ -4,7 +4,6 @@ import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
"github.com/osbuild/images/pkg/rpmmd"
"github.com/osbuild/images/pkg/subscription"
)
@ -34,6 +33,7 @@ var (
func qcow2CommonPackageSet(t *imageType) rpmmd.PackageSet {
ps := rpmmd.PackageSet{
Include: []string{
"@core",
"authselect-compat",
"chrony",
"cloud-init",
@ -52,6 +52,7 @@ func qcow2CommonPackageSet(t *imageType) rpmmd.PackageSet {
"redhat-release-eula",
"rsync",
"tar",
"tuned",
"tcpdump",
},
Exclude: []string{
@ -61,10 +62,28 @@ func qcow2CommonPackageSet(t *imageType) rpmmd.PackageSet {
"alsa-tools-firmware",
"biosdevname",
"dnf-plugin-spacewalk",
"dracut-config-rescue",
"fedora-release",
"fedora-repos",
"firewalld",
"iprutils",
"ivtv-firmware",
"iwl1000-firmware",
"iwl100-firmware",
"iwl105-firmware",
"iwl135-firmware",
"iwl2000-firmware",
"iwl2030-firmware",
"iwl3160-firmware",
"iwl3945-firmware",
"iwl4965-firmware",
"iwl5000-firmware",
"iwl5150-firmware",
"iwl6000-firmware",
"iwl6000g2a-firmware",
"iwl6000g2b-firmware",
"iwl6050-firmware",
"iwl7260-firmware",
"langpacks-*",
"langpacks-en",
"libertas-sd8787-firmware",
@ -73,7 +92,7 @@ func qcow2CommonPackageSet(t *imageType) rpmmd.PackageSet {
"rng-tools",
"udisks2",
},
}.Append(coreOsCommonPackageSet(t)).Append(distroSpecificPackageSet(t))
}.Append(distroSpecificPackageSet(t))
// Ensure to not pull in subscription-manager on non-RHEL distro
if t.arch.distro.isRHEL() {
@ -91,8 +110,9 @@ func openstackCommonPackageSet(t *imageType) rpmmd.PackageSet {
ps := rpmmd.PackageSet{
Include: []string{
// Defaults
"@core",
"langpacks-en",
"firewalld",
"tuned",
// From the lorax kickstart
"cloud-init",
@ -100,30 +120,9 @@ func openstackCommonPackageSet(t *imageType) rpmmd.PackageSet {
"spice-vdagent",
},
Exclude: []string{
"dracut-config-rescue",
"rng-tools",
},
}.Append(coreOsCommonPackageSet(t))
if t.arch.Name() == platform.ARCH_X86_64.String() {
ps = ps.Append(rpmmd.PackageSet{
Include: []string{
// packages below used to come from @core group and were not excluded
// they may not be needed at all, but kept them here to not need
// to exclude them instead in all other images
"iwl100-firmware",
"iwl105-firmware",
"iwl135-firmware",
"iwl1000-firmware",
"iwl2000-firmware",
"iwl2030-firmware",
"iwl3160-firmware",
"iwl5000-firmware",
"iwl5150-firmware",
"iwl6000g2a-firmware",
"iwl6050-firmware",
"iwl7260-firmware",
},
})
}
return ps

View file

@ -3,7 +3,6 @@ package rhel9
import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/platform"
"github.com/osbuild/images/pkg/rpmmd"
)
@ -52,37 +51,18 @@ var ovaImgType = imageType{
func vmdkCommonPackageSet(t *imageType) rpmmd.PackageSet {
ps := rpmmd.PackageSet{
Include: []string{
"@core",
"chrony",
"cloud-init",
"firewalld",
"langpacks-en",
"open-vm-tools",
"tuned",
},
Exclude: []string{
"dracut-config-rescue",
"rng-tools",
},
}.Append(coreOsCommonPackageSet(t))
if t.arch.Name() == platform.ARCH_X86_64.String() {
ps = ps.Append(rpmmd.PackageSet{
Include: []string{
// packages below used to come from @core group and were not excluded
// they may not be needed at all, but kept them here to not need
// to exclude them instead in all other images
"iwl100-firmware",
"iwl105-firmware",
"iwl135-firmware",
"iwl1000-firmware",
"iwl2000-firmware",
"iwl2030-firmware",
"iwl3160-firmware",
"iwl5000-firmware",
"iwl5150-firmware",
"iwl6000g2a-firmware",
"iwl6050-firmware",
"iwl7260-firmware",
},
})
}
return ps

View file

@ -6,6 +6,7 @@ import (
"strings"
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distro/fedora"
"github.com/osbuild/images/pkg/distro/rhel7"
@ -52,7 +53,7 @@ func New(hostDistro distro.Distro, distros ...distro.Distro) (*Registry, error)
reg := &Registry{
distros: make(map[string]distro.Distro),
hostDistro: hostDistro,
hostArchName: common.CurrentArch(),
hostArchName: arch.Current().String(),
}
for _, d := range distros {
name := d.Name()

View file

@ -7,6 +7,7 @@ import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/internal/environment"
"github.com/osbuild/images/internal/workload"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/artifact"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/manifest"
@ -61,7 +62,7 @@ func (img *AnacondaLiveInstaller) InstantiateManifest(m *manifest.Manifest,
livePipeline.ExcludePackages = img.ExtraBasePackages.Exclude
livePipeline.Variant = img.Variant
livePipeline.Biosdevname = (img.Platform.GetArch() == platform.ARCH_X86_64)
livePipeline.Biosdevname = (img.Platform.GetArch() == arch.ARCH_X86_64)
livePipeline.Checkpoint()
@ -103,7 +104,7 @@ func (img *AnacondaLiveInstaller) InstantiateManifest(m *manifest.Manifest,
bootTreePipeline.KernelOpts = kernelOpts
// enable ISOLinux on x86_64 only
isoLinuxEnabled := img.Platform.GetArch() == platform.ARCH_X86_64
isoLinuxEnabled := img.Platform.GetArch() == arch.ARCH_X86_64
isoTreePipeline := manifest.NewAnacondaInstallerISOTree(buildPipeline, livePipeline, rootfsImagePipeline, bootTreePipeline)
isoTreePipeline.PartitionTable = rootfsPartitionTable

View file

@ -6,6 +6,7 @@ import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/internal/users"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/artifact"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/manifest"
@ -69,7 +70,7 @@ func (img *AnacondaOSTreeInstaller) InstantiateManifest(m *manifest.Manifest,
anacondaPipeline.Users = img.Users
anacondaPipeline.Groups = img.Groups
anacondaPipeline.Variant = img.Variant
anacondaPipeline.Biosdevname = (img.Platform.GetArch() == platform.ARCH_X86_64)
anacondaPipeline.Biosdevname = (img.Platform.GetArch() == arch.ARCH_X86_64)
anacondaPipeline.Checkpoint()
anacondaPipeline.AdditionalDracutModules = img.AdditionalDracutModules
anacondaPipeline.AdditionalAnacondaModules = img.AdditionalAnacondaModules
@ -112,7 +113,7 @@ func (img *AnacondaOSTreeInstaller) InstantiateManifest(m *manifest.Manifest,
}
// enable ISOLinux on x86_64 only
isoLinuxEnabled := img.Platform.GetArch() == platform.ARCH_X86_64
isoLinuxEnabled := img.Platform.GetArch() == arch.ARCH_X86_64
isoTreePipeline := manifest.NewAnacondaInstallerISOTree(buildPipeline, anacondaPipeline, rootfsImagePipeline, bootTreePipeline)
isoTreePipeline.PartitionTable = rootfsPartitionTable
@ -129,6 +130,9 @@ func (img *AnacondaOSTreeInstaller) InstantiateManifest(m *manifest.Manifest,
isoTreePipeline.OSTreeCommitSource = &img.Commit
isoTreePipeline.ISOLinux = isoLinuxEnabled
if img.FIPS {
isoTreePipeline.KernelOpts = append(isoTreePipeline.KernelOpts, "fips=1")
}
isoPipeline := manifest.NewISO(buildPipeline, isoTreePipeline, isoLabel)
isoPipeline.SetFilename(img.Filename)

View file

@ -9,6 +9,7 @@ import (
"github.com/osbuild/images/internal/environment"
"github.com/osbuild/images/internal/users"
"github.com/osbuild/images/internal/workload"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/artifact"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/manifest"
@ -80,7 +81,7 @@ func (img *AnacondaTarInstaller) InstantiateManifest(m *manifest.Manifest,
anacondaPipeline.Users = img.Users
anacondaPipeline.Groups = img.Groups
anacondaPipeline.Variant = img.Variant
anacondaPipeline.Biosdevname = (img.Platform.GetArch() == platform.ARCH_X86_64)
anacondaPipeline.Biosdevname = (img.Platform.GetArch() == arch.ARCH_X86_64)
anacondaPipeline.AdditionalAnacondaModules = img.AdditionalAnacondaModules
anacondaPipeline.AdditionalDracutModules = img.AdditionalDracutModules
anacondaPipeline.AdditionalDrivers = img.AdditionalDrivers
@ -133,7 +134,7 @@ func (img *AnacondaTarInstaller) InstantiateManifest(m *manifest.Manifest,
osPipeline.Workload = img.Workload
// enable ISOLinux on x86_64 only
isoLinuxEnabled := img.Platform.GetArch() == platform.ARCH_X86_64
isoLinuxEnabled := img.Platform.GetArch() == arch.ARCH_X86_64
isoTreePipeline := manifest.NewAnacondaInstallerISOTree(buildPipeline, anacondaPipeline, rootfsImagePipeline, bootTreePipeline)
isoTreePipeline.PartitionTable = rootfsPartitionTable

View file

@ -31,6 +31,8 @@ type OSTreeArchive struct {
Filename string
InstallWeakDeps bool
BootContainer bool
}
func NewOSTreeArchive(ref string) *OSTreeArchive {
@ -59,9 +61,16 @@ func (img *OSTreeArchive) InstantiateManifest(m *manifest.Manifest,
ostreeCommitPipeline := manifest.NewOSTreeCommit(buildPipeline, osPipeline, img.OSTreeRef)
ostreeCommitPipeline.OSVersion = img.OSVersion
var artifact *artifact.Artifact
if img.BootContainer {
encapsulatePipeline := manifest.NewOSTreeEncapsulate(buildPipeline, ostreeCommitPipeline, "ostree-encapsulate")
encapsulatePipeline.SetFilename(img.Filename)
artifact = encapsulatePipeline.Export()
} else {
tarPipeline := manifest.NewTar(buildPipeline, ostreeCommitPipeline, "commit-archive")
tarPipeline.SetFilename(img.Filename)
artifact := tarPipeline.Export()
artifact = tarPipeline.Export()
}
return artifact, nil
}

View file

@ -8,6 +8,7 @@ import (
"github.com/osbuild/images/internal/users"
"github.com/osbuild/images/internal/workload"
"github.com/osbuild/images/pkg/artifact"
"github.com/osbuild/images/pkg/container"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/manifest"
"github.com/osbuild/images/pkg/ostree"
@ -26,12 +27,14 @@ type OSTreeDiskImage struct {
Users []users.User
Groups []users.Group
CommitSource ostree.SourceSpec
CommitSource *ostree.SourceSpec
ContainerSource *container.SourceSpec
SysrootReadOnly bool
Remote ostree.Remote
OSName string
Ref string
KernelOptionsAppend []string
Keyboard string
@ -39,7 +42,6 @@ type OSTreeDiskImage struct {
Filename string
Ignition bool
IgnitionPlatform string
Compression string
@ -47,17 +49,38 @@ type OSTreeDiskImage struct {
Files []*fsnode.File
FIPS bool
// Lock the root account in the deployment unless the user defined root
// user options in the build configuration.
LockRoot bool
}
func NewOSTreeDiskImage(commit ostree.SourceSpec) *OSTreeDiskImage {
func NewOSTreeDiskImageFromCommit(commit ostree.SourceSpec) *OSTreeDiskImage {
return &OSTreeDiskImage{
Base: NewBase("ostree-raw-image"),
CommitSource: commit,
CommitSource: &commit,
}
}
func NewOSTreeDiskImageFromContainer(container container.SourceSpec, ref string) *OSTreeDiskImage {
return &OSTreeDiskImage{
Base: NewBase("ostree-raw-image"),
ContainerSource: &container,
Ref: ref,
}
}
func baseRawOstreeImage(img *OSTreeDiskImage, m *manifest.Manifest, buildPipeline *manifest.Build) *manifest.RawOSTreeImage {
osPipeline := manifest.NewOSTreeDeployment(buildPipeline, m, img.CommitSource, img.OSName, img.Ignition, img.IgnitionPlatform, img.Platform)
var osPipeline *manifest.OSTreeDeployment
switch {
case img.CommitSource != nil:
osPipeline = manifest.NewOSTreeCommitDeployment(buildPipeline, m, img.CommitSource, img.OSName, img.Platform)
case img.ContainerSource != nil:
osPipeline = manifest.NewOSTreeContainerDeployment(buildPipeline, m, img.ContainerSource, img.Ref, img.OSName, img.Platform)
default:
panic("no content source defined for ostree image")
}
osPipeline.PartitionTable = img.PartitionTable
osPipeline.Remote = img.Remote
osPipeline.KernelOptionsAppend = img.KernelOptionsAppend
@ -69,6 +92,8 @@ func baseRawOstreeImage(img *OSTreeDiskImage, m *manifest.Manifest, buildPipelin
osPipeline.Directories = img.Directories
osPipeline.Files = img.Files
osPipeline.FIPS = img.FIPS
osPipeline.IgnitionPlatform = img.IgnitionPlatform
osPipeline.LockRoot = img.LockRoot
// other image types (e.g. live) pass the workload to the pipeline.
osPipeline.EnabledServices = img.Workload.GetServices()

View file

@ -9,6 +9,7 @@ import (
"github.com/osbuild/images/internal/fdo"
"github.com/osbuild/images/internal/ignition"
"github.com/osbuild/images/internal/workload"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/artifact"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/manifest"
@ -93,7 +94,7 @@ func (img *OSTreeSimplifiedInstaller) InstantiateManifest(m *manifest.Manifest,
coiPipeline.ExtraRepos = img.ExtraBasePackages.Repositories
coiPipeline.FDO = img.FDO
coiPipeline.Ignition = img.IgnitionEmbedded
coiPipeline.Biosdevname = (img.Platform.GetArch() == platform.ARCH_X86_64)
coiPipeline.Biosdevname = (img.Platform.GetArch() == arch.ARCH_X86_64)
coiPipeline.Variant = img.Variant
coiPipeline.AdditionalDracutModules = img.AdditionalDracutModules
@ -146,7 +147,7 @@ func (img *OSTreeSimplifiedInstaller) InstantiateManifest(m *manifest.Manifest,
}
// enable ISOLinux on x86_64 only
isoLinuxEnabled := img.Platform.GetArch() == platform.ARCH_X86_64
isoLinuxEnabled := img.Platform.GetArch() == arch.ARCH_X86_64
isoTreePipeline := manifest.NewCoreOSISOTree(buildPipeline, compressedImage, coiPipeline, bootTreePipeline)
isoTreePipeline.KernelOpts = kernelOpts

View file

@ -6,6 +6,7 @@ import (
"github.com/osbuild/images/internal/fsnode"
"github.com/osbuild/images/internal/users"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/container"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/ostree"
@ -107,7 +108,7 @@ func (p *AnacondaInstaller) anacondaBootPackageSet() []string {
}
switch p.platform.GetArch() {
case platform.ARCH_X86_64:
case arch.ARCH_X86_64:
packages = append(packages,
"grub2-efi-x64",
"grub2-efi-x64-cdboot",
@ -117,7 +118,7 @@ func (p *AnacondaInstaller) anacondaBootPackageSet() []string {
"syslinux",
"syslinux-nonlinux",
)
case platform.ARCH_AARCH64:
case arch.ARCH_AARCH64:
packages = append(packages,
"grub2-efi-aa64-cdboot",
"grub2-efi-aa64",

View file

@ -5,6 +5,7 @@ import (
"github.com/osbuild/images/internal/fdo"
"github.com/osbuild/images/internal/ignition"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/container"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/ostree"
@ -85,7 +86,7 @@ func (p *CoreOSInstaller) getBootPackages() []string {
// For Fedora, this will add a lot of duplicates, but we also add them here
// for RHEL and CentOS.
switch p.platform.GetArch() {
case platform.ARCH_X86_64:
case arch.ARCH_X86_64:
packages = append(packages,
"grub2-efi-x64",
"grub2-efi-x64-cdboot",
@ -95,7 +96,7 @@ func (p *CoreOSInstaller) getBootPackages() []string {
"syslinux",
"syslinux-nonlinux",
)
case platform.ARCH_AARCH64:
case arch.ARCH_AARCH64:
packages = append(packages,
"grub2-efi-aa64-cdboot",
"grub2-efi-aa64",

View file

@ -1,6 +1,7 @@
package manifest
import (
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
)
@ -33,11 +34,11 @@ func NewEFIBootTree(m *Manifest, buildPipeline *Build, product, version string)
func (p *EFIBootTree) serialize() osbuild.Pipeline {
pipeline := p.Base.serialize()
arch := p.Platform.GetArch().String()
a := p.Platform.GetArch().String()
var architectures []string
if arch == platform.ARCH_X86_64.String() {
if a == arch.ARCH_X86_64.String() {
architectures = []string{"X64"}
} else if arch == platform.ARCH_AARCH64.String() {
} else if a == arch.ARCH_AARCH64.String() {
architectures = []string{"AA64"}
} else {
panic("unsupported architecture")

View file

@ -11,6 +11,7 @@ import (
"github.com/osbuild/images/internal/shell"
"github.com/osbuild/images/internal/users"
"github.com/osbuild/images/internal/workload"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/container"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/osbuild"
@ -610,7 +611,7 @@ func (p *OS) serialize() osbuild.Pipeline {
var bootloader *osbuild.Stage
switch p.platform.GetArch() {
case platform.ARCH_S390X:
case arch.ARCH_S390X:
bootloader = osbuild.NewZiplStage(new(osbuild.ZiplStageOptions))
default:
if p.NoBLS {

View file

@ -1,6 +1,7 @@
package manifest
import (
"fmt"
"os"
"strings"
@ -24,12 +25,25 @@ type OSTreeDeployment struct {
OSVersion string
commitSource ostree.SourceSpec
ostreeSpecs []ostree.CommitSpec
// commitSource represents the source that will be used to retrieve the
// ostree commit for this pipeline.
commitSource *ostree.SourceSpec
// ostreeSpec is the resolved commit that will be deployed in this pipeline.
ostreeSpec *ostree.CommitSpec
// containerSource represents the source that will be used to retrieve the
// ostree native container for this pipeline.
containerSource *container.SourceSpec
// containerSpec is the resolved ostree native container that will be
// deployed in this pipeline.
containerSpec *container.Spec
SysrootReadOnly bool
osName string
ref string
KernelOptionsAppend []string
Keyboard string
@ -42,11 +56,9 @@ type OSTreeDeployment struct {
PartitionTable *disk.PartitionTable
// Whether ignition is in use or not
ignition bool
// Specifies the ignition platform to use
ignitionPlatform string
// Specifies the ignition platform to use.
// If empty, ignition is not enabled.
IgnitionPlatform string
Directories []*fsnode.Directory
Files []*fsnode.File
@ -55,16 +67,18 @@ type OSTreeDeployment struct {
DisabledServices []string
FIPS bool
// Lock the root account in the deployment unless the user defined root
// user options in the build configuration.
LockRoot bool
}
// NewOSTreeDeployment creates a pipeline for an ostree deployment from a
// NewOSTreeCommitDeployment creates a pipeline for an ostree deployment from a
// commit.
func NewOSTreeDeployment(buildPipeline *Build,
func NewOSTreeCommitDeployment(buildPipeline *Build,
m *Manifest,
commit ostree.SourceSpec,
commit *ostree.SourceSpec,
osName string,
ignition bool,
ignitionPlatform string,
platform platform.Platform) *OSTreeDeployment {
p := &OSTreeDeployment{
@ -72,8 +86,27 @@ func NewOSTreeDeployment(buildPipeline *Build,
commitSource: commit,
osName: osName,
platform: platform,
ignition: ignition,
ignitionPlatform: ignitionPlatform,
}
buildPipeline.addDependent(p)
m.addPipeline(p)
return p
}
// NewOSTreeDeployment creates a pipeline for an ostree deployment from a
// container
func NewOSTreeContainerDeployment(buildPipeline *Build,
m *Manifest,
container *container.SourceSpec,
ref string,
osName string,
platform platform.Platform) *OSTreeDeployment {
p := &OSTreeDeployment{
Base: NewBase(m, "ostree-deployment", buildPipeline),
containerSource: container,
osName: osName,
ref: ref,
platform: platform,
}
buildPipeline.addDependent(p)
m.addPipeline(p)
@ -88,89 +121,76 @@ func (p *OSTreeDeployment) getBuildPackages(Distro) []string {
}
func (p *OSTreeDeployment) getOSTreeCommits() []ostree.CommitSpec {
return p.ostreeSpecs
if p.ostreeSpec == nil {
return []ostree.CommitSpec{}
}
return []ostree.CommitSpec{*p.ostreeSpec}
}
func (p *OSTreeDeployment) getOSTreeCommitSources() []ostree.SourceSpec {
if p.commitSource == nil {
return []ostree.SourceSpec{}
}
return []ostree.SourceSpec{
p.commitSource,
*p.commitSource,
}
}
func (p *OSTreeDeployment) getContainerSpecs() []container.Spec {
if p.containerSpec == nil {
return []container.Spec{}
}
return []container.Spec{*p.containerSpec}
}
func (p *OSTreeDeployment) getContainerSources() []container.SourceSpec {
if p.containerSource == nil {
return []container.SourceSpec{}
}
return []container.SourceSpec{
*p.containerSource,
}
}
func (p *OSTreeDeployment) serializeStart(packages []rpmmd.PackageSpec, containers []container.Spec, commits []ostree.CommitSpec) {
if len(p.ostreeSpecs) > 0 {
if p.ostreeSpec != nil || p.containerSpec != nil {
panic("double call to serializeStart()")
}
if len(commits) != 1 {
panic("pipeline requires exactly one ostree commit")
switch {
case len(commits) == 1:
p.ostreeSpec = &commits[0]
case len(containers) == 1:
p.containerSpec = &containers[0]
default:
panic(fmt.Sprintf("pipeline requires exactly one ostree commit or one container (have commits: %v; containers: %v)", commits, containers))
}
p.ostreeSpecs = commits
}
func (p *OSTreeDeployment) serializeEnd() {
if len(p.ostreeSpecs) == 0 {
switch {
case p.ostreeSpec == nil && p.containerSpec == nil:
panic("serializeEnd() call when serialization not in progress")
case p.ostreeSpec != nil && p.containerSpec != nil:
panic("serializeEnd() multiple payload sources defined")
}
p.ostreeSpecs = nil
p.ostreeSpec = nil
p.containerSpec = nil
}
func (p *OSTreeDeployment) serialize() osbuild.Pipeline {
if len(p.ostreeSpecs) == 0 {
panic("serialization not started")
}
if len(p.ostreeSpecs) > 1 {
panic("multiple ostree commit specs found; this is a programming error")
}
commit := p.ostreeSpecs[0]
const repoPath = "/ostree/repo"
pipeline := p.Base.serialize()
pipeline.AddStage(osbuild.OSTreeInitFsStage())
func (p *OSTreeDeployment) doOSTreeSpec(pipeline *osbuild.Pipeline, repoPath string, kernelOpts []string) string {
commit := *p.ostreeSpec
ref := commit.Ref
pipeline.AddStage(osbuild.NewOSTreePullStage(
&osbuild.OSTreePullStageOptions{Repo: repoPath, Remote: p.Remote.Name},
osbuild.NewOstreePullStageInputs("org.osbuild.source", commit.Checksum, commit.Ref),
osbuild.NewOstreePullStageInputs("org.osbuild.source", commit.Checksum, ref),
))
pipeline.AddStage(osbuild.NewOSTreeOsInitStage(
&osbuild.OSTreeOsInitStageOptions{
OSName: p.osName,
},
))
pipeline.AddStage(osbuild.NewMkdirStage(&osbuild.MkdirStageOptions{
Paths: []osbuild.MkdirStagePath{
{
Path: "/boot/efi",
Mode: common.ToPtr(os.FileMode(0700)),
},
},
}))
kernelOpts := osbuild.GenImageKernelOptions(p.PartitionTable)
kernelOpts = append(kernelOpts, p.KernelOptionsAppend...)
if p.ignition {
if p.ignitionPlatform == "" {
panic("ignition is enabled but ignition platform ID is not set")
}
kernelOpts = append(kernelOpts,
"coreos.no_persist_ip", // users cannot add connections as we don't have a live iso, this prevents connections to bleed into the system from the ign initrd
"ignition.platform.id="+p.ignitionPlatform,
"$ignition_firstboot",
)
}
if p.FIPS {
kernelOpts = append(kernelOpts, osbuild.GenFIPSKernelOptions(p.PartitionTable)...)
p.Files = append(p.Files, osbuild.GenFIPSFiles()...)
}
pipeline.AddStage(osbuild.NewOSTreeDeployStage(
&osbuild.OSTreeDeployStageOptions{
OsName: p.osName,
Ref: commit.Ref,
Ref: ref,
Remote: p.Remote.Name,
Mounts: []string{"/boot", "/boot/efi"},
Rootfs: osbuild.Rootfs{
@ -200,11 +220,87 @@ func (p *OSTreeDeployment) serialize() osbuild.Pipeline {
&osbuild.OSTreeFillvarStageOptions{
Deployment: osbuild.OSTreeDeployment{
OSName: p.osName,
Ref: commit.Ref,
Ref: ref,
},
},
))
return ref
}
func (p *OSTreeDeployment) doOSTreeContainerSpec(pipeline *osbuild.Pipeline, repoPath string, kernelOpts []string) string {
cont := *p.containerSpec
ref := p.ref
options := &osbuild.OSTreeDeployContainerStageOptions{
OsName: p.osName,
KernelOpts: p.KernelOptionsAppend,
// NOTE: setting the target imgref to be the container source but
// we should make this configurable
TargetImgref: fmt.Sprintf("ostree-remote-registry:%s:%s", p.Remote.Name, p.containerSpec.Source),
Mounts: []string{"/boot", "/boot/efi"},
Rootfs: &osbuild.Rootfs{
Label: "root",
},
}
images := osbuild.NewContainersInputForSources([]container.Spec{cont})
pipeline.AddStage(osbuild.NewOSTreeDeployContainerStage(options, images))
return ref
}
func (p *OSTreeDeployment) serialize() osbuild.Pipeline {
switch {
case p.ostreeSpec == nil && p.containerSpec == nil:
panic("serialization not started")
case p.ostreeSpec != nil && p.containerSpec != nil:
panic("serialize() multiple payload sources defined")
}
const repoPath = "/ostree/repo"
pipeline := p.Base.serialize()
pipeline.AddStage(osbuild.OSTreeInitFsStage())
pipeline.AddStage(osbuild.NewOSTreeOsInitStage(
&osbuild.OSTreeOsInitStageOptions{
OSName: p.osName,
},
))
pipeline.AddStage(osbuild.NewMkdirStage(&osbuild.MkdirStageOptions{
Paths: []osbuild.MkdirStagePath{
{
Path: "/boot/efi",
Mode: common.ToPtr(os.FileMode(0700)),
},
},
}))
kernelOpts := osbuild.GenImageKernelOptions(p.PartitionTable)
kernelOpts = append(kernelOpts, p.KernelOptionsAppend...)
if p.IgnitionPlatform != "" {
kernelOpts = append(kernelOpts,
"ignition.platform.id="+p.IgnitionPlatform,
"$ignition_firstboot",
)
}
if p.FIPS {
kernelOpts = append(kernelOpts, osbuild.GenFIPSKernelOptions(p.PartitionTable)...)
p.Files = append(p.Files, osbuild.GenFIPSFiles()...)
}
var ref string
switch {
case p.ostreeSpec != nil:
ref = p.doOSTreeSpec(&pipeline, repoPath, kernelOpts)
case p.containerSpec != nil:
ref = p.doOSTreeContainerSpec(&pipeline, repoPath, kernelOpts)
default:
// this should be caught at the top of the function, but let's check
// again to avoid bugs from bad refactoring.
panic("no content source defined for ostree deployment")
}
configStage := osbuild.NewOSTreeConfigStage(
&osbuild.OSTreeConfigStageOptions{
Repo: repoPath,
@ -216,12 +312,12 @@ func (p *OSTreeDeployment) serialize() osbuild.Pipeline {
},
},
)
configStage.MountOSTree(p.osName, commit.Ref, 0)
configStage.MountOSTree(p.osName, ref, 0)
pipeline.AddStage(configStage)
fstabOptions := osbuild.NewFSTabStageOptions(p.PartitionTable)
fstabStage := osbuild.NewFSTabStage(fstabOptions)
fstabStage.MountOSTree(p.osName, commit.Ref, 0)
fstabStage.MountOSTree(p.osName, ref, 0)
pipeline.AddStage(fstabStage)
if len(p.Users) > 0 {
@ -229,17 +325,17 @@ func (p *OSTreeDeployment) serialize() osbuild.Pipeline {
if err != nil {
panic("password encryption failed")
}
usersStage.MountOSTree(p.osName, commit.Ref, 0)
usersStage.MountOSTree(p.osName, ref, 0)
pipeline.AddStage(usersStage)
}
if len(p.Groups) > 0 {
grpStage := osbuild.GenGroupsStage(p.Groups)
grpStage.MountOSTree(p.osName, commit.Ref, 0)
grpStage.MountOSTree(p.osName, ref, 0)
pipeline.AddStage(grpStage)
}
if p.ignition {
if p.IgnitionPlatform != "" {
pipeline.AddStage(osbuild.NewIgnitionStage(&osbuild.IgnitionStageOptions{
// This is a workaround to make the systemd believe it's firstboot when ignition runs on real firstboot.
// Right now, since we ship /etc/machine-id, systemd thinks it's not firstboot and ignition depends on it
@ -260,7 +356,7 @@ func (p *OSTreeDeployment) serialize() osbuild.Pipeline {
// creating a preset file.
if len(p.EnabledServices) != 0 || len(p.DisabledServices) != 0 {
presetsStage := osbuild.GenServicesPresetStage(p.EnabledServices, p.DisabledServices)
presetsStage.MountOSTree(p.osName, commit.Ref, 0)
presetsStage.MountOSTree(p.osName, ref, 0)
pipeline.AddStage(presetsStage)
}
}
@ -274,7 +370,7 @@ func (p *OSTreeDeployment) serialize() osbuild.Pipeline {
}
}
if !hasRoot {
if p.LockRoot && !hasRoot {
userOptions := &osbuild.UsersStageOptions{
Users: map[string]osbuild.UsersStageOptionsUser{
"root": {
@ -283,7 +379,7 @@ func (p *OSTreeDeployment) serialize() osbuild.Pipeline {
},
}
rootLockStage := osbuild.NewUsersStage(userOptions)
rootLockStage.MountOSTree(p.osName, commit.Ref, 0)
rootLockStage.MountOSTree(p.osName, ref, 0)
pipeline.AddStage(rootLockStage)
}
@ -292,7 +388,7 @@ func (p *OSTreeDeployment) serialize() osbuild.Pipeline {
Keymap: p.Keyboard,
}
keymapStage := osbuild.NewKeymapStage(options)
keymapStage.MountOSTree(p.osName, commit.Ref, 0)
keymapStage.MountOSTree(p.osName, ref, 0)
pipeline.AddStage(keymapStage)
}
@ -301,13 +397,13 @@ func (p *OSTreeDeployment) serialize() osbuild.Pipeline {
Language: p.Locale,
}
localeStage := osbuild.NewLocaleStage(options)
localeStage.MountOSTree(p.osName, commit.Ref, 0)
localeStage.MountOSTree(p.osName, ref, 0)
pipeline.AddStage(localeStage)
}
if p.FIPS {
for _, stage := range osbuild.GenFIPSStages() {
stage.MountOSTree(p.osName, commit.Ref, 0)
stage.MountOSTree(p.osName, ref, 0)
pipeline.AddStage(stage)
}
}
@ -319,21 +415,21 @@ func (p *OSTreeDeployment) serialize() osbuild.Pipeline {
p.platform.GetBIOSPlatform(),
p.platform.GetUEFIVendor(), true)
grubOptions.Greenboot = true
grubOptions.Ignition = p.ignition
grubOptions.Ignition = p.IgnitionPlatform != ""
grubOptions.Config = &osbuild.GRUB2Config{
Default: "saved",
Timeout: 1,
TerminalOutput: []string{"console"},
}
bootloader := osbuild.NewGRUB2Stage(grubOptions)
bootloader.MountOSTree(p.osName, commit.Ref, 0)
bootloader.MountOSTree(p.osName, ref, 0)
pipeline.AddStage(bootloader)
// First create custom directories, because some of the files may depend on them
if len(p.Directories) > 0 {
dirStages := osbuild.GenDirectoryNodesStages(p.Directories)
for _, stage := range dirStages {
stage.MountOSTree(p.osName, commit.Ref, 0)
stage.MountOSTree(p.osName, ref, 0)
}
pipeline.AddStages(dirStages...)
}
@ -341,7 +437,7 @@ func (p *OSTreeDeployment) serialize() osbuild.Pipeline {
if len(p.Files) > 0 {
fileStages := osbuild.GenFileNodesStages(p.Files)
for _, stage := range fileStages {
stage.MountOSTree(p.osName, commit.Ref, 0)
stage.MountOSTree(p.osName, ref, 0)
}
pipeline.AddStages(fileStages...)
}
@ -351,7 +447,7 @@ func (p *OSTreeDeployment) serialize() osbuild.Pipeline {
EnabledServices: p.EnabledServices,
DisabledServices: p.DisabledServices,
})
systemdStage.MountOSTree(p.osName, commit.Ref, 0)
systemdStage.MountOSTree(p.osName, ref, 0)
pipeline.AddStage(systemdStage)
}
@ -359,7 +455,7 @@ func (p *OSTreeDeployment) serialize() osbuild.Pipeline {
&osbuild.OSTreeSelinuxStageOptions{
Deployment: osbuild.OSTreeDeployment{
OSName: p.osName,
Ref: commit.Ref,
Ref: ref,
},
},
))

View file

@ -0,0 +1,57 @@
package manifest
import (
"github.com/osbuild/images/pkg/artifact"
"github.com/osbuild/images/pkg/osbuild"
)
type OSTreeEncapsulate struct {
Base
filename string
inputPipeline Pipeline
}
func NewOSTreeEncapsulate(buildPipeline *Build, inputPipeline Pipeline, pipelinename string) *OSTreeEncapsulate {
p := &OSTreeEncapsulate{
Base: NewBase(inputPipeline.Manifest(), pipelinename, buildPipeline),
inputPipeline: inputPipeline,
filename: "bootable-container.tar",
}
buildPipeline.addDependent(p)
inputPipeline.Manifest().addPipeline(p)
return p
}
func (p OSTreeEncapsulate) Filename() string {
return p.filename
}
func (p *OSTreeEncapsulate) SetFilename(filename string) {
p.filename = filename
}
func (p *OSTreeEncapsulate) serialize() osbuild.Pipeline {
pipeline := p.Base.serialize()
encOptions := &osbuild.OSTreeEncapsulateStageOptions{
Filename: p.Filename(),
}
encStage := osbuild.NewOSTreeEncapsulateStage(encOptions, p.inputPipeline.Name())
pipeline.AddStage(encStage)
return pipeline
}
func (p *OSTreeEncapsulate) getBuildPackages(Distro) []string {
return []string{
"rpm-ostree",
"python3-pyyaml",
}
}
func (p *OSTreeEncapsulate) Export() *artifact.Artifact {
p.Base.export = true
mimeType := "application/x-tar"
return artifact.New(p.Name(), p.Filename(), &mimeType)
}

View file

@ -1,9 +1,9 @@
package manifest
import (
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/artifact"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
)
// A RawImage represents a raw image file which can be booted in a
@ -65,7 +65,7 @@ func (p *RawImage) serialize() osbuild.Pipeline {
}
switch p.treePipeline.platform.GetArch() {
case platform.ARCH_S390X:
case arch.ARCH_S390X:
loopback := osbuild.NewLoopbackDevice(&osbuild.LoopbackDeviceOptions{Filename: p.Filename()})
pipeline.AddStage(osbuild.NewZiplInstStage(osbuild.NewZiplInstStageOptions(p.treePipeline.kernelVer, pt), loopback, copyDevices, copyMounts))
default:

View file

@ -76,7 +76,7 @@ func (p *RawOSTreeImage) serialize() osbuild.Pipeline {
_, bootCopyDevices, bootCopyMounts := osbuild.GenCopyFSTreeOptions(inputName, p.treePipeline.Name(), p.Filename(), pt)
bootCopyOptions := &osbuild.CopyStageOptions{}
commit := p.treePipeline.ostreeSpecs[0]
commit := p.treePipeline.ostreeSpec
commitChecksum := commit.Checksum
bootCopyInputs := osbuild.OSTreeCheckoutInputs{

View file

@ -6,6 +6,7 @@ import (
type ContainersInputReferences interface {
isContainersInputReferences()
Len() int
}
type ContainersInputSourceRef struct {
@ -16,6 +17,10 @@ type ContainersInputSourceMap map[string]ContainersInputSourceRef
func (ContainersInputSourceMap) isContainersInputReferences() {}
func (cism ContainersInputSourceMap) Len() int {
return len(cism)
}
type ContainersInput struct {
inputCommon
References ContainersInputReferences `json:"references"`

View file

@ -0,0 +1,75 @@
package osbuild
import (
"fmt"
"regexp"
)
// adapted from the osbuild stage schema - keep in sync if it ever changes
const ostreeContainerTargetImgrefRegex = "^(ostree-remote-registry|ostree-image-signed|ostree-unverified-registry):.*$"
// Options for the org.osbuild.ostree.deploy.container stage.
type OSTreeDeployContainerStageOptions struct {
// Name of the stateroot to be used in the deployment
OsName string `json:"osname"`
// Additional kernel command line options
KernelOpts []string `json:"kernel_opts,omitempty"`
// Image ref used as the source of truth for updates
TargetImgref string `json:"target_imgref"`
// Identifier to locate the root file system (uuid or label)
Rootfs *Rootfs `json:"rootfs,omitempty"`
// Mount points of the final file system
Mounts []string `json:"mounts,omitempty"`
}
func (OSTreeDeployContainerStageOptions) isStageOptions() {}
func (options OSTreeDeployContainerStageOptions) validate() error {
if options.OsName == "" {
return fmt.Errorf("osname is required")
}
exp := regexp.MustCompile(ostreeContainerTargetImgrefRegex)
if !exp.MatchString(options.TargetImgref) {
return fmt.Errorf("'target_imgref' %q doesn't conform to schema (%s)", options.TargetImgref, exp.String())
}
return nil
}
type OSTreeDeployContainerInputs struct {
Images ContainersInput `json:"images"`
}
func (OSTreeDeployContainerInputs) isStageInputs() {}
func (inputs OSTreeDeployContainerInputs) validate() error {
if inputs.Images.References == nil {
return fmt.Errorf("stage requires exactly 1 input container (got nil References)")
}
if ncontainers := inputs.Images.References.Len(); ncontainers != 1 {
return fmt.Errorf("stage requires exactly 1 input container (got %d)", ncontainers)
}
return nil
}
func NewOSTreeDeployContainerStage(options *OSTreeDeployContainerStageOptions, images ContainersInput) *Stage {
if err := options.validate(); err != nil {
panic(err)
}
inputs := OSTreeDeployContainerInputs{
Images: images,
}
if err := inputs.validate(); err != nil {
panic(err)
}
return &Stage{
Type: "org.osbuild.ostree.deploy.container",
Options: options,
Inputs: inputs,
}
}

View file

@ -0,0 +1,53 @@
package osbuild
type OSTreeEncapsulateStageOptions struct {
// Resulting image filename
Filename string `json:"filename"`
Cmd []string `json:"cmd,omitempty"`
// Propagate an OSTree commit metadata key to container label
CopyMeta []string `json:"copymeta,omitempty"`
// The encapsulated container format version (default 1)
FormatVersion *int `json:"format_version,omitempty"`
// Additional labels for the container
Labels []string `json:"labels,omitempty"`
// Max number of container image layers
MaxLayers *int `json:"max_layers,omitempty"`
}
func (OSTreeEncapsulateStageOptions) isStageOptions() {}
type OSTreeEncapsulateStageInput struct {
inputCommon
References []string `json:"references"`
}
func (OSTreeEncapsulateStageInput) isStageInput() {}
type OSTreeEncapsulateStageInputs struct {
Commit *OSTreeEncapsulateStageInput `json:"commit"`
}
func (OSTreeEncapsulateStageInputs) isStageInputs() {}
// NewOSTreeEncapsulateStage creates a new org.osbuild.ostree.encapsulate
// stage that reads its OSTree commit from the named input pipeline.
func NewOSTreeEncapsulateStage(options *OSTreeEncapsulateStageOptions, inputPipeline string) *Stage {
	stage := &Stage{
		Type:    "org.osbuild.ostree.encapsulate",
		Options: options,
	}
	stage.Inputs = NewOSTreeEncapsulateStageInputs(InputOriginPipeline, inputPipeline)
	return stage
}
// NewOSTreeEncapsulateStageInputs builds the inputs for an ostree encapsulate
// stage, referencing the commit produced by the given pipeline with the given
// origin.
func NewOSTreeEncapsulateStageInputs(origin, pipeline string) *OSTreeEncapsulateStageInputs {
	commit := new(OSTreeEncapsulateStageInput)
	commit.Type = "org.osbuild.ostree"
	commit.Origin = origin
	// Pipeline inputs are addressed by name.
	commit.References = []string{"name:" + pipeline}
	return &OSTreeEncapsulateStageInputs{Commit: commit}
}

View file

@ -1,12 +1,16 @@
package platform
import (
"github.com/osbuild/images/pkg/arch"
)
type Aarch64 struct {
BasePlatform
UEFIVendor string
}
func (p *Aarch64) GetArch() Arch {
return ARCH_AARCH64
func (p *Aarch64) GetArch() arch.Arch {
return arch.ARCH_AARCH64
}
func (p *Aarch64) GetUEFIVendor() string {
@ -34,8 +38,8 @@ type Aarch64_Fedora struct {
BootFiles [][2]string
}
func (p *Aarch64_Fedora) GetArch() Arch {
return ARCH_AARCH64
func (p *Aarch64_Fedora) GetArch() arch.Arch {
return arch.ARCH_AARCH64
}
func (p *Aarch64_Fedora) GetUEFIVendor() string {

View file

@ -1,15 +1,11 @@
package platform
type Arch uint64
type ImageFormat uint64
const ( // architecture enum
ARCH_AARCH64 Arch = iota
ARCH_PPC64LE
ARCH_S390X
ARCH_X86_64
import (
"github.com/osbuild/images/pkg/arch"
)
type ImageFormat uint64
const ( // image format enum
FORMAT_UNSET ImageFormat = iota
FORMAT_RAW
@ -21,21 +17,6 @@ const ( // image format enum
FORMAT_OVA
)
// String returns the canonical name of the architecture. It panics on any
// value outside the defined ARCH_* enum (a programmer error).
func (a Arch) String() string {
	switch a {
	case ARCH_X86_64:
		return "x86_64"
	case ARCH_AARCH64:
		return "aarch64"
	case ARCH_PPC64LE:
		return "ppc64le"
	case ARCH_S390X:
		return "s390x"
	default:
		panic("invalid architecture")
	}
}
func (f ImageFormat) String() string {
switch f {
case FORMAT_RAW:
@ -58,7 +39,7 @@ func (f ImageFormat) String() string {
}
type Platform interface {
GetArch() Arch
GetArch() arch.Arch
GetImageFormat() ImageFormat
GetQCOW2Compat() string
GetBIOSPlatform() string

View file

@ -1,12 +1,16 @@
package platform
import (
"github.com/osbuild/images/pkg/arch"
)
type PPC64LE struct {
BasePlatform
BIOS bool
}
func (p *PPC64LE) GetArch() Arch {
return ARCH_PPC64LE
func (p *PPC64LE) GetArch() arch.Arch {
return arch.ARCH_PPC64LE
}
func (p *PPC64LE) GetBIOSPlatform() string {

View file

@ -1,12 +1,16 @@
package platform
import (
"github.com/osbuild/images/pkg/arch"
)
type S390X struct {
BasePlatform
Zipl bool
}
func (p *S390X) GetArch() Arch {
return ARCH_S390X
func (p *S390X) GetArch() arch.Arch {
return arch.ARCH_S390X
}
func (p *S390X) GetZiplSupport() bool {

View file

@ -1,5 +1,9 @@
package platform
import (
"github.com/osbuild/images/pkg/arch"
)
type X86BootLoader uint64
type X86 struct {
@ -8,8 +12,8 @@ type X86 struct {
UEFIVendor string
}
func (p *X86) GetArch() Arch {
return ARCH_X86_64
func (p *X86) GetArch() arch.Arch {
return arch.ARCH_X86_64
}
func (p *X86) GetBIOSPlatform() string {

View file

@ -137,25 +137,6 @@ func (ps PackageSet) Append(other PackageSet) PackageSet {
return ps
}
// ResolveConflictsExclude resolves conflicting Include and Exclude package
// lists by dropping every package listed in Exclude from the Include list.
// The Exclude list itself is left unchanged; the modified set is returned.
func (ps PackageSet) ResolveConflictsExclude() PackageSet {
	excluded := make(map[string]struct{}, len(ps.Exclude))
	for _, name := range ps.Exclude {
		excluded[name] = struct{}{}
	}
	// Preserve the original behavior of returning a non-nil (possibly empty)
	// Include slice.
	kept := []string{}
	for _, name := range ps.Include {
		if _, skip := excluded[name]; !skip {
			kept = append(kept, name)
		}
	}
	ps.Include = kept
	return ps
}
// TODO: the public API of this package should not be reused for serialization.
type PackageSpec struct {
Name string `json:"name"`

View file

@ -733,13 +733,14 @@ func (s *String) ReadOptionalASN1OctetString(out *[]byte, outPresent *bool, tag
return true
}
// ReadOptionalASN1Boolean sets *out to the value of the next ASN.1 BOOLEAN or,
// if the next bytes are not an ASN.1 BOOLEAN, to the value of defaultValue.
// It reports whether the operation was successful.
func (s *String) ReadOptionalASN1Boolean(out *bool, defaultValue bool) bool {
// ReadOptionalASN1Boolean attempts to read an optional ASN.1 BOOLEAN
// explicitly tagged with tag into out and advances. If no element with a
// matching tag is present, it sets "out" to defaultValue instead. It reports
// whether the read was successful.
func (s *String) ReadOptionalASN1Boolean(out *bool, tag asn1.Tag, defaultValue bool) bool {
var present bool
var child String
if !s.ReadOptionalASN1(&child, &present, asn1.BOOLEAN) {
if !s.ReadOptionalASN1(&child, &present, tag) {
return false
}
@ -748,7 +749,7 @@ func (s *String) ReadOptionalASN1Boolean(out *bool, defaultValue bool) bool {
return true
}
return s.ReadASN1Boolean(out)
return child.ReadASN1Boolean(out)
}
func (s *String) readASN1(out *String, outTag *asn1.Tag, skipHeader bool) bool {

View file

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build dragonfly || freebsd || linux || netbsd || openbsd
//go:build dragonfly || freebsd || linux || netbsd
package unix

View file

@ -231,3 +231,8 @@ func IoctlLoopGetStatus64(fd int) (*LoopInfo64, error) {
func IoctlLoopSetStatus64(fd int, value *LoopInfo64) error {
return ioctlPtr(fd, LOOP_SET_STATUS64, unsafe.Pointer(value))
}
// IoctlLoopConfigure configures all loop device parameters in a single step
// by issuing the LOOP_CONFIGURE ioctl on the loop device fd.
func IoctlLoopConfigure(fd int, value *LoopConfig) error {
	return ioctlPtr(fd, LOOP_CONFIGURE, unsafe.Pointer(value))
}

View file

@ -519,6 +519,7 @@ ccflags="$@"
$2 ~ /^LOCK_(SH|EX|NB|UN)$/ ||
$2 ~ /^LO_(KEY|NAME)_SIZE$/ ||
$2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ ||
$2 == "LOOP_CONFIGURE" ||
$2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ ||
$2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ ||
$2 ~ /^NFC_.*_(MAX)?SIZE$/ ||
@ -560,7 +561,7 @@ ccflags="$@"
$2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ ||
$2 ~ /^PRIO_(PROCESS|PGRP|USER)/ ||
$2 ~ /^CLONE_[A-Z_]+/ ||
$2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+)$/ &&
$2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+|BPF_F_LINK)$/ &&
$2 ~ /^(BPF|DLT)_/ ||
$2 ~ /^AUDIT_/ ||
$2 ~ /^(CLOCK|TIMER)_/ ||

View file

@ -316,7 +316,7 @@ func GetsockoptString(fd, level, opt int) (string, error) {
if err != nil {
return "", err
}
return string(buf[:vallen-1]), nil
return ByteSliceToString(buf[:vallen]), nil
}
//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)

View file

@ -61,16 +61,24 @@ func FanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname string) (
}
//sys fchmodat(dirfd int, path string, mode uint32) (err error)
//sys fchmodat2(dirfd int, path string, mode uint32, flags int) (err error)
func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
// Linux fchmodat doesn't support the flags parameter. Mimick glibc's behavior
// and check the flags. Otherwise the mode would be applied to the symlink
// destination which is not what the user expects.
if flags&^AT_SYMLINK_NOFOLLOW != 0 {
func Fchmodat(dirfd int, path string, mode uint32, flags int) error {
// Linux fchmodat doesn't support the flags parameter, but fchmodat2 does.
// Try fchmodat2 if flags are specified.
if flags != 0 {
err := fchmodat2(dirfd, path, mode, flags)
if err == ENOSYS {
// fchmodat2 isn't available. If the flags are known to be valid,
// return EOPNOTSUPP to indicate that fchmodat doesn't support them.
if flags&^(AT_SYMLINK_NOFOLLOW|AT_EMPTY_PATH) != 0 {
return EINVAL
} else if flags&AT_SYMLINK_NOFOLLOW != 0 {
} else if flags&(AT_SYMLINK_NOFOLLOW|AT_EMPTY_PATH) != 0 {
return EOPNOTSUPP
}
}
return err
}
return fchmodat(dirfd, path, mode)
}
@ -1302,7 +1310,7 @@ func GetsockoptString(fd, level, opt int) (string, error) {
return "", err
}
}
return string(buf[:vallen-1]), nil
return ByteSliceToString(buf[:vallen]), nil
}
func GetsockoptTpacketStats(fd, level, opt int) (*TpacketStats, error) {

View file

@ -166,6 +166,20 @@ func Getresgid() (rgid, egid, sgid int) {
//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL
//sys fcntl(fd int, cmd int, arg int) (n int, err error)
//sys fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) = SYS_FCNTL
// FcntlInt performs a fcntl syscall on fd with the provided command and
// argument, returning fcntl's integer result.
func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
	return fcntl(int(fd), cmd, arg)
}
// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW
// command, passing lk as the lock description. The integer result of fcntl is
// discarded; only the error is reported.
func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
	_, err := fcntlPtr(int(fd), cmd, unsafe.Pointer(lk))
	return err
}
//sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error)
func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {

View file

@ -158,7 +158,7 @@ func GetsockoptString(fd, level, opt int) (string, error) {
if err != nil {
return "", err
}
return string(buf[:vallen-1]), nil
return ByteSliceToString(buf[:vallen]), nil
}
const ImplementsGetwd = true

View file

@ -1104,7 +1104,7 @@ func GetsockoptString(fd, level, opt int) (string, error) {
return "", err
}
return string(buf[:vallen-1]), nil
return ByteSliceToString(buf[:vallen]), nil
}
func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) {

View file

@ -486,7 +486,6 @@ const (
BPF_F_ANY_ALIGNMENT = 0x2
BPF_F_BEFORE = 0x8
BPF_F_ID = 0x20
BPF_F_LINK = 0x2000
BPF_F_NETFILTER_IP_DEFRAG = 0x1
BPF_F_QUERY_EFFECTIVE = 0x1
BPF_F_REPLACE = 0x4
@ -1802,6 +1801,7 @@ const (
LOCK_SH = 0x1
LOCK_UN = 0x8
LOOP_CLR_FD = 0x4c01
LOOP_CONFIGURE = 0x4c0a
LOOP_CTL_ADD = 0x4c80
LOOP_CTL_GET_FREE = 0x4c82
LOOP_CTL_REMOVE = 0x4c81

View file

@ -37,6 +37,21 @@ func fchmodat(dirfd int, path string, mode uint32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_FCHMODAT2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ioctl(fd int, req uint, arg uintptr) (err error) {
_, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
if e1 != 0 {

View file

@ -584,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func fcntl(fd int, cmd int, arg int) (n int, err error) {
r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
var libc_fcntl_trampoline_addr uintptr
//go:cgo_import_dynamic libc_fcntl fcntl "libc.so"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) {
r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
n = int(r0)

View file

@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4
DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB)
TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fcntl(SB)
GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $4
DATA ·libc_fcntl_trampoline_addr(SB)/4, $libc_fcntl_trampoline<>(SB)
TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ppoll(SB)
GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4

View file

@ -584,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func fcntl(fd int, cmd int, arg int) (n int, err error) {
r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
var libc_fcntl_trampoline_addr uintptr
//go:cgo_import_dynamic libc_fcntl fcntl "libc.so"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) {
r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
n = int(r0)

View file

@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fcntl(SB)
GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8
DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB)
TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ppoll(SB)
GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8

View file

@ -584,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func fcntl(fd int, cmd int, arg int) (n int, err error) {
r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
var libc_fcntl_trampoline_addr uintptr
//go:cgo_import_dynamic libc_fcntl fcntl "libc.so"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) {
r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
n = int(r0)

View file

@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4
DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB)
TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fcntl(SB)
GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $4
DATA ·libc_fcntl_trampoline_addr(SB)/4, $libc_fcntl_trampoline<>(SB)
TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ppoll(SB)
GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4

View file

@ -584,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func fcntl(fd int, cmd int, arg int) (n int, err error) {
r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
var libc_fcntl_trampoline_addr uintptr
//go:cgo_import_dynamic libc_fcntl fcntl "libc.so"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) {
r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
n = int(r0)

View file

@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fcntl(SB)
GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8
DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB)
TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ppoll(SB)
GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8

View file

@ -584,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func fcntl(fd int, cmd int, arg int) (n int, err error) {
r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
var libc_fcntl_trampoline_addr uintptr
//go:cgo_import_dynamic libc_fcntl fcntl "libc.so"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) {
r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
n = int(r0)

View file

@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fcntl(SB)
GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8
DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB)
TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ppoll(SB)
GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8

View file

@ -584,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func fcntl(fd int, cmd int, arg int) (n int, err error) {
r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
var libc_fcntl_trampoline_addr uintptr
//go:cgo_import_dynamic libc_fcntl fcntl "libc.so"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) {
r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
n = int(r0)

View file

@ -213,6 +213,12 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0
CALL libc_fcntl(SB)
RET
GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8
DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB)
TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0
CALL libc_ppoll(SB)
RET

View file

@ -584,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func fcntl(fd int, cmd int, arg int) (n int, err error) {
r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
var libc_fcntl_trampoline_addr uintptr
//go:cgo_import_dynamic libc_fcntl fcntl "libc.so"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) {
r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
n = int(r0)

View file

@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fcntl(SB)
GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8
DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB)
TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ppoll(SB)
GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8

View file

@ -2671,6 +2671,7 @@ const (
BPF_PROG_TYPE_LSM = 0x1d
BPF_PROG_TYPE_SK_LOOKUP = 0x1e
BPF_PROG_TYPE_SYSCALL = 0x1f
BPF_PROG_TYPE_NETFILTER = 0x20
BPF_CGROUP_INET_INGRESS = 0x0
BPF_CGROUP_INET_EGRESS = 0x1
BPF_CGROUP_INET_SOCK_CREATE = 0x2
@ -2715,6 +2716,11 @@ const (
BPF_PERF_EVENT = 0x29
BPF_TRACE_KPROBE_MULTI = 0x2a
BPF_LSM_CGROUP = 0x2b
BPF_STRUCT_OPS = 0x2c
BPF_NETFILTER = 0x2d
BPF_TCX_INGRESS = 0x2e
BPF_TCX_EGRESS = 0x2f
BPF_TRACE_UPROBE_MULTI = 0x30
BPF_LINK_TYPE_UNSPEC = 0x0
BPF_LINK_TYPE_RAW_TRACEPOINT = 0x1
BPF_LINK_TYPE_TRACING = 0x2
@ -2725,6 +2731,18 @@ const (
BPF_LINK_TYPE_PERF_EVENT = 0x7
BPF_LINK_TYPE_KPROBE_MULTI = 0x8
BPF_LINK_TYPE_STRUCT_OPS = 0x9
BPF_LINK_TYPE_NETFILTER = 0xa
BPF_LINK_TYPE_TCX = 0xb
BPF_LINK_TYPE_UPROBE_MULTI = 0xc
BPF_PERF_EVENT_UNSPEC = 0x0
BPF_PERF_EVENT_UPROBE = 0x1
BPF_PERF_EVENT_URETPROBE = 0x2
BPF_PERF_EVENT_KPROBE = 0x3
BPF_PERF_EVENT_KRETPROBE = 0x4
BPF_PERF_EVENT_TRACEPOINT = 0x5
BPF_PERF_EVENT_EVENT = 0x6
BPF_F_KPROBE_MULTI_RETURN = 0x1
BPF_F_UPROBE_MULTI_RETURN = 0x1
BPF_ANY = 0x0
BPF_NOEXIST = 0x1
BPF_EXIST = 0x2
@ -2742,6 +2760,8 @@ const (
BPF_F_MMAPABLE = 0x400
BPF_F_PRESERVE_ELEMS = 0x800
BPF_F_INNER_MAP = 0x1000
BPF_F_LINK = 0x2000
BPF_F_PATH_FD = 0x4000
BPF_STATS_RUN_TIME = 0x0
BPF_STACK_BUILD_ID_EMPTY = 0x0
BPF_STACK_BUILD_ID_VALID = 0x1
@ -2762,6 +2782,7 @@ const (
BPF_F_ZERO_CSUM_TX = 0x2
BPF_F_DONT_FRAGMENT = 0x4
BPF_F_SEQ_NUMBER = 0x8
BPF_F_NO_TUNNEL_KEY = 0x10
BPF_F_TUNINFO_FLAGS = 0x10
BPF_F_INDEX_MASK = 0xffffffff
BPF_F_CURRENT_CPU = 0xffffffff
@ -2778,6 +2799,8 @@ const (
BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10
BPF_F_ADJ_ROOM_NO_CSUM_RESET = 0x20
BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 0x40
BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = 0x80
BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = 0x100
BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff
BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38
BPF_F_SYSCTL_BASE_NAME = 0x1
@ -2866,6 +2889,8 @@ const (
BPF_DEVCG_DEV_CHAR = 0x2
BPF_FIB_LOOKUP_DIRECT = 0x1
BPF_FIB_LOOKUP_OUTPUT = 0x2
BPF_FIB_LOOKUP_SKIP_NEIGH = 0x4
BPF_FIB_LOOKUP_TBID = 0x8
BPF_FIB_LKUP_RET_SUCCESS = 0x0
BPF_FIB_LKUP_RET_BLACKHOLE = 0x1
BPF_FIB_LKUP_RET_UNREACHABLE = 0x2
@ -2901,6 +2926,7 @@ const (
BPF_CORE_ENUMVAL_EXISTS = 0xa
BPF_CORE_ENUMVAL_VALUE = 0xb
BPF_CORE_TYPE_MATCHES = 0xc
BPF_F_TIMER_ABS = 0x1
)
const (
@ -2979,6 +3005,12 @@ type LoopInfo64 struct {
Encrypt_key [32]uint8
Init [2]uint64
}
// LoopConfig is the argument structure passed to the LOOP_CONFIGURE ioctl
// (see IoctlLoopConfigure). The trailing blank field is reserved padding
// matching the kernel's layout.
type LoopConfig struct {
	Fd   uint32
	Size uint32
	Info LoopInfo64
	_    [8]uint64
}
type TIPCSocketAddr struct {
Ref uint32

View file

@ -155,6 +155,8 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) = kernel32.GetModuleFileNameW
//sys GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) = kernel32.GetModuleHandleExW
//sys SetDefaultDllDirectories(directoryFlags uint32) (err error)
//sys AddDllDirectory(path *uint16) (cookie uintptr, err error) = kernel32.AddDllDirectory
//sys RemoveDllDirectory(cookie uintptr) (err error) = kernel32.RemoveDllDirectory
//sys SetDllDirectory(path string) (err error) = kernel32.SetDllDirectoryW
//sys GetVersion() (ver uint32, err error)
//sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW

View file

@ -184,6 +184,7 @@ var (
procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo")
procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx")
procGetIfEntry = modiphlpapi.NewProc("GetIfEntry")
procAddDllDirectory = modkernel32.NewProc("AddDllDirectory")
procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject")
procCancelIo = modkernel32.NewProc("CancelIo")
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
@ -330,6 +331,7 @@ var (
procReadProcessMemory = modkernel32.NewProc("ReadProcessMemory")
procReleaseMutex = modkernel32.NewProc("ReleaseMutex")
procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW")
procRemoveDllDirectory = modkernel32.NewProc("RemoveDllDirectory")
procResetEvent = modkernel32.NewProc("ResetEvent")
procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole")
procResumeThread = modkernel32.NewProc("ResumeThread")
@ -1605,6 +1607,15 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) {
return
}
func AddDllDirectory(path *uint16) (cookie uintptr, err error) {
r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
cookie = uintptr(r0)
if cookie == 0 {
err = errnoErr(e1)
}
return
}
func AssignProcessToJobObject(job Handle, process Handle) (err error) {
r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0)
if r1 == 0 {
@ -2879,6 +2890,14 @@ func RemoveDirectory(path *uint16) (err error) {
return
}
func RemoveDllDirectory(cookie uintptr) (err error) {
r1, _, e1 := syscall.Syscall(procRemoveDllDirectory.Addr(), 1, uintptr(cookie), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func ResetEvent(event Handle) (err error) {
r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0)
if r1 == 0 {

View file

@ -52,6 +52,8 @@ func Every(interval time.Duration) Limit {
// or its associated context.Context is canceled.
//
// The methods AllowN, ReserveN, and WaitN consume n tokens.
//
// Limiter is safe for simultaneous use by multiple goroutines.
type Limiter struct {
mu sync.Mutex
limit Limit

View file

@ -55,6 +55,8 @@ type DialSettings struct {
EnableDirectPathXds bool
EnableNewAuthLibrary bool
AllowNonDefaultServiceAccount bool
UniverseDomain string
DefaultUniverseDomain string
// Google API system parameters. For more information please read:
// https://cloud.google.com/apis/docs/system-parameters

View file

@ -5,4 +5,4 @@
package internal
// Version is the current tagged release of the library.
const Version = "0.151.0"
const Version = "0.153.0"

Some files were not shown because too many files have changed in this diff Show more