deps: update osbuild/images to 9548bf0d0140
Update osbuild/images dependency to osbuild/images@9548bf0d01
parent 139bf4dec2
commit fb3761d602
98 changed files with 1883 additions and 629 deletions
go.mod (24 changed lines)

@@ -12,7 +12,7 @@ require (
 github.com/Azure/go-autorest/autorest v0.11.29
 github.com/Azure/go-autorest/autorest/azure/auth v0.5.12
 github.com/BurntSushi/toml v1.3.2
-github.com/aws/aws-sdk-go v1.44.316
+github.com/aws/aws-sdk-go v1.44.318
 github.com/coreos/go-semver v0.3.1
 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
 github.com/deepmap/oapi-codegen v1.8.2
@@ -31,7 +31,7 @@ require (
 github.com/labstack/gommon v0.4.0
 github.com/openshift-online/ocm-sdk-go v0.1.315
 github.com/oracle/oci-go-sdk/v54 v54.0.0
-github.com/osbuild/images v0.0.0-20230804084728-03212162ff49
+github.com/osbuild/images v0.0.0-20230808122821-9548bf0d0140
 github.com/prometheus/client_golang v1.16.0
 github.com/segmentio/ksuid v1.0.4
 github.com/sirupsen/logrus v1.9.3
@@ -40,10 +40,10 @@ require (
 github.com/ubccr/kerby v0.0.0-20170626144437-201a958fc453
 github.com/vmware/govmomi v0.30.7
 golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
-golang.org/x/oauth2 v0.10.0
+golang.org/x/oauth2 v0.11.0
 golang.org/x/sync v0.3.0
-golang.org/x/sys v0.10.0
-google.golang.org/api v0.134.0
+golang.org/x/sys v0.11.0
+google.golang.org/api v0.135.0
 )

 require (
@@ -68,7 +68,7 @@ require (
 github.com/cenkalti/backoff/v4 v4.2.1 // indirect
 github.com/cespare/xxhash/v2 v2.2.0 // indirect
 github.com/containers/common v0.55.2 // indirect
-github.com/containers/image/v5 v5.26.1 // indirect
+github.com/containers/image/v5 v5.27.0 // indirect
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
 github.com/containers/ocicrypt v1.1.7 // indirect
 github.com/containers/storage v1.48.0 // indirect
@@ -162,19 +162,19 @@ require (
 go.mongodb.org/mongo-driver v1.11.3 // indirect
 go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
 go.opencensus.io v0.24.0 // indirect
-golang.org/x/crypto v0.11.0 // indirect
+golang.org/x/crypto v0.12.0 // indirect
 golang.org/x/mod v0.10.0 // indirect
-golang.org/x/net v0.12.0 // indirect
-golang.org/x/term v0.10.0 // indirect
-golang.org/x/text v0.11.0 // indirect
+golang.org/x/net v0.14.0 // indirect
+golang.org/x/term v0.11.0 // indirect
+golang.org/x/text v0.12.0 // indirect
 golang.org/x/time v0.3.0 // indirect
 golang.org/x/tools v0.9.3 // indirect
 golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
 google.golang.org/appengine v1.6.7 // indirect
 google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 // indirect
 google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 // indirect
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771 // indirect
-google.golang.org/grpc v1.56.2 // indirect
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230726155614-23370e0ffb3e // indirect
+google.golang.org/grpc v1.57.0 // indirect
 google.golang.org/protobuf v1.31.0 // indirect
 gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
 gopkg.in/ini.v1 v1.67.0 // indirect
go.sum (48 changed lines)

@@ -100,8 +100,8 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd
 github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/aws/aws-sdk-go v1.44.316 h1:UC3alCEyzj2XU13ZFGIOHW3yjCNLGTIGVauyetl9fwE=
-github.com/aws/aws-sdk-go v1.44.316/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.318 h1:Yl66rpbQHFUbxe9JBKLcvOvRivhVgP6+zH0b9KzARX8=
+github.com/aws/aws-sdk-go v1.44.318/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
 github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -130,8 +130,8 @@ github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I
 github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
 github.com/containers/common v0.55.2 h1:Cd+vmkUPDrPvL2v4Te1Wew6SIZdn4/XiyiBRT9IbcGg=
 github.com/containers/common v0.55.2/go.mod h1:ZKPllYOZ2xj2rgWRdnHHVvWg6ru4BT28En8mO8DMMPk=
-github.com/containers/image/v5 v5.26.1 h1:8y3xq8GO/6y8FR+nAedHPsAFiAtOrab9qHTBpbqaX8g=
-github.com/containers/image/v5 v5.26.1/go.mod h1:IwlOGzTkGnmfirXxt0hZeJlzv1zVukE03WZQ203Z9GA=
+github.com/containers/image/v5 v5.27.0 h1:4jKVWAa4YurTWUyAWMoC71zJkSylBR7pWd0jqGkukYc=
+github.com/containers/image/v5 v5.27.0/go.mod h1:IwlOGzTkGnmfirXxt0hZeJlzv1zVukE03WZQ203Z9GA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.1.7 h1:thhNr4fu2ltyGz8aMx8u48Ae0Pnbip3ePP9/mzkZ/3U=
@@ -580,8 +580,8 @@ github.com/openshift-online/ocm-sdk-go v0.1.315 h1:e4kDMkrWGyl90zF1dTD+GzcQlO5E8
 github.com/openshift-online/ocm-sdk-go v0.1.315/go.mod h1:KYOw8kAKAHyPrJcQoVR82CneQ4ofC02Na4cXXaTq4Nw=
 github.com/oracle/oci-go-sdk/v54 v54.0.0 h1:CDLjeSejv2aDpElAJrhKpi6zvT/zhZCZuXchUUZ+LS4=
 github.com/oracle/oci-go-sdk/v54 v54.0.0/go.mod h1:+t+yvcFGVp+3ZnztnyxqXfQDsMlq8U25faBLa+mqCMc=
-github.com/osbuild/images v0.0.0-20230804084728-03212162ff49 h1:euwgCymcfkTkAew7NjNqvvYiODuph3F/dl24RqHiytQ=
-github.com/osbuild/images v0.0.0-20230804084728-03212162ff49/go.mod h1:KvRGkMnSQPmVxPeC4NQV0YYCvsiuGTBm76rgfMYKRIg=
+github.com/osbuild/images v0.0.0-20230808122821-9548bf0d0140 h1:uYx9o/pMdA6AOcpGTTNGls6OiqeH3rUl1T+ZXDTGuaQ=
+github.com/osbuild/images v0.0.0-20230808122821-9548bf0d0140/go.mod h1:gmbUNHC/stHhleacLSRnwOH3zZUMgtwoJ7rLY+XwxlU=
 github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
 github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -774,8 +774,8 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0
 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
-golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
-golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
+golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
+golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -858,16 +858,16 @@ golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
-golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
+golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
+golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8=
-golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
+golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
+golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -950,15 +950,15 @@ golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
-golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
+golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
-golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
-golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
+golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0=
+golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -971,8 +971,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
 golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
-golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
+golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1058,8 +1058,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
 google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
 google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
 google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.134.0 h1:ktL4Goua+UBgoP1eL1/60LwZJqa1sIzkLmvoR3hR6Gw=
-google.golang.org/api v0.134.0/go.mod h1:sjRL3UnjTx5UqNQS9EWr9N8p7xbHpy1k0XGRLCf3Spk=
+google.golang.org/api v0.135.0 h1:6Vgfj6uPMXcyy66waYWBwmkeNB+9GmUlJDOzkukPQYQ=
+google.golang.org/api v0.135.0/go.mod h1:Bp77uRFgwsSKI0BWH573F5Q6wSlznwI2NFayLOp/7mQ=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1102,8 +1102,8 @@ google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 h1:Au6te5hbKUV8pIY
 google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y=
 google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 h1:XVeBY8d/FaK4848myy41HBqnDwvxeV3zMZhwN1TvAMU=
 google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771 h1:Z8qdAF9GFsmcUuWQ5KVYIpP3PCKydn/YKORnghIalu4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230726155614-23370e0ffb3e h1:S83+ibolgyZ0bqz7KEsUOPErxcv4VzlszxY+31OfB/E=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -1120,8 +1120,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
 google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
 google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
-google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI=
-google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
+google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
+google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go (generated, vendored; 22 changed lines)

@@ -23162,6 +23162,9 @@ var awsPartition = partition{
 endpointKey{
 Region: "eu-west-3",
 }: endpoint{},
+endpointKey{
+Region: "il-central-1",
+}: endpoint{},
 endpointKey{
 Region: "me-central-1",
 }: endpoint{},
@@ -28656,6 +28659,25 @@ var awsPartition = partition{
 },
 },
 },
+"tnb": service{
+Endpoints: serviceEndpoints{
+endpointKey{
+Region: "ap-southeast-2",
+}: endpoint{},
+endpointKey{
+Region: "eu-central-1",
+}: endpoint{},
+endpointKey{
+Region: "eu-west-3",
+}: endpoint{},
+endpointKey{
+Region: "us-east-1",
+}: endpoint{},
+endpointKey{
+Region: "us-west-2",
+}: endpoint{},
+},
+},
 "transcribe": service{
 Defaults: endpointDefaults{
 defaultKey{}: endpoint{
vendor/github.com/aws/aws-sdk-go/aws/version.go (generated, vendored; 2 changed lines)

@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"

 // SDKVersion is the version of this SDK
-const SDKVersion = "1.44.316"
+const SDKVersion = "1.44.318"
vendor/github.com/containers/image/v5/copy/blob.go (generated, vendored; 8 changed lines)

@@ -83,12 +83,12 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read
 return types.BlobInfo{}, err
 }

-// === Report progress using the ic.c.progress channel, if required.
-if ic.c.progress != nil && ic.c.progressInterval > 0 {
+// === Report progress using the ic.c.options.Progress channel, if required.
+if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 {
 progressReader := newProgressReader(
 stream.reader,
-ic.c.progress,
-ic.c.progressInterval,
+ic.c.options.Progress,
+ic.c.options.ProgressInterval,
 srcInfo,
 )
 defer progressReader.reportDone()
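The renamed internals above back the public Progress and ProgressInterval fields on copy.Options. As a hedged, minimal sketch of how a caller wires those fields up (the helper name and surrounding setup are illustrative assumptions, not code from this repository):

```go
package example

import (
	"context"
	"fmt"
	"time"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/types"
)

// copyWithProgress copies srcRef to destRef while printing per-blob progress
// updates that the library delivers on the Progress channel about once per second.
func copyWithProgress(ctx context.Context, policyCtx *signature.PolicyContext, srcRef, destRef types.ImageReference) error {
	progress := make(chan types.ProgressProperties)
	done := make(chan struct{})
	go func() {
		defer close(done)
		for p := range progress {
			// p.Artifact identifies the blob, p.Offset is how much of it has been copied so far.
			fmt.Printf("blob %s: %d bytes so far\n", p.Artifact.Digest, p.Offset)
		}
	}()

	_, err := copy.Image(ctx, policyCtx, destRef, srcRef, &copy.Options{
		Progress:         progress,
		ProgressInterval: time.Second,
	})
	close(progress) // the library only sends while copy.Image is running
	<-done
	return err
}
```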
vendor/github.com/containers/image/v5/copy/copy.go (generated, vendored; 107 changed lines)

@@ -17,6 +17,7 @@ import (
 "github.com/containers/image/v5/internal/private"
 "github.com/containers/image/v5/manifest"
 "github.com/containers/image/v5/pkg/blobinfocache"
+compression "github.com/containers/image/v5/pkg/compression/types"
 "github.com/containers/image/v5/signature"
 "github.com/containers/image/v5/signature/signer"
 "github.com/containers/image/v5/transports"
@@ -126,36 +127,46 @@ type Options struct {
 // Download layer contents with "nondistributable" media types ("foreign" layers) and translate the layer media type
 // to not indicate "nondistributable".
 DownloadForeignLayers bool
+
+// Contains slice of OptionCompressionVariant, where copy will ensure that for each platform
+// in the manifest list, a variant with the requested compression will exist.
+// Invalid when copying a non-multi-architecture image. That will probably
+// change in the future.
+EnsureCompressionVariantsExist []OptionCompressionVariant
 }

+// OptionCompressionVariant allows to supply information about
+// selected compression algorithm and compression level by the
+// end-user. Refer to EnsureCompressionVariantsExist to know
+// more about its usage.
+type OptionCompressionVariant struct {
+Algorithm compression.Algorithm
+Level *int // Only used when we are creating a new image instance using the specified algorithm, not when the image already contains such an instance
+}
+
 // copier allows us to keep track of diffID values for blobs, and other
 // data shared across one or more images in a possible manifest list.
 // The owner must call close() when done.
 type copier struct {
-dest private.ImageDestination
-rawSource private.ImageSource
-reportWriter io.Writer
-progressOutput io.Writer
-progressInterval time.Duration
-progress chan types.ProgressProperties
+policyContext *signature.PolicyContext
+dest private.ImageDestination
+rawSource private.ImageSource
+options *Options // never nil
+
+reportWriter io.Writer
+progressOutput io.Writer
+
+unparsedToplevel *image.UnparsedImage // for rawSource
 blobInfoCache internalblobinfocache.BlobInfoCache2
-ociDecryptConfig *encconfig.DecryptConfig
-ociEncryptConfig *encconfig.EncryptConfig
 concurrentBlobCopiesSemaphore *semaphore.Weighted // Limits the amount of concurrently copied blobs
-downloadForeignLayers bool
-signers []*signer.Signer // Signers to use to create new signatures for the image
-signersToClose []*signer.Signer // Signers that should be closed when this copier is destroyed.
+signers []*signer.Signer // Signers to use to create new signatures for the image
+signersToClose []*signer.Signer // Signers that should be closed when this copier is destroyed.
 }

 // Image copies image from srcRef to destRef, using policyContext to validate
 // source image admissibility. It returns the manifest which was written to
 // the new copy of the image.
 func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (copiedManifest []byte, retErr error) {
 // NOTE this function uses an output parameter for the error return value.
 // Setting this and returning is the ideal way to return an error.
 //
 // the defers in this routine will wrap the error return with its own errors
 // which can be valuable context in the middle of a multi-streamed copy.
 if options == nil {
 options = &Options{}
 }
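The EnsureCompressionVariantsExist and OptionCompressionVariant options added above are the main API change this vendored containers/image bump (v5.26.1 to v5.27.0) brings in. As a rough, hedged sketch of how a caller could use them (the helper name, its wiring, and the "zstd" choice are illustrative assumptions, not code from this repository):

```go
package example

import (
	"context"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/pkg/compression"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/types"
)

// copyListWithZstdVariants copies a multi-architecture image and asks the
// library to make sure every platform in the list also carries a
// zstd-compressed variant; per the comment above, this is only valid when
// copying a manifest list.
func copyListWithZstdVariants(ctx context.Context, policyCtx *signature.PolicyContext, srcRef, destRef types.ImageReference) error {
	zstd, err := compression.AlgorithmByName("zstd")
	if err != nil {
		return err
	}
	_, err = copy.Image(ctx, policyCtx, destRef, srcRef, &copy.Options{
		ImageListSelection: copy.CopyAllImages,
		EnsureCompressionVariantsExist: []copy.OptionCompressionVariant{
			{Algorithm: zstd}, // Level nil: let the library pick its default level
		},
	})
	return err
}
```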
@@ -209,27 +220,27 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
 }

 c := &copier{
-dest: dest,
-rawSource: rawSource,
-reportWriter: reportWriter,
-progressOutput: progressOutput,
-progressInterval: options.ProgressInterval,
-progress: options.Progress,
+policyContext: policyContext,
+dest: dest,
+rawSource: rawSource,
+options: options,
+
+reportWriter: reportWriter,
+progressOutput: progressOutput,
+
+unparsedToplevel: image.UnparsedInstance(rawSource, nil),
 // FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
 // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
 // we might want to add a separate CommonCtx — or would that be too confusing?
-blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)),
-ociDecryptConfig: options.OciDecryptConfig,
-ociEncryptConfig: options.OciEncryptConfig,
-downloadForeignLayers: options.DownloadForeignLayers,
+blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)),
 }
 defer c.close()

 // Set the concurrentBlobCopiesSemaphore if we can copy layers in parallel.
 if dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() {
-c.concurrentBlobCopiesSemaphore = options.ConcurrentBlobCopiesSemaphore
+c.concurrentBlobCopiesSemaphore = c.options.ConcurrentBlobCopiesSemaphore
 if c.concurrentBlobCopiesSemaphore == nil {
-max := options.MaxParallelDownloads
+max := c.options.MaxParallelDownloads
 if max == 0 {
 max = maxParallelDownloads
 }
@@ -237,33 +248,40 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
 }
 } else {
 c.concurrentBlobCopiesSemaphore = semaphore.NewWeighted(int64(1))
-if options.ConcurrentBlobCopiesSemaphore != nil {
-if err := options.ConcurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
+if c.options.ConcurrentBlobCopiesSemaphore != nil {
+if err := c.options.ConcurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
 return nil, fmt.Errorf("acquiring semaphore for concurrent blob copies: %w", err)
 }
-defer options.ConcurrentBlobCopiesSemaphore.Release(1)
+defer c.options.ConcurrentBlobCopiesSemaphore.Release(1)
 }
 }

-if err := c.setupSigners(options); err != nil {
+if err := c.setupSigners(); err != nil {
 return nil, err
 }

-unparsedToplevel := image.UnparsedInstance(rawSource, nil)
-multiImage, err := isMultiImage(ctx, unparsedToplevel)
+multiImage, err := isMultiImage(ctx, c.unparsedToplevel)
 if err != nil {
 return nil, fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(srcRef), err)
 }

 if !multiImage {
+if len(options.EnsureCompressionVariantsExist) > 0 {
+return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image")
+}
 // The simple case: just copy a single image.
-if copiedManifest, _, _, err = c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedToplevel, nil); err != nil {
+single, err := c.copySingleImage(ctx, c.unparsedToplevel, nil, copySingleImageOptions{requireCompressionFormatMatch: false})
+if err != nil {
 return nil, err
 }
-} else if options.ImageListSelection == CopySystemImage {
+copiedManifest = single.manifest
+} else if c.options.ImageListSelection == CopySystemImage {
+if len(options.EnsureCompressionVariantsExist) > 0 {
+return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image")
+}
 // This is a manifest list, and we weren't asked to copy multiple images. Choose a single image that
 // matches the current system to copy, and copy it.
-mfest, manifestType, err := unparsedToplevel.Manifest(ctx)
+mfest, manifestType, err := c.unparsedToplevel.Manifest(ctx)
 if err != nil {
 return nil, fmt.Errorf("reading manifest for %s: %w", transports.ImageName(srcRef), err)
 }
@@ -271,34 +289,35 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
 if err != nil {
 return nil, fmt.Errorf("parsing primary manifest as list for %s: %w", transports.ImageName(srcRef), err)
 }
-instanceDigest, err := manifestList.ChooseInstanceByCompression(options.SourceCtx, options.PreferGzipInstances) // try to pick one that matches options.SourceCtx
+instanceDigest, err := manifestList.ChooseInstanceByCompression(c.options.SourceCtx, c.options.PreferGzipInstances) // try to pick one that matches c.options.SourceCtx
 if err != nil {
 return nil, fmt.Errorf("choosing an image from manifest list %s: %w", transports.ImageName(srcRef), err)
 }
 logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest)
 unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest)

-if copiedManifest, _, _, err = c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, nil); err != nil {
+single, err := c.copySingleImage(ctx, unparsedInstance, nil, copySingleImageOptions{requireCompressionFormatMatch: false})
+if err != nil {
 return nil, fmt.Errorf("copying system image from manifest list: %w", err)
 }
-} else { /* options.ImageListSelection == CopyAllImages or options.ImageListSelection == CopySpecificImages, */
+copiedManifest = single.manifest
+} else { /* c.options.ImageListSelection == CopyAllImages or c.options.ImageListSelection == CopySpecificImages, */
 // If we were asked to copy multiple images and can't, that's an error.
 if !supportsMultipleImages(c.dest) {
 return nil, fmt.Errorf("copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name())
 }
 // Copy some or all of the images.
-switch options.ImageListSelection {
+switch c.options.ImageListSelection {
 case CopyAllImages:
 logrus.Debugf("Source is a manifest list; copying all instances")
 case CopySpecificImages:
 logrus.Debugf("Source is a manifest list; copying some instances")
 }
-if copiedManifest, err = c.copyMultipleImages(ctx, policyContext, options, unparsedToplevel); err != nil {
+if copiedManifest, err = c.copyMultipleImages(ctx); err != nil {
 return nil, err
 }
 }

-if err := c.dest.Commit(ctx, unparsedToplevel); err != nil {
+if err := c.dest.Commit(ctx, c.unparsedToplevel); err != nil {
 return nil, fmt.Errorf("committing the finished image: %w", err)
 }

vendor/github.com/containers/image/v5/copy/encryption.go (generated, vendored; 8 changed lines)

@@ -34,7 +34,7 @@ type bpDecryptionStepData struct {
 // srcInfo is only used for error messages.
 // Returns data for other steps; the caller should eventually use updateCryptoOperation.
 func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) {
-if !isOciEncrypted(stream.info.MediaType) || ic.c.ociDecryptConfig == nil {
+if !isOciEncrypted(stream.info.MediaType) || ic.c.options.OciDecryptConfig == nil {
 return &bpDecryptionStepData{
 decrypting: false,
 }, nil
@@ -47,7 +47,7 @@ func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo
 desc := imgspecv1.Descriptor{
 Annotations: stream.info.Annotations,
 }
-reader, decryptedDigest, err := ocicrypt.DecryptLayer(ic.c.ociDecryptConfig, stream.reader, desc, false)
+reader, decryptedDigest, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false)
 if err != nil {
 return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err)
 }
@@ -81,7 +81,7 @@ type bpEncryptionStepData struct {
 // Returns data for other steps; the caller should eventually call updateCryptoOperationAndAnnotations.
 func (ic *imageCopier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo,
 decryptionStep *bpDecryptionStepData) (*bpEncryptionStepData, error) {
-if !toEncrypt || isOciEncrypted(srcInfo.MediaType) || ic.c.ociEncryptConfig == nil {
+if !toEncrypt || isOciEncrypted(srcInfo.MediaType) || ic.c.options.OciEncryptConfig == nil {
 return &bpEncryptionStepData{
 encrypting: false,
 }, nil
@@ -101,7 +101,7 @@ func (ic *imageCopier) blobPipelineEncryptionStep(stream *sourceStream, toEncryp
 Size: srcInfo.Size,
 Annotations: annotations,
 }
-reader, finalizer, err := ocicrypt.EncryptLayer(ic.c.ociEncryptConfig, stream.reader, desc)
+reader, finalizer, err := ocicrypt.EncryptLayer(ic.c.options.OciEncryptConfig, stream.reader, desc)
 if err != nil {
 return nil, fmt.Errorf("encrypting blob %s: %w", srcInfo.Digest, err)
 }
vendor/github.com/containers/image/v5/copy/multiple.go (generated, vendored; 159 changed lines)

@@ -5,16 +5,19 @@ import (
 "context"
 "errors"
 "fmt"
+"sort"
+"strings"

 "github.com/containers/image/v5/docker/reference"
 "github.com/containers/image/v5/internal/image"
 internalManifest "github.com/containers/image/v5/internal/manifest"
+"github.com/containers/image/v5/internal/set"
 "github.com/containers/image/v5/manifest"
-"github.com/containers/image/v5/signature"
+"github.com/containers/image/v5/pkg/compression"
 digest "github.com/opencontainers/go-digest"
+imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 "github.com/sirupsen/logrus"
+"golang.org/x/exp/maps"
 "golang.org/x/exp/slices"
 )

@@ -28,30 +31,125 @@ const (
 type instanceCopy struct {
 op instanceCopyKind
 sourceDigest digest.Digest
+
+// Fields which can be used by callers when operation
+// is `instanceCopyClone`
+cloneCompressionVariant OptionCompressionVariant
+clonePlatform *imgspecv1.Platform
+cloneAnnotations map[string]string
 }

+// internal type only to make imgspecv1.Platform comparable
+type platformComparable struct {
+architecture string
+os string
+osVersion string
+osFeatures string
+variant string
+}
+
+// Converts imgspecv1.Platform to a comparable format.
+func platformV1ToPlatformComparable(platform *imgspecv1.Platform) platformComparable {
+if platform == nil {
+return platformComparable{}
+}
+osFeatures := slices.Clone(platform.OSFeatures)
+sort.Strings(osFeatures)
+return platformComparable{architecture: platform.Architecture,
+os: platform.OS,
+// This is strictly speaking ambiguous, fields of OSFeatures can contain a ','. Probably good enough for now.
+osFeatures: strings.Join(osFeatures, ","),
+osVersion: platform.OSVersion,
+variant: platform.Variant,
+}
+}
+
+// platformCompressionMap prepares a mapping of platformComparable -> CompressionAlgorithmNames for given digests
+func platformCompressionMap(list internalManifest.List, instanceDigests []digest.Digest) (map[platformComparable]*set.Set[string], error) {
+res := make(map[platformComparable]*set.Set[string])
+for _, instanceDigest := range instanceDigests {
+instanceDetails, err := list.Instance(instanceDigest)
+if err != nil {
+return nil, fmt.Errorf("getting details for instance %s: %w", instanceDigest, err)
+}
+platform := platformV1ToPlatformComparable(instanceDetails.ReadOnly.Platform)
+platformSet, ok := res[platform]
+if !ok {
+platformSet = set.New[string]()
+res[platform] = platformSet
+}
+platformSet.AddSlice(instanceDetails.ReadOnly.CompressionAlgorithmNames)
+}
+return res, nil
+}
+
+func validateCompressionVariantExists(input []OptionCompressionVariant) error {
+for _, option := range input {
+_, err := compression.AlgorithmByName(option.Algorithm.Name())
+if err != nil {
+return fmt.Errorf("invalid algorithm %q in option.EnsureCompressionVariantsExist: %w", option.Algorithm.Name(), err)
+}
+}
+return nil
+}
+
 // prepareInstanceCopies prepares a list of instances which needs to copied to the manifest list.
-func prepareInstanceCopies(instanceDigests []digest.Digest, options *Options) []instanceCopy {
+func prepareInstanceCopies(list internalManifest.List, instanceDigests []digest.Digest, options *Options) ([]instanceCopy, error) {
 res := []instanceCopy{}
+if options.ImageListSelection == CopySpecificImages && len(options.EnsureCompressionVariantsExist) > 0 {
+// List can already contain compressed instance for a compression selected in `EnsureCompressionVariantsExist`
+// It’s unclear what it means when `CopySpecificImages` includes an instance in options.Instances,
+// EnsureCompressionVariantsExist asks for an instance with some compression,
+// an instance with that compression already exists, but is not included in options.Instances.
+// We might define the semantics and implement this in the future.
+return res, fmt.Errorf("EnsureCompressionVariantsExist is not implemented for CopySpecificImages")
+}
+err := validateCompressionVariantExists(options.EnsureCompressionVariantsExist)
+if err != nil {
+return res, err
+}
+compressionsByPlatform, err := platformCompressionMap(list, instanceDigests)
+if err != nil {
+return nil, err
+}
 for i, instanceDigest := range instanceDigests {
 if options.ImageListSelection == CopySpecificImages &&
 !slices.Contains(options.Instances, instanceDigest) {
 logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
 continue
 }
+instanceDetails, err := list.Instance(instanceDigest)
+if err != nil {
+return res, fmt.Errorf("getting details for instance %s: %w", instanceDigest, err)
+}
 res = append(res, instanceCopy{
 op: instanceCopyCopy,
 sourceDigest: instanceDigest,
 })
+platform := platformV1ToPlatformComparable(instanceDetails.ReadOnly.Platform)
+compressionList := compressionsByPlatform[platform]
+for _, compressionVariant := range options.EnsureCompressionVariantsExist {
+if !compressionList.Contains(compressionVariant.Algorithm.Name()) {
+res = append(res, instanceCopy{
+op: instanceCopyClone,
+sourceDigest: instanceDigest,
+cloneCompressionVariant: compressionVariant,
+clonePlatform: instanceDetails.ReadOnly.Platform,
+cloneAnnotations: maps.Clone(instanceDetails.ReadOnly.Annotations),
+})
+// add current compression to the list so that we don’t create duplicate clones
+compressionList.Add(compressionVariant.Algorithm.Name())
+}
+}
 }
-return res
+return res, nil
 }

 // copyMultipleImages copies some or all of an image list's instances, using
-// policyContext to validate source image admissibility.
-func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel *image.UnparsedImage) (copiedManifest []byte, retErr error) {
+// c.policyContext to validate source image admissibility.
+func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte, retErr error) {
 // Parse the list and get a copy of the original value after it's re-encoded.
-manifestList, manifestType, err := unparsedToplevel.Manifest(ctx)
+manifestList, manifestType, err := c.unparsedToplevel.Manifest(ctx)
 if err != nil {
 return nil, fmt.Errorf("reading manifest list: %w", err)
 }
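prepareInstanceCopies above decides, per platform, whether a list entry only needs to be copied or also cloned into an extra compression variant, and CopySpecificImages plus Options.Instances narrows which digests are considered at all. A hedged sketch of driving that selection from the public API (the helper name and wiring are assumptions, not code from this repository):

```go
package example

import (
	"context"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/types"
	digest "github.com/opencontainers/go-digest"
)

// copySelectedInstances copies only the chosen instances out of a manifest
// list; digests not present in wanted are skipped by prepareInstanceCopies.
func copySelectedInstances(ctx context.Context, policyCtx *signature.PolicyContext, srcRef, destRef types.ImageReference, wanted []digest.Digest) error {
	_, err := copy.Image(ctx, policyCtx, destRef, srcRef, &copy.Options{
		ImageListSelection: copy.CopySpecificImages,
		Instances:          wanted,
	})
	return err
}
```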
@@ -61,7 +159,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
 }
 updatedList := originalList.CloneInternal()

-sigs, err := c.sourceSignatures(ctx, unparsedToplevel, options,
+sigs, err := c.sourceSignatures(ctx, c.unparsedToplevel,
 "Getting image list signatures",
 "Checking if image list destination supports signatures")
 if err != nil {
@@ -94,12 +192,12 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
 if destIsDigestedReference {
 cannotModifyManifestListReason = "Destination specifies a digest"
 }
-if options.PreserveDigests {
+if c.options.PreserveDigests {
 cannotModifyManifestListReason = "Instructed to preserve digests"
 }

 // Determine if we'll need to convert the manifest list to a different format.
-forceListMIMEType := options.ForceManifestMIMEType
+forceListMIMEType := c.options.ForceManifestMIMEType
 switch forceListMIMEType {
 case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType:
 forceListMIMEType = manifest.DockerV2ListMediaType
@@ -119,8 +217,11 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
 // Copy each image, or just the ones we want to copy, in turn.
 instanceDigests := updatedList.Instances()
 instanceEdits := []internalManifest.ListEdit{}
-instanceCopyList := prepareInstanceCopies(instanceDigests, options)
-c.Printf("Copying %d of %d images in list\n", len(instanceCopyList), len(instanceDigests))
+instanceCopyList, err := prepareInstanceCopies(updatedList, instanceDigests, c.options)
+if err != nil {
+return nil, fmt.Errorf("preparing instances for copy: %w", err)
+}
+c.Printf("Copying %d images generated from %d images in list\n", len(instanceCopyList), len(instanceDigests))
 for i, instance := range instanceCopyList {
 // Update instances to be edited by their `ListOperation` and
 // populate necessary fields.
@@ -129,17 +230,39 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
 logrus.Debugf("Copying instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList))
 c.Printf("Copying image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList))
 unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest)
-updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceCopyList[i].sourceDigest)
+updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{requireCompressionFormatMatch: false})
 if err != nil {
 return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err)
 }
 // Record the result of a possible conversion here.
 instanceEdits = append(instanceEdits, internalManifest.ListEdit{
-ListOperation: internalManifest.ListOpUpdate,
-UpdateOldDigest: instance.sourceDigest,
-UpdateDigest: updatedManifestDigest,
-UpdateSize: int64(len(updatedManifest)),
-UpdateMediaType: updatedManifestType})
+ListOperation: internalManifest.ListOpUpdate,
+UpdateOldDigest: instance.sourceDigest,
+UpdateDigest: updated.manifestDigest,
+UpdateSize: int64(len(updated.manifest)),
+UpdateCompressionAlgorithms: updated.compressionAlgorithms,
+UpdateMediaType: updated.manifestMIMEType})
+case instanceCopyClone:
+logrus.Debugf("Replicating instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList))
+c.Printf("Replicating image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList))
+unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest)
+updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{
+requireCompressionFormatMatch: true,
+compressionFormat: &instance.cloneCompressionVariant.Algorithm,
+compressionLevel: instance.cloneCompressionVariant.Level})
+if err != nil {
+return nil, fmt.Errorf("replicating image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err)
+}
+// Record the result of a possible conversion here.
+instanceEdits = append(instanceEdits, internalManifest.ListEdit{
+ListOperation: internalManifest.ListOpAdd,
+AddDigest: updated.manifestDigest,
+AddSize: int64(len(updated.manifest)),
+AddMediaType: updated.manifestMIMEType,
+AddPlatform: instance.clonePlatform,
+AddAnnotations: instance.cloneAnnotations,
+AddCompressionAlgorithms: updated.compressionAlgorithms,
+})
 default:
 return nil, fmt.Errorf("copying image: invalid copy operation %d", instance.op)
 }
@@ -204,7 +327,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
 }

 // Sign the manifest list.
-newSigs, err := c.createSignatures(ctx, manifestList, options.SignIdentity)
+newSigs, err := c.createSignatures(ctx, manifestList, c.options.SignIdentity)
 if err != nil {
 return nil, err
 }
vendor/github.com/containers/image/v5/copy/sign.go (generated, vendored; 26 changed lines)

@@ -13,20 +13,20 @@ import (
 "github.com/containers/image/v5/transports"
 )

-// setupSigners initializes c.signers based on options.
-func (c *copier) setupSigners(options *Options) error {
-c.signers = append(c.signers, options.Signers...)
-// c.signersToClose is intentionally not updated with options.Signers.
+// setupSigners initializes c.signers.
+func (c *copier) setupSigners() error {
+c.signers = append(c.signers, c.options.Signers...)
+// c.signersToClose is intentionally not updated with c.options.Signers.

 // We immediately append created signers to c.signers, and we rely on c.close() to clean them up; so we don’t need
 // to clean up any created signers on failure.

-if options.SignBy != "" {
+if c.options.SignBy != "" {
 opts := []simplesigning.Option{
-simplesigning.WithKeyFingerprint(options.SignBy),
+simplesigning.WithKeyFingerprint(c.options.SignBy),
 }
-if options.SignPassphrase != "" {
-opts = append(opts, simplesigning.WithPassphrase(options.SignPassphrase))
+if c.options.SignPassphrase != "" {
+opts = append(opts, simplesigning.WithPassphrase(c.options.SignPassphrase))
 }
 signer, err := simplesigning.NewSigner(opts...)
 if err != nil {
@@ -36,9 +36,9 @@ func (c *copier) setupSigners(options *Options) error {
 c.signersToClose = append(c.signersToClose, signer)
 }

-if options.SignBySigstorePrivateKeyFile != "" {
+if c.options.SignBySigstorePrivateKeyFile != "" {
 signer, err := sigstore.NewSigner(
-sigstore.WithPrivateKeyFile(options.SignBySigstorePrivateKeyFile, options.SignSigstorePrivateKeyPassphrase),
+sigstore.WithPrivateKeyFile(c.options.SignBySigstorePrivateKeyFile, c.options.SignSigstorePrivateKeyPassphrase),
 )
 if err != nil {
 return err
@@ -50,13 +50,13 @@ func (c *copier) setupSigners(options *Options) error {
 return nil
 }

-// sourceSignatures returns signatures from unparsedSource based on options,
+// sourceSignatures returns signatures from unparsedSource,
 // and verifies that they can be used (to avoid copying a large image when we
 // can tell in advance that it would ultimately fail)
-func (c *copier) sourceSignatures(ctx context.Context, unparsed private.UnparsedImage, options *Options,
+func (c *copier) sourceSignatures(ctx context.Context, unparsed private.UnparsedImage,
 gettingSignaturesMessage, checkingDestMessage string) ([]internalsig.Signature, error) {
 var sigs []internalsig.Signature
-if options.RemoveSignatures {
+if c.options.RemoveSignatures {
 sigs = []internalsig.Signature{}
 } else {
 c.Printf("%s\n", gettingSignaturesMessage)
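setupSigners now reads the signing knobs from c.options instead of a parameter; the public fields it consumes (SignBy, SignPassphrase, SignBySigstorePrivateKeyFile, and friends) are unchanged. A hedged sketch of a caller requesting simple signing (helper name and key fingerprint are placeholders, not code from this repository):

```go
package example

import (
	"context"
	"os"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/types"
)

// copyAndSign copies srcRef to destRef and signs the result with the given
// GPG key fingerprint, which is assumed to exist in the local keyring.
func copyAndSign(ctx context.Context, policyCtx *signature.PolicyContext, srcRef, destRef types.ImageReference, keyFingerprint string) error {
	_, err := copy.Image(ctx, policyCtx, destRef, srcRef, &copy.Options{
		ReportWriter: os.Stdout,
		SignBy:       keyFingerprint,
	})
	return err
}
```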
vendor/github.com/containers/image/v5/copy/single.go (generated, vendored; 275 changed lines)

@@ -18,7 +18,6 @@ import (
 "github.com/containers/image/v5/manifest"
 "github.com/containers/image/v5/pkg/compression"
 compressiontypes "github.com/containers/image/v5/pkg/compression/types"
-"github.com/containers/image/v5/signature"
 "github.com/containers/image/v5/transports"
 "github.com/containers/image/v5/types"
 digest "github.com/opencontainers/go-digest"
@@ -30,40 +29,54 @@ import (

 // imageCopier tracks state specific to a single image (possibly an item of a manifest list)
 type imageCopier struct {
-c *copier
-manifestUpdates *types.ManifestUpdateOptions
-src *image.SourcedImage
-diffIDsAreNeeded bool
-cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
-canSubstituteBlobs bool
-compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil.
-compressionLevel *int
-ociEncryptLayers *[]int
+c *copier
+manifestUpdates *types.ManifestUpdateOptions
+src *image.SourcedImage
+diffIDsAreNeeded bool
+cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
+canSubstituteBlobs bool
+compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil.
+compressionLevel *int
+requireCompressionFormatMatch bool
 }

-// copySingleImage copies a single (non-manifest-list) image unparsedImage, using policyContext to validate
+type copySingleImageOptions struct {
+requireCompressionFormatMatch bool
+compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil.
+compressionLevel *int
+}
+
+// copySingleImageResult carries data produced by copySingleImage
+type copySingleImageResult struct {
+manifest []byte
+manifestMIMEType string
+manifestDigest digest.Digest
+compressionAlgorithms []compressiontypes.Algorithm
+}
+
+// copySingleImage copies a single (non-manifest-list) image unparsedImage, using c.policyContext to validate
 // source image admissibility.
-func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel, unparsedImage *image.UnparsedImage, targetInstance *digest.Digest) (retManifest []byte, retManifestType string, retManifestDigest digest.Digest, retErr error) {
+func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.UnparsedImage, targetInstance *digest.Digest, opts copySingleImageOptions) (copySingleImageResult, error) {
 // The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list.
 // Make sure we fail cleanly in such cases.
 multiImage, err := isMultiImage(ctx, unparsedImage)
 if err != nil {
 // FIXME FIXME: How to name a reference for the sub-image?
-return nil, "", "", fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(unparsedImage.Reference()), err)
+return copySingleImageResult{}, fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(unparsedImage.Reference()), err)
 }
 if multiImage {
-return nil, "", "", fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
+return copySingleImageResult{}, fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
 }

 // Please keep this policy check BEFORE reading any other information about the image.
 // (The multiImage check above only matches the MIME type, which we have received anyway.
 // Actual parsing of anything should be deferred.)
-if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
-return nil, "", "", fmt.Errorf("Source image rejected: %w", err)
+if allowed, err := c.policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
+return copySingleImageResult{}, fmt.Errorf("Source image rejected: %w", err)
 }
-src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage)
+src, err := image.FromUnparsedImage(ctx, c.options.SourceCtx, unparsedImage)
 if err != nil {
-return nil, "", "", fmt.Errorf("initializing image from source %s: %w", transports.ImageName(c.rawSource.Reference()), err)
+return copySingleImageResult{}, fmt.Errorf("initializing image from source %s: %w", transports.ImageName(c.rawSource.Reference()), err)
 }

 // If the destination is a digested reference, make a note of that, determine what digest value we're
@ -75,33 +88,33 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P
|
|||
destIsDigestedReference = true
|
||||
matches, err := manifest.MatchesDigest(src.ManifestBlob, digested.Digest())
|
||||
if err != nil {
|
||||
return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err)
|
||||
return copySingleImageResult{}, fmt.Errorf("computing digest of source image's manifest: %w", err)
|
||||
}
|
||||
if !matches {
|
||||
manifestList, _, err := unparsedToplevel.Manifest(ctx)
|
||||
manifestList, _, err := c.unparsedToplevel.Manifest(ctx)
|
||||
if err != nil {
|
||||
return nil, "", "", fmt.Errorf("reading manifest from source image: %w", err)
|
||||
return copySingleImageResult{}, fmt.Errorf("reading manifest from source image: %w", err)
|
||||
}
|
||||
matches, err = manifest.MatchesDigest(manifestList, digested.Digest())
|
||||
if err != nil {
|
||||
return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err)
|
||||
return copySingleImageResult{}, fmt.Errorf("computing digest of source image's manifest: %w", err)
|
||||
}
|
||||
if !matches {
|
||||
return nil, "", "", errors.New("Digest of source image's manifest would not match destination reference")
|
||||
return copySingleImageResult{}, errors.New("Digest of source image's manifest would not match destination reference")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := checkImageDestinationForCurrentRuntime(ctx, options.DestinationCtx, src, c.dest); err != nil {
|
||||
return nil, "", "", err
|
||||
if err := checkImageDestinationForCurrentRuntime(ctx, c.options.DestinationCtx, src, c.dest); err != nil {
|
||||
return copySingleImageResult{}, err
|
||||
}
|
||||
|
||||
sigs, err := c.sourceSignatures(ctx, src, options,
|
||||
sigs, err := c.sourceSignatures(ctx, src,
|
||||
"Getting image source signatures",
|
||||
"Checking if image destination supports signatures")
|
||||
if err != nil {
|
||||
return nil, "", "", err
|
||||
return copySingleImageResult{}, err
|
||||
}
|
||||
|
||||
// Determine if we're allowed to modify the manifest.
|
||||
|
|
@ -114,7 +127,7 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P
|
|||
if destIsDigestedReference {
|
||||
cannotModifyManifestReason = "Destination specifies a digest"
|
||||
}
|
||||
if options.PreserveDigests {
|
||||
if c.options.PreserveDigests {
|
||||
cannotModifyManifestReason = "Instructed to preserve digests"
|
||||
}
|
||||
|
||||
|
|
@ -123,13 +136,16 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P
|
|||
manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}},
|
||||
src: src,
|
||||
// diffIDsAreNeeded is computed later
|
||||
cannotModifyManifestReason: cannotModifyManifestReason,
|
||||
ociEncryptLayers: options.OciEncryptLayers,
|
||||
cannotModifyManifestReason: cannotModifyManifestReason,
|
||||
requireCompressionFormatMatch: opts.requireCompressionFormatMatch,
|
||||
}
|
||||
if options.DestinationCtx != nil {
|
||||
if opts.compressionFormat != nil {
|
||||
ic.compressionFormat = opts.compressionFormat
|
||||
ic.compressionLevel = opts.compressionLevel
|
||||
} else if c.options.DestinationCtx != nil {
|
||||
// Note that compressionFormat and compressionLevel can be nil.
|
||||
ic.compressionFormat = options.DestinationCtx.CompressionFormat
|
||||
ic.compressionLevel = options.DestinationCtx.CompressionLevel
|
||||
ic.compressionFormat = c.options.DestinationCtx.CompressionFormat
|
||||
ic.compressionLevel = c.options.DestinationCtx.CompressionLevel
|
||||
}
|
||||
// Decide whether we can substitute blobs with semantic equivalents:
|
||||
// - Don’t do that if we can’t modify the manifest at all
|
||||
|
|
@ -142,20 +158,20 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P
|
|||
ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && len(c.signers) == 0
|
||||
|
||||
if err := ic.updateEmbeddedDockerReference(); err != nil {
|
||||
return nil, "", "", err
|
||||
return copySingleImageResult{}, err
|
||||
}
|
||||
|
||||
destRequiresOciEncryption := (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || options.OciEncryptLayers != nil
|
||||
destRequiresOciEncryption := (isEncrypted(src) && ic.c.options.OciDecryptConfig != nil) || c.options.OciEncryptLayers != nil
|
||||
|
||||
manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{
|
||||
srcMIMEType: ic.src.ManifestMIMEType,
|
||||
destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(),
|
||||
forceManifestMIMEType: options.ForceManifestMIMEType,
|
||||
forceManifestMIMEType: c.options.ForceManifestMIMEType,
|
||||
requiresOCIEncryption: destRequiresOciEncryption,
|
||||
cannotModifyManifestReason: ic.cannotModifyManifestReason,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, "", "", err
|
||||
return copySingleImageResult{}, err
|
||||
}
|
||||
// We set up this part of ic.manifestUpdates quite early, not just around the
|
||||
// code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code
|
||||
|
|
@ -169,27 +185,28 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P
|
|||
ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)
|
||||
|
||||
// If enabled, fetch and compare the destination's manifest. And as an optimization skip updating the destination iff equal
|
||||
if options.OptimizeDestinationImageAlreadyExists {
|
||||
if c.options.OptimizeDestinationImageAlreadyExists {
|
||||
shouldUpdateSigs := len(sigs) > 0 || len(c.signers) != 0 // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible
|
||||
noPendingManifestUpdates := ic.noPendingManifestUpdates()
|
||||
|
||||
logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates)
|
||||
if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates {
|
||||
isSrcDestManifestEqual, retManifest, retManifestType, retManifestDigest, err := compareImageDestinationManifestEqual(ctx, options, src, targetInstance, c.dest)
|
||||
logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t, compression match required for resuing blobs=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates, opts.requireCompressionFormatMatch)
|
||||
if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates && !ic.requireCompressionFormatMatch {
|
||||
matchedResult, err := ic.compareImageDestinationManifestEqual(ctx, targetInstance)
|
||||
if err != nil {
|
||||
logrus.Warnf("Failed to compare destination image manifest: %v", err)
|
||||
return nil, "", "", err
|
||||
return copySingleImageResult{}, err
|
||||
}
|
||||
|
||||
if isSrcDestManifestEqual {
|
||||
if matchedResult != nil {
|
||||
c.Printf("Skipping: image already present at destination\n")
|
||||
return retManifest, retManifestType, retManifestDigest, nil
|
||||
return *matchedResult, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := ic.copyLayers(ctx); err != nil {
|
||||
return nil, "", "", err
|
||||
compressionAlgos, err := ic.copyLayers(ctx)
|
||||
if err != nil {
|
||||
return copySingleImageResult{}, err
|
||||
}
|
||||
|
||||
// With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only;
|
||||
|
|
@ -197,8 +214,12 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P
|
|||
// without actually trying to upload something and getting a types.ManifestTypeRejectedError.
|
||||
// So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if
|
||||
// we're altering how they're compressed. If the process succeeds, fine…
|
||||
manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
|
||||
retManifestType = manifestConversionPlan.preferredMIMEType
|
||||
manifestBytes, manifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
|
||||
wipResult := copySingleImageResult{
|
||||
manifest: manifestBytes,
|
||||
manifestMIMEType: manifestConversionPlan.preferredMIMEType,
|
||||
manifestDigest: manifestDigest,
|
||||
}
|
||||
if err != nil {
|
||||
logrus.Debugf("Writing manifest using preferred type %s failed: %v", manifestConversionPlan.preferredMIMEType, err)
|
||||
// … if it fails, and the failure is either because the manifest is rejected by the registry, or
|
||||
|
|
@ -213,14 +234,14 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P
|
|||
// We don’t have other options.
|
||||
// In principle the code below would handle this as well, but the resulting error message is fairly ugly.
|
||||
// Don’t bother the user with MIME types if we have no choice.
|
||||
return nil, "", "", err
|
||||
return copySingleImageResult{}, err
|
||||
}
|
||||
// If the original MIME type is acceptable, determineManifestConversion always uses it as manifestConversionPlan.preferredMIMEType.
|
||||
// So if we are here, we will definitely be trying to convert the manifest.
|
||||
// With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason,
|
||||
// so let’s bail out early and with a better error message.
|
||||
if ic.cannotModifyManifestReason != "" {
|
||||
return nil, "", "", fmt.Errorf("writing manifest failed and we cannot try conversions: %q: %w", cannotModifyManifestReason, err)
|
||||
return copySingleImageResult{}, fmt.Errorf("writing manifest failed and we cannot try conversions: %q: %w", cannotModifyManifestReason, err)
|
||||
}
|
||||
|
||||
// errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
|
||||
|
|
@ -236,34 +257,37 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P
|
|||
}
|
||||
|
||||
// We have successfully uploaded a manifest.
|
||||
manifestBytes = attemptedManifest
|
||||
retManifestDigest = attemptedManifestDigest
|
||||
retManifestType = manifestMIMEType
|
||||
wipResult = copySingleImageResult{
|
||||
manifest: attemptedManifest,
|
||||
manifestMIMEType: manifestMIMEType,
|
||||
manifestDigest: attemptedManifestDigest,
|
||||
}
|
||||
errs = nil // Mark this as a success so that we don't abort below.
|
||||
break
|
||||
}
|
||||
if errs != nil {
|
||||
return nil, "", "", fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
|
||||
return copySingleImageResult{}, fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
|
||||
}
|
||||
}
|
||||
if targetInstance != nil {
|
||||
targetInstance = &retManifestDigest
|
||||
targetInstance = &wipResult.manifestDigest
|
||||
}
|
||||
|
||||
newSigs, err := c.createSignatures(ctx, manifestBytes, options.SignIdentity)
|
||||
newSigs, err := c.createSignatures(ctx, wipResult.manifest, c.options.SignIdentity)
|
||||
if err != nil {
|
||||
return nil, "", "", err
|
||||
return copySingleImageResult{}, err
|
||||
}
|
||||
sigs = append(sigs, newSigs...)
|
||||
|
||||
if len(sigs) > 0 {
|
||||
c.Printf("Storing signatures\n")
|
||||
if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil {
|
||||
return nil, "", "", fmt.Errorf("writing signatures: %w", err)
|
||||
return copySingleImageResult{}, fmt.Errorf("writing signatures: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return manifestBytes, retManifestType, retManifestDigest, nil
|
||||
wipResult.compressionAlgorithms = compressionAlgos
|
||||
res := wipResult // We are done
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary.
|
||||
|
|
@ -323,52 +347,68 @@ func (ic *imageCopier) noPendingManifestUpdates() bool {
|
|||
return reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly})
|
||||
}
|
||||
|
||||
// compareImageDestinationManifestEqual compares the `src` and `dest` image manifests (reading the manifest from the
|
||||
// (possibly remote) destination). Returning true and the destination's manifest, type and digest if they compare equal.
|
||||
func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src *image.SourcedImage, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) {
|
||||
srcManifestDigest, err := manifest.Digest(src.ManifestBlob)
|
||||
// compareImageDestinationManifestEqual compares the source and destination image manifests (reading the manifest from the
|
||||
// (possibly remote) destination). If they are equal, it returns a full copySingleImageResult, nil otherwise.
|
||||
func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context, targetInstance *digest.Digest) (*copySingleImageResult, error) {
|
||||
srcManifestDigest, err := manifest.Digest(ic.src.ManifestBlob)
|
||||
if err != nil {
|
||||
return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err)
|
||||
return nil, fmt.Errorf("calculating manifest digest: %w", err)
|
||||
}
|
||||
|
||||
destImageSource, err := dest.Reference().NewImageSource(ctx, options.DestinationCtx)
|
||||
destImageSource, err := ic.c.dest.Reference().NewImageSource(ctx, ic.c.options.DestinationCtx)
|
||||
if err != nil {
|
||||
logrus.Debugf("Unable to create destination image %s source: %v", dest.Reference(), err)
|
||||
return false, nil, "", "", nil
|
||||
logrus.Debugf("Unable to create destination image %s source: %v", ic.c.dest.Reference(), err)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
destManifest, destManifestType, err := destImageSource.GetManifest(ctx, targetInstance)
|
||||
if err != nil {
|
||||
logrus.Debugf("Unable to get destination image %s/%s manifest: %v", destImageSource, targetInstance, err)
|
||||
return false, nil, "", "", nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
destManifestDigest, err := manifest.Digest(destManifest)
|
||||
if err != nil {
|
||||
return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err)
|
||||
return nil, fmt.Errorf("calculating manifest digest: %w", err)
|
||||
}
|
||||
|
||||
logrus.Debugf("Comparing source and destination manifest digests: %v vs. %v", srcManifestDigest, destManifestDigest)
|
||||
if srcManifestDigest != destManifestDigest {
|
||||
return false, nil, "", "", nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
compressionAlgos := set.New[string]()
|
||||
for _, srcInfo := range ic.src.LayerInfos() {
|
||||
compression := compressionAlgorithmFromMIMEType(srcInfo)
|
||||
compressionAlgos.Add(compression.Name())
|
||||
}
|
||||
|
||||
algos, err := algorithmsByNames(compressionAlgos.Values())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Destination and source manifests, types and digests should all be equivalent
|
||||
return true, destManifest, destManifestType, destManifestDigest, nil
|
||||
return &copySingleImageResult{
|
||||
manifest: destManifest,
|
||||
manifestMIMEType: destManifestType,
|
||||
manifestDigest: srcManifestDigest,
|
||||
compressionAlgorithms: algos,
|
||||
}, nil
|
||||
}
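The skip-copy check above ultimately reduces to comparing manifest digests. A minimal standalone sketch of that comparison, assuming only the public github.com/containers/image/v5/manifest package (illustrative values, not the vendored implementation):

package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
)

func main() {
	// Two manifest blobs standing in for the source manifest and the one read back
	// from the destination; equal digests mean the copy can be skipped.
	srcManifest := []byte(`{"schemaVersion": 2}`)
	destManifest := []byte(`{"schemaVersion": 2}`)

	srcDigest, err := manifest.Digest(srcManifest)
	if err != nil {
		panic(err)
	}
	destDigest, err := manifest.Digest(destManifest)
	if err != nil {
		panic(err)
	}
	fmt.Println("already present:", srcDigest == destDigest)
}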
|
||||
|
||||
// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.cannotModifyManifestReason == "".
|
||||
func (ic *imageCopier) copyLayers(ctx context.Context) error {
|
||||
func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algorithm, error) {
|
||||
srcInfos := ic.src.LayerInfos()
|
||||
numLayers := len(srcInfos)
|
||||
updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
srcInfosUpdated := false
|
||||
if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) {
|
||||
if ic.cannotModifyManifestReason != "" {
|
||||
return fmt.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason)
|
||||
return nil, fmt.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason)
|
||||
}
|
||||
srcInfos = updatedSrcInfos
|
||||
srcInfosUpdated = true
|
||||
|
|
@ -384,7 +424,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
|
|||
// layer is empty.
|
||||
man, err := manifest.FromBlob(ic.src.ManifestBlob, ic.src.ManifestMIMEType)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
manifestLayerInfos := man.LayerInfos()
|
||||
|
||||
|
|
@ -396,7 +436,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
|
|||
defer ic.c.concurrentBlobCopiesSemaphore.Release(1)
|
||||
defer copyGroup.Done()
|
||||
cld := copyLayerData{}
|
||||
if !ic.c.downloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
|
||||
if !ic.c.options.DownloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
|
||||
// DiffIDs are, currently, needed only when converting from schema1.
|
||||
// In which case src.LayerInfos will not have URLs because schema1
|
||||
// does not support them.
|
||||
|
|
@ -415,10 +455,10 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
|
|||
// Decide which layers to encrypt
|
||||
layersToEncrypt := set.New[int]()
|
||||
var encryptAll bool
|
||||
if ic.ociEncryptLayers != nil {
|
||||
encryptAll = len(*ic.ociEncryptLayers) == 0
|
||||
if ic.c.options.OciEncryptLayers != nil {
|
||||
encryptAll = len(*ic.c.options.OciEncryptLayers) == 0
|
||||
totalLayers := len(srcInfos)
|
||||
for _, l := range *ic.ociEncryptLayers {
|
||||
for _, l := range *ic.c.options.OciEncryptLayers {
|
||||
// if layer is negative, it is reverse indexed.
|
||||
layersToEncrypt.Add((totalLayers + l) % totalLayers)
|
||||
}
|
||||
|
|
@ -450,14 +490,18 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
|
|||
// A call to copyGroup.Wait() is done at this point by the defer above.
|
||||
return nil
|
||||
}(); err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
compressionAlgos := set.New[string]()
|
||||
destInfos := make([]types.BlobInfo, numLayers)
|
||||
diffIDs := make([]digest.Digest, numLayers)
|
||||
for i, cld := range data {
|
||||
if cld.err != nil {
|
||||
return cld.err
|
||||
return nil, cld.err
|
||||
}
|
||||
if cld.destInfo.CompressionAlgorithm != nil {
|
||||
compressionAlgos.Add(cld.destInfo.CompressionAlgorithm.Name())
|
||||
}
|
||||
destInfos[i] = cld.destInfo
|
||||
diffIDs[i] = cld.diffID
|
||||
|
|
@ -472,7 +516,11 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
|
|||
if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) {
|
||||
ic.manifestUpdates.LayerInfos = destInfos
|
||||
}
|
||||
return nil
|
||||
algos, err := algorithmsByNames(compressionAlgos.Values())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return algos, nil
|
||||
}
|
||||
|
||||
// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields)
|
||||
|
|
@ -577,6 +625,19 @@ type diffIDResult struct {
|
|||
err error
|
||||
}
|
||||
|
||||
func compressionAlgorithmFromMIMEType(srcInfo types.BlobInfo) *compressiontypes.Algorithm {
|
||||
// This MIME type → compression mapping belongs in manifest-specific code in our manifest
|
||||
// package (but we should preferably replace/change UpdatedImage instead of productizing
|
||||
// this workaround).
|
||||
switch srcInfo.MediaType {
|
||||
case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip:
|
||||
return &compression.Gzip
|
||||
case imgspecv1.MediaTypeImageLayerZstd:
|
||||
return &compression.Zstd
|
||||
}
|
||||
return nil
|
||||
}
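A minimal standalone sketch of the same MIME-type-to-compression mapping, assuming the public manifest, pkg/compression and image-spec packages (an illustration; the vendored helper above is unexported):

package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/pkg/compression"
	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// guessCompression mirrors the mapping above: gzip for Docker schema2 and OCI gzip
// layers, zstd for OCI zstd layers, nil for unknown or uncompressed media types.
func guessCompression(mediaType string) *compressiontypes.Algorithm {
	switch mediaType {
	case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip:
		return &compression.Gzip
	case imgspecv1.MediaTypeImageLayerZstd:
		return &compression.Zstd
	}
	return nil
}

func main() {
	for _, mt := range []string{imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerZstd, "application/octet-stream"} {
		if algo := guessCompression(mt); algo != nil {
			fmt.Println(mt, "->", algo.Name())
		} else {
			fmt.Println(mt, "-> unknown")
		}
	}
}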
|
||||
|
||||
// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it,
|
||||
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
|
||||
// srcRef can be used as an additional hint to the destination during checking whether a layer can be reused but srcRef can be nil.
|
||||
|
|
@ -588,17 +649,8 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
|
|||
// which uses the compression information to compute the updated MediaType values.
|
||||
// (Sadly UpdatedImage() is documented to not update MediaTypes from
|
||||
// ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.)
|
||||
//
|
||||
// This MIME type → compression mapping belongs in manifest-specific code in our manifest
|
||||
// package (but we should preferably replace/change UpdatedImage instead of productizing
|
||||
// this workaround).
|
||||
if srcInfo.CompressionAlgorithm == nil {
|
||||
switch srcInfo.MediaType {
|
||||
case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip:
|
||||
srcInfo.CompressionAlgorithm = &compression.Gzip
|
||||
case imgspecv1.MediaTypeImageLayerZstd:
|
||||
srcInfo.CompressionAlgorithm = &compression.Zstd
|
||||
}
|
||||
srcInfo.CompressionAlgorithm = compressionAlgorithmFromMIMEType(srcInfo)
|
||||
}
|
||||
|
||||
ic.c.printCopyInfo("blob", srcInfo)
|
||||
|
|
@ -608,7 +660,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
|
|||
// When encrypting to decrypting, only use the simple code path. We might be able to optimize more
|
||||
// (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again),
|
||||
// but it’s not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let’s not.
|
||||
encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil)
|
||||
encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.options.OciDecryptConfig != nil)
|
||||
canAvoidProcessingCompleteLayer := !diffIDIsNeeded && !encryptingOrDecrypting
|
||||
|
||||
// Don’t read the layer from the source if we already have the blob, and optimizations are acceptable.
|
||||
|
|
@ -623,12 +675,20 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
|
|||
// a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob.
|
||||
// Fixing that will probably require passing more information to TryReusingBlob() than the current version of
|
||||
// the ImageDestination interface lets us pass in.
|
||||
var requiredCompression *compressiontypes.Algorithm
|
||||
var originalCompression *compressiontypes.Algorithm
|
||||
if ic.requireCompressionFormatMatch {
|
||||
requiredCompression = ic.compressionFormat
|
||||
originalCompression = srcInfo.CompressionAlgorithm
|
||||
}
|
||||
reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
|
||||
Cache: ic.c.blobInfoCache,
|
||||
CanSubstitute: canSubstitute,
|
||||
EmptyLayer: emptyLayer,
|
||||
LayerIndex: &layerIndex,
|
||||
SrcRef: srcRef,
|
||||
Cache: ic.c.blobInfoCache,
|
||||
CanSubstitute: canSubstitute,
|
||||
EmptyLayer: emptyLayer,
|
||||
LayerIndex: &layerIndex,
|
||||
SrcRef: srcRef,
|
||||
RequiredCompression: requiredCompression,
|
||||
OriginalCompression: originalCompression,
|
||||
})
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err)
|
||||
|
|
@ -642,8 +702,8 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
|
|||
}()
|
||||
|
||||
// Throw an event that the layer has been skipped
|
||||
if ic.c.progress != nil && ic.c.progressInterval > 0 {
|
||||
ic.c.progress <- types.ProgressProperties{
|
||||
if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 {
|
||||
ic.c.options.Progress <- types.ProgressProperties{
|
||||
Event: types.ProgressEventSkipped,
|
||||
Artifact: srcInfo,
|
||||
}
|
||||
|
|
@ -818,3 +878,16 @@ func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorF
|
|||
|
||||
return digest.Canonical.FromReader(stream)
|
||||
}
|
||||
|
||||
// algorithmsByNames returns slice of Algorithms from slice of Algorithm Names
|
||||
func algorithmsByNames(names []string) ([]compressiontypes.Algorithm, error) {
|
||||
result := []compressiontypes.Algorithm{}
|
||||
for _, name := range names {
|
||||
algo, err := compression.AlgorithmByName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result = append(result, algo)
|
||||
}
|
||||
return result, nil
|
||||
}
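The same name-to-algorithm lookup is available through the public pkg/compression API; a short usage sketch (illustrative, since algorithmsByNames itself is unexported):

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/compression"
)

func main() {
	// Resolve algorithm names (as collected from copied layers) back into Algorithm values.
	for _, name := range []string{"gzip", "zstd"} {
		algo, err := compression.AlgorithmByName(name)
		if err != nil {
			panic(err)
		}
		fmt.Println(algo.Name())
	}
}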
|
||||
|
|
|
|||
41 vendor/github.com/containers/image/v5/docker/docker_image_dest.go generated vendored
|
|
@ -321,13 +321,21 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
|
|||
return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
|
||||
}
|
||||
|
||||
// First, check whether the blob happens to already exist at the destination.
|
||||
haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
|
||||
if err != nil {
|
||||
return false, private.ReusedBlob{}, err
|
||||
}
|
||||
if haveBlob {
|
||||
return true, reusedInfo, nil
|
||||
if impl.OriginalBlobMatchesRequiredCompression(options) {
|
||||
// First, check whether the blob happens to already exist at the destination.
|
||||
haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
|
||||
if err != nil {
|
||||
return false, private.ReusedBlob{}, err
|
||||
}
|
||||
if haveBlob {
|
||||
return true, reusedInfo, nil
|
||||
}
|
||||
} else {
|
||||
requiredCompression := "nil"
|
||||
if options.OriginalCompression != nil {
|
||||
requiredCompression = options.OriginalCompression.Name()
|
||||
}
|
||||
logrus.Debugf("Ignoring exact blob match case due to compression mismatch ( %s vs %s )", options.RequiredCompression.Name(), requiredCompression)
|
||||
}
|
||||
|
||||
// Then try reusing blobs from other locations.
|
||||
|
|
@ -338,6 +346,19 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
|
|||
logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
|
||||
continue
|
||||
}
|
||||
compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName)
|
||||
if err != nil {
|
||||
logrus.Debugf("OperationAndAlgorithmForCompressor Failed: %v", err)
|
||||
continue
|
||||
}
|
||||
if !impl.BlobMatchesRequiredCompression(options, compressionAlgorithm) {
|
||||
requiredCompression := "nil"
|
||||
if compressionAlgorithm != nil {
|
||||
requiredCompression = compressionAlgorithm.Name()
|
||||
}
|
||||
logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) in %s", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression, candidateRepo.Name())
|
||||
continue
|
||||
}
|
||||
if candidate.CompressorName != blobinfocache.Uncompressed {
|
||||
logrus.Debugf("Trying to reuse cached location %s compressed with %s in %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name())
|
||||
} else {
|
||||
|
|
@ -388,12 +409,6 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
|
|||
|
||||
options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
|
||||
|
||||
compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName)
|
||||
if err != nil {
|
||||
logrus.Debugf("... Failed: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
return true, private.ReusedBlob{
|
||||
Digest: candidate.Digest,
|
||||
Size: size,
|
||||
|
|
|
|||
3 vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go generated vendored
|
|
@ -129,6 +129,9 @@ func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader,
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
	if !impl.OriginalBlobMatchesRequiredCompression(options) {
		return false, private.ReusedBlob{}, nil
	}
	if err := d.archive.lock(); err != nil {
		return false, private.ReusedBlob{}, err
	}
20 vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go generated vendored Normal file
|
|
@ -0,0 +1,20 @@
package impl

import (
	"github.com/containers/image/v5/internal/private"
	compression "github.com/containers/image/v5/pkg/compression/types"
)

// BlobMatchesRequiredCompression validates if compression is required by the caller while selecting a blob, if it is required
// then function performs a match against the compression requested by the caller and compression of existing blob
// (which can be nil to represent uncompressed or unknown)
func BlobMatchesRequiredCompression(options private.TryReusingBlobOptions, candidateCompression *compression.Algorithm) bool {
	if options.RequiredCompression == nil {
		return true // no requirement imposed
	}
	return candidateCompression != nil && (options.RequiredCompression.Name() == candidateCompression.Name())
}

func OriginalBlobMatchesRequiredCompression(opts private.TryReusingBlobOptions) bool {
	return BlobMatchesRequiredCompression(opts, opts.OriginalCompression)
}
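A small self-contained sketch of the matching rule above; since the impl package is internal, the predicate is restated locally rather than imported (it mirrors, not replaces, the vendored helper):

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/compression"
	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
)

// matches restates the rule: no requirement always passes; otherwise the candidate
// must be known and carry the same algorithm name as the requirement.
func matches(required, candidate *compressiontypes.Algorithm) bool {
	if required == nil {
		return true
	}
	return candidate != nil && required.Name() == candidate.Name()
}

func main() {
	fmt.Println(matches(nil, &compression.Zstd))               // true: nothing required
	fmt.Println(matches(&compression.Gzip, &compression.Gzip)) // true: names match
	fmt.Println(matches(&compression.Zstd, &compression.Gzip)) // false: mismatch
	fmt.Println(matches(&compression.Zstd, nil))               // false: candidate unknown/uncompressed
}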
3 vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go generated vendored
|
|
@ -64,6 +64,9 @@ func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inpu
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
	if options.RequiredCompression != nil {
		return false, private.ReusedBlob{}, nil
	}
	reused, blob, err := w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute)
	if !reused || err != nil {
		return reused, private.ReusedBlob{}, err
14 vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go generated vendored
|
|
@ -5,6 +5,7 @@ import (
|
|||
"fmt"
|
||||
|
||||
platform "github.com/containers/image/v5/internal/pkg/platform"
|
||||
compression "github.com/containers/image/v5/pkg/compression/types"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
|
@ -57,11 +58,20 @@ func (list *Schema2ListPublic) Instances() []digest.Digest {
|
|||
func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
|
||||
for _, manifest := range list.Manifests {
|
||||
if manifest.Digest == instanceDigest {
|
||||
return ListUpdate{
|
||||
ret := ListUpdate{
|
||||
Digest: manifest.Digest,
|
||||
Size: manifest.Size,
|
||||
MediaType: manifest.MediaType,
|
||||
}, nil
|
||||
}
|
||||
ret.ReadOnly.CompressionAlgorithmNames = []string{compression.GzipAlgorithmName}
|
||||
ret.ReadOnly.Platform = &imgspecv1.Platform{
|
||||
OS: manifest.Platform.OS,
|
||||
Architecture: manifest.Platform.Architecture,
|
||||
OSVersion: manifest.Platform.OSVersion,
|
||||
OSFeatures: manifest.Platform.OSFeatures,
|
||||
Variant: manifest.Platform.Variant,
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
}
|
||||
return ListUpdate{}, fmt.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest)
|
||||
|
|
|
|||
6 vendor/github.com/containers/image/v5/internal/manifest/list.go generated vendored
|
|
@ -68,6 +68,12 @@ type ListUpdate struct {
	Digest    digest.Digest
	Size      int64
	MediaType string
	// ReadOnly fields: may be set by Instance(), ignored by UpdateInstance()
	ReadOnly struct {
		Platform                  *imgspecv1.Platform
		Annotations               map[string]string
		CompressionAlgorithmNames []string
	}
}

type ListOp int
31 vendor/github.com/containers/image/v5/internal/manifest/oci_index.go generated vendored
|
|
@ -53,11 +53,15 @@ func (index *OCI1IndexPublic) Instances() []digest.Digest {
|
|||
func (index *OCI1IndexPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
|
||||
for _, manifest := range index.Manifests {
|
||||
if manifest.Digest == instanceDigest {
|
||||
return ListUpdate{
|
||||
ret := ListUpdate{
|
||||
Digest: manifest.Digest,
|
||||
Size: manifest.Size,
|
||||
MediaType: manifest.MediaType,
|
||||
}, nil
|
||||
}
|
||||
ret.ReadOnly.Platform = manifest.Platform
|
||||
ret.ReadOnly.Annotations = manifest.Annotations
|
||||
ret.ReadOnly.CompressionAlgorithmNames = annotationsToCompressionAlgorithmNames(manifest.Annotations)
|
||||
return ret, nil
|
||||
}
|
||||
}
|
||||
return ListUpdate{}, fmt.Errorf("unable to find instance %s in OCI1Index", instanceDigest)
|
||||
|
|
@ -78,14 +82,29 @@ func (index *OCI1IndexPublic) UpdateInstances(updates []ListUpdate) error {
|
|||
return index.editInstances(editInstances)
|
||||
}
|
||||
|
||||
func addCompressionAnnotations(compressionAlgorithms []compression.Algorithm, annotationsMap map[string]string) {
|
||||
func annotationsToCompressionAlgorithmNames(annotations map[string]string) []string {
|
||||
result := make([]string, 0, 1)
|
||||
if annotations[OCI1InstanceAnnotationCompressionZSTD] == OCI1InstanceAnnotationCompressionZSTDValue {
|
||||
result = append(result, compression.ZstdAlgorithmName)
|
||||
}
|
||||
// No compression was detected, hence assume instance has default compression `Gzip`
|
||||
if len(result) == 0 {
|
||||
result = append(result, compression.GzipAlgorithmName)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func addCompressionAnnotations(compressionAlgorithms []compression.Algorithm, annotationsMap *map[string]string) {
|
||||
// TODO: This should also delete the algorithm if map already contains an algorithm and compressionAlgorithm
|
||||
// list has a different algorithm. To do that, we would need to modify the callers to always provide a reliable
|
||||
// and full compressionAlgorithms list.
|
||||
if *annotationsMap == nil && len(compressionAlgorithms) > 0 {
|
||||
*annotationsMap = map[string]string{}
|
||||
}
|
||||
for _, algo := range compressionAlgorithms {
|
||||
switch algo.Name() {
|
||||
case compression.ZstdAlgorithmName:
|
||||
annotationsMap[OCI1InstanceAnnotationCompressionZSTD] = OCI1InstanceAnnotationCompressionZSTDValue
|
||||
(*annotationsMap)[OCI1InstanceAnnotationCompressionZSTD] = OCI1InstanceAnnotationCompressionZSTDValue
|
||||
default:
|
||||
continue
|
||||
}
|
||||
|
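The switch to *map[string]string above lets the callee allocate the annotations map even when the caller still holds nil. A self-contained sketch of that pattern (the key below is a placeholder, not the real annotation constant):

package main

import "fmt"

// setAnnotation allocates the map through the pointer when it is still nil, so the
// allocation is visible to the caller; a plain map parameter could not do that.
func setAnnotation(annotations *map[string]string, key, value string) {
	if *annotations == nil {
		*annotations = map[string]string{}
	}
	(*annotations)[key] = value
}

func main() {
	var annotations map[string]string // nil, like a descriptor with no annotations yet
	setAnnotation(&annotations, "example.org/compression", "zstd") // placeholder key/value
	fmt.Println(annotations)
}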
|
@ -130,13 +149,13 @@ func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error {
|
|||
maps.Copy(index.Manifests[targetIndex].Annotations, editInstance.UpdateAnnotations)
|
||||
}
|
||||
}
|
||||
addCompressionAnnotations(editInstance.UpdateCompressionAlgorithms, index.Manifests[targetIndex].Annotations)
|
||||
addCompressionAnnotations(editInstance.UpdateCompressionAlgorithms, &index.Manifests[targetIndex].Annotations)
|
||||
case ListOpAdd:
|
||||
annotations := map[string]string{}
|
||||
if editInstance.AddAnnotations != nil {
|
||||
annotations = maps.Clone(editInstance.AddAnnotations)
|
||||
}
|
||||
addCompressionAnnotations(editInstance.AddCompressionAlgorithms, annotations)
|
||||
addCompressionAnnotations(editInstance.AddCompressionAlgorithms, &annotations)
|
||||
addedEntries = append(addedEntries, imgspecv1.Descriptor{
|
||||
MediaType: editInstance.AddMediaType,
|
||||
Size: editInstance.AddSize,
|
||||
|
|
|
|||
9 vendor/github.com/containers/image/v5/internal/private/private.go generated vendored
|
|
@ -112,10 +112,11 @@ type TryReusingBlobOptions struct {
	// Transports, OTOH, MUST support these fields being zero-valued for types.ImageDestination callers
	// if they use internal/imagedestination/impl.Compat;
	// in that case, they will all be consistently zero-valued.

	EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
	LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
	SrcRef reference.Named // A reference to the source image that contains the input blob.
	RequiredCompression *compression.Algorithm // If set, reuse blobs with a matching algorithm as per implementations in internal/imagedestination/impl.helpers.go
	OriginalCompression *compression.Algorithm // Must be set if RequiredCompression is set; can be set to nil to indicate “uncompressed” or “unknown”.
	EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
	LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
	SrcRef reference.Named // A reference to the source image that contains the input blob.
}

// ReusedBlob is information about a blob reused in a destination.
6 vendor/github.com/containers/image/v5/internal/set/set.go generated vendored
|
|
@ -28,6 +28,12 @@ func (s *Set[E]) Add(v E) {
	s.m[v] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
}

func (s *Set[E]) AddSlice(slice []E) {
	for _, v := range slice {
		s.Add(v)
	}
}

func (s *Set[E]) Delete(v E) {
	delete(s.m, v)
}
3 vendor/github.com/containers/image/v5/oci/layout/oci_dest.go generated vendored
|
|
@ -172,6 +172,9 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
	if !impl.OriginalBlobMatchesRequiredCompression(options) {
		return false, private.ReusedBlob{}, nil
	}
	if info.Digest == "" {
		return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
	}
4 vendor/github.com/containers/image/v5/version/version.go generated vendored
|
|
@ -6,9 +6,9 @@ const (
	// VersionMajor is for an API incompatible changes
	VersionMajor = 5
	// VersionMinor is for functionality in a backwards-compatible manner
	VersionMinor = 26
	VersionMinor = 27
	// VersionPatch is for backwards-compatible bug fixes
	VersionPatch = 1
	VersionPatch = 0

	// VersionDev indicates development branch. Releases will be empty string.
	VersionDev = ""
19 vendor/github.com/osbuild/images/internal/oscap/oscap.go generated vendored
|
|
@ -1,7 +1,11 @@
package oscap

import (
	"fmt"
	"path/filepath"
	"strings"

	"github.com/osbuild/images/internal/fsnode"
)

type Profile string

@ -35,6 +39,9 @@ const (
	defaultCentos9Datastream string = "/usr/share/xml/scap/ssg/content/ssg-cs9-ds.xml"
	defaultRHEL8Datastream string = "/usr/share/xml/scap/ssg/content/ssg-rhel8-ds.xml"
	defaultRHEL9Datastream string = "/usr/share/xml/scap/ssg/content/ssg-rhel9-ds.xml"

	// tailoring directory path
	tailoringDirPath string = "/usr/share/xml/osbuild-openscap-data"
)

func DefaultFedoraDatastream() string {

@ -70,3 +77,15 @@ func IsProfileAllowed(profile string, allowlist []Profile) bool {

	return false
}

func GetTailoringFile(profile string) (string, string, *fsnode.Directory, error) {
	newProfile := fmt.Sprintf("%s_osbuild_tailoring", profile)
	path := filepath.Join(tailoringDirPath, "tailoring.xml")

	tailoringDir, err := fsnode.NewDirectory(tailoringDirPath, nil, nil, nil, true)
	if err != nil {
		return "", "", nil, err
	}

	return newProfile, path, tailoringDir, nil
}
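GetTailoringFile lives in an internal package, so it cannot be imported directly; a standalone sketch of the values it derives for a given profile ID (the profile string is an example):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	const tailoringDirPath = "/usr/share/xml/osbuild-openscap-data"
	profile := "xccdf_org.ssgproject.content_profile_cis" // example profile ID

	// Mirrors the naming scheme above: "<profile>_osbuild_tailoring" plus a fixed tailoring path.
	newProfile := fmt.Sprintf("%s_osbuild_tailoring", profile)
	path := filepath.Join(tailoringDirPath, "tailoring.xml")

	fmt.Println(newProfile)
	fmt.Println(path)
}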
10 vendor/github.com/osbuild/images/pkg/blueprint/customizations.go generated vendored
|
|
@ -107,8 +107,14 @@ type ServicesCustomization struct {
}

type OpenSCAPCustomization struct {
	DataStream string `json:"datastream,omitempty" toml:"datastream,omitempty"`
	ProfileID string `json:"profile_id,omitempty" toml:"profile_id,omitempty"`
	DataStream string `json:"datastream,omitempty" toml:"datastream,omitempty"`
	ProfileID string `json:"profile_id,omitempty" toml:"profile_id,omitempty"`
	Tailoring *OpenSCAPTailoringCustomizations `json:"tailoring,omitempty" toml:"tailoring,omitempty"`
}

type OpenSCAPTailoringCustomizations struct {
	Selected []string `json:"selected,omitempty" toml:"selected,omitempty"`
	Unselected []string `json:"unselected,omitempty" toml:"unselected,omitempty"`
}

type CustomizationError struct {
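A hedged sketch of how a blueprint could populate the new tailoring fields, using the public pkg/blueprint types from the pinned osbuild/images revision (the profile and rule IDs are placeholders):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/osbuild/images/pkg/blueprint"
)

func main() {
	oscap := blueprint.OpenSCAPCustomization{
		DataStream: "/usr/share/xml/scap/ssg/content/ssg-rhel9-ds.xml",
		ProfileID:  "xccdf_org.ssgproject.content_profile_cis", // placeholder profile
		Tailoring: &blueprint.OpenSCAPTailoringCustomizations{
			Selected:   []string{"placeholder_rule_to_enable"},
			Unselected: []string{"placeholder_rule_to_disable"},
		},
	}

	out, err := json.MarshalIndent(oscap, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}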
59 vendor/github.com/osbuild/images/pkg/distro/fedora/images.go generated vendored
|
|
@ -118,22 +118,6 @@ func osCustomizations(
|
|||
osc.SElinux = "targeted"
|
||||
}
|
||||
|
||||
if oscapConfig := c.GetOpenSCAP(); oscapConfig != nil {
|
||||
if t.rpmOstree {
|
||||
panic("unexpected oscap options for ostree image type")
|
||||
}
|
||||
var datastream = oscapConfig.DataStream
|
||||
if datastream == "" {
|
||||
datastream = oscap.DefaultFedoraDatastream()
|
||||
}
|
||||
osc.OpenSCAPConfig = osbuild.NewOscapRemediationStageOptions(
|
||||
osbuild.OscapConfig{
|
||||
Datastream: datastream,
|
||||
ProfileID: oscapConfig.ProfileID,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
var err error
|
||||
osc.Directories, err = blueprint.DirectoryCustomizationsToFsNodeDirectories(c.GetDirectories())
|
||||
if err != nil {
|
||||
|
|
@ -174,6 +158,49 @@ func osCustomizations(
|
|||
osc.YUMRepos = append(osc.YUMRepos, osbuild.NewYumReposStageOptions(filename, repos))
|
||||
}
|
||||
|
||||
if oscapConfig := c.GetOpenSCAP(); oscapConfig != nil {
|
||||
if t.rpmOstree {
|
||||
panic("unexpected oscap options for ostree image type")
|
||||
}
|
||||
var datastream = oscapConfig.DataStream
|
||||
if datastream == "" {
|
||||
datastream = oscap.DefaultFedoraDatastream()
|
||||
}
|
||||
|
||||
oscapStageOptions := osbuild.OscapConfig{
|
||||
Datastream: datastream,
|
||||
ProfileID: oscapConfig.ProfileID,
|
||||
}
|
||||
|
||||
if oscapConfig.Tailoring != nil {
|
||||
newProfile, tailoringFilepath, tailoringDir, err := oscap.GetTailoringFile(oscapConfig.ProfileID)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("unexpected error creating tailoring file options: %v", err))
|
||||
}
|
||||
|
||||
tailoringOptions := osbuild.OscapAutotailorConfig{
|
||||
Selected: oscapConfig.Tailoring.Selected,
|
||||
Unselected: oscapConfig.Tailoring.Unselected,
|
||||
NewProfile: newProfile,
|
||||
}
|
||||
|
||||
osc.OpenSCAPTailorConfig = osbuild.NewOscapAutotailorStageOptions(
|
||||
tailoringFilepath,
|
||||
oscapStageOptions,
|
||||
tailoringOptions,
|
||||
)
|
||||
|
||||
// overwrite the profile id with the new tailoring id
|
||||
oscapStageOptions.ProfileID = newProfile
|
||||
oscapStageOptions.Tailoring = tailoringFilepath
|
||||
|
||||
// add the parent directory for the tailoring file
|
||||
osc.Directories = append(osc.Directories, tailoringDir)
|
||||
}
|
||||
|
||||
osc.OpenSCAPConfig = osbuild.NewOscapRemediationStageOptions(oscapStageOptions)
|
||||
}
|
||||
|
||||
osc.ShellInit = imageConfig.ShellInit
|
||||
|
||||
osc.Grub2Config = imageConfig.Grub2Config
|
||||
|
|
|
|||
10 vendor/github.com/osbuild/images/pkg/distro/fedora/imagetype.go generated vendored
|
|
@ -16,7 +16,6 @@ import (
|
|||
"github.com/osbuild/images/pkg/distro"
|
||||
"github.com/osbuild/images/pkg/image"
|
||||
"github.com/osbuild/images/pkg/manifest"
|
||||
"github.com/osbuild/images/pkg/ostree"
|
||||
"github.com/osbuild/images/pkg/platform"
|
||||
"github.com/osbuild/images/pkg/rpmmd"
|
||||
"golang.org/x/exp/slices"
|
||||
|
|
@ -245,18 +244,15 @@ func (t *imageType) checkOptions(bp *blueprint.Blueprint, options distro.ImageOp
|
|||
return nil, fmt.Errorf("embedding containers is not supported for %s on %s", t.name, t.arch.distro.name)
|
||||
}
|
||||
|
||||
ostreeURL := ""
|
||||
if options.OSTree != nil {
|
||||
if options.OSTree.ParentRef != "" && options.OSTree.URL == "" {
|
||||
// specifying parent ref also requires URL
|
||||
return nil, ostree.NewParameterComboError("ostree parent ref specified, but no URL to retrieve it")
|
||||
if err := options.OSTree.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ostreeURL = options.OSTree.URL
|
||||
}
|
||||
|
||||
if t.bootISO && t.rpmOstree {
|
||||
// ostree-based ISOs require a URL from which to pull a payload commit
|
||||
if ostreeURL == "" {
|
||||
if options.OSTree == nil || options.OSTree.URL == "" {
|
||||
return nil, fmt.Errorf("boot ISO image type %q requires specifying a URL from which to retrieve the OSTree commit", t.name)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
59 vendor/github.com/osbuild/images/pkg/distro/rhel8/images.go generated vendored
|
|
@ -133,22 +133,6 @@ func osCustomizations(
|
|||
osc.SElinux = "targeted"
|
||||
}
|
||||
|
||||
if oscapConfig := c.GetOpenSCAP(); oscapConfig != nil {
|
||||
if t.rpmOstree {
|
||||
panic("unexpected oscap options for ostree image type")
|
||||
}
|
||||
var datastream = oscapConfig.DataStream
|
||||
if datastream == "" {
|
||||
datastream = oscap.DefaultRHEL8Datastream(t.arch.distro.isRHEL())
|
||||
}
|
||||
osc.OpenSCAPConfig = osbuild.NewOscapRemediationStageOptions(
|
||||
osbuild.OscapConfig{
|
||||
Datastream: datastream,
|
||||
ProfileID: oscapConfig.ProfileID,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
if t.arch.distro.isRHEL() && options.Facts != nil {
|
||||
osc.FactAPIType = &options.Facts.APIType
|
||||
}
|
||||
|
|
@ -197,6 +181,49 @@ func osCustomizations(
|
|||
osc.YUMRepos = append(osc.YUMRepos, osbuild.NewYumReposStageOptions(filename, repos))
|
||||
}
|
||||
|
||||
if oscapConfig := c.GetOpenSCAP(); oscapConfig != nil {
|
||||
if t.rpmOstree {
|
||||
panic("unexpected oscap options for ostree image type")
|
||||
}
|
||||
var datastream = oscapConfig.DataStream
|
||||
if datastream == "" {
|
||||
datastream = oscap.DefaultRHEL8Datastream(t.arch.distro.isRHEL())
|
||||
}
|
||||
|
||||
oscapStageOptions := osbuild.OscapConfig{
|
||||
Datastream: datastream,
|
||||
ProfileID: oscapConfig.ProfileID,
|
||||
}
|
||||
|
||||
if oscapConfig.Tailoring != nil {
|
||||
newProfile, tailoringFilepath, tailoringDir, err := oscap.GetTailoringFile(oscapConfig.ProfileID)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("unexpected error creating tailoring file options: %v", err))
|
||||
}
|
||||
|
||||
tailoringOptions := osbuild.OscapAutotailorConfig{
|
||||
Selected: oscapConfig.Tailoring.Selected,
|
||||
Unselected: oscapConfig.Tailoring.Unselected,
|
||||
NewProfile: newProfile,
|
||||
}
|
||||
|
||||
osc.OpenSCAPTailorConfig = osbuild.NewOscapAutotailorStageOptions(
|
||||
tailoringFilepath,
|
||||
oscapStageOptions,
|
||||
tailoringOptions,
|
||||
)
|
||||
|
||||
// overwrite the profile id with the new tailoring id
|
||||
oscapStageOptions.ProfileID = newProfile
|
||||
oscapStageOptions.Tailoring = tailoringFilepath
|
||||
|
||||
// add the parent directory for the tailoring file
|
||||
osc.Directories = append(osc.Directories, tailoringDir)
|
||||
}
|
||||
|
||||
osc.OpenSCAPConfig = osbuild.NewOscapRemediationStageOptions(oscapStageOptions)
|
||||
}
|
||||
|
||||
osc.ShellInit = imageConfig.ShellInit
|
||||
|
||||
osc.Grub2Config = imageConfig.Grub2Config
|
||||
|
|
|
|||
25 vendor/github.com/osbuild/images/pkg/distro/rhel8/imagetype.go generated vendored
|
|
@ -19,7 +19,6 @@ import (
|
|||
"github.com/osbuild/images/pkg/distro"
|
||||
"github.com/osbuild/images/pkg/image"
|
||||
"github.com/osbuild/images/pkg/manifest"
|
||||
"github.com/osbuild/images/pkg/ostree"
|
||||
"github.com/osbuild/images/pkg/platform"
|
||||
"github.com/osbuild/images/pkg/rpmmd"
|
||||
)
|
||||
|
|
@ -279,19 +278,16 @@ func (t *imageType) checkOptions(bp *blueprint.Blueprint, options distro.ImageOp
|
|||
return warnings, fmt.Errorf("embedding containers is not supported for %s on %s", t.name, t.arch.distro.name)
|
||||
}
|
||||
|
||||
ostreeURL := ""
|
||||
if options.OSTree != nil {
|
||||
if options.OSTree.ParentRef != "" && options.OSTree.URL == "" {
|
||||
// specifying parent ref also requires URL
|
||||
return nil, ostree.NewParameterComboError("ostree parent ref specified, but no URL to retrieve it")
|
||||
if err := options.OSTree.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ostreeURL = options.OSTree.URL
|
||||
}
|
||||
|
||||
if t.bootISO && t.rpmOstree {
|
||||
// ostree-based ISOs require a URL from which to pull a payload commit
|
||||
if ostreeURL == "" {
|
||||
return warnings, fmt.Errorf("boot ISO image type %q requires specifying a URL from which to retrieve the OSTree commit", t.name)
|
||||
if options.OSTree == nil || options.OSTree.URL == "" {
|
||||
return nil, fmt.Errorf("boot ISO image type %q requires specifying a URL from which to retrieve the OSTree commit", t.name)
|
||||
}
|
||||
|
||||
if t.name == "edge-simplified-installer" {
|
||||
|
|
@ -331,8 +327,8 @@ func (t *imageType) checkOptions(bp *blueprint.Blueprint, options distro.ImageOp
|
|||
|
||||
if t.name == "edge-raw-image" {
|
||||
// ostree-based bootable images require a URL from which to pull a payload commit
|
||||
if ostreeURL == "" {
|
||||
return warnings, fmt.Errorf("edge raw images require specifying a URL from which to retrieve the OSTree commit")
|
||||
if options.OSTree == nil || options.OSTree.URL == "" {
|
||||
return warnings, fmt.Errorf("%q images require specifying a URL from which to retrieve the OSTree commit", t.name)
|
||||
}
|
||||
|
||||
allowed := []string{"User", "Group"}
|
||||
|
|
@ -357,7 +353,7 @@ func (t *imageType) checkOptions(bp *blueprint.Blueprint, options distro.ImageOp
|
|||
}
|
||||
}
|
||||
|
||||
if kernelOpts := customizations.GetKernel(); kernelOpts.Append != "" && t.rpmOstree && (!t.bootable || t.bootISO) {
|
||||
if kernelOpts := customizations.GetKernel(); kernelOpts.Append != "" && t.rpmOstree && t.name != "edge-raw-image" && t.name != "edge-simplified-installer" {
|
||||
return warnings, fmt.Errorf("kernel boot parameter customizations are not supported for ostree types")
|
||||
}
|
||||
|
||||
|
|
@ -373,12 +369,10 @@ func (t *imageType) checkOptions(bp *blueprint.Blueprint, options distro.ImageOp
|
|||
}
|
||||
|
||||
if osc := customizations.GetOpenSCAP(); osc != nil {
|
||||
// only add support for RHEL 8.7 and above.
|
||||
if common.VersionLessThan(t.arch.distro.osVersion, "8.7") {
|
||||
if t.arch.distro.osVersion == "9.0" {
|
||||
return warnings, fmt.Errorf(fmt.Sprintf("OpenSCAP unsupported os version: %s", t.arch.distro.osVersion))
|
||||
}
|
||||
supported := oscap.IsProfileAllowed(osc.ProfileID, oscapProfileAllowList)
|
||||
if !supported {
|
||||
if !oscap.IsProfileAllowed(osc.ProfileID, oscapProfileAllowList) {
|
||||
return warnings, fmt.Errorf(fmt.Sprintf("OpenSCAP unsupported profile: %s", osc.ProfileID))
|
||||
}
|
||||
if t.rpmOstree {
|
||||
|
|
@ -397,7 +391,6 @@ func (t *imageType) checkOptions(bp *blueprint.Blueprint, options distro.ImageOp
|
|||
if err != nil {
|
||||
return warnings, err
|
||||
}
|
||||
|
||||
err = blueprint.CheckDirectoryCustomizationsPolicy(dc, pathpolicy.CustomDirectoriesPolicies)
|
||||
if err != nil {
|
||||
return warnings, err
|
||||
|
|
|
|||
59 vendor/github.com/osbuild/images/pkg/distro/rhel9/images.go generated vendored
|
|
@ -130,22 +130,6 @@ func osCustomizations(
|
|||
osc.SElinux = "targeted"
|
||||
}
|
||||
|
||||
if oscapConfig := c.GetOpenSCAP(); oscapConfig != nil {
|
||||
if t.rpmOstree {
|
||||
panic("unexpected oscap options for ostree image type")
|
||||
}
|
||||
var datastream = oscapConfig.DataStream
|
||||
if datastream == "" {
|
||||
datastream = oscap.DefaultRHEL9Datastream(t.arch.distro.isRHEL())
|
||||
}
|
||||
osc.OpenSCAPConfig = osbuild.NewOscapRemediationStageOptions(
|
||||
osbuild.OscapConfig{
|
||||
Datastream: datastream,
|
||||
ProfileID: oscapConfig.ProfileID,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
if t.arch.distro.isRHEL() && options.Facts != nil {
|
||||
osc.FactAPIType = &options.Facts.APIType
|
||||
}
|
||||
|
|
@ -194,6 +178,49 @@ func osCustomizations(
|
|||
osc.YUMRepos = append(osc.YUMRepos, osbuild.NewYumReposStageOptions(filename, repos))
|
||||
}
|
||||
|
||||
if oscapConfig := c.GetOpenSCAP(); oscapConfig != nil {
|
||||
if t.rpmOstree {
|
||||
panic("unexpected oscap options for ostree image type")
|
||||
}
|
||||
var datastream = oscapConfig.DataStream
|
||||
if datastream == "" {
|
||||
datastream = oscap.DefaultRHEL9Datastream(t.arch.distro.isRHEL())
|
||||
}
|
||||
|
||||
oscapStageOptions := osbuild.OscapConfig{
|
||||
Datastream: datastream,
|
||||
ProfileID: oscapConfig.ProfileID,
|
||||
}
|
||||
|
||||
if oscapConfig.Tailoring != nil {
|
||||
newProfile, tailoringFilepath, tailoringDir, err := oscap.GetTailoringFile(oscapConfig.ProfileID)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("unexpected error creating tailoring file options: %v", err))
|
||||
}
|
||||
|
||||
tailoringOptions := osbuild.OscapAutotailorConfig{
|
||||
Selected: oscapConfig.Tailoring.Selected,
|
||||
Unselected: oscapConfig.Tailoring.Unselected,
|
||||
NewProfile: newProfile,
|
||||
}
|
||||
|
||||
osc.OpenSCAPTailorConfig = osbuild.NewOscapAutotailorStageOptions(
|
||||
tailoringFilepath,
|
||||
oscapStageOptions,
|
||||
tailoringOptions,
|
||||
)
|
||||
|
||||
// overwrite the profile id with the new tailoring id
|
||||
oscapStageOptions.ProfileID = newProfile
|
||||
oscapStageOptions.Tailoring = tailoringFilepath
|
||||
|
||||
// add the parent directory for the tailoring file
|
||||
osc.Directories = append(osc.Directories, tailoringDir)
|
||||
}
|
||||
|
||||
osc.OpenSCAPConfig = osbuild.NewOscapRemediationStageOptions(oscapStageOptions)
|
||||
}
|
||||
|
||||
osc.ShellInit = imageConfig.ShellInit
|
||||
|
||||
osc.Grub2Config = imageConfig.Grub2Config
|
||||
|
|
|
|||
14 vendor/github.com/osbuild/images/pkg/distro/rhel9/imagetype.go generated vendored
|
|
@ -19,7 +19,6 @@ import (
|
|||
"github.com/osbuild/images/pkg/distro"
|
||||
"github.com/osbuild/images/pkg/image"
|
||||
"github.com/osbuild/images/pkg/manifest"
|
||||
"github.com/osbuild/images/pkg/ostree"
|
||||
"github.com/osbuild/images/pkg/platform"
|
||||
"github.com/osbuild/images/pkg/rpmmd"
|
||||
)
|
||||
|
|
@ -282,19 +281,16 @@ func (t *imageType) checkOptions(bp *blueprint.Blueprint, options distro.ImageOp
|
|||
return warnings, fmt.Errorf("embedding containers is not supported for %s on %s", t.name, t.arch.distro.name)
|
||||
}
|
||||
|
||||
ostreeURL := ""
|
||||
if options.OSTree != nil {
|
||||
if options.OSTree.ParentRef != "" && options.OSTree.URL == "" {
|
||||
// specifying parent ref also requires URL
|
||||
return nil, ostree.NewParameterComboError("ostree parent ref specified, but no URL to retrieve it")
|
||||
if err := options.OSTree.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ostreeURL = options.OSTree.URL
|
||||
}
|
||||
|
||||
if t.bootISO && t.rpmOstree {
|
||||
// ostree-based ISOs require a URL from which to pull a payload commit
|
||||
if ostreeURL == "" {
|
||||
return warnings, fmt.Errorf("boot ISO image type %q requires specifying a URL from which to retrieve the OSTree commit", t.name)
|
||||
if options.OSTree == nil || options.OSTree.URL == "" {
|
||||
return nil, fmt.Errorf("boot ISO image type %q requires specifying a URL from which to retrieve the OSTree commit", t.name)
|
||||
}
|
||||
|
||||
if t.name == "edge-simplified-installer" {
|
||||
|
|
@ -345,7 +341,7 @@ func (t *imageType) checkOptions(bp *blueprint.Blueprint, options distro.ImageOp
|
|||
|
||||
if t.name == "edge-raw-image" || t.name == "edge-ami" || t.name == "edge-vsphere" {
|
||||
// ostree-based bootable images require a URL from which to pull a payload commit
|
||||
if ostreeURL == "" {
|
||||
if options.OSTree == nil || options.OSTree.URL == "" {
|
||||
return warnings, fmt.Errorf("%q images require specifying a URL from which to retrieve the OSTree commit", t.name)
|
||||
}
|
||||
|
||||
|
|
|
|||
83 vendor/github.com/osbuild/images/pkg/manifest/os.go generated vendored
|
|
@ -93,35 +93,36 @@ type OSCustomizations struct {
|
|||
ShellInit []shell.InitFile
|
||||
|
||||
// TODO: drop osbuild types from the API
|
||||
Firewall *osbuild.FirewallStageOptions
|
||||
Grub2Config *osbuild.GRUB2Config
|
||||
Sysconfig []*osbuild.SysconfigStageOptions
|
||||
SystemdLogind []*osbuild.SystemdLogindStageOptions
|
||||
CloudInit []*osbuild.CloudInitStageOptions
|
||||
Modprobe []*osbuild.ModprobeStageOptions
|
||||
DracutConf []*osbuild.DracutConfStageOptions
|
||||
SystemdUnit []*osbuild.SystemdUnitStageOptions
|
||||
Authselect *osbuild.AuthselectStageOptions
|
||||
SELinuxConfig *osbuild.SELinuxConfigStageOptions
|
||||
Tuned *osbuild.TunedStageOptions
|
||||
Tmpfilesd []*osbuild.TmpfilesdStageOptions
|
||||
PamLimitsConf []*osbuild.PamLimitsConfStageOptions
|
||||
Sysctld []*osbuild.SysctldStageOptions
|
||||
DNFConfig []*osbuild.DNFConfigStageOptions
|
||||
DNFAutomaticConfig *osbuild.DNFAutomaticConfigStageOptions
|
||||
YUMConfig *osbuild.YumConfigStageOptions
|
||||
YUMRepos []*osbuild.YumReposStageOptions
|
||||
SshdConfig *osbuild.SshdConfigStageOptions
|
||||
GCPGuestAgentConfig *osbuild.GcpGuestAgentConfigOptions
|
||||
AuthConfig *osbuild.AuthconfigStageOptions
|
||||
PwQuality *osbuild.PwqualityConfStageOptions
|
||||
OpenSCAPConfig *osbuild.OscapRemediationStageOptions
|
||||
NTPServers []osbuild.ChronyConfigServer
|
||||
WAAgentConfig *osbuild.WAAgentConfStageOptions
|
||||
UdevRules *osbuild.UdevRulesStageOptions
|
||||
WSLConfig *osbuild.WSLConfStageOptions
|
||||
LeapSecTZ *string
|
||||
FactAPIType *facts.APIType
|
||||
Firewall *osbuild.FirewallStageOptions
|
||||
Grub2Config *osbuild.GRUB2Config
|
||||
Sysconfig []*osbuild.SysconfigStageOptions
|
||||
SystemdLogind []*osbuild.SystemdLogindStageOptions
|
||||
CloudInit []*osbuild.CloudInitStageOptions
|
||||
Modprobe []*osbuild.ModprobeStageOptions
|
||||
DracutConf []*osbuild.DracutConfStageOptions
|
||||
SystemdUnit []*osbuild.SystemdUnitStageOptions
|
||||
Authselect *osbuild.AuthselectStageOptions
|
||||
SELinuxConfig *osbuild.SELinuxConfigStageOptions
|
||||
Tuned *osbuild.TunedStageOptions
|
||||
Tmpfilesd []*osbuild.TmpfilesdStageOptions
|
||||
PamLimitsConf []*osbuild.PamLimitsConfStageOptions
|
||||
Sysctld []*osbuild.SysctldStageOptions
|
||||
DNFConfig []*osbuild.DNFConfigStageOptions
|
||||
DNFAutomaticConfig *osbuild.DNFAutomaticConfigStageOptions
|
||||
YUMConfig *osbuild.YumConfigStageOptions
|
||||
YUMRepos []*osbuild.YumReposStageOptions
|
||||
SshdConfig *osbuild.SshdConfigStageOptions
|
||||
GCPGuestAgentConfig *osbuild.GcpGuestAgentConfigOptions
|
||||
AuthConfig *osbuild.AuthconfigStageOptions
|
||||
PwQuality *osbuild.PwqualityConfStageOptions
|
||||
OpenSCAPTailorConfig *osbuild.OscapAutotailorStageOptions
|
||||
OpenSCAPConfig *osbuild.OscapRemediationStageOptions
|
||||
NTPServers []osbuild.ChronyConfigServer
|
||||
WAAgentConfig *osbuild.WAAgentConfStageOptions
|
||||
UdevRules *osbuild.UdevRulesStageOptions
|
||||
WSLConfig *osbuild.WSLConfStageOptions
|
||||
LeapSecTZ *string
|
||||
FactAPIType *facts.APIType
|
||||
|
||||
Subscription *subscription.ImageOptions
|
||||
RHSMConfig map[subscription.RHSMStatus]*osbuild.RHSMStageOptions
|
||||
|
|
@ -293,6 +294,10 @@ func (p *OS) getBuildPackages(distro Distro) []string {
|
|||
packages = append(packages, "skopeo")
|
||||
}
|
||||
|
||||
if p.OpenSCAPTailorConfig != nil {
|
||||
packages = append(packages, "openscap-utils")
|
||||
}
|
||||
|
||||
return packages
|
||||
}
|
||||
|
||||
|
|
@ -655,10 +660,6 @@ func (p *OS) serialize() osbuild.Pipeline {
|
|||
pipeline.AddStage(bootloader)
|
||||
}
|
||||
|
||||
if p.OpenSCAPConfig != nil {
|
||||
pipeline.AddStage(osbuild.NewOscapRemediationStage(p.OpenSCAPConfig))
|
||||
}
|
||||
|
||||
if p.FactAPIType != nil {
|
||||
pipeline.AddStage(osbuild.NewRHSMFactsStage(&osbuild.RHSMFactsStageOptions{
|
||||
Facts: osbuild.RHSMFacts{
|
||||
|
|
@ -715,6 +716,22 @@ func (p *OS) serialize() osbuild.Pipeline {
|
|||
pipeline.AddStage(osbuild.NewWSLConfStage(wslConf))
|
||||
}
|
||||
|
||||
if p.OpenSCAPTailorConfig != nil {
|
||||
if p.OpenSCAPConfig == nil {
|
||||
// This is a programming error, since it doesn't make sense
|
||||
// to have tailoring configs without openscap config.
|
||||
panic(fmt.Errorf("OpenSCAP autotailoring cannot be set if no OpenSCAP config has been provided"))
|
||||
}
|
||||
pipeline.AddStage(osbuild.NewOscapAutotailorStage(p.OpenSCAPTailorConfig))
|
||||
}
|
||||
|
||||
// NOTE: We need to run the OpenSCAP stages as the last stage before SELinux
|
||||
// since the remediation may change file permissions and other aspects of the
|
||||
// hardened image
|
||||
if p.OpenSCAPConfig != nil {
|
||||
pipeline.AddStage(osbuild.NewOscapRemediationStage(p.OpenSCAPConfig))
|
||||
}
|
||||
|
||||
if p.SElinux != "" {
|
||||
pipeline.AddStage(osbuild.NewSELinuxStage(&osbuild.SELinuxStageOptions{
|
||||
FileContexts: fmt.Sprintf("etc/selinux/%s/contexts/files/file_contexts", p.SElinux),
|
||||
|
|
|
|||
47
vendor/github.com/osbuild/images/pkg/osbuild/oscap_autotailor_stage.go
generated
vendored
Normal file
47
vendor/github.com/osbuild/images/pkg/osbuild/oscap_autotailor_stage.go
generated
vendored
Normal file
|
|
@ -0,0 +1,47 @@
|
|||
package osbuild
|
||||
|
||||
import "fmt"
|
||||
|
||||
type OscapAutotailorStageOptions struct {
|
||||
Filepath string `json:"filepath"`
|
||||
Config OscapAutotailorConfig `json:"config"`
|
||||
}
|
||||
type OscapAutotailorConfig struct {
|
||||
OscapConfig
|
||||
NewProfile string `json:"new_profile"`
|
||||
Selected []string `json:"selected,omitempty"`
|
||||
Unselected []string `json:"unselected,omitempty"`
|
||||
}
|
||||
|
||||
func (OscapAutotailorStageOptions) isStageOptions() {}
|
||||
|
||||
func (c OscapAutotailorConfig) validate() error {
|
||||
if c.NewProfile == "" {
|
||||
return fmt.Errorf("'new_profile' must be specified")
|
||||
}
|
||||
// reuse the oscap validation
|
||||
return c.OscapConfig.validate()
|
||||
}
|
||||
|
||||
func NewOscapAutotailorStage(options *OscapAutotailorStageOptions) *Stage {
|
||||
if err := options.Config.validate(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return &Stage{
|
||||
Type: "org.osbuild.oscap.autotailor",
|
||||
Options: options,
|
||||
}
|
||||
}
|
||||
|
||||
func NewOscapAutotailorStageOptions(filepath string, oscapOptions OscapConfig, autotailorOptions OscapAutotailorConfig) *OscapAutotailorStageOptions {
|
||||
return &OscapAutotailorStageOptions{
|
||||
Filepath: filepath,
|
||||
Config: OscapAutotailorConfig{
|
||||
OscapConfig: oscapOptions,
|
||||
NewProfile: autotailorOptions.NewProfile,
|
||||
Selected: autotailorOptions.Selected,
|
||||
Unselected: autotailorOptions.Unselected,
|
||||
},
|
||||
}
|
||||
}
|
||||
1
vendor/github.com/osbuild/images/pkg/osbuild/oscap_remediation_stage.go
generated
vendored
1
vendor/github.com/osbuild/images/pkg/osbuild/oscap_remediation_stage.go
generated
vendored
|
|
@ -76,6 +76,7 @@ func NewOscapRemediationStageOptions(options OscapConfig) *OscapRemediationStage
|
|||
ProfileID: options.ProfileID,
|
||||
Datastream: options.Datastream,
|
||||
DatastreamID: options.DatastreamID,
|
||||
Tailoring: options.Tailoring,
|
||||
XCCDFID: options.XCCDFID,
|
||||
BenchmarkID: options.BenchmarkID,
|
||||
ArfResult: options.ArfResult,
|
||||
|
|
|
|||
2
vendor/github.com/osbuild/images/pkg/ostree/errors.go
generated
vendored
2
vendor/github.com/osbuild/images/pkg/ostree/errors.go
generated
vendored
|
|
@ -18,7 +18,7 @@ func NewResolveRefError(msg string, args ...interface{}) ResolveRefError {
|
|||
return ResolveRefError{msg: fmt.Sprintf(msg, args...)}
|
||||
}
|
||||
|
||||
// InvalidParamsError is returned when a parameter is invalid (e.g., malformed
|
||||
// RefError is returned when a parameter is invalid (e.g., malformed
|
||||
// or contains illegal characters).
|
||||
type RefError struct {
|
||||
msg string
|
||||
|
|
|
|||
86
vendor/github.com/osbuild/images/pkg/ostree/ostree.go
generated
vendored
86
vendor/github.com/osbuild/images/pkg/ostree/ostree.go
generated
vendored
|
|
@ -17,7 +17,10 @@ import (
|
|||
"github.com/osbuild/images/pkg/rhsm"
|
||||
)
|
||||
|
||||
var ostreeRefRE = regexp.MustCompile(`^(?:[\w\d][-._\w\d]*\/)*[\w\d][-._\w\d]*$`)
|
||||
var (
|
||||
ostreeRefRE = regexp.MustCompile(`^(?:[\w\d][-._\w\d]*\/)*[\w\d][-._\w\d]*$`)
|
||||
ostreeCommitRE = regexp.MustCompile("^[0-9a-f]{64}$")
|
||||
)
|
||||
|
||||
// SourceSpec serves as input for ResolveParams, and contains all necessary
|
||||
// variables to resolve a ref, which can then be turned into a CommitSpec.
|
||||
|
|
@ -71,6 +74,54 @@ type ImageOptions struct {
|
|||
RHSM bool `json:"rhsm"`
|
||||
}
|
||||
|
||||
// Validate the image options. This doesn't verify the existence of any remote
|
||||
// objects and does not guarantee that refs will be successfully resolved. It
|
||||
// only checks that the values and value combinations are valid.
|
||||
//
|
||||
// The function checks the following:
|
||||
// - The ImageRef, if specified, is a valid ref and does not look like a
|
||||
// checksum.
|
||||
// - The ParentRef, if specified, must be a valid ref or a checksum.
|
||||
// - If the ParentRef is specified, the URL must also be specified.
|
||||
// - URLs must be valid.
|
||||
func (options ImageOptions) Validate() error {
|
||||
if ref := options.ImageRef; ref != "" {
|
||||
// image ref must not look like a checksum
|
||||
if verifyChecksum(ref) {
|
||||
return NewRefError("ostree image ref looks like a checksum %q", ref)
|
||||
}
|
||||
if !verifyRef(ref) {
|
||||
return NewRefError("invalid ostree image ref %q", ref)
|
||||
}
|
||||
}
|
||||
|
||||
if parent := options.ParentRef; parent != "" {
|
||||
if !verifyChecksum(parent) && !verifyRef(parent) {
|
||||
return NewRefError("invalid ostree parent ref or commit %q", parent)
|
||||
}
|
||||
|
||||
// valid URL required
|
||||
if purl := options.URL; purl == "" {
|
||||
return NewParameterComboError("ostree parent ref specified, but no URL to retrieve it")
|
||||
}
|
||||
}
|
||||
|
||||
// whether required or not, any URL specified must be valid
|
||||
if purl := options.URL; purl != "" {
|
||||
if _, err := url.ParseRequestURI(purl); err != nil {
|
||||
return fmt.Errorf("ostree URL %q is invalid", purl)
|
||||
}
|
||||
}
|
||||
|
||||
if curl := options.ContentURL; curl != "" {
|
||||
if _, err := url.ParseRequestURI(curl); err != nil {
|
||||
return fmt.Errorf("ostree content URL %q is invalid", curl)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remote defines the options that can be set for an OSTree Remote configuration.
|
||||
type Remote struct {
|
||||
Name string
|
||||
|
|
@ -79,10 +130,14 @@ type Remote struct {
|
|||
GPGKeyPaths []string
|
||||
}
|
||||
|
||||
func VerifyRef(ref string) bool {
|
||||
func verifyRef(ref string) bool {
|
||||
return len(ref) > 0 && ostreeRefRE.MatchString(ref)
|
||||
}
|
||||
|
||||
func verifyChecksum(commit string) bool {
|
||||
return len(commit) > 0 && ostreeCommitRE.MatchString(commit)
|
||||
}
|
||||
|
||||
// ResolveRef resolves the URL path specified by the location and ref
|
||||
// (location+"refs/heads/"+ref) and returns the commit ID for the named ref. If
|
||||
// there is an error, it will be of type ResolveRefError.
|
||||
|
|
@ -109,7 +164,7 @@ func ResolveRef(location, ref string, consumerCerts bool, subs *rhsm.Subscriptio
|
|||
if ca != nil {
|
||||
caCertPEM, err := os.ReadFile(*ca)
|
||||
if err != nil {
|
||||
return "", NewResolveRefError("error adding rhsm certificates when resolving ref")
|
||||
return "", NewResolveRefError("error adding rhsm certificates when resolving ref: %s", err)
|
||||
}
|
||||
roots := x509.NewCertPool()
|
||||
ok := roots.AppendCertsFromPEM(caCertPEM)
|
||||
|
|
@ -121,7 +176,7 @@ func ResolveRef(location, ref string, consumerCerts bool, subs *rhsm.Subscriptio
|
|||
|
||||
cert, err := tls.LoadX509KeyPair(subs.Consumer.ConsumerCert, subs.Consumer.ConsumerKey)
|
||||
if err != nil {
|
||||
return "", NewResolveRefError("error adding rhsm certificates when resolving ref")
|
||||
return "", NewResolveRefError("error adding rhsm certificates when resolving ref: %s", err)
|
||||
}
|
||||
tlsConf.Certificates = []tls.Certificate{cert}
|
||||
|
||||
|
|
@ -137,7 +192,7 @@ func ResolveRef(location, ref string, consumerCerts bool, subs *rhsm.Subscriptio
|
|||
|
||||
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
||||
if err != nil {
|
||||
return "", NewResolveRefError("error adding rhsm certificates when resolving ref")
|
||||
return "", NewResolveRefError("error preparing ostree resolve request: %s", err)
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
|
|
@ -160,26 +215,37 @@ func ResolveRef(location, ref string, consumerCerts bool, subs *rhsm.Subscriptio
|
|||
return checksum, nil
|
||||
}
|
||||
|
||||
// Resolve the ostree source specification into a commit specification.
|
||||
// Resolve the ostree source specification to a commit specification.
|
||||
//
|
||||
// If a URL is defined in the source specification, the checksum of the ref is
|
||||
// resolved, otherwise the checksum is an empty string. Failure to resolve the
|
||||
// checksum results in a ResolveRefError.
|
||||
//
|
||||
// If the ref is already a checksum (64 alphanumeric characters), it is not
|
||||
// resolved or checked against the repository.
|
||||
//
|
||||
// If the ref is malformed, the function returns with a RefError.
|
||||
func Resolve(source SourceSpec) (CommitSpec, error) {
|
||||
if !VerifyRef(source.Ref) {
|
||||
return CommitSpec{}, NewRefError("Invalid ostree ref %q", source.Ref)
|
||||
}
|
||||
|
||||
commit := CommitSpec{
|
||||
Ref: source.Ref,
|
||||
URL: source.URL,
|
||||
}
|
||||
|
||||
if source.RHSM {
|
||||
commit.Secrets = "org.osbuild.rhsm.consumer"
|
||||
}
|
||||
|
||||
if verifyChecksum(source.Ref) {
|
||||
// the ref is a commit: return as is
|
||||
commit.Checksum = source.Ref
|
||||
return commit, nil
|
||||
}
|
||||
|
||||
if !verifyRef(source.Ref) {
|
||||
// the ref is not a commit and it's also an invalid ref
|
||||
return CommitSpec{}, NewRefError("Invalid ostree ref or commit %q", source.Ref)
|
||||
}
|
||||
|
||||
// URL set: Resolve checksum
|
||||
if source.URL != "" {
|
||||
// If a URL is specified, we need to fetch the commit at the URL.
|
||||
|
|
|
|||
28
vendor/golang.org/x/net/html/render.go
generated
vendored
28
vendor/golang.org/x/net/html/render.go
generated
vendored
|
|
@ -194,9 +194,8 @@ func render1(w writer, n *Node) error {
|
|||
}
|
||||
}
|
||||
|
||||
// Render any child nodes.
|
||||
switch n.Data {
|
||||
case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp":
|
||||
// Render any child nodes
|
||||
if childTextNodesAreLiteral(n) {
|
||||
for c := n.FirstChild; c != nil; c = c.NextSibling {
|
||||
if c.Type == TextNode {
|
||||
if _, err := w.WriteString(c.Data); err != nil {
|
||||
|
|
@ -213,7 +212,7 @@ func render1(w writer, n *Node) error {
|
|||
// last element in the file, with no closing tag.
|
||||
return plaintextAbort
|
||||
}
|
||||
default:
|
||||
} else {
|
||||
for c := n.FirstChild; c != nil; c = c.NextSibling {
|
||||
if err := render1(w, c); err != nil {
|
||||
return err
|
||||
|
|
@ -231,6 +230,27 @@ func render1(w writer, n *Node) error {
|
|||
return w.WriteByte('>')
|
||||
}
|
||||
|
||||
func childTextNodesAreLiteral(n *Node) bool {
|
||||
// Per WHATWG HTML 13.3, if the parent of the current node is a style,
|
||||
// script, xmp, iframe, noembed, noframes, or plaintext element, and the
|
||||
// current node is a text node, append the value of the node's data
|
||||
// literally. The specification is not explicit about it, but we only
|
||||
// enforce this if we are in the HTML namespace (i.e. when the namespace is
|
||||
// "").
|
||||
// NOTE: we also always include noscript elements, although the
|
||||
// specification states that they should only be rendered as such if
|
||||
// scripting is enabled for the node (which is not something we track).
|
||||
if n.Namespace != "" {
|
||||
return false
|
||||
}
|
||||
switch n.Data {
|
||||
case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// writeQuoted writes s to w surrounded by quotes. Normally it will use double
|
||||
// quotes, but if s contains a double quote, it will use single quotes.
|
||||
// It is used for writing the identifiers in a doctype declaration.
|
||||
|
|
|
|||
35
vendor/golang.org/x/net/http2/transport.go
generated
vendored
35
vendor/golang.org/x/net/http2/transport.go
generated
vendored
|
|
@ -19,6 +19,7 @@ import (
|
|||
"io/fs"
|
||||
"log"
|
||||
"math"
|
||||
"math/bits"
|
||||
mathrand "math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
|
|
@ -518,11 +519,14 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
|
|||
func authorityAddr(scheme string, authority string) (addr string) {
|
||||
host, port, err := net.SplitHostPort(authority)
|
||||
if err != nil { // authority didn't have a port
|
||||
host = authority
|
||||
port = ""
|
||||
}
|
||||
if port == "" { // authority's port was empty
|
||||
port = "443"
|
||||
if scheme == "http" {
|
||||
port = "80"
|
||||
}
|
||||
host = authority
|
||||
}
|
||||
if a, err := idna.ToASCII(host); err == nil {
|
||||
host = a
|
||||
|
|
@ -1677,7 +1681,27 @@ func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int {
|
|||
return int(n) // doesn't truncate; max is 512K
|
||||
}
|
||||
|
||||
var bufPool sync.Pool // of *[]byte
|
||||
// Seven bufPools manage different frame sizes. This helps to avoid scenarios where long-running
|
||||
// streaming requests using small frame sizes occupy large buffers initially allocated for prior
|
||||
// requests needing big buffers. The size ranges are as follows:
|
||||
// {0 KB, 16 KB], {16 KB, 32 KB], {32 KB, 64 KB], {64 KB, 128 KB], {128 KB, 256 KB],
|
||||
// {256 KB, 512 KB], {512 KB, infinity}
|
||||
// In practice, the maximum scratch buffer size should not exceed 512 KB due to
|
||||
// frameScratchBufferLen(maxFrameSize), thus the "infinity pool" should never be used.
|
||||
// It exists mainly as a safety measure, for potential future increases in max buffer size.
|
||||
var bufPools [7]sync.Pool // of *[]byte
|
||||
func bufPoolIndex(size int) int {
|
||||
if size <= 16384 {
|
||||
return 0
|
||||
}
|
||||
size -= 1
|
||||
bits := bits.Len(uint(size))
|
||||
index := bits - 14
|
||||
if index >= len(bufPools) {
|
||||
return len(bufPools) - 1
|
||||
}
|
||||
return index
|
||||
}
|
||||
|
||||
func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
|
||||
cc := cs.cc
|
||||
|
|
@ -1695,12 +1719,13 @@ func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
|
|||
// Scratch buffer for reading into & writing from.
|
||||
scratchLen := cs.frameScratchBufferLen(maxFrameSize)
|
||||
var buf []byte
|
||||
if bp, ok := bufPool.Get().(*[]byte); ok && len(*bp) >= scratchLen {
|
||||
defer bufPool.Put(bp)
|
||||
index := bufPoolIndex(scratchLen)
|
||||
if bp, ok := bufPools[index].Get().(*[]byte); ok && len(*bp) >= scratchLen {
|
||||
defer bufPools[index].Put(bp)
|
||||
buf = *bp
|
||||
} else {
|
||||
buf = make([]byte, scratchLen)
|
||||
defer bufPool.Put(&buf)
|
||||
defer bufPools[index].Put(&buf)
|
||||
}
|
||||
|
||||
var sawEOF bool
|
||||
|
|
|
|||
1
vendor/golang.org/x/oauth2/google/appengine_gen1.go
generated
vendored
1
vendor/golang.org/x/oauth2/google/appengine_gen1.go
generated
vendored
|
|
@ -3,7 +3,6 @@
|
|||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build appengine
|
||||
// +build appengine
|
||||
|
||||
// This file applies to App Engine first generation runtimes (<= Go 1.9).
|
||||
|
||||
|
|
|
|||
1
vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go
generated
vendored
1
vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go
generated
vendored
|
|
@ -3,7 +3,6 @@
|
|||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !appengine
|
||||
// +build !appengine
|
||||
|
||||
// This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible.
|
||||
|
||||
|
|
|
|||
1
vendor/golang.org/x/oauth2/internal/client_appengine.go
generated
vendored
1
vendor/golang.org/x/oauth2/internal/client_appengine.go
generated
vendored
|
|
@ -3,7 +3,6 @@
|
|||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build appengine
|
||||
// +build appengine
|
||||
|
||||
package internal
|
||||
|
||||
|
|
|
|||
2
vendor/golang.org/x/sys/unix/mkerrors.sh
generated
vendored
2
vendor/golang.org/x/sys/unix/mkerrors.sh
generated
vendored
|
|
@ -624,7 +624,7 @@ ccflags="$@"
|
|||
$2 ~ /^MEM/ ||
|
||||
$2 ~ /^WG/ ||
|
||||
$2 ~ /^FIB_RULE_/ ||
|
||||
$2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)}
|
||||
$2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE|IOMIN$|IOOPT$|ALIGNOFF$|DISCARD|ROTATIONAL$|ZEROOUT$|GETDISKSEQ$)/ {printf("\t%s = C.%s\n", $2, $2)}
|
||||
$2 ~ /^__WCOREFLAG$/ {next}
|
||||
$2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)}
|
||||
|
||||
|
|
|
|||
14
vendor/golang.org/x/sys/unix/mmap_nomremap.go
generated
vendored
Normal file
14
vendor/golang.org/x/sys/unix/mmap_nomremap.go
generated
vendored
Normal file
|
|
@ -0,0 +1,14 @@
|
|||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris
|
||||
// +build aix darwin dragonfly freebsd openbsd solaris
|
||||
|
||||
package unix
|
||||
|
||||
var mapper = &mmapper{
|
||||
active: make(map[*byte][]byte),
|
||||
mmap: mmap,
|
||||
munmap: munmap,
|
||||
}
|
||||
21
vendor/golang.org/x/sys/unix/mremap.go
generated
vendored
21
vendor/golang.org/x/sys/unix/mremap.go
generated
vendored
|
|
@ -2,8 +2,8 @@
|
|||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build linux
|
||||
// +build linux
|
||||
//go:build linux || netbsd
|
||||
// +build linux netbsd
|
||||
|
||||
package unix
|
||||
|
||||
|
|
@ -14,8 +14,17 @@ type mremapMmapper struct {
|
|||
mremap func(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (xaddr uintptr, err error)
|
||||
}
|
||||
|
||||
var mapper = &mremapMmapper{
|
||||
mmapper: mmapper{
|
||||
active: make(map[*byte][]byte),
|
||||
mmap: mmap,
|
||||
munmap: munmap,
|
||||
},
|
||||
mremap: mremap,
|
||||
}
|
||||
|
||||
func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) {
|
||||
if newLength <= 0 || len(oldData) == 0 || len(oldData) != cap(oldData) || flags&MREMAP_FIXED != 0 {
|
||||
if newLength <= 0 || len(oldData) == 0 || len(oldData) != cap(oldData) || flags&mremapFixed != 0 {
|
||||
return nil, EINVAL
|
||||
}
|
||||
|
||||
|
|
@ -32,9 +41,13 @@ func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data [
|
|||
}
|
||||
bNew := unsafe.Slice((*byte)(unsafe.Pointer(newAddr)), newLength)
|
||||
pNew := &bNew[cap(bNew)-1]
|
||||
if flags&MREMAP_DONTUNMAP == 0 {
|
||||
if flags&mremapDontunmap == 0 {
|
||||
delete(m.active, pOld)
|
||||
}
|
||||
m.active[pNew] = bNew
|
||||
return bNew, nil
|
||||
}
|
||||
|
||||
func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) {
|
||||
return mapper.Mremap(oldData, newLength, flags)
|
||||
}
|
||||
|
|
|
|||
15
vendor/golang.org/x/sys/unix/syscall_aix.go
generated
vendored
15
vendor/golang.org/x/sys/unix/syscall_aix.go
generated
vendored
|
|
@ -535,21 +535,6 @@ func Fsync(fd int) error {
|
|||
//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = nsendmsg
|
||||
|
||||
//sys munmap(addr uintptr, length uintptr) (err error)
|
||||
|
||||
var mapper = &mmapper{
|
||||
active: make(map[*byte][]byte),
|
||||
mmap: mmap,
|
||||
munmap: munmap,
|
||||
}
|
||||
|
||||
func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
|
||||
return mapper.Mmap(fd, offset, length, prot, flags)
|
||||
}
|
||||
|
||||
func Munmap(b []byte) (err error) {
|
||||
return mapper.Munmap(b)
|
||||
}
|
||||
|
||||
//sys Madvise(b []byte, advice int) (err error)
|
||||
//sys Mprotect(b []byte, prot int) (err error)
|
||||
//sys Mlock(b []byte) (err error)
|
||||
|
|
|
|||
14
vendor/golang.org/x/sys/unix/syscall_bsd.go
generated
vendored
14
vendor/golang.org/x/sys/unix/syscall_bsd.go
generated
vendored
|
|
@ -601,20 +601,6 @@ func Poll(fds []PollFd, timeout int) (n int, err error) {
|
|||
// Gethostuuid(uuid *byte, timeout *Timespec) (err error)
|
||||
// Ptrace(req int, pid int, addr uintptr, data int) (ret uintptr, err error)
|
||||
|
||||
var mapper = &mmapper{
|
||||
active: make(map[*byte][]byte),
|
||||
mmap: mmap,
|
||||
munmap: munmap,
|
||||
}
|
||||
|
||||
func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
|
||||
return mapper.Mmap(fd, offset, length, prot, flags)
|
||||
}
|
||||
|
||||
func Munmap(b []byte) (err error) {
|
||||
return mapper.Munmap(b)
|
||||
}
|
||||
|
||||
//sys Madvise(b []byte, behav int) (err error)
|
||||
//sys Mlock(b []byte) (err error)
|
||||
//sys Mlockall(flags int) (err error)
|
||||
|
|
|
|||
50
vendor/golang.org/x/sys/unix/syscall_darwin.go
generated
vendored
50
vendor/golang.org/x/sys/unix/syscall_darwin.go
generated
vendored
|
|
@ -510,30 +510,36 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
// Find size.
|
||||
n := uintptr(0)
|
||||
if err := sysctl(mib, nil, &n, nil, 0); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if n == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
if n%SizeofKinfoProc != 0 {
|
||||
return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc)
|
||||
}
|
||||
for {
|
||||
// Find size.
|
||||
n := uintptr(0)
|
||||
if err := sysctl(mib, nil, &n, nil, 0); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if n == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
if n%SizeofKinfoProc != 0 {
|
||||
return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc)
|
||||
}
|
||||
|
||||
// Read into buffer of that size.
|
||||
buf := make([]KinfoProc, n/SizeofKinfoProc)
|
||||
if err := sysctl(mib, (*byte)(unsafe.Pointer(&buf[0])), &n, nil, 0); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if n%SizeofKinfoProc != 0 {
|
||||
return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc)
|
||||
}
|
||||
// Read into buffer of that size.
|
||||
buf := make([]KinfoProc, n/SizeofKinfoProc)
|
||||
if err := sysctl(mib, (*byte)(unsafe.Pointer(&buf[0])), &n, nil, 0); err != nil {
|
||||
if err == ENOMEM {
|
||||
// Process table grew. Try again.
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if n%SizeofKinfoProc != 0 {
|
||||
return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc)
|
||||
}
|
||||
|
||||
// The actual call may return less than the original reported required
|
||||
// size so ensure we deal with that.
|
||||
return buf[:n/SizeofKinfoProc], nil
|
||||
// The actual call may return less than the original reported required
|
||||
// size so ensure we deal with that.
|
||||
return buf[:n/SizeofKinfoProc], nil
|
||||
}
|
||||
}
|
||||
|
||||
//sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)
|
||||
|
|
|
|||
63
vendor/golang.org/x/sys/unix/syscall_linux.go
generated
vendored
63
vendor/golang.org/x/sys/unix/syscall_linux.go
generated
vendored
|
|
@ -1885,7 +1885,7 @@ func Getpgrp() (pid int) {
|
|||
//sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error)
|
||||
//sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT
|
||||
//sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error)
|
||||
//sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6
|
||||
//sys pselect6(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *sigset_argpack) (n int, err error)
|
||||
//sys read(fd int, p []byte) (n int, err error)
|
||||
//sys Removexattr(path string, attr string) (err error)
|
||||
//sys Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error)
|
||||
|
|
@ -2125,28 +2125,6 @@ func writevRacedetect(iovecs []Iovec, n int) {
|
|||
// mmap varies by architecture; see syscall_linux_*.go.
|
||||
//sys munmap(addr uintptr, length uintptr) (err error)
|
||||
//sys mremap(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (xaddr uintptr, err error)
|
||||
|
||||
var mapper = &mremapMmapper{
|
||||
mmapper: mmapper{
|
||||
active: make(map[*byte][]byte),
|
||||
mmap: mmap,
|
||||
munmap: munmap,
|
||||
},
|
||||
mremap: mremap,
|
||||
}
|
||||
|
||||
func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
|
||||
return mapper.Mmap(fd, offset, length, prot, flags)
|
||||
}
|
||||
|
||||
func Munmap(b []byte) (err error) {
|
||||
return mapper.Munmap(b)
|
||||
}
|
||||
|
||||
func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) {
|
||||
return mapper.Mremap(oldData, newLength, flags)
|
||||
}
|
||||
|
||||
//sys Madvise(b []byte, advice int) (err error)
|
||||
//sys Mprotect(b []byte, prot int) (err error)
|
||||
//sys Mlock(b []byte) (err error)
|
||||
|
|
@ -2155,6 +2133,12 @@ func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) {
|
|||
//sys Munlock(b []byte) (err error)
|
||||
//sys Munlockall() (err error)
|
||||
|
||||
const (
|
||||
mremapFixed = MREMAP_FIXED
|
||||
mremapDontunmap = MREMAP_DONTUNMAP
|
||||
mremapMaymove = MREMAP_MAYMOVE
|
||||
)
|
||||
|
||||
// Vmsplice splices user pages from a slice of Iovecs into a pipe specified by fd,
|
||||
// using the specified flags.
|
||||
func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) {
|
||||
|
|
@ -2454,6 +2438,39 @@ func Getresgid() (rgid, egid, sgid int) {
|
|||
return int(r), int(e), int(s)
|
||||
}
|
||||
|
||||
// Pselect is a wrapper around the Linux pselect6 system call.
|
||||
// This version does not modify the timeout argument.
|
||||
func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
|
||||
// Per https://man7.org/linux/man-pages/man2/select.2.html#NOTES,
|
||||
// The Linux pselect6() system call modifies its timeout argument.
|
||||
// [Not modifying the argument] is the behavior required by POSIX.1-2001.
|
||||
var mutableTimeout *Timespec
|
||||
if timeout != nil {
|
||||
mutableTimeout = new(Timespec)
|
||||
*mutableTimeout = *timeout
|
||||
}
|
||||
|
||||
// The final argument of the pselect6() system call is not a
|
||||
// sigset_t * pointer, but is instead a structure
|
||||
var kernelMask *sigset_argpack
|
||||
if sigmask != nil {
|
||||
wordBits := 32 << (^uintptr(0) >> 63) // see math.intSize
|
||||
|
||||
// A sigset stores one bit per signal,
|
||||
// offset by 1 (because signal 0 does not exist).
|
||||
// So the number of words needed is ⌈__C_NSIG - 1 / wordBits⌉.
|
||||
sigsetWords := (_C__NSIG - 1 + wordBits - 1) / (wordBits)
|
||||
|
||||
sigsetBytes := uintptr(sigsetWords * (wordBits / 8))
|
||||
kernelMask = &sigset_argpack{
|
||||
ss: sigmask,
|
||||
ssLen: sigsetBytes,
|
||||
}
|
||||
}
|
||||
|
||||
return pselect6(nfd, r, w, e, mutableTimeout, kernelMask)
|
||||
}
|
||||
|
||||
/*
|
||||
* Unimplemented
|
||||
*/
|
||||
|
|
|
|||
2
vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
generated
vendored
2
vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
generated
vendored
|
|
@ -40,7 +40,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
|
|||
if timeout != nil {
|
||||
ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000}
|
||||
}
|
||||
return Pselect(nfd, r, w, e, ts, nil)
|
||||
return pselect6(nfd, r, w, e, ts, nil)
|
||||
}
|
||||
|
||||
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
|
||||
|
|
|
|||
2
vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
generated
vendored
2
vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
generated
vendored
|
|
@ -33,7 +33,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
|
|||
if timeout != nil {
|
||||
ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000}
|
||||
}
|
||||
return Pselect(nfd, r, w, e, ts, nil)
|
||||
return pselect6(nfd, r, w, e, ts, nil)
|
||||
}
|
||||
|
||||
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
|
||||
|
|
|
|||
2
vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
generated
vendored
2
vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
generated
vendored
|
|
@ -28,7 +28,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
|
|||
if timeout != nil {
|
||||
ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000}
|
||||
}
|
||||
return Pselect(nfd, r, w, e, ts, nil)
|
||||
return pselect6(nfd, r, w, e, ts, nil)
|
||||
}
|
||||
|
||||
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
|
||||
|
|
|
|||
2
vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
generated
vendored
2
vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
generated
vendored
|
|
@ -31,7 +31,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
|
|||
if timeout != nil {
|
||||
ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000}
|
||||
}
|
||||
return Pselect(nfd, r, w, e, ts, nil)
|
||||
return pselect6(nfd, r, w, e, ts, nil)
|
||||
}
|
||||
|
||||
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
|
||||
|
|
|
|||
13
vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
generated
vendored
13
vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
generated
vendored
|
|
@ -32,7 +32,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
|
|||
if timeout != nil {
|
||||
ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000}
|
||||
}
|
||||
return Pselect(nfd, r, w, e, ts, nil)
|
||||
return pselect6(nfd, r, w, e, ts, nil)
|
||||
}
|
||||
|
||||
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
|
||||
|
|
@ -177,3 +177,14 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error
|
|||
}
|
||||
return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
|
||||
}
|
||||
|
||||
//sys riscvHWProbe(pairs []RISCVHWProbePairs, cpuCount uintptr, cpus *CPUSet, flags uint) (err error)
|
||||
|
||||
func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error) {
|
||||
var setSize uintptr
|
||||
|
||||
if set != nil {
|
||||
setSize = uintptr(unsafe.Sizeof(*set))
|
||||
}
|
||||
return riscvHWProbe(pairs, setSize, set, flags)
|
||||
}
|
||||
|
|
|
|||
13
vendor/golang.org/x/sys/unix/syscall_netbsd.go
generated
vendored
13
vendor/golang.org/x/sys/unix/syscall_netbsd.go
generated
vendored
|
|
@ -360,6 +360,18 @@ func Statvfs(path string, buf *Statvfs_t) (err error) {
|
|||
//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
|
||||
//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error)
|
||||
|
||||
const (
|
||||
mremapFixed = MAP_FIXED
|
||||
mremapDontunmap = 0
|
||||
mremapMaymove = 0
|
||||
)
|
||||
|
||||
//sys mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) = SYS_MREMAP
|
||||
|
||||
func mremap(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (uintptr, error) {
|
||||
return mremapNetBSD(oldaddr, oldlength, newaddr, newlength, flags)
|
||||
}
|
||||
|
||||
/*
|
||||
* Unimplemented
|
||||
*/
|
||||
|
|
@ -564,7 +576,6 @@ func Statvfs(path string, buf *Statvfs_t) (err error) {
|
|||
// mq_timedreceive
|
||||
// mq_timedsend
|
||||
// mq_unlink
|
||||
// mremap
|
||||
// msgget
|
||||
// msgrcv
|
||||
// msgsnd
|
||||
|
|
|
|||
14
vendor/golang.org/x/sys/unix/syscall_solaris.go
generated
vendored
14
vendor/golang.org/x/sys/unix/syscall_solaris.go
generated
vendored
|
|
@ -716,20 +716,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
|
|||
return
|
||||
}
|
||||
|
||||
var mapper = &mmapper{
|
||||
active: make(map[*byte][]byte),
|
||||
mmap: mmap,
|
||||
munmap: munmap,
|
||||
}
|
||||
|
||||
func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
|
||||
return mapper.Mmap(fd, offset, length, prot, flags)
|
||||
}
|
||||
|
||||
func Munmap(b []byte) (err error) {
|
||||
return mapper.Munmap(b)
|
||||
}
|
||||
|
||||
// Event Ports
|
||||
|
||||
type fileObjCookie struct {
|
||||
|
|
|
|||
8
vendor/golang.org/x/sys/unix/syscall_unix.go
generated
vendored
8
vendor/golang.org/x/sys/unix/syscall_unix.go
generated
vendored
|
|
@ -147,6 +147,14 @@ func (m *mmapper) Munmap(data []byte) (err error) {
|
|||
return nil
|
||||
}
|
||||
|
||||
func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
|
||||
return mapper.Mmap(fd, offset, length, prot, flags)
|
||||
}
|
||||
|
||||
func Munmap(b []byte) (err error) {
|
||||
return mapper.Munmap(b)
|
||||
}
|
||||
|
||||
func Read(fd int, p []byte) (n int, err error) {
|
||||
n, err = read(fd, p)
|
||||
if raceenabled {
|
||||
|
|
|
|||
14
vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
generated
vendored
14
vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
generated
vendored
|
|
@ -285,25 +285,11 @@ func Close(fd int) (err error) {
|
|||
return
|
||||
}
|
||||
|
||||
var mapper = &mmapper{
|
||||
active: make(map[*byte][]byte),
|
||||
mmap: mmap,
|
||||
munmap: munmap,
|
||||
}
|
||||
|
||||
// Dummy function: there are no semantics for Madvise on z/OS
|
||||
func Madvise(b []byte, advice int) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
|
||||
return mapper.Mmap(fd, offset, length, prot, flags)
|
||||
}
|
||||
|
||||
func Munmap(b []byte) (err error) {
|
||||
return mapper.Munmap(b)
|
||||
}
|
||||
|
||||
//sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A
|
||||
//sysnb Getegid() (egid int)
|
||||
//sysnb Geteuid() (uid int)
|
||||
|
|
|
|||
9
vendor/golang.org/x/sys/unix/zerrors_linux_386.go
generated
vendored
9
vendor/golang.org/x/sys/unix/zerrors_linux_386.go
generated
vendored
|
|
@ -27,22 +27,31 @@ const (
|
|||
B57600 = 0x1001
|
||||
B576000 = 0x1006
|
||||
B921600 = 0x1007
|
||||
BLKALIGNOFF = 0x127a
|
||||
BLKBSZGET = 0x80041270
|
||||
BLKBSZSET = 0x40041271
|
||||
BLKDISCARD = 0x1277
|
||||
BLKDISCARDZEROES = 0x127c
|
||||
BLKFLSBUF = 0x1261
|
||||
BLKFRAGET = 0x1265
|
||||
BLKFRASET = 0x1264
|
||||
BLKGETDISKSEQ = 0x80081280
|
||||
BLKGETSIZE = 0x1260
|
||||
BLKGETSIZE64 = 0x80041272
|
||||
BLKIOMIN = 0x1278
|
||||
BLKIOOPT = 0x1279
|
||||
BLKPBSZGET = 0x127b
|
||||
BLKRAGET = 0x1263
|
||||
BLKRASET = 0x1262
|
||||
BLKROGET = 0x125e
|
||||
BLKROSET = 0x125d
|
||||
BLKROTATIONAL = 0x127e
|
||||
BLKRRPART = 0x125f
|
||||
BLKSECDISCARD = 0x127d
|
||||
BLKSECTGET = 0x1267
|
||||
BLKSECTSET = 0x1266
|
||||
BLKSSZGET = 0x1268
|
||||
BLKZEROOUT = 0x127f
|
||||
BOTHER = 0x1000
|
||||
BS1 = 0x2000
|
||||
BSDLY = 0x2000
|
||||
|
|
|
|||
9
vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
generated
vendored
9
vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
generated
vendored
|
|
@ -27,22 +27,31 @@ const (
|
|||
B57600 = 0x1001
|
||||
B576000 = 0x1006
|
||||
B921600 = 0x1007
|
||||
BLKALIGNOFF = 0x127a
|
||||
BLKBSZGET = 0x80081270
|
||||
BLKBSZSET = 0x40081271
|
||||
BLKDISCARD = 0x1277
|
||||
BLKDISCARDZEROES = 0x127c
|
||||
BLKFLSBUF = 0x1261
|
||||
BLKFRAGET = 0x1265
|
||||
BLKFRASET = 0x1264
|
||||
BLKGETDISKSEQ = 0x80081280
|
||||
BLKGETSIZE = 0x1260
|
||||
BLKGETSIZE64 = 0x80081272
|
||||
BLKIOMIN = 0x1278
|
||||
BLKIOOPT = 0x1279
|
||||
BLKPBSZGET = 0x127b
|
||||
BLKRAGET = 0x1263
|
||||
BLKRASET = 0x1262
|
||||
BLKROGET = 0x125e
|
||||
BLKROSET = 0x125d
|
||||
BLKROTATIONAL = 0x127e
|
||||
BLKRRPART = 0x125f
|
||||
BLKSECDISCARD = 0x127d
|
||||
BLKSECTGET = 0x1267
|
||||
BLKSECTSET = 0x1266
|
||||
BLKSSZGET = 0x1268
|
||||
BLKZEROOUT = 0x127f
|
||||
BOTHER = 0x1000
|
||||
BS1 = 0x2000
|
||||
BSDLY = 0x2000
|
||||
|
|
|
|||
9
vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
generated
vendored
9
vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
generated
vendored
|
|
@ -27,22 +27,31 @@ const (
|
|||
B57600 = 0x1001
|
||||
B576000 = 0x1006
|
||||
B921600 = 0x1007
|
||||
BLKALIGNOFF = 0x127a
|
||||
BLKBSZGET = 0x80041270
|
||||
BLKBSZSET = 0x40041271
|
||||
BLKDISCARD = 0x1277
|
||||
BLKDISCARDZEROES = 0x127c
|
||||
BLKFLSBUF = 0x1261
|
||||
BLKFRAGET = 0x1265
|
||||
BLKFRASET = 0x1264
|
||||
BLKGETDISKSEQ = 0x80081280
|
||||
BLKGETSIZE = 0x1260
|
||||
BLKGETSIZE64 = 0x80041272
|
||||
BLKIOMIN = 0x1278
|
||||
BLKIOOPT = 0x1279
|
||||
BLKPBSZGET = 0x127b
|
||||
BLKRAGET = 0x1263
|
||||
BLKRASET = 0x1262
|
||||
BLKROGET = 0x125e
|
||||
BLKROSET = 0x125d
|
||||
BLKROTATIONAL = 0x127e
|
||||
BLKRRPART = 0x125f
|
||||
BLKSECDISCARD = 0x127d
|
||||
BLKSECTGET = 0x1267
|
||||
BLKSECTSET = 0x1266
|
||||
BLKSSZGET = 0x1268
|
||||
BLKZEROOUT = 0x127f
|
||||
BOTHER = 0x1000
|
||||
BS1 = 0x2000
|
||||
BSDLY = 0x2000
|
||||
|
|
|
|||
9
vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
generated
vendored
9
vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
generated
vendored
|
|
@ -27,22 +27,31 @@ const (
|
|||
B57600 = 0x1001
|
||||
B576000 = 0x1006
|
||||
B921600 = 0x1007
|
||||
BLKALIGNOFF = 0x127a
|
||||
BLKBSZGET = 0x80081270
|
||||
BLKBSZSET = 0x40081271
|
||||
BLKDISCARD = 0x1277
|
||||
BLKDISCARDZEROES = 0x127c
|
||||
BLKFLSBUF = 0x1261
|
||||
BLKFRAGET = 0x1265
|
||||
BLKFRASET = 0x1264
|
||||
BLKGETDISKSEQ = 0x80081280
|
||||
BLKGETSIZE = 0x1260
|
||||
BLKGETSIZE64 = 0x80081272
|
||||
BLKIOMIN = 0x1278
|
||||
BLKIOOPT = 0x1279
|
||||
BLKPBSZGET = 0x127b
|
||||
BLKRAGET = 0x1263
|
||||
BLKRASET = 0x1262
|
||||
BLKROGET = 0x125e
|
||||
BLKROSET = 0x125d
|
||||
BLKROTATIONAL = 0x127e
|
||||
BLKRRPART = 0x125f
|
||||
BLKSECDISCARD = 0x127d
|
||||
BLKSECTGET = 0x1267
|
||||
BLKSECTSET = 0x1266
|
||||
BLKSSZGET = 0x1268
|
||||
BLKZEROOUT = 0x127f
|
||||
BOTHER = 0x1000
|
||||
BS1 = 0x2000
|
||||
BSDLY = 0x2000
|
||||
|
|
|
|||
9
vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
generated
vendored
9
vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
generated
vendored
|
|
@ -27,22 +27,31 @@ const (
|
|||
B57600 = 0x1001
|
||||
B576000 = 0x1006
|
||||
B921600 = 0x1007
|
||||
BLKALIGNOFF = 0x127a
|
||||
BLKBSZGET = 0x80081270
|
||||
BLKBSZSET = 0x40081271
|
||||
BLKDISCARD = 0x1277
|
||||
BLKDISCARDZEROES = 0x127c
|
||||
BLKFLSBUF = 0x1261
|
||||
BLKFRAGET = 0x1265
|
||||
BLKFRASET = 0x1264
|
||||
BLKGETDISKSEQ = 0x80081280
|
||||
BLKGETSIZE = 0x1260
|
||||
BLKGETSIZE64 = 0x80081272
|
||||
BLKIOMIN = 0x1278
|
||||
BLKIOOPT = 0x1279
|
||||
BLKPBSZGET = 0x127b
|
||||
BLKRAGET = 0x1263
|
||||
BLKRASET = 0x1262
|
||||
BLKROGET = 0x125e
|
||||
BLKROSET = 0x125d
|
||||
BLKROTATIONAL = 0x127e
|
||||
BLKRRPART = 0x125f
|
||||
BLKSECDISCARD = 0x127d
|
||||
BLKSECTGET = 0x1267
|
||||
BLKSECTSET = 0x1266
|
||||
BLKSSZGET = 0x1268
|
||||
BLKZEROOUT = 0x127f
|
||||
BOTHER = 0x1000
|
||||
BS1 = 0x2000
|
||||
BSDLY = 0x2000
|
||||
|
|
|
|||
9
vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
generated
vendored
9
vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
generated
vendored
|
|
@ -27,22 +27,31 @@ const (
|
|||
B57600 = 0x1001
|
||||
B576000 = 0x1006
|
||||
B921600 = 0x1007
|
||||
BLKALIGNOFF = 0x2000127a
|
||||
BLKBSZGET = 0x40041270
|
||||
BLKBSZSET = 0x80041271
|
||||
BLKDISCARD = 0x20001277
|
||||
BLKDISCARDZEROES = 0x2000127c
|
||||
BLKFLSBUF = 0x20001261
|
||||
BLKFRAGET = 0x20001265
|
||||
BLKFRASET = 0x20001264
|
||||
BLKGETDISKSEQ = 0x40081280
|
||||
BLKGETSIZE = 0x20001260
|
||||
BLKGETSIZE64 = 0x40041272
|
||||
BLKIOMIN = 0x20001278
|
||||
BLKIOOPT = 0x20001279
|
||||
BLKPBSZGET = 0x2000127b
|
||||
BLKRAGET = 0x20001263
|
||||
BLKRASET = 0x20001262
|
||||
BLKROGET = 0x2000125e
|
||||
BLKROSET = 0x2000125d
|
||||
BLKROTATIONAL = 0x2000127e
|
||||
BLKRRPART = 0x2000125f
|
||||
BLKSECDISCARD = 0x2000127d
|
||||
BLKSECTGET = 0x20001267
|
||||
BLKSECTSET = 0x20001266
|
||||
BLKSSZGET = 0x20001268
|
||||
BLKZEROOUT = 0x2000127f
|
||||
BOTHER = 0x1000
|
||||
BS1 = 0x2000
|
||||
BSDLY = 0x2000
|
||||
|
|
|
|||
9
vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
generated
vendored
9
vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
generated
vendored
|
|
@ -27,22 +27,31 @@ const (
|
|||
B57600 = 0x1001
|
||||
B576000 = 0x1006
|
||||
B921600 = 0x1007
|
||||
BLKALIGNOFF = 0x2000127a
|
||||
BLKBSZGET = 0x40081270
|
||||
BLKBSZSET = 0x80081271
|
||||
BLKDISCARD = 0x20001277
|
||||
BLKDISCARDZEROES = 0x2000127c
|
||||
BLKFLSBUF = 0x20001261
|
||||
BLKFRAGET = 0x20001265
|
||||
BLKFRASET = 0x20001264
|
||||
BLKGETDISKSEQ = 0x40081280
|
||||
BLKGETSIZE = 0x20001260
|
||||
BLKGETSIZE64 = 0x40081272
|
||||
BLKIOMIN = 0x20001278
|
||||
BLKIOOPT = 0x20001279
|
||||
BLKPBSZGET = 0x2000127b
|
||||
BLKRAGET = 0x20001263
|
||||
BLKRASET = 0x20001262
|
||||
BLKROGET = 0x2000125e
|
||||
BLKROSET = 0x2000125d
|
||||
BLKROTATIONAL = 0x2000127e
|
||||
BLKRRPART = 0x2000125f
|
||||
BLKSECDISCARD = 0x2000127d
|
||||
BLKSECTGET = 0x20001267
|
||||
BLKSECTSET = 0x20001266
|
||||
BLKSSZGET = 0x20001268
|
||||
BLKZEROOUT = 0x2000127f
|
||||
BOTHER = 0x1000
|
||||
BS1 = 0x2000
|
||||
BSDLY = 0x2000
|
||||
|
|
|
|||
9
vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
generated
vendored
9
vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
generated
vendored
|
|
@ -27,22 +27,31 @@ const (
|
|||
B57600 = 0x1001
|
||||
B576000 = 0x1006
|
||||
B921600 = 0x1007
|
||||
BLKALIGNOFF = 0x2000127a
|
||||
BLKBSZGET = 0x40081270
|
||||
BLKBSZSET = 0x80081271
|
||||
BLKDISCARD = 0x20001277
|
||||
BLKDISCARDZEROES = 0x2000127c
|
||||
BLKFLSBUF = 0x20001261
|
||||
BLKFRAGET = 0x20001265
|
||||
BLKFRASET = 0x20001264
|
||||
BLKGETDISKSEQ = 0x40081280
|
||||
BLKGETSIZE = 0x20001260
|
||||
BLKGETSIZE64 = 0x40081272
|
||||
BLKIOMIN = 0x20001278
|
||||
BLKIOOPT = 0x20001279
|
||||
BLKPBSZGET = 0x2000127b
|
||||
BLKRAGET = 0x20001263
|
||||
BLKRASET = 0x20001262
|
||||
BLKROGET = 0x2000125e
|
||||
BLKROSET = 0x2000125d
|
||||
BLKROTATIONAL = 0x2000127e
|
||||
BLKRRPART = 0x2000125f
|
||||
BLKSECDISCARD = 0x2000127d
|
||||
BLKSECTGET = 0x20001267
|
||||
BLKSECTSET = 0x20001266
|
||||
BLKSSZGET = 0x20001268
|
||||
BLKZEROOUT = 0x2000127f
|
||||
BOTHER = 0x1000
|
||||
BS1 = 0x2000
|
||||
BSDLY = 0x2000
|
||||
|
|
|
|||
9
vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
generated
vendored
9
vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
generated
vendored
|
|
@ -27,22 +27,31 @@ const (
|
|||
B57600 = 0x1001
|
||||
B576000 = 0x1006
|
||||
B921600 = 0x1007
|
||||
BLKALIGNOFF = 0x2000127a
|
||||
BLKBSZGET = 0x40041270
|
||||
BLKBSZSET = 0x80041271
|
||||
BLKDISCARD = 0x20001277
|
||||
BLKDISCARDZEROES = 0x2000127c
|
||||
BLKFLSBUF = 0x20001261
|
||||
BLKFRAGET = 0x20001265
|
||||
BLKFRASET = 0x20001264
|
||||
BLKGETDISKSEQ = 0x40081280
|
||||
BLKGETSIZE = 0x20001260
|
||||
BLKGETSIZE64 = 0x40041272
|
||||
BLKIOMIN = 0x20001278
|
||||
BLKIOOPT = 0x20001279
|
||||
BLKPBSZGET = 0x2000127b
|
||||
BLKRAGET = 0x20001263
|
||||
BLKRASET = 0x20001262
|
||||
BLKROGET = 0x2000125e
|
||||
BLKROSET = 0x2000125d
|
||||
BLKROTATIONAL = 0x2000127e
|
||||
BLKRRPART = 0x2000125f
|
||||
BLKSECDISCARD = 0x2000127d
|
||||
BLKSECTGET = 0x20001267
|
||||
BLKSECTSET = 0x20001266
|
||||
BLKSSZGET = 0x20001268
|
||||
BLKZEROOUT = 0x2000127f
|
||||
BOTHER = 0x1000
|
||||
BS1 = 0x2000
|
||||
BSDLY = 0x2000
|
||||
|
|
|
|||
9
vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
generated
vendored
9
vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
generated
vendored
|
|
@ -27,22 +27,31 @@ const (
|
|||
B57600 = 0x10
|
||||
B576000 = 0x15
|
||||
B921600 = 0x16
|
||||
BLKALIGNOFF = 0x2000127a
|
||||
BLKBSZGET = 0x40041270
|
||||
BLKBSZSET = 0x80041271
|
||||
BLKDISCARD = 0x20001277
|
||||
BLKDISCARDZEROES = 0x2000127c
|
||||
BLKFLSBUF = 0x20001261
|
||||
BLKFRAGET = 0x20001265
|
||||
BLKFRASET = 0x20001264
|
||||
BLKGETDISKSEQ = 0x40081280
|
||||
BLKGETSIZE = 0x20001260
|
||||
BLKGETSIZE64 = 0x40041272
|
||||
BLKIOMIN = 0x20001278
|
||||
BLKIOOPT = 0x20001279
|
||||
BLKPBSZGET = 0x2000127b
|
||||
BLKRAGET = 0x20001263
|
||||
BLKRASET = 0x20001262
|
||||
BLKROGET = 0x2000125e
|
||||
BLKROSET = 0x2000125d
|
||||
BLKROTATIONAL = 0x2000127e
|
||||
BLKRRPART = 0x2000125f
|
||||
BLKSECDISCARD = 0x2000127d
|
||||
BLKSECTGET = 0x20001267
|
||||
BLKSECTSET = 0x20001266
|
||||
BLKSSZGET = 0x20001268
|
||||
BLKZEROOUT = 0x2000127f
|
||||
BOTHER = 0x1f
|
||||
BS1 = 0x8000
|
||||
BSDLY = 0x8000
|
||||
|
|
|
|||
9
vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
generated
vendored
9
vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
generated
vendored
|
|
@ -27,22 +27,31 @@ const (
|
|||
B57600 = 0x10
|
||||
B576000 = 0x15
|
||||
B921600 = 0x16
|
||||
BLKALIGNOFF = 0x2000127a
|
||||
BLKBSZGET = 0x40081270
|
||||
BLKBSZSET = 0x80081271
|
||||
BLKDISCARD = 0x20001277
|
||||
BLKDISCARDZEROES = 0x2000127c
|
||||
BLKFLSBUF = 0x20001261
|
||||
BLKFRAGET = 0x20001265
|
||||
BLKFRASET = 0x20001264
|
||||
BLKGETDISKSEQ = 0x40081280
|
||||
BLKGETSIZE = 0x20001260
|
||||
BLKGETSIZE64 = 0x40081272
|
||||
BLKIOMIN = 0x20001278
|
||||
BLKIOOPT = 0x20001279
|
||||
BLKPBSZGET = 0x2000127b
|
||||
BLKRAGET = 0x20001263
|
||||
BLKRASET = 0x20001262
|
||||
BLKROGET = 0x2000125e
|
||||
BLKROSET = 0x2000125d
|
||||
BLKROTATIONAL = 0x2000127e
|
||||
BLKRRPART = 0x2000125f
|
||||
BLKSECDISCARD = 0x2000127d
|
||||
BLKSECTGET = 0x20001267
|
||||
BLKSECTSET = 0x20001266
|
||||
BLKSSZGET = 0x20001268
|
||||
BLKZEROOUT = 0x2000127f
|
||||
BOTHER = 0x1f
|
||||
BS1 = 0x8000
|
||||
BSDLY = 0x8000
|
||||
|
|
|
|||
9
vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
generated
vendored
9
vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
generated
vendored
|
|
@ -27,22 +27,31 @@ const (
|
|||
B57600 = 0x10
|
||||
B576000 = 0x15
|
||||
B921600 = 0x16
|
||||
BLKALIGNOFF = 0x2000127a
|
||||
BLKBSZGET = 0x40081270
|
||||
BLKBSZSET = 0x80081271
|
||||
BLKDISCARD = 0x20001277
|
||||
BLKDISCARDZEROES = 0x2000127c
|
||||
BLKFLSBUF = 0x20001261
|
||||
BLKFRAGET = 0x20001265
|
||||
BLKFRASET = 0x20001264
|
||||
BLKGETDISKSEQ = 0x40081280
|
||||
BLKGETSIZE = 0x20001260
|
||||
BLKGETSIZE64 = 0x40081272
|
||||
BLKIOMIN = 0x20001278
|
||||
BLKIOOPT = 0x20001279
|
||||
BLKPBSZGET = 0x2000127b
|
||||
BLKRAGET = 0x20001263
|
||||
BLKRASET = 0x20001262
|
||||
BLKROGET = 0x2000125e
|
||||
BLKROSET = 0x2000125d
|
||||
BLKROTATIONAL = 0x2000127e
|
||||
BLKRRPART = 0x2000125f
|
||||
BLKSECDISCARD = 0x2000127d
|
||||
BLKSECTGET = 0x20001267
|
||||
BLKSECTSET = 0x20001266
|
||||
BLKSSZGET = 0x20001268
|
||||
BLKZEROOUT = 0x2000127f
|
||||
BOTHER = 0x1f
|
||||
BS1 = 0x8000
|
||||
BSDLY = 0x8000
|
||||
|
|
|
|||
9
vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
generated
vendored
9
vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
generated
vendored
|
|
@ -27,22 +27,31 @@ const (
|
|||
B57600 = 0x1001
|
||||
B576000 = 0x1006
|
||||
B921600 = 0x1007
|
||||
BLKALIGNOFF = 0x127a
|
||||
BLKBSZGET = 0x80081270
|
||||
BLKBSZSET = 0x40081271
|
||||
BLKDISCARD = 0x1277
|
||||
BLKDISCARDZEROES = 0x127c
|
||||
BLKFLSBUF = 0x1261
|
||||
BLKFRAGET = 0x1265
|
||||
BLKFRASET = 0x1264
|
||||
BLKGETDISKSEQ = 0x80081280
|
||||
BLKGETSIZE = 0x1260
|
||||
BLKGETSIZE64 = 0x80081272
|
||||
BLKIOMIN = 0x1278
|
||||
BLKIOOPT = 0x1279
|
||||
BLKPBSZGET = 0x127b
|
||||
BLKRAGET = 0x1263
|
||||
BLKRASET = 0x1262
|
||||
BLKROGET = 0x125e
|
||||
BLKROSET = 0x125d
|
||||
BLKROTATIONAL = 0x127e
|
||||
BLKRRPART = 0x125f
|
||||
BLKSECDISCARD = 0x127d
|
||||
BLKSECTGET = 0x1267
|
||||
BLKSECTSET = 0x1266
|
||||
BLKSSZGET = 0x1268
|
||||
BLKZEROOUT = 0x127f
|
||||
BOTHER = 0x1000
|
||||
BS1 = 0x2000
|
||||
BSDLY = 0x2000
|
||||
|
|
|
|||
9
vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
generated
vendored
9
vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
generated
vendored
|
|
@ -27,22 +27,31 @@ const (
|
|||
B57600 = 0x1001
|
||||
B576000 = 0x1006
|
||||
B921600 = 0x1007
|
||||
BLKALIGNOFF = 0x127a
|
||||
BLKBSZGET = 0x80081270
|
||||
BLKBSZSET = 0x40081271
|
||||
BLKDISCARD = 0x1277
|
||||
BLKDISCARDZEROES = 0x127c
|
||||
BLKFLSBUF = 0x1261
|
||||
BLKFRAGET = 0x1265
|
||||
BLKFRASET = 0x1264
|
||||
BLKGETDISKSEQ = 0x80081280
|
||||
BLKGETSIZE = 0x1260
|
||||
BLKGETSIZE64 = 0x80081272
|
||||
BLKIOMIN = 0x1278
|
||||
BLKIOOPT = 0x1279
|
||||
BLKPBSZGET = 0x127b
|
||||
BLKRAGET = 0x1263
|
||||
BLKRASET = 0x1262
|
||||
BLKROGET = 0x125e
|
||||
BLKROSET = 0x125d
|
||||
BLKROTATIONAL = 0x127e
|
||||
BLKRRPART = 0x125f
|
||||
BLKSECDISCARD = 0x127d
|
||||
BLKSECTGET = 0x1267
|
||||
BLKSECTSET = 0x1266
|
||||
BLKSSZGET = 0x1268
|
||||
BLKZEROOUT = 0x127f
|
||||
BOTHER = 0x1000
|
||||
BS1 = 0x2000
|
||||
BSDLY = 0x2000
|
||||
|
|
|
|||
9
vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
generated
vendored
9
vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
generated
vendored
|
|
@ -30,22 +30,31 @@ const (
|
|||
B57600 = 0x1001
|
||||
B576000 = 0x1006
|
||||
B921600 = 0x1007
|
||||
BLKALIGNOFF = 0x2000127a
|
||||
BLKBSZGET = 0x40081270
|
||||
BLKBSZSET = 0x80081271
|
||||
BLKDISCARD = 0x20001277
|
||||
BLKDISCARDZEROES = 0x2000127c
|
||||
BLKFLSBUF = 0x20001261
|
||||
BLKFRAGET = 0x20001265
|
||||
BLKFRASET = 0x20001264
|
||||
BLKGETDISKSEQ = 0x40081280
|
||||
BLKGETSIZE = 0x20001260
|
||||
BLKGETSIZE64 = 0x40081272
|
||||
BLKIOMIN = 0x20001278
|
||||
BLKIOOPT = 0x20001279
|
||||
BLKPBSZGET = 0x2000127b
|
||||
BLKRAGET = 0x20001263
|
||||
BLKRASET = 0x20001262
|
||||
BLKROGET = 0x2000125e
|
||||
BLKROSET = 0x2000125d
|
||||
BLKROTATIONAL = 0x2000127e
|
||||
BLKRRPART = 0x2000125f
|
||||
BLKSECDISCARD = 0x2000127d
|
||||
BLKSECTGET = 0x20001267
|
||||
BLKSECTSET = 0x20001266
|
||||
BLKSSZGET = 0x20001268
|
||||
BLKZEROOUT = 0x2000127f
|
||||
BOTHER = 0x1000
|
||||
BS1 = 0x2000
|
||||
BSDLY = 0x2000
|
||||
|
|
|
|||
2
vendor/golang.org/x/sys/unix/zsyscall_linux.go
generated
vendored
2
vendor/golang.org/x/sys/unix/zsyscall_linux.go
generated
vendored
|
|
@ -1356,7 +1356,7 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (
|
|||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
|
||||
func pselect6(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *sigset_argpack) (n int, err error) {
|
||||
r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)))
|
||||
n = int(r0)
|
||||
if e1 != 0 {
|
||||
|
|
|
|||
16
vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go
generated
vendored
16
vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go
generated
vendored
|
|
@ -531,3 +531,19 @@ func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, f
|
|||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func riscvHWProbe(pairs []RISCVHWProbePairs, cpuCount uintptr, cpus *CPUSet, flags uint) (err error) {
|
||||
var _p0 unsafe.Pointer
|
||||
if len(pairs) > 0 {
|
||||
_p0 = unsafe.Pointer(&pairs[0])
|
||||
} else {
|
||||
_p0 = unsafe.Pointer(&_zero)
|
||||
}
|
||||
_, _, e1 := Syscall6(SYS_RISCV_HWPROBE, uintptr(_p0), uintptr(len(pairs)), uintptr(cpuCount), uintptr(unsafe.Pointer(cpus)), uintptr(flags), 0)
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
|
|
|||
11
vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
generated
vendored
11
vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
generated
vendored
|
|
@ -1858,3 +1858,14 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error
|
|||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) {
|
||||
r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0)
|
||||
xaddr = uintptr(r0)
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
|
|
11 vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go generated vendored
@ -1858,3 +1858,14 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error
|
|||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) {
|
||||
r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0)
|
||||
xaddr = uintptr(r0)
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
|
|
11 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go generated vendored
@ -1858,3 +1858,14 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error
|
|||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) {
|
||||
r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0)
|
||||
xaddr = uintptr(r0)
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
|
|
11 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go generated vendored
@ -1858,3 +1858,14 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error
|
|||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) {
|
||||
r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0)
|
||||
xaddr = uintptr(r0)
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
|
|
2 vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go generated vendored
@ -251,6 +251,8 @@ const (
	SYS_ACCEPT4 = 242
	SYS_RECVMMSG = 243
	SYS_ARCH_SPECIFIC_SYSCALL = 244
	SYS_RISCV_HWPROBE = 258
	SYS_RISCV_FLUSH_ICACHE = 259
	SYS_WAIT4 = 260
	SYS_PRLIMIT64 = 261
	SYS_FANOTIFY_INIT = 262
5 vendor/golang.org/x/sys/unix/ztypes_linux.go generated vendored
@ -866,6 +866,11 @@ const (
	POLLNVAL = 0x20
)

type sigset_argpack struct {
	ss *Sigset_t
	ssLen uintptr
}

type SignalfdSiginfo struct {
	Signo uint32
	Errno int32
23 vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go generated vendored
@ -718,3 +718,26 @@ type SysvShmDesc struct {
	_ uint64
	_ uint64
}

type RISCVHWProbePairs struct {
	Key int64
	Value uint64
}

const (
	RISCV_HWPROBE_KEY_MVENDORID = 0x0
	RISCV_HWPROBE_KEY_MARCHID = 0x1
	RISCV_HWPROBE_KEY_MIMPID = 0x2
	RISCV_HWPROBE_KEY_BASE_BEHAVIOR = 0x3
	RISCV_HWPROBE_BASE_BEHAVIOR_IMA = 0x1
	RISCV_HWPROBE_KEY_IMA_EXT_0 = 0x4
	RISCV_HWPROBE_IMA_FD = 0x1
	RISCV_HWPROBE_IMA_C = 0x2
	RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5
	RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0
	RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1
	RISCV_HWPROBE_MISALIGNED_SLOW = 0x2
	RISCV_HWPROBE_MISALIGNED_FAST = 0x3
	RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4
	RISCV_HWPROBE_MISALIGNED_MASK = 0x7
)
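Illustration (not part of the diff): the RISCVHWProbePairs type and RISCV_HWPROBE_* constants above expose the Linux riscv_hwprobe interface, where each pair carries a key and a bitmask value. A minimal sketch of decoding one returned pair follows; it only builds for GOOS=linux GOARCH=riscv64, and the sample pair value is made up rather than obtained from the kernel (the actual syscall wrapper is the riscvHWProbe function added above).

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// reportIMAExtensions decodes the RISCV_HWPROBE_KEY_IMA_EXT_0 bitmask using
// the constants introduced in this update.
func reportIMAExtensions(p unix.RISCVHWProbePairs) {
	if p.Key != unix.RISCV_HWPROBE_KEY_IMA_EXT_0 {
		return
	}
	fmt.Println("FD (float/double) extension:", p.Value&unix.RISCV_HWPROBE_IMA_FD != 0)
	fmt.Println("C (compressed) extension:", p.Value&unix.RISCV_HWPROBE_IMA_C != 0)
}

func main() {
	// Hypothetical pair, as the hwprobe syscall might fill it in.
	pair := unix.RISCVHWProbePairs{
		Key:   unix.RISCV_HWPROBE_KEY_IMA_EXT_0,
		Value: unix.RISCV_HWPROBE_IMA_FD | unix.RISCV_HWPROBE_IMA_C,
	}
	reportIMAExtensions(pair)
}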
4 vendor/golang.org/x/sys/windows/syscall_windows.go generated vendored
@ -135,14 +135,14 @@ func Getpagesize() int { return 4096 }

// NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention.
// This is useful when interoperating with Windows code requiring callbacks.
// The argument is expected to be a function with with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
func NewCallback(fn interface{}) uintptr {
	return syscall.NewCallback(fn)
}

// NewCallbackCDecl converts a Go function to a function pointer conforming to the cdecl calling convention.
// This is useful when interoperating with Windows code requiring callbacks.
// The argument is expected to be a function with with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
func NewCallbackCDecl(fn interface{}) uintptr {
	return syscall.NewCallbackCDecl(fn)
}
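Illustration (not part of the diff): the corrected comments above spell out NewCallback's contract, namely a callback with one uintptr-sized result and no oversized arguments. A minimal, hedged sketch of creating such a callback follows; the two-argument signature is the usual window-enumeration style, but the Win32 call that would consume it is deliberately left out.

//go:build windows

package main

import "golang.org/x/sys/windows"

func main() {
	// The function passed to NewCallback must return a single uintptr-sized
	// result, exactly as the fixed doc comment above states.
	cb := windows.NewCallback(func(hwnd, lparam uintptr) uintptr {
		return 1 // non-zero conventionally means "keep going" for enumeration callbacks
	})
	_ = cb // would be handed to a Windows API expecting a stdcall function pointer
}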
2 vendor/golang.org/x/text/language/match.go generated vendored
@ -434,7 +434,7 @@ func newMatcher(supported []Tag, options []MatchOption) *matcher {
	// (their canonicalization simply substitutes a different language code, but
	// nothing else), the match confidence is Exact, otherwise it is High.
	for i, lm := range language.AliasMap {
		// If deprecated codes match and there is no fiddling with the script or
		// If deprecated codes match and there is no fiddling with the script
		// or region, we consider it an exact match.
		conf := Exact
		if language.AliasTypes[i] != language.Macro {
2 vendor/google.golang.org/api/internal/version.go generated vendored
@ -5,4 +5,4 @@
package internal

// Version is the current tagged release of the library.
const Version = "0.134.0"
const Version = "0.135.0"
28 vendor/google.golang.org/grpc/attributes/attributes.go generated vendored
@ -112,19 +112,31 @@ func (a *Attributes) String() string {
|
|||
sb.WriteString("{")
|
||||
first := true
|
||||
for k, v := range a.m {
|
||||
var key, val string
|
||||
if str, ok := k.(interface{ String() string }); ok {
|
||||
key = str.String()
|
||||
}
|
||||
if str, ok := v.(interface{ String() string }); ok {
|
||||
val = str.String()
|
||||
}
|
||||
if !first {
|
||||
sb.WriteString(", ")
|
||||
}
|
||||
sb.WriteString(fmt.Sprintf("%q: %q, ", key, val))
|
||||
sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v)))
|
||||
first = false
|
||||
}
|
||||
sb.WriteString("}")
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
func str(x interface{}) string {
|
||||
if v, ok := x.(fmt.Stringer); ok {
|
||||
return v.String()
|
||||
} else if v, ok := x.(string); ok {
|
||||
return v
|
||||
}
|
||||
return fmt.Sprintf("<%p>", x)
|
||||
}
|
||||
|
||||
// MarshalJSON helps implement the json.Marshaler interface, thereby rendering
|
||||
// the Attributes correctly when printing (via pretty.JSON) structs containing
|
||||
// Attributes as fields.
|
||||
//
|
||||
// Is it impossible to unmarshal attributes from a JSON representation and this
|
||||
// method is meant only for debugging purposes.
|
||||
func (a *Attributes) MarshalJSON() ([]byte, error) {
|
||||
return []byte(a.String()), nil
|
||||
}
|
||||
|
|
|
91 vendor/google.golang.org/grpc/clientconn.go generated vendored
@ -37,6 +37,7 @@ import (
|
|||
"google.golang.org/grpc/internal/backoff"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/grpcsync"
|
||||
"google.golang.org/grpc/internal/pretty"
|
||||
iresolver "google.golang.org/grpc/internal/resolver"
|
||||
"google.golang.org/grpc/internal/transport"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
|
|
@ -867,6 +868,20 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi
|
|||
cc.balancerWrapper.updateSubConnState(sc, s, err)
|
||||
}
|
||||
|
||||
// Makes a copy of the input addresses slice and clears out the balancer
|
||||
// attributes field. Addresses are passed during subconn creation and address
|
||||
// update operations. In both cases, we will clear the balancer attributes by
|
||||
// calling this function, and therefore we will be able to use the Equal method
|
||||
// provided by the resolver.Address type for comparison.
|
||||
func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address {
|
||||
out := make([]resolver.Address, len(in))
|
||||
for i := range in {
|
||||
out[i] = in[i]
|
||||
out[i].BalancerAttributes = nil
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// newAddrConn creates an addrConn for addrs and adds it to cc.conns.
|
||||
//
|
||||
// Caller needs to make sure len(addrs) > 0.
|
||||
|
|
@ -874,7 +889,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub
|
|||
ac := &addrConn{
|
||||
state: connectivity.Idle,
|
||||
cc: cc,
|
||||
addrs: addrs,
|
||||
addrs: copyAddressesWithoutBalancerAttributes(addrs),
|
||||
scopts: opts,
|
||||
dopts: cc.dopts,
|
||||
czData: new(channelzData),
|
||||
|
|
@ -995,8 +1010,9 @@ func equalAddresses(a, b []resolver.Address) bool {
|
|||
// connections or connection attempts.
|
||||
func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
|
||||
ac.mu.Lock()
|
||||
channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
|
||||
channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs))
|
||||
|
||||
addrs = copyAddressesWithoutBalancerAttributes(addrs)
|
||||
if equalAddresses(ac.addrs, addrs) {
|
||||
ac.mu.Unlock()
|
||||
return
|
||||
|
|
@ -1807,19 +1823,70 @@ func (cc *ClientConn) parseTargetAndFindResolver() error {
|
|||
}
|
||||
|
||||
// parseTarget uses RFC 3986 semantics to parse the given target into a
|
||||
// resolver.Target struct containing scheme, authority and url. Query
|
||||
// params are stripped from the endpoint.
|
||||
// resolver.Target struct containing url. Query params are stripped from the
|
||||
// endpoint.
|
||||
func parseTarget(target string) (resolver.Target, error) {
|
||||
u, err := url.Parse(target)
|
||||
if err != nil {
|
||||
return resolver.Target{}, err
|
||||
}
|
||||
|
||||
return resolver.Target{
|
||||
Scheme: u.Scheme,
|
||||
Authority: u.Host,
|
||||
URL: *u,
|
||||
}, nil
|
||||
return resolver.Target{URL: *u}, nil
|
||||
}
|
||||
|
||||
func encodeAuthority(authority string) string {
|
||||
const upperhex = "0123456789ABCDEF"
|
||||
|
||||
// Return for characters that must be escaped as per
|
||||
// Valid chars are mentioned here:
|
||||
// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2
|
||||
shouldEscape := func(c byte) bool {
|
||||
// Alphanum are always allowed.
|
||||
if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
|
||||
return false
|
||||
}
|
||||
switch c {
|
||||
case '-', '_', '.', '~': // Unreserved characters
|
||||
return false
|
||||
case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters
|
||||
return false
|
||||
case ':', '[', ']', '@': // Authority related delimeters
|
||||
return false
|
||||
}
|
||||
// Everything else must be escaped.
|
||||
return true
|
||||
}
|
||||
|
||||
hexCount := 0
|
||||
for i := 0; i < len(authority); i++ {
|
||||
c := authority[i]
|
||||
if shouldEscape(c) {
|
||||
hexCount++
|
||||
}
|
||||
}
|
||||
|
||||
if hexCount == 0 {
|
||||
return authority
|
||||
}
|
||||
|
||||
required := len(authority) + 2*hexCount
|
||||
t := make([]byte, required)
|
||||
|
||||
j := 0
|
||||
// This logic is a barebones version of escape in the go net/url library.
|
||||
for i := 0; i < len(authority); i++ {
|
||||
switch c := authority[i]; {
|
||||
case shouldEscape(c):
|
||||
t[j] = '%'
|
||||
t[j+1] = upperhex[c>>4]
|
||||
t[j+2] = upperhex[c&15]
|
||||
j += 3
|
||||
default:
|
||||
t[j] = authority[i]
|
||||
j++
|
||||
}
|
||||
}
|
||||
return string(t)
|
||||
}
|
||||
|
||||
// Determine channel authority. The order of precedence is as follows:
|
||||
|
|
@ -1872,7 +1939,11 @@ func (cc *ClientConn) determineAuthority() error {
|
|||
// the channel authority given the user's dial target. For resolvers
|
||||
// which don't implement this interface, we will use the endpoint from
|
||||
// "scheme://authority/endpoint" as the default authority.
|
||||
cc.authority = endpoint
|
||||
// Escape the endpoint to handle use cases where the endpoint
|
||||
// might not be a valid authority by default.
|
||||
// For example an endpoint which has multiple paths like
|
||||
// 'a/b/c', which is not a valid authority by default.
|
||||
cc.authority = encodeAuthority(endpoint)
|
||||
}
|
||||
channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)
|
||||
return nil
|
||||
|
|
|
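Illustration (not part of the diff): parseTarget now keeps only the parsed URL in resolver.Target, and determineAuthority percent-encodes the endpoint via encodeAuthority when the resolver does not override it. The standard-library sketch below shows which URL fields a dial target breaks into; the target string is a placeholder and the printed values are simply what net/url returns for it.

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// A dial target in "scheme://authority/endpoint" form (placeholder values).
	u, err := url.Parse("dns://8.8.8.8/grpc.example.com:443")
	if err != nil {
		panic(err)
	}
	// After this change, resolver.Target carries only this parsed URL; the
	// scheme and authority are read from it (see resolver/resolver.go below).
	fmt.Println("scheme:", u.Scheme)  // dns
	fmt.Println("authority:", u.Host) // 8.8.8.8
	fmt.Println("endpoint:", u.Path)  // /grpc.example.com:443
}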
57 vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go generated vendored
@ -25,8 +25,8 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/sync/semaphore"
|
||||
grpc "google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
|
|
@ -35,15 +35,13 @@ import (
|
|||
"google.golang.org/grpc/credentials/alts/internal/conn"
|
||||
altsgrpc "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"
|
||||
altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"
|
||||
"google.golang.org/grpc/internal/envconfig"
|
||||
)
|
||||
|
||||
const (
|
||||
// The maximum byte size of receive frames.
|
||||
frameLimit = 64 * 1024 // 64 KB
|
||||
rekeyRecordProtocolName = "ALTSRP_GCM_AES128_REKEY"
|
||||
// maxPendingHandshakes represents the maximum number of concurrent
|
||||
// handshakes.
|
||||
maxPendingHandshakes = 100
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
@ -59,9 +57,9 @@ var (
|
|||
return conn.NewAES128GCMRekey(s, keyData)
|
||||
},
|
||||
}
|
||||
// control number of concurrent created (but not closed) handshakers.
|
||||
mu sync.Mutex
|
||||
concurrentHandshakes = int64(0)
|
||||
// control number of concurrent created (but not closed) handshakes.
|
||||
clientHandshakes = semaphore.NewWeighted(int64(envconfig.ALTSMaxConcurrentHandshakes))
|
||||
serverHandshakes = semaphore.NewWeighted(int64(envconfig.ALTSMaxConcurrentHandshakes))
|
||||
// errDropped occurs when maxPendingHandshakes is reached.
|
||||
errDropped = errors.New("maximum number of concurrent ALTS handshakes is reached")
|
||||
// errOutOfBound occurs when the handshake service returns a consumed
|
||||
|
|
@ -77,30 +75,6 @@ func init() {
|
|||
}
|
||||
}
|
||||
|
||||
func acquire() bool {
|
||||
mu.Lock()
|
||||
// If we need n to be configurable, we can pass it as an argument.
|
||||
n := int64(1)
|
||||
success := maxPendingHandshakes-concurrentHandshakes >= n
|
||||
if success {
|
||||
concurrentHandshakes += n
|
||||
}
|
||||
mu.Unlock()
|
||||
return success
|
||||
}
|
||||
|
||||
func release() {
|
||||
mu.Lock()
|
||||
// If we need n to be configurable, we can pass it as an argument.
|
||||
n := int64(1)
|
||||
concurrentHandshakes -= n
|
||||
if concurrentHandshakes < 0 {
|
||||
mu.Unlock()
|
||||
panic("bad release")
|
||||
}
|
||||
mu.Unlock()
|
||||
}
|
||||
|
||||
// ClientHandshakerOptions contains the client handshaker options that can
|
||||
// provided by the caller.
|
||||
type ClientHandshakerOptions struct {
|
||||
|
|
@ -134,10 +108,6 @@ func DefaultServerHandshakerOptions() *ServerHandshakerOptions {
|
|||
return &ServerHandshakerOptions{}
|
||||
}
|
||||
|
||||
// TODO: add support for future local and remote endpoint in both client options
|
||||
// and server options (server options struct does not exist now. When
|
||||
// caller can provide endpoints, it should be created.
|
||||
|
||||
// altsHandshaker is used to complete an ALTS handshake between client and
|
||||
// server. This handshaker talks to the ALTS handshaker service in the metadata
|
||||
// server.
|
||||
|
|
@ -185,10 +155,10 @@ func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn,
|
|||
// ClientHandshake starts and completes a client ALTS handshake for GCP. Once
|
||||
// done, ClientHandshake returns a secure connection.
|
||||
func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) {
|
||||
if !acquire() {
|
||||
if !clientHandshakes.TryAcquire(1) {
|
||||
return nil, nil, errDropped
|
||||
}
|
||||
defer release()
|
||||
defer clientHandshakes.Release(1)
|
||||
|
||||
if h.side != core.ClientSide {
|
||||
return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client handshaker")
|
||||
|
|
@ -238,10 +208,10 @@ func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credent
|
|||
// ServerHandshake starts and completes a server ALTS handshake for GCP. Once
|
||||
// done, ServerHandshake returns a secure connection.
|
||||
func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) {
|
||||
if !acquire() {
|
||||
if !serverHandshakes.TryAcquire(1) {
|
||||
return nil, nil, errDropped
|
||||
}
|
||||
defer release()
|
||||
defer serverHandshakes.Release(1)
|
||||
|
||||
if h.side != core.ServerSide {
|
||||
return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server handshaker")
|
||||
|
|
@ -264,8 +234,6 @@ func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credent
|
|||
}
|
||||
|
||||
// Prepare server parameters.
|
||||
// TODO: currently only ALTS parameters are provided. Might need to use
|
||||
// more options in the future.
|
||||
params := make(map[int32]*altspb.ServerHandshakeParameters)
|
||||
params[int32(altspb.HandshakeProtocol_ALTS)] = &altspb.ServerHandshakeParameters{
|
||||
RecordProtocols: recordProtocols,
|
||||
|
|
@ -391,3 +359,10 @@ func (h *altsHandshaker) Close() {
|
|||
h.stream.CloseSend()
|
||||
}
|
||||
}
|
||||
|
||||
// ResetConcurrentHandshakeSemaphoreForTesting resets the handshake semaphores
|
||||
// to allow numberOfAllowedHandshakes concurrent handshakes each.
|
||||
func ResetConcurrentHandshakeSemaphoreForTesting(numberOfAllowedHandshakes int64) {
|
||||
clientHandshakes = semaphore.NewWeighted(numberOfAllowedHandshakes)
|
||||
serverHandshakes = semaphore.NewWeighted(numberOfAllowedHandshakes)
|
||||
}
|
||||
|
|
|
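Illustration (not part of the diff): the handshaker now limits concurrent client and server ALTS handshakes with weighted semaphores sized by GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES, rejecting rather than queueing when the limit is hit. A minimal sketch of that TryAcquire/Release pattern with golang.org/x/sync/semaphore follows; the limit of 3 and the empty work section are placeholders.

package main

import (
	"errors"
	"fmt"

	"golang.org/x/sync/semaphore"
)

// handshakes caps concurrent handshakes, mirroring the clientHandshakes and
// serverHandshakes semaphores in the change above.
var handshakes = semaphore.NewWeighted(3) // placeholder limit

var errDropped = errors.New("maximum number of concurrent handshakes is reached")

func doHandshake() error {
	if !handshakes.TryAcquire(1) {
		return errDropped // reject immediately instead of queueing
	}
	defer handshakes.Release(1)
	// ... perform the handshake ...
	return nil
}

func main() {
	fmt.Println(doHandshake())
}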
23 vendor/google.golang.org/grpc/dialoptions.go generated vendored
@ -78,6 +78,7 @@ type dialOptions struct {
|
|||
defaultServiceConfigRawJSON *string
|
||||
resolvers []resolver.Builder
|
||||
idleTimeout time.Duration
|
||||
recvBufferPool SharedBufferPool
|
||||
}
|
||||
|
||||
// DialOption configures how we set up the connection.
|
||||
|
|
@ -628,6 +629,7 @@ func defaultDialOptions() dialOptions {
|
|||
ReadBufferSize: defaultReadBufSize,
|
||||
UseProxy: true,
|
||||
},
|
||||
recvBufferPool: nopBufferPool{},
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -676,3 +678,24 @@ func WithIdleTimeout(d time.Duration) DialOption {
|
|||
o.idleTimeout = d
|
||||
})
|
||||
}
|
||||
|
||||
// WithRecvBufferPool returns a DialOption that configures the ClientConn
|
||||
// to use the provided shared buffer pool for parsing incoming messages. Depending
|
||||
// on the application's workload, this could result in reduced memory allocation.
|
||||
//
|
||||
// If you are unsure about how to implement a memory pool but want to utilize one,
|
||||
// begin with grpc.NewSharedBufferPool.
|
||||
//
|
||||
// Note: The shared buffer pool feature will not be active if any of the following
|
||||
// options are used: WithStatsHandler, EnableTracing, or binary logging. In such
|
||||
// cases, the shared buffer pool will be ignored.
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.recvBufferPool = bufferPool
|
||||
})
|
||||
}
|
||||
|
|
|
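Illustration (not part of the diff): WithRecvBufferPool and the grpc.NewSharedBufferPool constructor it is meant to be paired with are both introduced in this update. A hedged client-side usage sketch follows; the address is a placeholder and, as the comment above notes, the option is experimental and ignored when stats handlers, tracing, or binary logging are in use.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Reuse receive buffers across RPCs on this connection (experimental).
	conn, err := grpc.Dial("localhost:50051", // placeholder address
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithRecvBufferPool(grpc.NewSharedBufferPool()),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}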
3 vendor/google.golang.org/grpc/internal/envconfig/envconfig.go generated vendored
@ -40,6 +40,9 @@ var (
	// pick_first LB policy, which can be enabled by setting the environment
	// variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true".
	PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false)
	// ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS
	// handshakes that can be performed.
	ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100)
)

func boolFromEnv(envVar string, def bool) bool {
7 vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go generated vendored
@ -80,6 +80,13 @@ func Uint32() uint32 {
	return r.Uint32()
}

// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source.
func ExpFloat64() float64 {
	mu.Lock()
	defer mu.Unlock()
	return r.ExpFloat64()
}

// Shuffle implements rand.Shuffle on the grpcrand global source.
var Shuffle = func(n int, f func(int, int)) {
	mu.Lock()
136 vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go generated vendored Normal file
@ -0,0 +1,136 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2023 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpcsync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Subscriber represents an entity that is subscribed to messages published on
|
||||
// a PubSub. It wraps the callback to be invoked by the PubSub when a new
|
||||
// message is published.
|
||||
type Subscriber interface {
|
||||
// OnMessage is invoked when a new message is published. Implementations
|
||||
// must not block in this method.
|
||||
OnMessage(msg interface{})
|
||||
}
|
||||
|
||||
// PubSub is a simple one-to-many publish-subscribe system that supports
|
||||
// messages of arbitrary type. It guarantees that messages are delivered in
|
||||
// the same order in which they were published.
|
||||
//
|
||||
// Publisher invokes the Publish() method to publish new messages, while
|
||||
// subscribers interested in receiving these messages register a callback
|
||||
// via the Subscribe() method.
|
||||
//
|
||||
// Once a PubSub is stopped, no more messages can be published, and
|
||||
// it is guaranteed that no more subscriber callback will be invoked.
|
||||
type PubSub struct {
|
||||
cs *CallbackSerializer
|
||||
cancel context.CancelFunc
|
||||
|
||||
// Access to the below fields are guarded by this mutex.
|
||||
mu sync.Mutex
|
||||
msg interface{}
|
||||
subscribers map[Subscriber]bool
|
||||
stopped bool
|
||||
}
|
||||
|
||||
// NewPubSub returns a new PubSub instance.
|
||||
func NewPubSub() *PubSub {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
return &PubSub{
|
||||
cs: NewCallbackSerializer(ctx),
|
||||
cancel: cancel,
|
||||
subscribers: map[Subscriber]bool{},
|
||||
}
|
||||
}
|
||||
|
||||
// Subscribe registers the provided Subscriber to the PubSub.
|
||||
//
|
||||
// If the PubSub contains a previously published message, the Subscriber's
|
||||
// OnMessage() callback will be invoked asynchronously with the existing
|
||||
// message to begin with, and subsequently for every newly published message.
|
||||
//
|
||||
// The caller is responsible for invoking the returned cancel function to
|
||||
// unsubscribe itself from the PubSub.
|
||||
func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) {
|
||||
ps.mu.Lock()
|
||||
defer ps.mu.Unlock()
|
||||
|
||||
if ps.stopped {
|
||||
return func() {}
|
||||
}
|
||||
|
||||
ps.subscribers[sub] = true
|
||||
|
||||
if ps.msg != nil {
|
||||
msg := ps.msg
|
||||
ps.cs.Schedule(func(context.Context) {
|
||||
ps.mu.Lock()
|
||||
defer ps.mu.Unlock()
|
||||
if !ps.subscribers[sub] {
|
||||
return
|
||||
}
|
||||
sub.OnMessage(msg)
|
||||
})
|
||||
}
|
||||
|
||||
return func() {
|
||||
ps.mu.Lock()
|
||||
defer ps.mu.Unlock()
|
||||
delete(ps.subscribers, sub)
|
||||
}
|
||||
}
|
||||
|
||||
// Publish publishes the provided message to the PubSub, and invokes
|
||||
// callbacks registered by subscribers asynchronously.
|
||||
func (ps *PubSub) Publish(msg interface{}) {
|
||||
ps.mu.Lock()
|
||||
defer ps.mu.Unlock()
|
||||
|
||||
if ps.stopped {
|
||||
return
|
||||
}
|
||||
|
||||
ps.msg = msg
|
||||
for sub := range ps.subscribers {
|
||||
s := sub
|
||||
ps.cs.Schedule(func(context.Context) {
|
||||
ps.mu.Lock()
|
||||
defer ps.mu.Unlock()
|
||||
if !ps.subscribers[s] {
|
||||
return
|
||||
}
|
||||
s.OnMessage(msg)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Stop shuts down the PubSub and releases any resources allocated by it.
|
||||
// It is guaranteed that no subscriber callbacks would be invoked once this
|
||||
// method returns.
|
||||
func (ps *PubSub) Stop() {
|
||||
ps.mu.Lock()
|
||||
defer ps.mu.Unlock()
|
||||
ps.stopped = true
|
||||
|
||||
ps.cancel()
|
||||
}
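Illustration (not part of the diff): grpcsync.PubSub replays the latest published message to new subscribers and delivers later messages in order through a callback serializer. Because the package is internal to the gRPC module, the sketch below only builds inside that module; it simply restates the Subscribe/Publish/Stop pattern defined above, with the subscriber and message being placeholders.

package grpcsync_test // note: grpcsync is internal, so this only builds inside the gRPC module

import (
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

// printSubscriber implements grpcsync.Subscriber; OnMessage must not block.
type printSubscriber struct{}

func (printSubscriber) OnMessage(msg interface{}) { fmt.Println("got:", msg) }

func usePubSub() {
	ps := grpcsync.NewPubSub()
	defer ps.Stop() // no subscriber callbacks run once Stop has returned

	cancel := ps.Subscribe(printSubscriber{}) // also replays the latest message, if any
	defer cancel()

	ps.Publish("hello") // delivered asynchronously, in publish order
}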
74 vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go generated vendored
@ -62,7 +62,8 @@ const (
|
|||
defaultPort = "443"
|
||||
defaultDNSSvrPort = "53"
|
||||
golang = "GO"
|
||||
// txtPrefix is the prefix string to be prepended to the host name for txt record lookup.
|
||||
// txtPrefix is the prefix string to be prepended to the host name for txt
|
||||
// record lookup.
|
||||
txtPrefix = "_grpc_config."
|
||||
// In DNS, service config is encoded in a TXT record via the mechanism
|
||||
// described in RFC-1464 using the attribute name grpc_config.
|
||||
|
|
@ -86,14 +87,14 @@ var (
|
|||
minDNSResRate = 30 * time.Second
|
||||
)
|
||||
|
||||
var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
|
||||
return func(ctx context.Context, network, address string) (net.Conn, error) {
|
||||
var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) {
|
||||
return func(ctx context.Context, network, _ string) (net.Conn, error) {
|
||||
var dialer net.Dialer
|
||||
return dialer.DialContext(ctx, network, authority)
|
||||
return dialer.DialContext(ctx, network, address)
|
||||
}
|
||||
}
|
||||
|
||||
var customAuthorityResolver = func(authority string) (netResolver, error) {
|
||||
var newNetResolver = func(authority string) (netResolver, error) {
|
||||
host, port, err := parseTarget(authority, defaultDNSSvrPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -103,7 +104,7 @@ var customAuthorityResolver = func(authority string) (netResolver, error) {
|
|||
|
||||
return &net.Resolver{
|
||||
PreferGo: true,
|
||||
Dial: customAuthorityDialler(authorityWithPort),
|
||||
Dial: addressDialer(authorityWithPort),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
@ -114,7 +115,8 @@ func NewBuilder() resolver.Builder {
|
|||
|
||||
type dnsBuilder struct{}
|
||||
|
||||
// Build creates and starts a DNS resolver that watches the name resolution of the target.
|
||||
// Build creates and starts a DNS resolver that watches the name resolution of
|
||||
// the target.
|
||||
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
|
||||
host, port, err := parseTarget(target.Endpoint(), defaultPort)
|
||||
if err != nil {
|
||||
|
|
@ -143,7 +145,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
|
|||
if target.URL.Host == "" {
|
||||
d.resolver = defaultResolver
|
||||
} else {
|
||||
d.resolver, err = customAuthorityResolver(target.URL.Host)
|
||||
d.resolver, err = newNetResolver(target.URL.Host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -180,19 +182,22 @@ type dnsResolver struct {
|
|||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
cc resolver.ClientConn
|
||||
// rn channel is used by ResolveNow() to force an immediate resolution of the target.
|
||||
// rn channel is used by ResolveNow() to force an immediate resolution of the
|
||||
// target.
|
||||
rn chan struct{}
|
||||
// wg is used to enforce Close() to return after the watcher() goroutine has finished.
|
||||
// Otherwise, data race will be possible. [Race Example] in dns_resolver_test we
|
||||
// replace the real lookup functions with mocked ones to facilitate testing.
|
||||
// If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes
|
||||
// will warns lookup (READ the lookup function pointers) inside watcher() goroutine
|
||||
// has data race with replaceNetFunc (WRITE the lookup function pointers).
|
||||
// wg is used to enforce Close() to return after the watcher() goroutine has
|
||||
// finished. Otherwise, data race will be possible. [Race Example] in
|
||||
// dns_resolver_test we replace the real lookup functions with mocked ones to
|
||||
// facilitate testing. If Close() doesn't wait for watcher() goroutine
|
||||
// finishes, race detector sometimes will warns lookup (READ the lookup
|
||||
// function pointers) inside watcher() goroutine has data race with
|
||||
// replaceNetFunc (WRITE the lookup function pointers).
|
||||
wg sync.WaitGroup
|
||||
disableServiceConfig bool
|
||||
}
|
||||
|
||||
// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches.
|
||||
// ResolveNow invoke an immediate resolution of the target that this
|
||||
// dnsResolver watches.
|
||||
func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
|
||||
select {
|
||||
case d.rn <- struct{}{}:
|
||||
|
|
@ -220,8 +225,8 @@ func (d *dnsResolver) watcher() {
|
|||
|
||||
var timer *time.Timer
|
||||
if err == nil {
|
||||
// Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least
|
||||
// to prevent constantly re-resolving.
|
||||
// Success resolving, wait for the next ResolveNow. However, also wait 30
|
||||
// seconds at the very least to prevent constantly re-resolving.
|
||||
backoffIndex = 1
|
||||
timer = newTimerDNSResRate(minDNSResRate)
|
||||
select {
|
||||
|
|
@ -231,7 +236,8 @@ func (d *dnsResolver) watcher() {
|
|||
case <-d.rn:
|
||||
}
|
||||
} else {
|
||||
// Poll on an error found in DNS Resolver or an error received from ClientConn.
|
||||
// Poll on an error found in DNS Resolver or an error received from
|
||||
// ClientConn.
|
||||
timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex))
|
||||
backoffIndex++
|
||||
}
|
||||
|
|
@ -278,7 +284,8 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
|
|||
}
|
||||
|
||||
func handleDNSError(err error, lookupType string) error {
|
||||
if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
|
||||
dnsErr, ok := err.(*net.DNSError)
|
||||
if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
|
||||
// Timeouts and temporary errors should be communicated to gRPC to
|
||||
// attempt another DNS query (with backoff). Other errors should be
|
||||
// suppressed (they may represent the absence of a TXT record).
|
||||
|
|
@ -307,10 +314,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
|
|||
res += s
|
||||
}
|
||||
|
||||
// TXT record must have "grpc_config=" attribute in order to be used as service config.
|
||||
// TXT record must have "grpc_config=" attribute in order to be used as
|
||||
// service config.
|
||||
if !strings.HasPrefix(res, txtAttribute) {
|
||||
logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
|
||||
// This is not an error; it is the equivalent of not having a service config.
|
||||
// This is not an error; it is the equivalent of not having a service
|
||||
// config.
|
||||
return nil
|
||||
}
|
||||
sc := canaryingSC(strings.TrimPrefix(res, txtAttribute))
|
||||
|
|
@ -352,9 +361,10 @@ func (d *dnsResolver) lookup() (*resolver.State, error) {
|
|||
return &state, nil
|
||||
}
|
||||
|
||||
// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
|
||||
// If addr is an IPv4 address, return the addr and ok = true.
|
||||
// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
|
||||
// formatIP returns ok = false if addr is not a valid textual representation of
|
||||
// an IP address. If addr is an IPv4 address, return the addr and ok = true.
|
||||
// If addr is an IPv6 address, return the addr enclosed in square brackets and
|
||||
// ok = true.
|
||||
func formatIP(addr string) (addrIP string, ok bool) {
|
||||
ip := net.ParseIP(addr)
|
||||
if ip == nil {
|
||||
|
|
@ -366,10 +376,10 @@ func formatIP(addr string) (addrIP string, ok bool) {
|
|||
return "[" + addr + "]", true
|
||||
}
|
||||
|
||||
// parseTarget takes the user input target string and default port, returns formatted host and port info.
|
||||
// If target doesn't specify a port, set the port to be the defaultPort.
|
||||
// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
|
||||
// are stripped when setting the host.
|
||||
// parseTarget takes the user input target string and default port, returns
|
||||
// formatted host and port info. If target doesn't specify a port, set the port
|
||||
// to be the defaultPort. If target is in IPv6 format and host-name is enclosed
|
||||
// in square brackets, brackets are stripped when setting the host.
|
||||
// examples:
|
||||
// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443"
|
||||
// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80"
|
||||
|
|
@ -385,12 +395,14 @@ func parseTarget(target, defaultPort string) (host, port string, err error) {
|
|||
}
|
||||
if host, port, err = net.SplitHostPort(target); err == nil {
|
||||
if port == "" {
|
||||
// If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error.
|
||||
// If the port field is empty (target ends with colon), e.g. "[::1]:",
|
||||
// this is an error.
|
||||
return "", "", errEndsWithColon
|
||||
}
|
||||
// target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
|
||||
if host == "" {
|
||||
// Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
|
||||
// Keep consistent with net.Dial(): If the host is empty, as in ":80",
|
||||
// the local system is assumed.
|
||||
host = "localhost"
|
||||
}
|
||||
return host, port, nil
|
||||
|
|
|
2 vendor/google.golang.org/grpc/internal/transport/http2_server.go generated vendored
@ -238,7 +238,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
		kp.Timeout = defaultServerKeepaliveTimeout
	}
	if kp.Time != infinity {
		if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
		if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil {
			return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
		}
	}
8 vendor/google.golang.org/grpc/resolver/resolver.go generated vendored
@ -142,6 +142,10 @@ type Address struct {

// Equal returns whether a and o are identical. Metadata is compared directly,
// not with any recursive introspection.
//
// This method compares all fields of the address. When used to tell apart
// addresses during subchannel creation or connection establishment, it might be
// more appropriate for the caller to implement custom equality logic.
func (a Address) Equal(o Address) bool {
	return a.Addr == o.Addr && a.ServerName == o.ServerName &&
		a.Attributes.Equal(o.Attributes) &&
@ -264,10 +268,6 @@ type ClientConn interface {
// - "unknown_scheme://authority/endpoint"
// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}
type Target struct {
	// Deprecated: use URL.Scheme instead.
	Scheme string
	// Deprecated: use URL.Host instead.
	Authority string
	// URL contains the parsed dial target with an optional default scheme added
	// to it if the original dial target contained no scheme or contained an
	// unregistered scheme. Any query params specified in the original dial
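Illustration (not part of the diff): with the deprecated Scheme and Authority fields gone from resolver.Target, custom resolvers read those values from Target.URL instead. A hedged sketch of a resolver.Builder doing so follows; the "example" scheme and the error it returns are placeholders, not a working resolver.

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

type exampleBuilder struct{}

// Scheme registers a made-up scheme for this sketch.
func (exampleBuilder) Scheme() string { return "example" }

func (exampleBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	// Previously target.Scheme and target.Authority; now taken from the URL.
	fmt.Println("scheme:", target.URL.Scheme)
	fmt.Println("authority:", target.URL.Host)
	fmt.Println("endpoint:", target.Endpoint())
	return nil, fmt.Errorf("sketch only: not a real resolver")
}

func main() {
	resolver.Register(exampleBuilder{})
}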
27 vendor/google.golang.org/grpc/rpc_util.go generated vendored
@ -577,6 +577,9 @@ type parser struct {
|
|||
// The header of a gRPC message. Find more detail at
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
|
||||
header [5]byte
|
||||
|
||||
// recvBufferPool is the pool of shared receive buffers.
|
||||
recvBufferPool SharedBufferPool
|
||||
}
|
||||
|
||||
// recvMsg reads a complete gRPC message from the stream.
|
||||
|
|
@ -610,9 +613,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
|
|||
if int(length) > maxReceiveMessageSize {
|
||||
return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
|
||||
}
|
||||
// TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
|
||||
// of making it for each message:
|
||||
msg = make([]byte, int(length))
|
||||
msg = p.recvBufferPool.Get(int(length))
|
||||
if _, err := p.r.Read(msg); err != nil {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
|
|
@ -726,12 +727,12 @@ type payloadInfo struct {
|
|||
}
|
||||
|
||||
func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) {
|
||||
pf, d, err := p.recvMsg(maxReceiveMessageSize)
|
||||
pf, buf, err := p.recvMsg(maxReceiveMessageSize)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if payInfo != nil {
|
||||
payInfo.compressedLength = len(d)
|
||||
payInfo.compressedLength = len(buf)
|
||||
}
|
||||
|
||||
if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
|
||||
|
|
@ -743,10 +744,10 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
|
|||
// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
|
||||
// use this decompressor as the default.
|
||||
if dc != nil {
|
||||
d, err = dc.Do(bytes.NewReader(d))
|
||||
size = len(d)
|
||||
buf, err = dc.Do(bytes.NewReader(buf))
|
||||
size = len(buf)
|
||||
} else {
|
||||
d, size, err = decompress(compressor, d, maxReceiveMessageSize)
|
||||
buf, size, err = decompress(compressor, buf, maxReceiveMessageSize)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
|
||||
|
|
@ -757,7 +758,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
|
|||
return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
|
||||
}
|
||||
}
|
||||
return d, nil
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// Using compressor, decompress d, returning data and size.
|
||||
|
|
@ -792,15 +793,17 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize
|
|||
// dc takes precedence over compressor.
|
||||
// TODO(dfawley): wrap the old compressor/decompressor using the new API?
|
||||
func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error {
|
||||
d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
|
||||
buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := c.Unmarshal(d, m); err != nil {
|
||||
if err := c.Unmarshal(buf, m); err != nil {
|
||||
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
|
||||
}
|
||||
if payInfo != nil {
|
||||
payInfo.uncompressedBytes = d
|
||||
payInfo.uncompressedBytes = buf
|
||||
} else {
|
||||
p.recvBufferPool.Put(&buf)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
27 vendor/google.golang.org/grpc/server.go generated vendored
@ -174,6 +174,7 @@ type serverOptions struct {
|
|||
maxHeaderListSize *uint32
|
||||
headerTableSize *uint32
|
||||
numServerWorkers uint32
|
||||
recvBufferPool SharedBufferPool
|
||||
}
|
||||
|
||||
var defaultServerOptions = serverOptions{
|
||||
|
|
@ -182,6 +183,7 @@ var defaultServerOptions = serverOptions{
|
|||
connectionTimeout: 120 * time.Second,
|
||||
writeBufferSize: defaultWriteBufSize,
|
||||
readBufferSize: defaultReadBufSize,
|
||||
recvBufferPool: nopBufferPool{},
|
||||
}
|
||||
var globalServerOptions []ServerOption
|
||||
|
||||
|
|
@ -552,6 +554,27 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption {
|
|||
})
|
||||
}
|
||||
|
||||
// RecvBufferPool returns a ServerOption that configures the server
|
||||
// to use the provided shared buffer pool for parsing incoming messages. Depending
|
||||
// on the application's workload, this could result in reduced memory allocation.
|
||||
//
|
||||
// If you are unsure about how to implement a memory pool but want to utilize one,
|
||||
// begin with grpc.NewSharedBufferPool.
|
||||
//
|
||||
// Note: The shared buffer pool feature will not be active if any of the following
|
||||
// options are used: StatsHandler, EnableTracing, or binary logging. In such
|
||||
// cases, the shared buffer pool will be ignored.
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
func RecvBufferPool(bufferPool SharedBufferPool) ServerOption {
|
||||
return newFuncServerOption(func(o *serverOptions) {
|
||||
o.recvBufferPool = bufferPool
|
||||
})
|
||||
}
|
||||
|
||||
// serverWorkerResetThreshold defines how often the stack must be reset. Every
|
||||
// N requests, by spawning a new goroutine in its place, a worker can reset its
|
||||
// stack so that large stacks don't live in memory forever. 2^16 should allow
|
||||
|
|
@ -1296,7 +1319,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||
if len(shs) != 0 || len(binlogs) != 0 {
|
||||
payInfo = &payloadInfo{}
|
||||
}
|
||||
d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
|
||||
d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
|
||||
if err != nil {
|
||||
if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
|
||||
channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
|
||||
|
|
@ -1506,7 +1529,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
|||
ctx: ctx,
|
||||
t: t,
|
||||
s: stream,
|
||||
p: &parser{r: stream},
|
||||
p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool},
|
||||
codec: s.getCodec(stream.ContentSubtype()),
|
||||
maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
|
||||
maxSendMessageSize: s.opts.maxSendMessageSize,
|
||||
|
|
|
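Illustration (not part of the diff): RecvBufferPool is the server-side counterpart of WithRecvBufferPool shown earlier. A hedged usage sketch follows; the listen address is a placeholder and the same experimental caveats from the comment above apply.

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":50051") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	// Share receive buffers across streams on this server (experimental).
	srv := grpc.NewServer(grpc.RecvBufferPool(grpc.NewSharedBufferPool()))
	if err := srv.Serve(lis); err != nil {
		log.Fatal(err)
	}
}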
154 vendor/google.golang.org/grpc/shared_buffer_pool.go generated vendored Normal file
@ -0,0 +1,154 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2023 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import "sync"
|
||||
|
||||
// SharedBufferPool is a pool of buffers that can be shared, resulting in
|
||||
// decreased memory allocation. Currently, in gRPC-go, it is only utilized
|
||||
// for parsing incoming messages.
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
type SharedBufferPool interface {
|
||||
// Get returns a buffer with specified length from the pool.
|
||||
//
|
||||
// The returned byte slice may be not zero initialized.
|
||||
Get(length int) []byte
|
||||
|
||||
// Put returns a buffer to the pool.
|
||||
Put(*[]byte)
|
||||
}
|
||||
|
||||
// NewSharedBufferPool creates a simple SharedBufferPool with buckets
|
||||
// of different sizes to optimize memory usage. This prevents the pool from
|
||||
// wasting large amounts of memory, even when handling messages of varying sizes.
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
func NewSharedBufferPool() SharedBufferPool {
|
||||
return &simpleSharedBufferPool{
|
||||
pools: [poolArraySize]simpleSharedBufferChildPool{
|
||||
newBytesPool(level0PoolMaxSize),
|
||||
newBytesPool(level1PoolMaxSize),
|
||||
newBytesPool(level2PoolMaxSize),
|
||||
newBytesPool(level3PoolMaxSize),
|
||||
newBytesPool(level4PoolMaxSize),
|
||||
newBytesPool(0),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// simpleSharedBufferPool is a simple implementation of SharedBufferPool.
|
||||
type simpleSharedBufferPool struct {
|
||||
pools [poolArraySize]simpleSharedBufferChildPool
|
||||
}
|
||||
|
||||
func (p *simpleSharedBufferPool) Get(size int) []byte {
|
||||
return p.pools[p.poolIdx(size)].Get(size)
|
||||
}
|
||||
|
||||
func (p *simpleSharedBufferPool) Put(bs *[]byte) {
|
||||
p.pools[p.poolIdx(cap(*bs))].Put(bs)
|
||||
}
|
||||
|
||||
func (p *simpleSharedBufferPool) poolIdx(size int) int {
|
||||
switch {
|
||||
case size <= level0PoolMaxSize:
|
||||
return level0PoolIdx
|
||||
case size <= level1PoolMaxSize:
|
||||
return level1PoolIdx
|
||||
case size <= level2PoolMaxSize:
|
||||
return level2PoolIdx
|
||||
case size <= level3PoolMaxSize:
|
||||
return level3PoolIdx
|
||||
case size <= level4PoolMaxSize:
|
||||
return level4PoolIdx
|
||||
default:
|
||||
return levelMaxPoolIdx
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
level0PoolMaxSize = 16 // 16 B
|
||||
level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B
|
||||
level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB
|
||||
level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB
|
||||
level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB
|
||||
)
|
||||
|
||||
const (
|
||||
level0PoolIdx = iota
|
||||
level1PoolIdx
|
||||
level2PoolIdx
|
||||
level3PoolIdx
|
||||
level4PoolIdx
|
||||
levelMaxPoolIdx
|
||||
poolArraySize
|
||||
)
|
||||
|
||||
type simpleSharedBufferChildPool interface {
|
||||
Get(size int) []byte
|
||||
Put(interface{})
|
||||
}
|
||||
|
||||
type bufferPool struct {
|
||||
sync.Pool
|
||||
|
||||
defaultSize int
|
||||
}
|
||||
|
||||
func (p *bufferPool) Get(size int) []byte {
|
||||
bs := p.Pool.Get().(*[]byte)
|
||||
|
||||
if cap(*bs) < size {
|
||||
p.Pool.Put(bs)
|
||||
|
||||
return make([]byte, size)
|
||||
}
|
||||
|
||||
return (*bs)[:size]
|
||||
}
|
||||
|
||||
func newBytesPool(size int) simpleSharedBufferChildPool {
|
||||
return &bufferPool{
|
||||
Pool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
bs := make([]byte, size)
|
||||
return &bs
|
||||
},
|
||||
},
|
||||
defaultSize: size,
|
||||
}
|
||||
}
|
||||
|
||||
// nopBufferPool is a buffer pool just makes new buffer without pooling.
|
||||
type nopBufferPool struct {
|
||||
}
|
||||
|
||||
func (nopBufferPool) Get(length int) []byte {
|
||||
return make([]byte, length)
|
||||
}
|
||||
|
||||
func (nopBufferPool) Put(*[]byte) {
|
||||
}
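Illustration (not part of the diff): SharedBufferPool is a two-method interface, so callers are free to supply their own pool instead of grpc.NewSharedBufferPool's bucketed one. The sketch below is a deliberately simple implementation backed by a single sync.Pool; it satisfies the interface above but skips size bucketing, so treat it as a starting point rather than a tuned pool.

package main

import (
	"fmt"
	"sync"
)

// singlePool implements the Get/Put shape of grpc.SharedBufferPool with one
// sync.Pool and no size buckets (unlike NewSharedBufferPool above).
type singlePool struct{ p sync.Pool }

func (s *singlePool) Get(length int) []byte {
	if b, ok := s.p.Get().(*[]byte); ok && cap(*b) >= length {
		return (*b)[:length]
	}
	return make([]byte, length)
}

func (s *singlePool) Put(b *[]byte) { s.p.Put(b) }

func main() {
	pool := &singlePool{}
	buf := pool.Get(1024)
	fmt.Println(len(buf), cap(buf)) // 1024 1024
	pool.Put(&buf)
	// pool could now be passed to grpc.WithRecvBufferPool or grpc.RecvBufferPool.
}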
4 vendor/google.golang.org/grpc/stream.go generated vendored
@ -507,7 +507,7 @@ func (a *csAttempt) newStream() error {
		return toRPCErr(nse.Err)
	}
	a.s = s
	a.p = &parser{r: s}
	a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool}
	return nil
}
@ -1270,7 +1270,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
		return nil, err
	}
	as.s = s
	as.p = &parser{r: s}
	as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool}
	ac.incrCallsStarted()
	if desc != unaryStreamDesc {
		// Listen on stream context to cleanup when the stream context is
2 vendor/google.golang.org/grpc/version.go generated vendored
@ -19,4 +19,4 @@
package grpc

// Version is the current grpc version.
const Version = "1.56.2"
const Version = "1.57.0"
26 vendor/modules.txt vendored
@ -119,7 +119,7 @@ github.com/acarl005/stripansi
|
|||
# github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2
|
||||
## explicit; go 1.13
|
||||
github.com/asaskevich/govalidator
|
||||
# github.com/aws/aws-sdk-go v1.44.316
|
||||
# github.com/aws/aws-sdk-go v1.44.318
|
||||
## explicit; go 1.11
|
||||
github.com/aws/aws-sdk-go/aws
|
||||
github.com/aws/aws-sdk-go/aws/arn
|
||||
|
|
@ -193,7 +193,7 @@ github.com/cespare/xxhash/v2
|
|||
# github.com/containers/common v0.55.2
|
||||
## explicit; go 1.18
|
||||
github.com/containers/common/pkg/retry
|
||||
# github.com/containers/image/v5 v5.26.1
|
||||
# github.com/containers/image/v5 v5.27.0
|
||||
## explicit; go 1.18
|
||||
github.com/containers/image/v5/copy
|
||||
github.com/containers/image/v5/directory/explicitfilepath
|
||||
|
|
@ -644,7 +644,7 @@ github.com/oracle/oci-go-sdk/v54/identity
|
|||
github.com/oracle/oci-go-sdk/v54/objectstorage
|
||||
github.com/oracle/oci-go-sdk/v54/objectstorage/transfer
|
||||
github.com/oracle/oci-go-sdk/v54/workrequests
|
||||
# github.com/osbuild/images v0.0.0-20230804084728-03212162ff49
|
||||
# github.com/osbuild/images v0.0.0-20230808122821-9548bf0d0140
|
||||
## explicit; go 1.19
|
||||
github.com/osbuild/images/internal/common
|
||||
github.com/osbuild/images/internal/dnfjson
|
||||
|
|
@ -864,7 +864,7 @@ go.opencensus.io/trace
|
|||
go.opencensus.io/trace/internal
|
||||
go.opencensus.io/trace/propagation
|
||||
go.opencensus.io/trace/tracestate
|
||||
# golang.org/x/crypto v0.11.0
|
||||
# golang.org/x/crypto v0.12.0
|
||||
## explicit; go 1.17
|
||||
golang.org/x/crypto/acme
|
||||
golang.org/x/crypto/acme/autocert
|
||||
|
|
@ -901,7 +901,7 @@ golang.org/x/exp/slices
|
|||
golang.org/x/mod/internal/lazyregexp
|
||||
golang.org/x/mod/module
|
||||
golang.org/x/mod/semver
|
||||
# golang.org/x/net v0.12.0
|
||||
# golang.org/x/net v0.14.0
|
||||
## explicit; go 1.17
|
||||
golang.org/x/net/context
|
||||
golang.org/x/net/html
|
||||
|
|
@ -913,8 +913,8 @@ golang.org/x/net/http2/hpack
|
|||
golang.org/x/net/idna
|
||||
golang.org/x/net/internal/timeseries
|
||||
golang.org/x/net/trace
|
||||
# golang.org/x/oauth2 v0.10.0
|
||||
## explicit; go 1.17
|
||||
# golang.org/x/oauth2 v0.11.0
|
||||
## explicit; go 1.18
|
||||
golang.org/x/oauth2
|
||||
golang.org/x/oauth2/authhandler
|
||||
golang.org/x/oauth2/google
|
||||
|
|
@ -925,7 +925,7 @@ golang.org/x/oauth2/jwt
|
|||
# golang.org/x/sync v0.3.0
|
||||
## explicit; go 1.17
|
||||
golang.org/x/sync/semaphore
|
||||
# golang.org/x/sys v0.10.0
|
||||
# golang.org/x/sys v0.11.0
|
||||
## explicit; go 1.17
|
||||
golang.org/x/sys/cpu
|
||||
golang.org/x/sys/execabs
|
||||
|
|
@ -933,10 +933,10 @@ golang.org/x/sys/internal/unsafeheader
|
|||
golang.org/x/sys/plan9
|
||||
golang.org/x/sys/unix
|
||||
golang.org/x/sys/windows
|
||||
# golang.org/x/term v0.10.0
|
||||
# golang.org/x/term v0.11.0
|
||||
## explicit; go 1.17
|
||||
golang.org/x/term
|
||||
# golang.org/x/text v0.11.0
|
||||
# golang.org/x/text v0.12.0
|
||||
## explicit; go 1.17
|
||||
golang.org/x/text/cases
|
||||
golang.org/x/text/internal
|
||||
|
|
@ -972,7 +972,7 @@ golang.org/x/tools/internal/typeparams
|
|||
## explicit; go 1.17
|
||||
golang.org/x/xerrors
|
||||
golang.org/x/xerrors/internal
|
||||
# google.golang.org/api v0.134.0
|
||||
# google.golang.org/api v0.135.0
|
||||
## explicit; go 1.19
|
||||
google.golang.org/api/googleapi
|
||||
google.golang.org/api/googleapi/transport
|
||||
|
|
@ -1014,12 +1014,12 @@ google.golang.org/genproto/internal
|
|||
## explicit; go 1.19
|
||||
google.golang.org/genproto/googleapis/api
|
||||
google.golang.org/genproto/googleapis/api/annotations
|
||||
# google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771
|
||||
# google.golang.org/genproto/googleapis/rpc v0.0.0-20230726155614-23370e0ffb3e
|
||||
## explicit; go 1.19
|
||||
google.golang.org/genproto/googleapis/rpc/code
|
||||
google.golang.org/genproto/googleapis/rpc/errdetails
|
||||
google.golang.org/genproto/googleapis/rpc/status
|
||||
# google.golang.org/grpc v1.56.2
|
||||
# google.golang.org/grpc v1.57.0
|
||||
## explicit; go 1.17
|
||||
google.golang.org/grpc
|
||||
google.golang.org/grpc/attributes
|
||||