Update osbuild/images to v0.56.0

Signed-off-by: Tomáš Hozza <thozza@redhat.com>
Authored by Tomáš Hozza on 2024-04-23 09:09:17 +02:00, committed by Achilleas Koutsou
parent 3df0c3a631
commit 5028a8c99d
159 changed files with 13207 additions and 3630 deletions

go.mod (26 changed lines)

@@ -16,7 +16,7 @@ require (
github.com/Azure/go-autorest/autorest v0.11.29
github.com/Azure/go-autorest/autorest/azure/auth v0.5.12
github.com/BurntSushi/toml v1.3.2
github.com/aws/aws-sdk-go v1.51.19
github.com/aws/aws-sdk-go v1.51.25
github.com/coreos/go-semver v0.3.1
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
github.com/deepmap/oapi-codegen v1.8.2
@@ -36,7 +36,7 @@ require (
github.com/labstack/gommon v0.4.2
github.com/openshift-online/ocm-sdk-go v0.1.398
github.com/oracle/oci-go-sdk/v54 v54.0.0
github.com/osbuild/images v0.55.0
github.com/osbuild/images v0.56.0
github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20231117174845-e969a9dc3cd1
github.com/osbuild/pulp-client v0.1.0
github.com/prometheus/client_golang v1.18.0
@@ -45,17 +45,19 @@ require (
github.com/spf13/cobra v1.8.0
github.com/stretchr/testify v1.9.0
github.com/ubccr/kerby v0.0.0-20170626144437-201a958fc453
github.com/vmware/govmomi v0.36.3
github.com/vmware/govmomi v0.37.1
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
golang.org/x/oauth2 v0.19.0
golang.org/x/sync v0.6.0
golang.org/x/sys v0.19.0
google.golang.org/api v0.172.0
google.golang.org/api v0.175.0
)
require (
cloud.google.com/go v0.112.1 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/auth v0.2.2 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.1 // indirect
cloud.google.com/go/compute/metadata v0.3.0 // indirect
cloud.google.com/go/iam v1.1.7 // indirect
dario.cat/mergo v1.0.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
@@ -81,7 +83,7 @@ require (
github.com/containerd/cgroups/v3 v3.0.2 // indirect
github.com/containerd/errdefs v0.1.0 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
github.com/containers/common v0.58.1 // indirect
github.com/containers/common v0.58.2 // indirect
github.com/containers/image/v5 v5.30.0 // indirect
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
github.com/containers/ocicrypt v1.1.9 // indirect
@@ -200,17 +202,17 @@ require (
go.opentelemetry.io/otel v1.24.0 // indirect
go.opentelemetry.io/otel/metric v1.24.0 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect
golang.org/x/crypto v0.21.0 // indirect
golang.org/x/crypto v0.22.0 // indirect
golang.org/x/mod v0.15.0 // indirect
golang.org/x/net v0.22.0 // indirect
golang.org/x/term v0.18.0 // indirect
golang.org/x/net v0.24.0 // indirect
golang.org/x/term v0.19.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.5.0 // indirect
golang.org/x/tools v0.18.0 // indirect
google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
google.golang.org/grpc v1.62.1 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be // indirect
google.golang.org/grpc v1.63.2 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect

go.sum (52 changed lines)

@@ -1,10 +1,14 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM=
cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4=
cloud.google.com/go/auth v0.2.2 h1:gmxNJs4YZYcw6YvKRtVBaF2fyUE6UrWPyzU8jHvYfmI=
cloud.google.com/go/auth v0.2.2/go.mod h1:2bDNJWtWziDT3Pu1URxHHbkHE/BbOCuyUiKIGcNvafo=
cloud.google.com/go/auth/oauth2adapt v0.2.1 h1:VSPmMmUlT8CkIZ2PzD9AlLN+R3+D1clXMWHHa6vG/Ag=
cloud.google.com/go/auth/oauth2adapt v0.2.1/go.mod h1:tOdK/k+D2e4GEwfBRA48dKNQiDsqIXxLh7VU319eV0g=
cloud.google.com/go/compute v1.25.1 h1:ZRpHJedLtTpKgr3RV1Fx23NuaAEN1Zfx9hw1u4aJdjU=
cloud.google.com/go/compute v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//uf3IB011ls=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM=
cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA=
cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw=
@@ -76,8 +80,8 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat6
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/aws/aws-sdk-go v1.51.19 h1:jp/Vx/mUpXttthvvo/4/Nn/3+zumirIlAFkp1Irf1kM=
github.com/aws/aws-sdk-go v1.51.19/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.51.25 h1:DjTT8mtmsachhV6yrXR8+yhnG6120dazr720nopRsls=
github.com/aws/aws-sdk-go v1.51.25/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -98,8 +102,8 @@ github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
github.com/containers/common v0.58.1 h1:E1DN9Lr7kgMVQy7AXLv1CYQCiqnweklMiYWbf0KOnqY=
github.com/containers/common v0.58.1/go.mod h1:l3vMqanJGj7tZ3W/i76gEJ128VXgFUO1tLaohJXPvdk=
github.com/containers/common v0.58.2 h1:5nu9lQz4QNSgovNk7NRk33SkqkVNKYoXh7L6gXmACow=
github.com/containers/common v0.58.2/go.mod h1:l3vMqanJGj7tZ3W/i76gEJ128VXgFUO1tLaohJXPvdk=
github.com/containers/image/v5 v5.30.0 h1:CmHeSwI6W2kTRWnUsxATDFY5TEX4b58gPkaQcEyrLIA=
github.com/containers/image/v5 v5.30.0/go.mod h1:gSD8MVOyqBspc0ynLsuiMR9qmt8UQ4jpVImjmK0uXfk=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
@@ -498,8 +502,8 @@ github.com/openshift-online/ocm-sdk-go v0.1.398 h1:6C1mDcPxzG4jSduOaWixTTI5gSEO+
github.com/openshift-online/ocm-sdk-go v0.1.398/go.mod h1:tke8vKcE7eHKyRbkJv6qo4ljo919zhx04uyQTcgF5cQ=
github.com/oracle/oci-go-sdk/v54 v54.0.0 h1:CDLjeSejv2aDpElAJrhKpi6zvT/zhZCZuXchUUZ+LS4=
github.com/oracle/oci-go-sdk/v54 v54.0.0/go.mod h1:+t+yvcFGVp+3ZnztnyxqXfQDsMlq8U25faBLa+mqCMc=
github.com/osbuild/images v0.55.0 h1:zG++7pqJMG3z/DLA92bJ2eJIBn1AvIBmpSK3k0YXUlc=
github.com/osbuild/images v0.55.0/go.mod h1:FaW0Y+Uau77GT/uJFn6xhAzPGOKsmp12qHWqRmOpEVc=
github.com/osbuild/images v0.56.0 h1:4/CMo4JgvPwD0lNi9HCExERc16RnPdXBQ9Ud6BRqAWg=
github.com/osbuild/images v0.56.0/go.mod h1:N22xBt8wITxz8JfWZNY0FCl/tisaNqmGAz+GN9c7OEU=
github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20231117174845-e969a9dc3cd1 h1:UFEJIcPa46W8gtWgOYzriRKYyy1t6SWL0BI7fPTuVvc=
github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20231117174845-e969a9dc3cd1/go.mod h1:z+WA+dX6qMwc7fqY5jCzESDIlg4WR2sBQezxsoXv9Ik=
github.com/osbuild/pulp-client v0.1.0 h1:L0C4ezBJGTamN3BKdv+rKLuq/WxXJbsFwz/Hj7aEmJ8=
@@ -612,8 +616,8 @@ github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinC
github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
github.com/vbauerster/mpb/v8 v8.7.2 h1:SMJtxhNho1MV3OuFgS1DAzhANN1Ejc5Ct+0iSaIkB14=
github.com/vbauerster/mpb/v8 v8.7.2/go.mod h1:ZFnrjzspgDHoxYLGvxIruiNk73GNTPG4YHgVNpR10VY=
github.com/vmware/govmomi v0.36.3 h1:1Ng3CBNQVbFjCQbKtfsewy5o3dFa+EoTjqeThVISUBc=
github.com/vmware/govmomi v0.36.3/go.mod h1:mtGWtM+YhTADHlCgJBiskSRPOZRsN9MSjPzaZLte/oQ=
github.com/vmware/govmomi v0.37.1 h1:SpI+Ofq+lC1zsLcJ9szLSb7fL4TypReVvUoWIgk2b6U=
github.com/vmware/govmomi v0.37.1/go.mod h1:mtGWtM+YhTADHlCgJBiskSRPOZRsN9MSjPzaZLte/oQ=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
@@ -682,8 +686,8 @@ golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ=
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
@@ -717,8 +721,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg=
golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8=
@@ -772,8 +776,8 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -819,26 +823,26 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
google.golang.org/api v0.172.0 h1:/1OcMZGPmW1rX2LCu2CmGUD1KXK1+pfzxotxyRUCCdk=
google.golang.org/api v0.172.0/go.mod h1:+fJZq6QXWfa9pXhnIzsjx4yI22d4aI9ZpLb58gvXjis=
google.golang.org/api v0.175.0 h1:9bMDh10V9cBuU8N45Wlc3cKkItfqMRV0Fi8UscLEtbY=
google.golang.org/api v0.175.0/go.mod h1:Rra+ltKu14pps/4xTycZfobMgLpbosoaaL7c+SEMrO8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y=
google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s=
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo=
google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c h1:kaI7oewGK5YnVwj+Y+EJBO/YN1ht8iTL9XkFHtVZLsc=
google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be h1:LG9vZxsWGOmUKieR8wPAUR3u3MpnYFQZROPIMaXh7/A=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

vendor/cloud.google.com/go/auth/CHANGES.md (new generated vendored file, 94 lines)

@@ -0,0 +1,94 @@
# Changelog
## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.1...auth/v0.2.2) (2024-04-19)
### Bug Fixes
* **auth:** Add internal opt to skip validation on transports ([#9999](https://github.com/googleapis/google-cloud-go/issues/9999)) ([9e20ef8](https://github.com/googleapis/google-cloud-go/commit/9e20ef89f6287d6bd03b8697d5898dc43b4a77cf)), refs [#9823](https://github.com/googleapis/google-cloud-go/issues/9823)
* **auth:** Set secure flag for gRPC conn pools ([#10002](https://github.com/googleapis/google-cloud-go/issues/10002)) ([14e3956](https://github.com/googleapis/google-cloud-go/commit/14e3956dfd736399731b5ee8d9b178ae085cf7ba)), refs [#9833](https://github.com/googleapis/google-cloud-go/issues/9833)
## [0.2.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.0...auth/v0.2.1) (2024-04-18)
### Bug Fixes
* **auth:** Default gRPC token type to Bearer if not set ([#9800](https://github.com/googleapis/google-cloud-go/issues/9800)) ([5284066](https://github.com/googleapis/google-cloud-go/commit/5284066670b6fe65d79089cfe0199c9660f87fc7))
## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.1.1...auth/v0.2.0) (2024-04-15)
### Breaking Changes
In the below mentioned commits there were a few large breaking changes since the
last release of the module.
1. The `Credentials` type has been moved to the root of the module as it is
becoming the core abstraction for the whole module.
2. Because of the above mentioned change many functions that previously
returned a `TokenProvider` now return `Credentials`. Similarly, these
functions have been renamed to be more specific.
3. Most places that used to take an optional `TokenProvider` now accept
`Credentials`. You can make a `Credentials` from a `TokenProvider` using the
constructor found in the `auth` package.
4. The `detect` package has been renamed to `credentials`. With this change some
function signatures were also updated for better readability.
5. Derivative auth flows like `impersonate` and `downscope` have been moved to
be under the new `credentials` package.
Although these changes are disruptive we think that they are for the best of the
long-term health of the module. We do not expect any more large breaking changes
like these in future revisions, even before 1.0.0. This version will be the
first version of the auth library that our client libraries start to use and
depend on.
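
A minimal sketch of what the restructured API looks like in practice, assuming Application Default Credentials are configured; the scope string is only an example, and this snippet is an editorial illustration rather than part of the upstream changelog:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/auth/credentials"
)

func main() {
	ctx := context.Background()

	// Detection now lives in the credentials package and returns
	// *auth.Credentials (which embeds a TokenProvider) instead of a
	// bare TokenProvider.
	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Minting a token goes through the embedded TokenProvider.
	tok, err := creds.Token(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tok.Type, tok.Expiry)
}
```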
### Features
* **auth/credentials/externalaccount:** Add default TokenURL ([#9700](https://github.com/googleapis/google-cloud-go/issues/9700)) ([81830e6](https://github.com/googleapis/google-cloud-go/commit/81830e6848ceefd055aa4d08f933d1154455a0f6))
* **auth:** Add downscope.Options.UniverseDomain ([#9634](https://github.com/googleapis/google-cloud-go/issues/9634)) ([52cf7d7](https://github.com/googleapis/google-cloud-go/commit/52cf7d780853594291c4e34302d618299d1f5a1d))
* **auth:** Add universe domain to grpctransport and httptransport ([#9663](https://github.com/googleapis/google-cloud-go/issues/9663)) ([67d353b](https://github.com/googleapis/google-cloud-go/commit/67d353beefe3b607c08c891876fbd95ab89e5fe3)), refs [#9670](https://github.com/googleapis/google-cloud-go/issues/9670)
* **auth:** Add UniverseDomain to DetectOptions ([#9536](https://github.com/googleapis/google-cloud-go/issues/9536)) ([3618d3f](https://github.com/googleapis/google-cloud-go/commit/3618d3f7061615c0e189f376c75abc201203b501))
* **auth:** Make package externalaccount public ([#9633](https://github.com/googleapis/google-cloud-go/issues/9633)) ([a0978d8](https://github.com/googleapis/google-cloud-go/commit/a0978d8e96968399940ebd7d092539772bf9caac))
* **auth:** Move credentials to base auth package ([#9590](https://github.com/googleapis/google-cloud-go/issues/9590)) ([1a04baf](https://github.com/googleapis/google-cloud-go/commit/1a04bafa83c27342b9308d785645e1e5423ea10d))
* **auth:** Refactor public sigs to use Credentials ([#9603](https://github.com/googleapis/google-cloud-go/issues/9603)) ([69cb240](https://github.com/googleapis/google-cloud-go/commit/69cb240c530b1f7173a9af2555c19e9a1beb56c5))
### Bug Fixes
* **auth/oauth2adapt:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
* **auth:** Fix uint32 conversion ([9221c7f](https://github.com/googleapis/google-cloud-go/commit/9221c7fa12cef9d5fb7ddc92f41f1d6204971c7b))
* **auth:** Port sts expires fix ([#9618](https://github.com/googleapis/google-cloud-go/issues/9618)) ([7bec97b](https://github.com/googleapis/google-cloud-go/commit/7bec97b2f51ed3ac4f9b88bf100d301da3f5d1bd))
* **auth:** Read universe_domain from all credentials files ([#9632](https://github.com/googleapis/google-cloud-go/issues/9632)) ([16efbb5](https://github.com/googleapis/google-cloud-go/commit/16efbb52e39ea4a319e5ee1e95c0e0305b6d9824))
* **auth:** Remove content-type header from idms get requests ([#9508](https://github.com/googleapis/google-cloud-go/issues/9508)) ([8589f41](https://github.com/googleapis/google-cloud-go/commit/8589f41599d265d7c3d46a3d86c9fab2329cbdd9))
* **auth:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
## [0.1.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.1.0...auth/v0.1.1) (2024-03-10)
### Bug Fixes
* **auth/impersonate:** Properly send default detect params ([#9529](https://github.com/googleapis/google-cloud-go/issues/9529)) ([5b6b8be](https://github.com/googleapis/google-cloud-go/commit/5b6b8bef577f82707e51f5cc5d258d5bdf90218f)), refs [#9136](https://github.com/googleapis/google-cloud-go/issues/9136)
* **auth:** Update grpc-go to v1.56.3 ([343cea8](https://github.com/googleapis/google-cloud-go/commit/343cea8c43b1e31ae21ad50ad31d3b0b60143f8c))
* **auth:** Update grpc-go to v1.59.0 ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7))
## 0.1.0 (2023-10-18)
### Features
* **auth:** Add base auth package ([#8465](https://github.com/googleapis/google-cloud-go/issues/8465)) ([6a45f26](https://github.com/googleapis/google-cloud-go/commit/6a45f26b809b64edae21f312c18d4205f96b180e))
* **auth:** Add cert support to httptransport ([#8569](https://github.com/googleapis/google-cloud-go/issues/8569)) ([37e3435](https://github.com/googleapis/google-cloud-go/commit/37e3435f8e98595eafab481bdfcb31a4c56fa993))
* **auth:** Add Credentials.UniverseDomain() ([#8654](https://github.com/googleapis/google-cloud-go/issues/8654)) ([af0aa1e](https://github.com/googleapis/google-cloud-go/commit/af0aa1ed8015bc8fe0dd87a7549ae029107cbdb8))
* **auth:** Add detect package ([#8491](https://github.com/googleapis/google-cloud-go/issues/8491)) ([d977419](https://github.com/googleapis/google-cloud-go/commit/d977419a3269f6acc193df77a2136a6eb4b4add7))
* **auth:** Add downscope package ([#8532](https://github.com/googleapis/google-cloud-go/issues/8532)) ([dda9bff](https://github.com/googleapis/google-cloud-go/commit/dda9bff8ec70e6d104901b4105d13dcaa4e2404c))
* **auth:** Add grpctransport package ([#8625](https://github.com/googleapis/google-cloud-go/issues/8625)) ([69a8347](https://github.com/googleapis/google-cloud-go/commit/69a83470bdcc7ed10c6c36d1abc3b7cfdb8a0ee5))
* **auth:** Add httptransport package ([#8567](https://github.com/googleapis/google-cloud-go/issues/8567)) ([6898597](https://github.com/googleapis/google-cloud-go/commit/6898597d2ea95d630fcd00fd15c58c75ea843bff))
* **auth:** Add idtoken package ([#8580](https://github.com/googleapis/google-cloud-go/issues/8580)) ([a79e693](https://github.com/googleapis/google-cloud-go/commit/a79e693e97e4e3e1c6742099af3dbc58866d88fe))
* **auth:** Add impersonate package ([#8578](https://github.com/googleapis/google-cloud-go/issues/8578)) ([e29ba0c](https://github.com/googleapis/google-cloud-go/commit/e29ba0cb7bd3888ab9e808087027dc5a32474c04))
* **auth:** Add support for external accounts in detect ([#8508](https://github.com/googleapis/google-cloud-go/issues/8508)) ([62210d5](https://github.com/googleapis/google-cloud-go/commit/62210d5d3e56e8e9f35db8e6ac0defec19582507))
* **auth:** Port external account changes ([#8697](https://github.com/googleapis/google-cloud-go/issues/8697)) ([5823db5](https://github.com/googleapis/google-cloud-go/commit/5823db5d633069999b58b9131a7f9cd77e82c899))
### Bug Fixes
* **auth/oauth2adapt:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
* **auth:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))

vendor/cloud.google.com/go/auth/LICENSE (new generated vendored file, 202 lines)

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/cloud.google.com/go/auth/README.md (new generated vendored file, 4 lines)

@@ -0,0 +1,4 @@
# auth
This module is currently EXPERIMENTAL and under active development. It is not
yet intended to be used.

vendor/cloud.google.com/go/auth/auth.go (new generated vendored file, 476 lines)

@@ -0,0 +1,476 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package auth
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"sync"
"time"
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/jwt"
)
const (
// Parameter keys for AuthCodeURL method to support PKCE.
codeChallengeKey = "code_challenge"
codeChallengeMethodKey = "code_challenge_method"
// Parameter key for Exchange method to support PKCE.
codeVerifierKey = "code_verifier"
// 3 minutes and 45 seconds before expiration. The shortest MDS cache is 4 minutes,
// so we give it 15 seconds to refresh its cache before attempting to refresh a token.
defaultExpiryDelta = 215 * time.Second
universeDomainDefault = "googleapis.com"
)
var (
defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
defaultHeader = &jwt.Header{Algorithm: jwt.HeaderAlgRSA256, Type: jwt.HeaderType}
// for testing
timeNow = time.Now
)
// TokenProvider specifies an interface for anything that can return a token.
type TokenProvider interface {
// Token returns a Token or an error.
// The Token returned must be safe to use
// concurrently.
// The returned Token must not be modified.
// The context provided must be sent along to any requests that are made in
// the implementing code.
Token(context.Context) (*Token, error)
}
// Token holds the credential token used to authorize requests. All fields are
// considered read-only.
type Token struct {
// Value is the token used to authorize requests. It is usually an access
// token but may be other types of tokens such as ID tokens in some flows.
Value string
// Type is the type of token Value is. If uninitialized, it should be
// assumed to be a "Bearer" token.
Type string
// Expiry is the time the token is set to expire.
Expiry time.Time
// Metadata may include, but is not limited to, the body of the token
// response returned by the server.
Metadata map[string]interface{} // TODO(codyoss): maybe make a method to flatten metadata to avoid []string for url.Values
}
// IsValid reports whether a [Token] is non-nil, has a [Token.Value], and has not
// expired. A token is considered expired if [Token.Expiry] has passed or will
// pass in the next 10 seconds.
func (t *Token) IsValid() bool {
return t.isValidWithEarlyExpiry(defaultExpiryDelta)
}
func (t *Token) isValidWithEarlyExpiry(earlyExpiry time.Duration) bool {
if t == nil || t.Value == "" {
return false
}
if t.Expiry.IsZero() {
return true
}
return !t.Expiry.Round(0).Add(-earlyExpiry).Before(timeNow())
}
// Credentials holds Google credentials, including
// [Application Default Credentials](https://developers.google.com/accounts/docs/application-default-credentials).
type Credentials struct {
json []byte
projectID CredentialsPropertyProvider
quotaProjectID CredentialsPropertyProvider
// universeDomain is the default service domain for a given Cloud universe.
universeDomain CredentialsPropertyProvider
TokenProvider
}
// JSON returns the bytes associated with the file used to source
// credentials if one was used.
func (c *Credentials) JSON() []byte {
return c.json
}
// ProjectID returns the associated project ID from the underlying file or
// environment.
func (c *Credentials) ProjectID(ctx context.Context) (string, error) {
if c.projectID == nil {
return internal.GetProjectID(c.json, ""), nil
}
v, err := c.projectID.GetProperty(ctx)
if err != nil {
return "", err
}
return internal.GetProjectID(c.json, v), nil
}
// QuotaProjectID returns the associated quota project ID from the underlying
// file or environment.
func (c *Credentials) QuotaProjectID(ctx context.Context) (string, error) {
if c.quotaProjectID == nil {
return internal.GetQuotaProject(c.json, ""), nil
}
v, err := c.quotaProjectID.GetProperty(ctx)
if err != nil {
return "", err
}
return internal.GetQuotaProject(c.json, v), nil
}
// UniverseDomain returns the default service domain for a given Cloud universe.
// The default value is "googleapis.com".
func (c *Credentials) UniverseDomain(ctx context.Context) (string, error) {
if c.universeDomain == nil {
return universeDomainDefault, nil
}
v, err := c.universeDomain.GetProperty(ctx)
if err != nil {
return "", err
}
if v == "" {
return universeDomainDefault, nil
}
return v, err
}
// CredentialsPropertyProvider provides an implementation to fetch a property
// value for [Credentials].
type CredentialsPropertyProvider interface {
GetProperty(context.Context) (string, error)
}
// CredentialsPropertyFunc is a type adapter to allow the use of ordinary
// functions as a [CredentialsPropertyProvider].
type CredentialsPropertyFunc func(context.Context) (string, error)
// GetProperty loads the property value for the given context.
func (p CredentialsPropertyFunc) GetProperty(ctx context.Context) (string, error) {
return p(ctx)
}
// CredentialsOptions are used to configure [Credentials].
type CredentialsOptions struct {
// TokenProvider is a means of sourcing a token for the credentials. Required.
TokenProvider TokenProvider
// JSON is the raw contents of the credentials file if sourced from a file.
JSON []byte
// ProjectIDProvider resolves the project ID associated with the
// credentials.
ProjectIDProvider CredentialsPropertyProvider
// QuotaProjectIDProvider resolves the quota project ID associated with the
// credentials.
QuotaProjectIDProvider CredentialsPropertyProvider
// UniverseDomainProvider resolves the universe domain with the credentials.
UniverseDomainProvider CredentialsPropertyProvider
}
// NewCredentials returns new [Credentials] from the provided options. Most users
// will want to build this object using a function from the
// [cloud.google.com/go/auth/credentials] package.
func NewCredentials(opts *CredentialsOptions) *Credentials {
creds := &Credentials{
TokenProvider: opts.TokenProvider,
json: opts.JSON,
projectID: opts.ProjectIDProvider,
quotaProjectID: opts.QuotaProjectIDProvider,
universeDomain: opts.UniverseDomainProvider,
}
return creds
}
// CachedTokenProviderOptions provides options for configuring a
// CachedTokenProvider.
type CachedTokenProviderOptions struct {
// DisableAutoRefresh makes the TokenProvider always return the same token,
// even if it is expired.
DisableAutoRefresh bool
// ExpireEarly configures the amount of time before a token expires, that it
// should be refreshed. If unset, the default value is 10 seconds.
ExpireEarly time.Duration
}
func (ctpo *CachedTokenProviderOptions) autoRefresh() bool {
if ctpo == nil {
return true
}
return !ctpo.DisableAutoRefresh
}
func (ctpo *CachedTokenProviderOptions) expireEarly() time.Duration {
if ctpo == nil {
return defaultExpiryDelta
}
return ctpo.ExpireEarly
}
// NewCachedTokenProvider wraps a [TokenProvider] to cache the tokens returned
// by the underlying provider. By default it will refresh tokens ten seconds
// before they expire, but this time can be configured with the optional
// options.
func NewCachedTokenProvider(tp TokenProvider, opts *CachedTokenProviderOptions) TokenProvider {
if ctp, ok := tp.(*cachedTokenProvider); ok {
return ctp
}
return &cachedTokenProvider{
tp: tp,
autoRefresh: opts.autoRefresh(),
expireEarly: opts.expireEarly(),
}
}
type cachedTokenProvider struct {
tp TokenProvider
autoRefresh bool
expireEarly time.Duration
mu sync.Mutex
cachedToken *Token
}
func (c *cachedTokenProvider) Token(ctx context.Context) (*Token, error) {
c.mu.Lock()
defer c.mu.Unlock()
if c.cachedToken.IsValid() || !c.autoRefresh {
return c.cachedToken, nil
}
t, err := c.tp.Token(ctx)
if err != nil {
return nil, err
}
c.cachedToken = t
return t, nil
}
// Error is an error associated with retrieving a [Token]. It can hold useful
// additional details for debugging.
type Error struct {
// Response is the HTTP response associated with the error. The body will always
// be already closed and consumed.
Response *http.Response
// Body is the HTTP response body.
Body []byte
// Err is the underlying wrapped error.
Err error
// code returned in the token response
code string
// description returned in the token response
description string
// uri returned in the token response
uri string
}
func (e *Error) Error() string {
if e.code != "" {
s := fmt.Sprintf("auth: %q", e.code)
if e.description != "" {
s += fmt.Sprintf(" %q", e.description)
}
if e.uri != "" {
s += fmt.Sprintf(" %q", e.uri)
}
return s
}
return fmt.Sprintf("auth: cannot fetch token: %v\nResponse: %s", e.Response.StatusCode, e.Body)
}
// Temporary returns true if the error is considered temporary and may be able
// to be retried.
func (e *Error) Temporary() bool {
if e.Response == nil {
return false
}
sc := e.Response.StatusCode
return sc == http.StatusInternalServerError || sc == http.StatusServiceUnavailable || sc == http.StatusRequestTimeout || sc == http.StatusTooManyRequests
}
func (e *Error) Unwrap() error {
return e.Err
}
// Style describes how the token endpoint wants to receive the ClientID and
// ClientSecret.
type Style int
const (
// StyleUnknown means the value has not been initialized. Sending this in
// a request will cause the token exchange to fail.
StyleUnknown Style = iota
// StyleInParams sends client info in the body of a POST request.
StyleInParams
// StyleInHeader sends client info using Basic Authorization header.
StyleInHeader
)
// Options2LO is the configuration settings for doing a 2-legged JWT OAuth2 flow.
type Options2LO struct {
// Email is the OAuth2 client ID. This value is set as the "iss" in the
// JWT.
Email string
// PrivateKey contains the contents of an RSA private key or the
// contents of a PEM file that contains a private key. It is used to sign
// the JWT created.
PrivateKey []byte
// TokenURL is the URL the JWT is sent to. Required.
TokenURL string
// PrivateKeyID is the ID of the key used to sign the JWT. It is used as the
// "kid" in the JWT header. Optional.
PrivateKeyID string
// Subject is the user to impersonate. It is used as the "sub" in
// the JWT. Optional.
Subject string
// Scopes specifies requested permissions for the token. Optional.
Scopes []string
// Expires specifies the lifetime of the token. Optional.
Expires time.Duration
// Audience specifies the "aud" in the JWT. Optional.
Audience string
// PrivateClaims allows specifying any custom claims for the JWT. Optional.
PrivateClaims map[string]interface{}
// Client is the client to be used to make the underlying token requests.
// Optional.
Client *http.Client
// UseIDToken requests that the token returned be an ID token if one is
// returned from the server. Optional.
UseIDToken bool
}
func (o *Options2LO) client() *http.Client {
if o.Client != nil {
return o.Client
}
return internal.CloneDefaultClient()
}
func (o *Options2LO) validate() error {
if o == nil {
return errors.New("auth: options must be provided")
}
if o.Email == "" {
return errors.New("auth: email must be provided")
}
if len(o.PrivateKey) == 0 {
return errors.New("auth: private key must be provided")
}
if o.TokenURL == "" {
return errors.New("auth: token URL must be provided")
}
return nil
}
// New2LOTokenProvider returns a [TokenProvider] from the provided options.
func New2LOTokenProvider(opts *Options2LO) (TokenProvider, error) {
if err := opts.validate(); err != nil {
return nil, err
}
return tokenProvider2LO{opts: opts, Client: opts.client()}, nil
}
type tokenProvider2LO struct {
opts *Options2LO
Client *http.Client
}
func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) {
pk, err := internal.ParseKey(tp.opts.PrivateKey)
if err != nil {
return nil, err
}
claimSet := &jwt.Claims{
Iss: tp.opts.Email,
Scope: strings.Join(tp.opts.Scopes, " "),
Aud: tp.opts.TokenURL,
AdditionalClaims: tp.opts.PrivateClaims,
Sub: tp.opts.Subject,
}
if t := tp.opts.Expires; t > 0 {
claimSet.Exp = time.Now().Add(t).Unix()
}
if aud := tp.opts.Audience; aud != "" {
claimSet.Aud = aud
}
h := *defaultHeader
h.KeyID = tp.opts.PrivateKeyID
payload, err := jwt.EncodeJWS(&h, claimSet, pk)
if err != nil {
return nil, err
}
v := url.Values{}
v.Set("grant_type", defaultGrantType)
v.Set("assertion", payload)
resp, err := tp.Client.PostForm(tp.opts.TokenURL, v)
if err != nil {
return nil, fmt.Errorf("auth: cannot fetch token: %w", err)
}
defer resp.Body.Close()
body, err := internal.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("auth: cannot fetch token: %w", err)
}
if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
return nil, &Error{
Response: resp,
Body: body,
}
}
// tokenRes is the JSON response body.
var tokenRes struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
IDToken string `json:"id_token"`
ExpiresIn int64 `json:"expires_in"`
}
if err := json.Unmarshal(body, &tokenRes); err != nil {
return nil, fmt.Errorf("auth: cannot fetch token: %w", err)
}
token := &Token{
Value: tokenRes.AccessToken,
Type: tokenRes.TokenType,
}
token.Metadata = make(map[string]interface{})
json.Unmarshal(body, &token.Metadata) // no error checks for optional fields
if secs := tokenRes.ExpiresIn; secs > 0 {
token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
}
if v := tokenRes.IDToken; v != "" {
// decode returned id token to get expiry
claimSet, err := jwt.DecodeJWS(v)
if err != nil {
return nil, fmt.Errorf("auth: error decoding JWT token: %w", err)
}
token.Expiry = time.Unix(claimSet.Exp, 0)
}
if tp.opts.UseIDToken {
if tokenRes.IDToken == "" {
return nil, fmt.Errorf("auth: response doesn't have JWT token")
}
token.Value = tokenRes.IDToken
}
return token, nil
}
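
For orientation, here is a short hypothetical sketch of how the TokenProvider interface, the Token type, and NewCachedTokenProvider defined in this file fit together; staticProvider and its token value are invented purely for illustration:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"cloud.google.com/go/auth"
)

// staticProvider is a toy TokenProvider; the token value is a placeholder.
type staticProvider struct{}

func (staticProvider) Token(ctx context.Context) (*auth.Token, error) {
	return &auth.Token{
		Value:  "example-access-token",
		Type:   "Bearer",
		Expiry: time.Now().Add(time.Hour),
	}, nil
}

func main() {
	ctx := context.Background()

	// Wrap the provider so repeated Token calls reuse the cached token
	// until it approaches expiry.
	tp := auth.NewCachedTokenProvider(staticProvider{}, &auth.CachedTokenProviderOptions{
		ExpireEarly: 30 * time.Second,
	})

	tok, err := tp.Token(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tok.IsValid()) // IsValid applies the package's default early-expiry window
}
```

The cached wrapper reuses the stored token until it nears expiry, so repeated Token calls do not hit the underlying provider every time.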

vendor/cloud.google.com/go/auth/credentials/compute.go (new generated vendored file, 85 lines)

@@ -0,0 +1,85 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package credentials
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/url"
"strings"
"time"
"cloud.google.com/go/auth"
"cloud.google.com/go/compute/metadata"
)
var (
computeTokenMetadata = map[string]interface{}{
"auth.google.tokenSource": "compute-metadata",
"auth.google.serviceAccount": "default",
}
computeTokenURI = "instance/service-accounts/default/token"
)
// computeTokenProvider creates a [cloud.google.com/go/auth.TokenProvider] that
// uses the metadata service to retrieve tokens.
func computeTokenProvider(earlyExpiry time.Duration, scope ...string) auth.TokenProvider {
return auth.NewCachedTokenProvider(computeProvider{scopes: scope}, &auth.CachedTokenProviderOptions{
ExpireEarly: earlyExpiry,
})
}
// computeProvider fetches tokens from the google cloud metadata service.
type computeProvider struct {
scopes []string
}
type metadataTokenResp struct {
AccessToken string `json:"access_token"`
ExpiresInSec int `json:"expires_in"`
TokenType string `json:"token_type"`
}
func (cs computeProvider) Token(ctx context.Context) (*auth.Token, error) {
tokenURI, err := url.Parse(computeTokenURI)
if err != nil {
return nil, err
}
if len(cs.scopes) > 0 {
v := url.Values{}
v.Set("scopes", strings.Join(cs.scopes, ","))
tokenURI.RawQuery = v.Encode()
}
tokenJSON, err := metadata.Get(tokenURI.String())
if err != nil {
return nil, err
}
var res metadataTokenResp
if err := json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res); err != nil {
return nil, fmt.Errorf("credentials: invalid token JSON from metadata: %w", err)
}
if res.ExpiresInSec == 0 || res.AccessToken == "" {
return nil, errors.New("credentials: incomplete token received from metadata")
}
return &auth.Token{
Value: res.AccessToken,
Type: res.TokenType,
Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second),
Metadata: computeTokenMetadata,
}, nil
}
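
As a rough illustration of the metadata exchange this provider performs, the sketch below decodes a made-up response with the same JSON shape the code above expects; the payload values are invented:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// sampleResp mimics the JSON returned by the metadata token endpoint; values are made up.
const sampleResp = `{"access_token":"ya29.example","expires_in":3599,"token_type":"Bearer"}`

func main() {
	var res struct {
		AccessToken  string `json:"access_token"`
		ExpiresInSec int    `json:"expires_in"`
		TokenType    string `json:"token_type"`
	}
	if err := json.Unmarshal([]byte(sampleResp), &res); err != nil {
		panic(err)
	}
	// computeProvider.Token turns this payload into an auth.Token with an absolute expiry.
	expiry := time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second)
	fmt.Println(res.TokenType, expiry.Round(time.Second))
}
```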

vendor/cloud.google.com/go/auth/credentials/detect.go (new generated vendored file, 249 lines)

@@ -0,0 +1,249 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package credentials
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"os"
"time"
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/credsfile"
"cloud.google.com/go/compute/metadata"
)
const (
// jwtTokenURL is Google's OAuth 2.0 token URL to use with the JWT(2LO) flow.
jwtTokenURL = "https://oauth2.googleapis.com/token"
// Google's OAuth 2.0 default endpoints.
googleAuthURL = "https://accounts.google.com/o/oauth2/auth"
googleTokenURL = "https://oauth2.googleapis.com/token"
// Help on default credentials
adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc"
)
var (
// for testing
allowOnGCECheck = true
)
// OnGCE reports whether this process is running in Google Cloud.
func OnGCE() bool {
// TODO(codyoss): once all libs use this auth lib move metadata check here
return allowOnGCECheck && metadata.OnGCE()
}
// DetectDefault searches for "Application Default Credentials" and returns
// a credential based on the [DetectOptions] provided.
//
// It looks for credentials in the following places, preferring the first
// location found:
//
// - A JSON file whose path is specified by the GOOGLE_APPLICATION_CREDENTIALS
// environment variable. For workload identity federation, refer to
// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation
// on how to generate the JSON configuration file for on-prem/non-Google
// cloud platforms.
// - A JSON file in a location known to the gcloud command-line tool. On
// Windows, this is %APPDATA%/gcloud/application_default_credentials.json. On
// other systems, $HOME/.config/gcloud/application_default_credentials.json.
// - On Google Compute Engine, Google App Engine standard second generation
// runtimes, and Google App Engine flexible environment, it fetches
// credentials from the metadata server.
func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) {
if err := opts.validate(); err != nil {
return nil, err
}
if opts.CredentialsJSON != nil {
return readCredentialsFileJSON(opts.CredentialsJSON, opts)
}
if filename := credsfile.GetFileNameFromEnv(opts.CredentialsFile); filename != "" {
if creds, err := readCredentialsFile(filename, opts); err == nil {
return creds, err
}
}
fileName := credsfile.GetWellKnownFileName()
if b, err := os.ReadFile(fileName); err == nil {
return readCredentialsFileJSON(b, opts)
}
if OnGCE() {
return auth.NewCredentials(&auth.CredentialsOptions{
TokenProvider: computeTokenProvider(opts.EarlyTokenRefresh, opts.Scopes...),
ProjectIDProvider: auth.CredentialsPropertyFunc(func(context.Context) (string, error) {
return metadata.ProjectID()
}),
UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{},
}), nil
}
return nil, fmt.Errorf("credentials: could not find default credentials. See %v for more information", adcSetupURL)
}
// DetectOptions provides configuration for [DetectDefault].
type DetectOptions struct {
// Scopes that credentials tokens should have. Example:
// https://www.googleapis.com/auth/cloud-platform. Required if Audience is
// not provided.
Scopes []string
// Audience that credentials tokens should have. Only applicable for 2LO
// flows with service accounts. If specified, scopes should not be provided.
Audience string
// Subject is the user email used for [domain wide delegation](https://developers.google.com/identity/protocols/oauth2/service-account#delegatingauthority).
// Optional.
Subject string
// EarlyTokenRefresh configures how early before a token expires that it
// should be refreshed.
EarlyTokenRefresh time.Duration
// AuthHandlerOptions configures an authorization handler and other options
// for 3LO flows. It is required, and only used, for client credential
// flows.
AuthHandlerOptions *auth.AuthorizationHandlerOptions
// TokenURL allows setting the token endpoint for user credential flows. If
// unset the default value is: https://oauth2.googleapis.com/token.
// Optional.
TokenURL string
// STSAudience is the audience sent when retrieving an STS token.
// Currently this is only used for the GDCH auth flow, for which it is required.
STSAudience string
// CredentialsFile overrides detection logic and sources a credential file
// from the provided filepath. If provided, CredentialsJSON must not be.
// Optional.
CredentialsFile string
// CredentialsJSON overrides detection logic and uses the JSON bytes as the
// source for the credential. If provided, CredentialsFile must not be.
// Optional.
CredentialsJSON []byte
// UseSelfSignedJWT directs service account based credentials to create a
// self-signed JWT with the private key found in the file, skipping any
// network requests that would normally be made. Optional.
UseSelfSignedJWT bool
// Client configures the underlying client used to make network requests
// when fetching tokens. Optional.
Client *http.Client
// UniverseDomain is the default service domain for a given Cloud universe.
// The default value is "googleapis.com". This option is ignored for
// authentication flows that do not support universe domain. Optional.
UniverseDomain string
}
func (o *DetectOptions) validate() error {
if o == nil {
return errors.New("credentials: options must be provided")
}
if len(o.Scopes) > 0 && o.Audience != "" {
return errors.New("credentials: both scopes and audience were provided")
}
if len(o.CredentialsJSON) > 0 && o.CredentialsFile != "" {
return errors.New("credentials: both credentials file and JSON were provided")
}
return nil
}
func (o *DetectOptions) tokenURL() string {
if o.TokenURL != "" {
return o.TokenURL
}
return googleTokenURL
}
func (o *DetectOptions) scopes() []string {
scopes := make([]string, len(o.Scopes))
copy(scopes, o.Scopes)
return scopes
}
func (o *DetectOptions) client() *http.Client {
if o.Client != nil {
return o.Client
}
return internal.CloneDefaultClient()
}
func readCredentialsFile(filename string, opts *DetectOptions) (*auth.Credentials, error) {
b, err := os.ReadFile(filename)
if err != nil {
return nil, err
}
return readCredentialsFileJSON(b, opts)
}
func readCredentialsFileJSON(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
// attempt to parse jsonData as a Google Developers Console client_credentials.json.
config := clientCredConfigFromJSON(b, opts)
if config != nil {
if config.AuthHandlerOpts == nil {
return nil, errors.New("credentials: auth handler must be specified for this credential filetype")
}
tp, err := auth.New3LOTokenProvider(config)
if err != nil {
return nil, err
}
return auth.NewCredentials(&auth.CredentialsOptions{
TokenProvider: tp,
JSON: b,
}), nil
}
return fileCredentials(b, opts)
}
func clientCredConfigFromJSON(b []byte, opts *DetectOptions) *auth.Options3LO {
var creds credsfile.ClientCredentialsFile
var c *credsfile.Config3LO
if err := json.Unmarshal(b, &creds); err != nil {
return nil
}
switch {
case creds.Web != nil:
c = creds.Web
case creds.Installed != nil:
c = creds.Installed
default:
return nil
}
if len(c.RedirectURIs) < 1 {
return nil
}
var handleOpts *auth.AuthorizationHandlerOptions
if opts.AuthHandlerOptions != nil {
handleOpts = &auth.AuthorizationHandlerOptions{
Handler: opts.AuthHandlerOptions.Handler,
State: opts.AuthHandlerOptions.State,
PKCEOpts: opts.AuthHandlerOptions.PKCEOpts,
}
}
return &auth.Options3LO{
ClientID: c.ClientID,
ClientSecret: c.ClientSecret,
RedirectURL: c.RedirectURIs[0],
Scopes: opts.scopes(),
AuthURL: c.AuthURI,
TokenURL: c.TokenURI,
Client: opts.client(),
EarlyTokenExpiry: opts.EarlyTokenRefresh,
AuthHandlerOpts: handleOpts,
// TODO(codyoss): refactor this out. We need to add in auto-detection
// for this use case.
AuthStyle: auth.StyleInParams,
}
}

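The detection flow above is easiest to see end to end from a caller. The sketch below is illustrative and not part of the vendored diff; it assumes the companion cloud.google.com/go/auth package exposes a Token method on Credentials (the provider wiring above suggests it does), and that ADC is configured in the environment.

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/auth/credentials"
)

func main() {
	ctx := context.Background()
	// Detect ADC from the environment (env var, gcloud well-known file, or
	// the metadata server, in the order documented above).
	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Token is assumed to be exposed by auth.Credentials and to delegate to
	// the token provider selected by DetectDefault.
	tok, err := creds.Token(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token expires at:", tok.Expiry)
}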
45
vendor/cloud.google.com/go/auth/credentials/doc.go generated vendored Normal file
View file

@ -0,0 +1,45 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package credentials provides support for making OAuth2 authorized and
// authenticated HTTP requests to Google APIs. It supports the Web server flow,
// client-side credentials, service accounts, Google Compute Engine service
// accounts, Google App Engine service accounts and workload identity federation
// from non-Google cloud platforms.
//
// A brief overview of the package follows. For more information, please read
// https://developers.google.com/accounts/docs/OAuth2
// and
// https://developers.google.com/accounts/docs/application-default-credentials.
// For more information on using workload identity federation, refer to
// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation.
//
// # Credentials
//
// The [cloud.google.com/go/auth.Credentials] type represents Google
// credentials, including Application Default Credentials.
//
// Use [DetectDefault] to obtain Application Default Credentials.
//
// Application Default Credentials support workload identity federation to
// access Google Cloud resources from non-Google Cloud platforms including Amazon
// Web Services (AWS), Microsoft Azure or any identity provider that supports
// OpenID Connect (OIDC). Workload identity federation is recommended for
// non-Google Cloud environments as it avoids the need to download, manage, and
// store service account private keys locally.
//
// # Workforce Identity Federation
//
// For more information on this feature see [cloud.google.com/go/auth/credentials/externalaccount].
package credentials

View file

@ -0,0 +1,219 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package credentials
import (
"errors"
"fmt"
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/credentials/internal/externalaccount"
"cloud.google.com/go/auth/credentials/internal/externalaccountuser"
"cloud.google.com/go/auth/credentials/internal/gdch"
"cloud.google.com/go/auth/credentials/internal/impersonate"
internalauth "cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/credsfile"
)
func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
fileType, err := credsfile.ParseFileType(b)
if err != nil {
return nil, err
}
var projectID, quotaProjectID, universeDomain string
var tp auth.TokenProvider
switch fileType {
case credsfile.ServiceAccountKey:
f, err := credsfile.ParseServiceAccount(b)
if err != nil {
return nil, err
}
tp, err = handleServiceAccount(f, opts)
if err != nil {
return nil, err
}
projectID = f.ProjectID
universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
case credsfile.UserCredentialsKey:
f, err := credsfile.ParseUserCredentials(b)
if err != nil {
return nil, err
}
tp, err = handleUserCredential(f, opts)
if err != nil {
return nil, err
}
quotaProjectID = f.QuotaProjectID
universeDomain = f.UniverseDomain
case credsfile.ExternalAccountKey:
f, err := credsfile.ParseExternalAccount(b)
if err != nil {
return nil, err
}
tp, err = handleExternalAccount(f, opts)
if err != nil {
return nil, err
}
quotaProjectID = f.QuotaProjectID
universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
case credsfile.ExternalAccountAuthorizedUserKey:
f, err := credsfile.ParseExternalAccountAuthorizedUser(b)
if err != nil {
return nil, err
}
tp, err = handleExternalAccountAuthorizedUser(f, opts)
if err != nil {
return nil, err
}
quotaProjectID = f.QuotaProjectID
universeDomain = f.UniverseDomain
case credsfile.ImpersonatedServiceAccountKey:
f, err := credsfile.ParseImpersonatedServiceAccount(b)
if err != nil {
return nil, err
}
tp, err = handleImpersonatedServiceAccount(f, opts)
if err != nil {
return nil, err
}
universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
case credsfile.GDCHServiceAccountKey:
f, err := credsfile.ParseGDCHServiceAccount(b)
if err != nil {
return nil, err
}
tp, err = handleGDCHServiceAccount(f, opts)
if err != nil {
return nil, err
}
projectID = f.Project
universeDomain = f.UniverseDomain
default:
return nil, fmt.Errorf("credentials: unsupported filetype %q", fileType)
}
return auth.NewCredentials(&auth.CredentialsOptions{
TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{
ExpireEarly: opts.EarlyTokenRefresh,
}),
JSON: b,
ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID),
QuotaProjectIDProvider: internalauth.StaticCredentialsProperty(quotaProjectID),
UniverseDomainProvider: internalauth.StaticCredentialsProperty(universeDomain),
}), nil
}
// resolveUniverseDomain returns optsUniverseDomain if non-empty, in order to
// support configuring universe-specific credentials in code. Auth flows that
// do not support universe domain should not use this func; they should instead
// simply set the file universe domain on the credentials.
func resolveUniverseDomain(optsUniverseDomain, fileUniverseDomain string) string {
if optsUniverseDomain != "" {
return optsUniverseDomain
}
return fileUniverseDomain
}
func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
if opts.UseSelfSignedJWT {
return configureSelfSignedJWT(f, opts)
}
opts2LO := &auth.Options2LO{
Email: f.ClientEmail,
PrivateKey: []byte(f.PrivateKey),
PrivateKeyID: f.PrivateKeyID,
Scopes: opts.scopes(),
TokenURL: f.TokenURL,
Subject: opts.Subject,
}
if opts2LO.TokenURL == "" {
opts2LO.TokenURL = jwtTokenURL
}
return auth.New2LOTokenProvider(opts2LO)
}
func handleUserCredential(f *credsfile.UserCredentialsFile, opts *DetectOptions) (auth.TokenProvider, error) {
opts3LO := &auth.Options3LO{
ClientID: f.ClientID,
ClientSecret: f.ClientSecret,
Scopes: opts.scopes(),
AuthURL: googleAuthURL,
TokenURL: opts.tokenURL(),
AuthStyle: auth.StyleInParams,
EarlyTokenExpiry: opts.EarlyTokenRefresh,
RefreshToken: f.RefreshToken,
}
return auth.New3LOTokenProvider(opts3LO)
}
func handleExternalAccount(f *credsfile.ExternalAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
externalOpts := &externalaccount.Options{
Audience: f.Audience,
SubjectTokenType: f.SubjectTokenType,
TokenURL: f.TokenURL,
TokenInfoURL: f.TokenInfoURL,
ServiceAccountImpersonationURL: f.ServiceAccountImpersonationURL,
ClientSecret: f.ClientSecret,
ClientID: f.ClientID,
CredentialSource: f.CredentialSource,
QuotaProjectID: f.QuotaProjectID,
Scopes: opts.scopes(),
WorkforcePoolUserProject: f.WorkforcePoolUserProject,
Client: opts.client(),
}
if f.ServiceAccountImpersonation != nil {
externalOpts.ServiceAccountImpersonationLifetimeSeconds = f.ServiceAccountImpersonation.TokenLifetimeSeconds
}
return externalaccount.NewTokenProvider(externalOpts)
}
func handleExternalAccountAuthorizedUser(f *credsfile.ExternalAccountAuthorizedUserFile, opts *DetectOptions) (auth.TokenProvider, error) {
externalOpts := &externalaccountuser.Options{
Audience: f.Audience,
RefreshToken: f.RefreshToken,
TokenURL: f.TokenURL,
TokenInfoURL: f.TokenInfoURL,
ClientID: f.ClientID,
ClientSecret: f.ClientSecret,
Scopes: opts.scopes(),
Client: opts.client(),
}
return externalaccountuser.NewTokenProvider(externalOpts)
}
func handleImpersonatedServiceAccount(f *credsfile.ImpersonatedServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
if f.ServiceAccountImpersonationURL == "" || f.CredSource == nil {
return nil, errors.New("missing 'source_credentials' field or 'service_account_impersonation_url' in credentials")
}
tp, err := fileCredentials(f.CredSource, opts)
if err != nil {
return nil, err
}
return impersonate.NewTokenProvider(&impersonate.Options{
URL: f.ServiceAccountImpersonationURL,
Scopes: opts.scopes(),
Tp: tp,
Delegates: f.Delegates,
Client: opts.client(),
})
}
func handleGDCHServiceAccount(f *credsfile.GDCHServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
return gdch.NewTokenProvider(f, &gdch.Options{
STSAudience: opts.STSAudience,
Client: opts.client(),
})
}

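For the service-account branch above, a caller can force the self-signed JWT path through DetectOptions. A minimal sketch, assuming a service account key at a placeholder path:

package main

import (
	"log"

	"cloud.google.com/go/auth/credentials"
)

func main() {
	// Placeholder path to a service account key. UseSelfSignedJWT makes
	// handleServiceAccount sign a JWT locally instead of calling the token
	// endpoint, so no network request is needed to mint a token.
	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
		CredentialsFile:  "/path/to/service-account.json",
		Scopes:           []string{"https://www.googleapis.com/auth/cloud-platform"},
		UseSelfSignedJWT: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = creds // hand creds to an authenticated client
}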
View file

@ -0,0 +1,547 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package externalaccount
import (
"bytes"
"context"
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"os"
"path"
"sort"
"strings"
"time"
"cloud.google.com/go/auth/internal"
)
var (
// getenv aliases os.Getenv for testing
getenv = os.Getenv
)
const (
// AWS Signature Version 4 signing algorithm identifier.
awsAlgorithm = "AWS4-HMAC-SHA256"
// The termination string for the AWS credential scope value as defined in
// https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html
awsRequestType = "aws4_request"
// The AWS authorization header name for the security session token if available.
awsSecurityTokenHeader = "x-amz-security-token"
// The name of the header containing the session token for metadata endpoint calls
awsIMDSv2SessionTokenHeader = "X-aws-ec2-metadata-token"
awsIMDSv2SessionTTLHeader = "X-aws-ec2-metadata-token-ttl-seconds"
awsIMDSv2SessionTTL = "300"
// The AWS authorization header name for the auto-generated date.
awsDateHeader = "x-amz-date"
defaultRegionalCredentialVerificationURL = "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15"
// Supported AWS configuration environment variables.
awsAccessKeyIDEnvVar = "AWS_ACCESS_KEY_ID"
awsDefaultRegionEnvVar = "AWS_DEFAULT_REGION"
awsRegionEnvVar = "AWS_REGION"
awsSecretAccessKeyEnvVar = "AWS_SECRET_ACCESS_KEY"
awsSessionTokenEnvVar = "AWS_SESSION_TOKEN"
awsTimeFormatLong = "20060102T150405Z"
awsTimeFormatShort = "20060102"
awsProviderType = "aws"
)
type awsSubjectProvider struct {
EnvironmentID string
RegionURL string
RegionalCredVerificationURL string
CredVerificationURL string
IMDSv2SessionTokenURL string
TargetResource string
requestSigner *awsRequestSigner
region string
securityCredentialsProvider AwsSecurityCredentialsProvider
reqOpts *RequestOptions
Client *http.Client
}
func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) {
// Set Defaults
if sp.RegionalCredVerificationURL == "" {
sp.RegionalCredVerificationURL = defaultRegionalCredentialVerificationURL
}
if sp.requestSigner == nil {
headers := make(map[string]string)
if sp.shouldUseMetadataServer() {
awsSessionToken, err := sp.getAWSSessionToken(ctx)
if err != nil {
return "", err
}
if awsSessionToken != "" {
headers[awsIMDSv2SessionTokenHeader] = awsSessionToken
}
}
awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers)
if err != nil {
return "", err
}
if sp.region, err = sp.getRegion(ctx, headers); err != nil {
return "", err
}
sp.requestSigner = &awsRequestSigner{
RegionName: sp.region,
AwsSecurityCredentials: awsSecurityCredentials,
}
}
// Generate the signed request to AWS STS GetCallerIdentity API.
// Use the required regional endpoint. Otherwise, the request will fail.
req, err := http.NewRequest("POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil)
if err != nil {
return "", err
}
// The full, canonical resource name of the workload identity pool
// provider, with or without the HTTPS prefix.
// Including this header as part of the signature is recommended to
// ensure data integrity.
if sp.TargetResource != "" {
req.Header.Set("x-goog-cloud-target-resource", sp.TargetResource)
}
sp.requestSigner.signRequest(req)
/*
The GCP STS endpoint expects the headers to be formatted as:
# [
# {key: 'x-amz-date', value: '...'},
# {key: 'Authorization', value: '...'},
# ...
# ]
# And then serialized as:
# quote(json.dumps({
# url: '...',
# method: 'POST',
# headers: [{key: 'x-amz-date', value: '...'}, ...]
# }))
*/
awsSignedReq := awsRequest{
URL: req.URL.String(),
Method: "POST",
}
for headerKey, headerList := range req.Header {
for _, headerValue := range headerList {
awsSignedReq.Headers = append(awsSignedReq.Headers, awsRequestHeader{
Key: headerKey,
Value: headerValue,
})
}
}
sort.Slice(awsSignedReq.Headers, func(i, j int) bool {
headerCompare := strings.Compare(awsSignedReq.Headers[i].Key, awsSignedReq.Headers[j].Key)
if headerCompare == 0 {
return strings.Compare(awsSignedReq.Headers[i].Value, awsSignedReq.Headers[j].Value) < 0
}
return headerCompare < 0
})
result, err := json.Marshal(awsSignedReq)
if err != nil {
return "", err
}
return url.QueryEscape(string(result)), nil
}
func (sp *awsSubjectProvider) providerType() string {
if sp.securityCredentialsProvider != nil {
return programmaticProviderType
}
return awsProviderType
}
func (sp *awsSubjectProvider) getAWSSessionToken(ctx context.Context) (string, error) {
if sp.IMDSv2SessionTokenURL == "" {
return "", nil
}
req, err := http.NewRequestWithContext(ctx, "PUT", sp.IMDSv2SessionTokenURL, nil)
if err != nil {
return "", err
}
req.Header.Set(awsIMDSv2SessionTTLHeader, awsIMDSv2SessionTTL)
resp, err := sp.Client.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
respBody, err := internal.ReadAll(resp.Body)
if err != nil {
return "", err
}
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", respBody)
}
return string(respBody), nil
}
func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string]string) (string, error) {
if sp.securityCredentialsProvider != nil {
return sp.securityCredentialsProvider.AwsRegion(ctx, sp.reqOpts)
}
if canRetrieveRegionFromEnvironment() {
if envAwsRegion := getenv(awsRegionEnvVar); envAwsRegion != "" {
return envAwsRegion, nil
}
return getenv(awsDefaultRegionEnvVar), nil
}
if sp.RegionURL == "" {
return "", errors.New("credentials: unable to determine AWS region")
}
req, err := http.NewRequestWithContext(ctx, "GET", sp.RegionURL, nil)
if err != nil {
return "", err
}
for name, value := range headers {
req.Header.Add(name, value)
}
resp, err := sp.Client.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
respBody, err := internal.ReadAll(resp.Body)
if err != nil {
return "", err
}
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", respBody)
}
// This endpoint will return the region in format: us-east-2b.
// Only the us-east-2 part should be used.
bodyLen := len(respBody)
if bodyLen == 0 {
return "", nil
}
return string(respBody[:bodyLen-1]), nil
}
func (sp *awsSubjectProvider) getSecurityCredentials(ctx context.Context, headers map[string]string) (result *AwsSecurityCredentials, err error) {
if sp.securityCredentialsProvider != nil {
return sp.securityCredentialsProvider.AwsSecurityCredentials(ctx, sp.reqOpts)
}
if canRetrieveSecurityCredentialFromEnvironment() {
return &AwsSecurityCredentials{
AccessKeyID: getenv(awsAccessKeyIDEnvVar),
SecretAccessKey: getenv(awsSecretAccessKeyEnvVar),
SessionToken: getenv(awsSessionTokenEnvVar),
}, nil
}
roleName, err := sp.getMetadataRoleName(ctx, headers)
if err != nil {
return
}
credentials, err := sp.getMetadataSecurityCredentials(ctx, roleName, headers)
if err != nil {
return
}
if credentials.AccessKeyID == "" {
return result, errors.New("credentials: missing AccessKeyId credential")
}
if credentials.SecretAccessKey == "" {
return result, errors.New("credentials: missing SecretAccessKey credential")
}
return credentials, nil
}
func (sp *awsSubjectProvider) getMetadataSecurityCredentials(ctx context.Context, roleName string, headers map[string]string) (*AwsSecurityCredentials, error) {
var result *AwsSecurityCredentials
req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/%s", sp.CredVerificationURL, roleName), nil)
if err != nil {
return result, err
}
for name, value := range headers {
req.Header.Add(name, value)
}
resp, err := sp.Client.Do(req)
if err != nil {
return result, err
}
defer resp.Body.Close()
respBody, err := internal.ReadAll(resp.Body)
if err != nil {
return result, err
}
if resp.StatusCode != http.StatusOK {
return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", respBody)
}
err = json.Unmarshal(respBody, &result)
return result, err
}
func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers map[string]string) (string, error) {
if sp.CredVerificationURL == "" {
return "", errors.New("credentials: unable to determine the AWS metadata server security credentials endpoint")
}
req, err := http.NewRequestWithContext(ctx, "GET", sp.CredVerificationURL, nil)
if err != nil {
return "", err
}
for name, value := range headers {
req.Header.Add(name, value)
}
resp, err := sp.Client.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
respBody, err := internal.ReadAll(resp.Body)
if err != nil {
return "", err
}
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", respBody)
}
return string(respBody), nil
}
// awsRequestSigner is a utility type that signs HTTP requests using an AWS V4 signature.
type awsRequestSigner struct {
RegionName string
AwsSecurityCredentials *AwsSecurityCredentials
}
// signRequest adds the appropriate headers to an http.Request
// or returns an error if something prevented this.
func (rs *awsRequestSigner) signRequest(req *http.Request) error {
// req is assumed non-nil
signedRequest := cloneRequest(req)
timestamp := Now()
signedRequest.Header.Set("host", requestHost(req))
if rs.AwsSecurityCredentials.SessionToken != "" {
signedRequest.Header.Set(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SessionToken)
}
if signedRequest.Header.Get("date") == "" {
signedRequest.Header.Set(awsDateHeader, timestamp.Format(awsTimeFormatLong))
}
authorizationCode, err := rs.generateAuthentication(signedRequest, timestamp)
if err != nil {
return err
}
signedRequest.Header.Set("Authorization", authorizationCode)
req.Header = signedRequest.Header
return nil
}
func (rs *awsRequestSigner) generateAuthentication(req *http.Request, timestamp time.Time) (string, error) {
canonicalHeaderColumns, canonicalHeaderData := canonicalHeaders(req)
dateStamp := timestamp.Format(awsTimeFormatShort)
serviceName := ""
if splitHost := strings.Split(requestHost(req), "."); len(splitHost) > 0 {
serviceName = splitHost[0]
}
credentialScope := strings.Join([]string{dateStamp, rs.RegionName, serviceName, awsRequestType}, "/")
requestString, err := canonicalRequest(req, canonicalHeaderColumns, canonicalHeaderData)
if err != nil {
return "", err
}
requestHash, err := getSha256([]byte(requestString))
if err != nil {
return "", err
}
stringToSign := strings.Join([]string{awsAlgorithm, timestamp.Format(awsTimeFormatLong), credentialScope, requestHash}, "\n")
signingKey := []byte("AWS4" + rs.AwsSecurityCredentials.SecretAccessKey)
for _, signingInput := range []string{
dateStamp, rs.RegionName, serviceName, awsRequestType, stringToSign,
} {
signingKey, err = getHmacSha256(signingKey, []byte(signingInput))
if err != nil {
return "", err
}
}
return fmt.Sprintf("%s Credential=%s/%s, SignedHeaders=%s, Signature=%s", awsAlgorithm, rs.AwsSecurityCredentials.AccessKeyID, credentialScope, canonicalHeaderColumns, hex.EncodeToString(signingKey)), nil
}
func getSha256(input []byte) (string, error) {
hash := sha256.New()
if _, err := hash.Write(input); err != nil {
return "", err
}
return hex.EncodeToString(hash.Sum(nil)), nil
}
func getHmacSha256(key, input []byte) ([]byte, error) {
hash := hmac.New(sha256.New, key)
if _, err := hash.Write(input); err != nil {
return nil, err
}
return hash.Sum(nil), nil
}
func cloneRequest(r *http.Request) *http.Request {
r2 := new(http.Request)
*r2 = *r
if r.Header != nil {
r2.Header = make(http.Header, len(r.Header))
// Find total number of values.
headerCount := 0
for _, headerValues := range r.Header {
headerCount += len(headerValues)
}
copiedHeaders := make([]string, headerCount) // shared backing array for headers' values
for headerKey, headerValues := range r.Header {
headerCount = copy(copiedHeaders, headerValues)
r2.Header[headerKey] = copiedHeaders[:headerCount:headerCount]
copiedHeaders = copiedHeaders[headerCount:]
}
}
return r2
}
func canonicalPath(req *http.Request) string {
result := req.URL.EscapedPath()
if result == "" {
return "/"
}
return path.Clean(result)
}
func canonicalQuery(req *http.Request) string {
queryValues := req.URL.Query()
for queryKey := range queryValues {
sort.Strings(queryValues[queryKey])
}
return queryValues.Encode()
}
func canonicalHeaders(req *http.Request) (string, string) {
// Header keys need to be sorted alphabetically.
var headers []string
lowerCaseHeaders := make(http.Header)
for k, v := range req.Header {
k := strings.ToLower(k)
if _, ok := lowerCaseHeaders[k]; ok {
// include additional values
lowerCaseHeaders[k] = append(lowerCaseHeaders[k], v...)
} else {
headers = append(headers, k)
lowerCaseHeaders[k] = v
}
}
sort.Strings(headers)
var fullHeaders bytes.Buffer
for _, header := range headers {
headerValue := strings.Join(lowerCaseHeaders[header], ",")
fullHeaders.WriteString(header)
fullHeaders.WriteRune(':')
fullHeaders.WriteString(headerValue)
fullHeaders.WriteRune('\n')
}
return strings.Join(headers, ";"), fullHeaders.String()
}
func requestDataHash(req *http.Request) (string, error) {
var requestData []byte
if req.Body != nil {
requestBody, err := req.GetBody()
if err != nil {
return "", err
}
defer requestBody.Close()
requestData, err = internal.ReadAll(requestBody)
if err != nil {
return "", err
}
}
return getSha256(requestData)
}
func requestHost(req *http.Request) string {
if req.Host != "" {
return req.Host
}
return req.URL.Host
}
func canonicalRequest(req *http.Request, canonicalHeaderColumns, canonicalHeaderData string) (string, error) {
dataHash, err := requestDataHash(req)
if err != nil {
return "", err
}
return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", req.Method, canonicalPath(req), canonicalQuery(req), canonicalHeaderData, canonicalHeaderColumns, dataHash), nil
}
type awsRequestHeader struct {
Key string `json:"key"`
Value string `json:"value"`
}
type awsRequest struct {
URL string `json:"url"`
Method string `json:"method"`
Headers []awsRequestHeader `json:"headers"`
}
// The AWS region can be provided through AWS_REGION or AWS_DEFAULT_REGION. Only one is
// required.
func canRetrieveRegionFromEnvironment() bool {
return getenv(awsRegionEnvVar) != "" || getenv(awsDefaultRegionEnvVar) != ""
}
// Check if both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are available.
func canRetrieveSecurityCredentialFromEnvironment() bool {
return getenv(awsAccessKeyIDEnvVar) != "" && getenv(awsSecretAccessKeyEnvVar) != ""
}
func (sp *awsSubjectProvider) shouldUseMetadataServer() bool {
return sp.securityCredentialsProvider == nil && (!canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment())
}

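The signature construction in generateAuthentication is standard SigV4: the signing key is an HMAC chain over the date stamp, region, service name and the literal "aws4_request", and a final HMAC over the string-to-sign yields the hex signature. A standalone sketch of that derivation, with placeholder inputs:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// hmacSHA256 mirrors getHmacSha256 above, without the error plumbing.
func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	// Placeholder inputs; never a real secret.
	secret := "EXAMPLESECRETKEY"
	stringToSign := "AWS4-HMAC-SHA256\n20240423T090917Z\n20240423/us-east-1/sts/aws4_request\n<sha256-of-canonical-request>"

	// Chain HMACs over date, region, service, "aws4_request" and the
	// string-to-sign, exactly as generateAuthentication does; the final
	// value is the request signature.
	key := []byte("AWS4" + secret)
	for _, part := range []string{"20240423", "us-east-1", "sts", "aws4_request", stringToSign} {
		key = hmacSHA256(key, []byte(part))
	}
	fmt.Println("signature:", hex.EncodeToString(key))
}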
View file

@ -0,0 +1,284 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package externalaccount
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"os"
"os/exec"
"regexp"
"strings"
"time"
"cloud.google.com/go/auth/internal"
)
const (
executableSupportedMaxVersion = 1
executableDefaultTimeout = 30 * time.Second
executableSource = "response"
executableProviderType = "executable"
outputFileSource = "output file"
allowExecutablesEnvVar = "GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES"
jwtTokenType = "urn:ietf:params:oauth:token-type:jwt"
idTokenType = "urn:ietf:params:oauth:token-type:id_token"
saml2TokenType = "urn:ietf:params:oauth:token-type:saml2"
)
var (
serviceAccountImpersonationRE = regexp.MustCompile(`https://iamcredentials\..+/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken`)
)
type nonCacheableError struct {
message string
}
func (nce nonCacheableError) Error() string {
return nce.message
}
// environment is a contract for testing
type environment interface {
existingEnv() []string
getenv(string) string
run(ctx context.Context, command string, env []string) ([]byte, error)
now() time.Time
}
type runtimeEnvironment struct{}
func (r runtimeEnvironment) existingEnv() []string {
return os.Environ()
}
func (r runtimeEnvironment) getenv(key string) string {
return os.Getenv(key)
}
func (r runtimeEnvironment) now() time.Time {
return time.Now().UTC()
}
func (r runtimeEnvironment) run(ctx context.Context, command string, env []string) ([]byte, error) {
splitCommand := strings.Fields(command)
cmd := exec.CommandContext(ctx, splitCommand[0], splitCommand[1:]...)
cmd.Env = env
var stdout, stderr bytes.Buffer
cmd.Stdout = &stdout
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
if ctx.Err() == context.DeadlineExceeded {
return nil, context.DeadlineExceeded
}
if exitError, ok := err.(*exec.ExitError); ok {
return nil, exitCodeError(exitError)
}
return nil, executableError(err)
}
bytesStdout := bytes.TrimSpace(stdout.Bytes())
if len(bytesStdout) > 0 {
return bytesStdout, nil
}
return bytes.TrimSpace(stderr.Bytes()), nil
}
type executableSubjectProvider struct {
Command string
Timeout time.Duration
OutputFile string
client *http.Client
opts *Options
env environment
}
type executableResponse struct {
Version int `json:"version,omitempty"`
Success *bool `json:"success,omitempty"`
TokenType string `json:"token_type,omitempty"`
ExpirationTime int64 `json:"expiration_time,omitempty"`
IDToken string `json:"id_token,omitempty"`
SamlResponse string `json:"saml_response,omitempty"`
Code string `json:"code,omitempty"`
Message string `json:"message,omitempty"`
}
func (sp *executableSubjectProvider) parseSubjectTokenFromSource(response []byte, source string, now int64) (string, error) {
var result executableResponse
if err := json.Unmarshal(response, &result); err != nil {
return "", jsonParsingError(source, string(response))
}
// Validate
if result.Version == 0 {
return "", missingFieldError(source, "version")
}
if result.Success == nil {
return "", missingFieldError(source, "success")
}
if !*result.Success {
if result.Code == "" || result.Message == "" {
return "", malformedFailureError()
}
return "", userDefinedError(result.Code, result.Message)
}
if result.Version > executableSupportedMaxVersion || result.Version < 0 {
return "", unsupportedVersionError(source, result.Version)
}
if result.ExpirationTime == 0 && sp.OutputFile != "" {
return "", missingFieldError(source, "expiration_time")
}
if result.TokenType == "" {
return "", missingFieldError(source, "token_type")
}
if result.ExpirationTime != 0 && result.ExpirationTime < now {
return "", tokenExpiredError()
}
switch result.TokenType {
case jwtTokenType, idTokenType:
if result.IDToken == "" {
return "", missingFieldError(source, "id_token")
}
return result.IDToken, nil
case saml2TokenType:
if result.SamlResponse == "" {
return "", missingFieldError(source, "saml_response")
}
return result.SamlResponse, nil
default:
return "", tokenTypeError(source)
}
}
func (sp *executableSubjectProvider) subjectToken(ctx context.Context) (string, error) {
if token, err := sp.getTokenFromOutputFile(); token != "" || err != nil {
return token, err
}
return sp.getTokenFromExecutableCommand(ctx)
}
func (sp *executableSubjectProvider) providerType() string {
return executableProviderType
}
func (sp *executableSubjectProvider) getTokenFromOutputFile() (token string, err error) {
if sp.OutputFile == "" {
// This ExecutableCredentialSource doesn't use an OutputFile.
return "", nil
}
file, err := os.Open(sp.OutputFile)
if err != nil {
// No OutputFile found. Hasn't been created yet, so skip it.
return "", nil
}
defer file.Close()
data, err := internal.ReadAll(file)
if err != nil || len(data) == 0 {
// Cachefile exists, but no data found. Get new credential.
return "", nil
}
token, err = sp.parseSubjectTokenFromSource(data, outputFileSource, sp.env.now().Unix())
if err != nil {
if _, ok := err.(nonCacheableError); ok {
// If the cached token is expired we need a new token,
// and if the cache contains a failure, we need to try again.
return "", nil
}
// There was an error in the cached token, and the developer should be aware of it.
return "", err
}
// Token parsing succeeded. Use found token.
return token, nil
}
func (sp *executableSubjectProvider) executableEnvironment() []string {
result := sp.env.existingEnv()
result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_AUDIENCE=%v", sp.opts.Audience))
result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_TOKEN_TYPE=%v", sp.opts.SubjectTokenType))
result = append(result, "GOOGLE_EXTERNAL_ACCOUNT_INTERACTIVE=0")
if sp.opts.ServiceAccountImpersonationURL != "" {
matches := serviceAccountImpersonationRE.FindStringSubmatch(sp.opts.ServiceAccountImpersonationURL)
if matches != nil {
result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_IMPERSONATED_EMAIL=%v", matches[1]))
}
}
if sp.OutputFile != "" {
result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_OUTPUT_FILE=%v", sp.OutputFile))
}
return result
}
func (sp *executableSubjectProvider) getTokenFromExecutableCommand(ctx context.Context) (string, error) {
// For security reasons, we need our consumers to set this environment variable to allow executables to be run.
if sp.env.getenv(allowExecutablesEnvVar) != "1" {
return "", errors.New("credentials: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run")
}
ctx, cancel := context.WithDeadline(ctx, sp.env.now().Add(sp.Timeout))
defer cancel()
output, err := sp.env.run(ctx, sp.Command, sp.executableEnvironment())
if err != nil {
return "", err
}
return sp.parseSubjectTokenFromSource(output, executableSource, sp.env.now().Unix())
}
func missingFieldError(source, field string) error {
return fmt.Errorf("credentials: %q missing %q field", source, field)
}
func jsonParsingError(source, data string) error {
return fmt.Errorf("credentials: unable to parse %q: %v", source, data)
}
func malformedFailureError() error {
return nonCacheableError{"credentials: response must include `error` and `message` fields when unsuccessful"}
}
func userDefinedError(code, message string) error {
return nonCacheableError{fmt.Sprintf("credentials: response contains unsuccessful response: (%v) %v", code, message)}
}
func unsupportedVersionError(source string, version int) error {
return fmt.Errorf("credentials: %v contains unsupported version: %v", source, version)
}
func tokenExpiredError() error {
return nonCacheableError{"credentials: the token returned by the executable is expired"}
}
func tokenTypeError(source string) error {
return fmt.Errorf("credentials: %v contains unsupported token type", source)
}
func exitCodeError(err *exec.ExitError) error {
return fmt.Errorf("credentials: executable command failed with exit code %v: %w", err.ExitCode(), err)
}
func executableError(err error) error {
return fmt.Errorf("credentials: executable command failed: %w", err)
}

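An executable referenced by this credential source is expected to print a JSON document with the fields validated in parseSubjectTokenFromSource (version, success, token_type, an id_token or saml_response, and optionally expiration_time), and it only runs when GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES=1 is set. A hypothetical helper emitting such a response might look like this sketch; the token value is a placeholder:

package main

import (
	"encoding/json"
	"os"
	"time"
)

func main() {
	// Field names match executableResponse above.
	resp := map[string]any{
		"version":         1,
		"success":         true,
		"token_type":      "urn:ietf:params:oauth:token-type:jwt",
		"id_token":        "<subject-token>", // placeholder for a real OIDC token
		"expiration_time": time.Now().Add(time.Hour).Unix(),
	}
	json.NewEncoder(os.Stdout).Encode(resp)
}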
View file

@ -0,0 +1,367 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package externalaccount
import (
"context"
"errors"
"fmt"
"net/http"
"regexp"
"strconv"
"strings"
"time"
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/credentials/internal/impersonate"
"cloud.google.com/go/auth/credentials/internal/stsexchange"
"cloud.google.com/go/auth/internal/credsfile"
)
const (
timeoutMinimum = 5 * time.Second
timeoutMaximum = 120 * time.Second
universeDomainPlaceholder = "UNIVERSE_DOMAIN"
defaultTokenURL = "https://sts.UNIVERSE_DOMAIN/v1/token"
defaultUniverseDomain = "googleapis.com"
)
var (
// Now aliases time.Now for testing
Now = func() time.Time {
return time.Now().UTC()
}
validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`)
)
// Options stores the configuration for fetching tokens with external credentials.
type Options struct {
// Audience is the Secure Token Service (STS) audience which contains the resource name for the workload
// identity pool or the workforce pool and the provider identifier in that pool.
Audience string
// SubjectTokenType is the STS token type based on the OAuth 2.0 token exchange spec,
// e.g. `urn:ietf:params:oauth:token-type:jwt`.
SubjectTokenType string
// TokenURL is the STS token exchange endpoint.
TokenURL string
// TokenInfoURL is the token_info endpoint used to retrieve the account related information (
// user attributes like account identifier, e.g. email, username, uid, etc.). This is
// needed for gCloud session account identification.
TokenInfoURL string
// ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only
// required for workload identity pools when APIs to be accessed have not integrated with UberMint.
ServiceAccountImpersonationURL string
// ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation
// token will be valid for.
ServiceAccountImpersonationLifetimeSeconds int
// ClientSecret is currently only required if token_info endpoint also
// needs to be called with the generated GCP access token. When provided, STS will be
// called with additional basic authentication using client_id as username and client_secret as password.
ClientSecret string
// ClientID is only required in conjunction with ClientSecret, as described above.
ClientID string
// CredentialSource contains the necessary information to retrieve the token itself, as well
// as some environmental information.
CredentialSource *credsfile.CredentialSource
// QuotaProjectID is injected by gCloud. If the value is non-empty, the Auth libraries
// will set the x-goog-user-project which overrides the project associated with the credentials.
QuotaProjectID string
// Scopes contains the desired scopes for the returned access token.
Scopes []string
// WorkforcePoolUserProject should be set when it is a workforce pool and
// not a workload identity pool. The underlying principal must still have
// serviceusage.services.use IAM permission to use the project for
// billing/quota. Optional.
WorkforcePoolUserProject string
// UniverseDomain is the default service domain for a given Cloud universe.
// This value will be used in the default STS token URL. The default value
// is "googleapis.com". It will not be used if TokenURL is set. Optional.
UniverseDomain string
// SubjectTokenProvider is an optional token provider for OIDC/SAML
// credentials. One of SubjectTokenProvider, AwsSecurityCredentialsProvider
// or CredentialSource must be provided. Optional.
SubjectTokenProvider SubjectTokenProvider
// AwsSecurityCredentialsProvider is an AWS Security Credential provider
// for AWS credentials. One of SubjectTokenProvider,
// AwsSecurityCredentialsProvider or CredentialSource must be provided. Optional.
AwsSecurityCredentialsProvider AwsSecurityCredentialsProvider
// Client for token request.
Client *http.Client
}
// SubjectTokenProvider can be used to supply a subject token to exchange for a
// GCP access token.
type SubjectTokenProvider interface {
// SubjectToken should return a valid subject token or an error.
// The external account token provider does not cache the returned subject
// token, so caching logic should be implemented in the provider to prevent
// multiple requests for the same subject token.
SubjectToken(ctx context.Context, opts *RequestOptions) (string, error)
}
// RequestOptions contains information about the requested subject token or AWS
// security credentials from the Google external account credential.
type RequestOptions struct {
// Audience is the requested audience for the external account credential.
Audience string
// Subject token type is the requested subject token type for the external
// account credential. Expected values include:
// “urn:ietf:params:oauth:token-type:jwt”
// “urn:ietf:params:oauth:token-type:id-token”
// “urn:ietf:params:oauth:token-type:saml2”
// “urn:ietf:params:aws:token-type:aws4_request”
SubjectTokenType string
}
// AwsSecurityCredentialsProvider can be used to supply AwsSecurityCredentials
// and an AWS Region to exchange for a GCP access token.
type AwsSecurityCredentialsProvider interface {
// AwsRegion should return the AWS region or an error.
AwsRegion(ctx context.Context, opts *RequestOptions) (string, error)
// GetAwsSecurityCredentials should return a valid set of
// AwsSecurityCredentials or an error. The external account token provider
// does not cache the returned security credentials, so caching logic should
// be implemented in the provider to prevent multiple requests for the
// same security credentials.
AwsSecurityCredentials(ctx context.Context, opts *RequestOptions) (*AwsSecurityCredentials, error)
}
// AwsSecurityCredentials models AWS security credentials.
type AwsSecurityCredentials struct {
// AccessKeyId is the AWS Access Key ID - Required.
AccessKeyID string `json:"AccessKeyID"`
// SecretAccessKey is the AWS Secret Access Key - Required.
SecretAccessKey string `json:"SecretAccessKey"`
// SessionToken is the AWS Session token. This should be provided for
// temporary AWS security credentials - Optional.
SessionToken string `json:"Token"`
}
func (o *Options) validate() error {
if o.Audience == "" {
return fmt.Errorf("externalaccount: Audience must be set")
}
if o.SubjectTokenType == "" {
return fmt.Errorf("externalaccount: Subject token type must be set")
}
if o.WorkforcePoolUserProject != "" {
if valid := validWorkforceAudiencePattern.MatchString(o.Audience); !valid {
return fmt.Errorf("externalaccount: workforce_pool_user_project should not be set for non-workforce pool credentials")
}
}
count := 0
if o.CredentialSource != nil {
count++
}
if o.SubjectTokenProvider != nil {
count++
}
if o.AwsSecurityCredentialsProvider != nil {
count++
}
if count == 0 {
return fmt.Errorf("externalaccount: one of CredentialSource, SubjectTokenProvider, or AwsSecurityCredentialsProvider must be set")
}
if count > 1 {
return fmt.Errorf("externalaccount: only one of CredentialSource, SubjectTokenProvider, or AwsSecurityCredentialsProvider must be set")
}
return nil
}
// resolveTokenURL sets the default STS token endpoint with the configured
// universe domain.
func (o *Options) resolveTokenURL() {
if o.TokenURL != "" {
return
} else if o.UniverseDomain != "" {
o.TokenURL = strings.Replace(defaultTokenURL, universeDomainPlaceholder, o.UniverseDomain, 1)
} else {
o.TokenURL = strings.Replace(defaultTokenURL, universeDomainPlaceholder, defaultUniverseDomain, 1)
}
}
// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider]
// configured with the provided options.
func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
if err := opts.validate(); err != nil {
return nil, err
}
opts.resolveTokenURL()
stp, err := newSubjectTokenProvider(opts)
if err != nil {
return nil, err
}
tp := &tokenProvider{
client: opts.Client,
opts: opts,
stp: stp,
}
if opts.ServiceAccountImpersonationURL == "" {
return auth.NewCachedTokenProvider(tp, nil), nil
}
scopes := make([]string, len(opts.Scopes))
copy(scopes, opts.Scopes)
// needed for impersonation
tp.opts.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"}
imp, err := impersonate.NewTokenProvider(&impersonate.Options{
Client: opts.Client,
URL: opts.ServiceAccountImpersonationURL,
Scopes: scopes,
Tp: auth.NewCachedTokenProvider(tp, nil),
TokenLifetimeSeconds: opts.ServiceAccountImpersonationLifetimeSeconds,
})
if err != nil {
return nil, err
}
return auth.NewCachedTokenProvider(imp, nil), nil
}
type subjectTokenProvider interface {
subjectToken(ctx context.Context) (string, error)
providerType() string
}
// tokenProvider is the provider that handles external credentials. It is used to retrieve Tokens.
type tokenProvider struct {
client *http.Client
opts *Options
stp subjectTokenProvider
}
func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) {
subjectToken, err := tp.stp.subjectToken(ctx)
if err != nil {
return nil, err
}
stsRequest := &stsexchange.TokenRequest{
GrantType: stsexchange.GrantType,
Audience: tp.opts.Audience,
Scope: tp.opts.Scopes,
RequestedTokenType: stsexchange.TokenType,
SubjectToken: subjectToken,
SubjectTokenType: tp.opts.SubjectTokenType,
}
header := make(http.Header)
header.Set("Content-Type", "application/x-www-form-urlencoded")
header.Add("x-goog-api-client", getGoogHeaderValue(tp.opts, tp.stp))
clientAuth := stsexchange.ClientAuthentication{
AuthStyle: auth.StyleInHeader,
ClientID: tp.opts.ClientID,
ClientSecret: tp.opts.ClientSecret,
}
var options map[string]interface{}
// Do not pass workforce_pool_user_project when client authentication is used.
// The client ID is sufficient for determining the user project.
if tp.opts.WorkforcePoolUserProject != "" && tp.opts.ClientID == "" {
options = map[string]interface{}{
"userProject": tp.opts.WorkforcePoolUserProject,
}
}
stsResp, err := stsexchange.ExchangeToken(ctx, &stsexchange.Options{
Client: tp.client,
Endpoint: tp.opts.TokenURL,
Request: stsRequest,
Authentication: clientAuth,
Headers: header,
ExtraOpts: options,
})
if err != nil {
return nil, err
}
tok := &auth.Token{
Value: stsResp.AccessToken,
Type: stsResp.TokenType,
}
// RFC 8693 does not define the behavior of an explicit 0 in the "expires_in" field.
if stsResp.ExpiresIn <= 0 {
return nil, fmt.Errorf("credentials: got invalid expiry from security token service")
}
tok.Expiry = Now().Add(time.Duration(stsResp.ExpiresIn) * time.Second)
return tok, nil
}
// newSubjectTokenProvider determines the type of credsfile.CredentialSource needed to create a
// subjectTokenProvider
func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) {
reqOpts := &RequestOptions{Audience: o.Audience, SubjectTokenType: o.SubjectTokenType}
if o.AwsSecurityCredentialsProvider != nil {
return &awsSubjectProvider{
securityCredentialsProvider: o.AwsSecurityCredentialsProvider,
TargetResource: o.Audience,
reqOpts: reqOpts,
}, nil
} else if o.SubjectTokenProvider != nil {
return &programmaticProvider{stp: o.SubjectTokenProvider, opts: reqOpts}, nil
} else if len(o.CredentialSource.EnvironmentID) > 3 && o.CredentialSource.EnvironmentID[:3] == "aws" {
if awsVersion, err := strconv.Atoi(o.CredentialSource.EnvironmentID[3:]); err == nil {
if awsVersion != 1 {
return nil, fmt.Errorf("credentials: aws version '%d' is not supported in the current build", awsVersion)
}
awsProvider := &awsSubjectProvider{
EnvironmentID: o.CredentialSource.EnvironmentID,
RegionURL: o.CredentialSource.RegionURL,
RegionalCredVerificationURL: o.CredentialSource.RegionalCredVerificationURL,
CredVerificationURL: o.CredentialSource.URL,
TargetResource: o.Audience,
Client: o.Client,
}
if o.CredentialSource.IMDSv2SessionTokenURL != "" {
awsProvider.IMDSv2SessionTokenURL = o.CredentialSource.IMDSv2SessionTokenURL
}
return awsProvider, nil
}
} else if o.CredentialSource.File != "" {
return &fileSubjectProvider{File: o.CredentialSource.File, Format: o.CredentialSource.Format}, nil
} else if o.CredentialSource.URL != "" {
return &urlSubjectProvider{URL: o.CredentialSource.URL, Headers: o.CredentialSource.Headers, Format: o.CredentialSource.Format, Client: o.Client}, nil
} else if o.CredentialSource.Executable != nil {
ec := o.CredentialSource.Executable
if ec.Command == "" {
return nil, errors.New("credentials: missing `command` field — executable command must be provided")
}
execProvider := &executableSubjectProvider{}
execProvider.Command = ec.Command
if ec.TimeoutMillis == 0 {
execProvider.Timeout = executableDefaultTimeout
} else {
execProvider.Timeout = time.Duration(ec.TimeoutMillis) * time.Millisecond
if execProvider.Timeout < timeoutMinimum || execProvider.Timeout > timeoutMaximum {
return nil, fmt.Errorf("credentials: invalid `timeout_millis` field — executable timeout must be between %v and %v seconds", timeoutMinimum.Seconds(), timeoutMaximum.Seconds())
}
}
execProvider.OutputFile = ec.OutputFile
execProvider.client = o.Client
execProvider.opts = o
execProvider.env = runtimeEnvironment{}
return execProvider, nil
}
return nil, errors.New("credentials: unable to parse credential source")
}
func getGoogHeaderValue(conf *Options, p subjectTokenProvider) string {
return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t",
goVersion(),
"unknown",
p.providerType(),
conf.ServiceAccountImpersonationURL != "",
conf.ServiceAccountImpersonationLifetimeSeconds != 0)
}

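A programmatic subject token provider only has to satisfy the SubjectTokenProvider interface defined above. The sketch below copies the interface shape locally for illustration; real code would import it from the public externalaccount package referenced in the package docs and add its own caching, since the provider above does not cache subject tokens.

package example

import (
	"context"
	"fmt"
	"os"
)

// RequestOptions and SubjectTokenProvider mirror the definitions in the file
// above; they are redeclared here only to keep the sketch self-contained.
type RequestOptions struct {
	Audience         string
	SubjectTokenType string
}

type SubjectTokenProvider interface {
	SubjectToken(ctx context.Context, opts *RequestOptions) (string, error)
}

// fileTokenProvider reads an OIDC token from a fixed path on every call; a
// production implementation would add caching and refresh logic.
type fileTokenProvider struct{ path string }

func (p fileTokenProvider) SubjectToken(ctx context.Context, opts *RequestOptions) (string, error) {
	b, err := os.ReadFile(p.path)
	if err != nil {
		return "", fmt.Errorf("reading subject token: %w", err)
	}
	return string(b), nil
}

var _ SubjectTokenProvider = fileTokenProvider{}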
View file

@ -0,0 +1,78 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package externalaccount
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"os"
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/credsfile"
)
const (
fileProviderType = "file"
)
type fileSubjectProvider struct {
File string
Format *credsfile.Format
}
func (sp *fileSubjectProvider) subjectToken(context.Context) (string, error) {
tokenFile, err := os.Open(sp.File)
if err != nil {
return "", fmt.Errorf("credentials: failed to open credential file %q: %w", sp.File, err)
}
defer tokenFile.Close()
tokenBytes, err := internal.ReadAll(tokenFile)
if err != nil {
return "", fmt.Errorf("credentials: failed to read credential file: %w", err)
}
tokenBytes = bytes.TrimSpace(tokenBytes)
if sp.Format == nil {
return string(tokenBytes), nil
}
switch sp.Format.Type {
case fileTypeJSON:
jsonData := make(map[string]interface{})
err = json.Unmarshal(tokenBytes, &jsonData)
if err != nil {
return "", fmt.Errorf("credentials: failed to unmarshal subject token file: %w", err)
}
val, ok := jsonData[sp.Format.SubjectTokenFieldName]
if !ok {
return "", errors.New("credentials: provided subject_token_field_name not found in credentials")
}
token, ok := val.(string)
if !ok {
return "", errors.New("credentials: improperly formatted subject token")
}
return token, nil
case fileTypeText:
return string(tokenBytes), nil
default:
return "", errors.New("credentials: invalid credential_source file format type: " + sp.Format.Type)
}
}
func (sp *fileSubjectProvider) providerType() string {
return fileProviderType
}

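The Format handling above corresponds to the credential_source stanza of an external account configuration file. Two illustrative variants, with placeholder paths and field names:

package example

// With no "format" stanza, the whole (trimmed) file content is the subject token.
const textSource = `{
  "credential_source": {
    "file": "/var/run/secrets/oidc/token"
  }
}`

// With a JSON format, the provider extracts the field named by
// subject_token_field_name from the file.
const jsonSource = `{
  "credential_source": {
    "file": "/var/run/secrets/oidc/token.json",
    "format": {
      "type": "json",
      "subject_token_field_name": "access_token"
    }
  }
}`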
View file

@ -0,0 +1,74 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package externalaccount
import (
"runtime"
"strings"
"unicode"
)
var (
// version is a package internal global variable for testing purposes.
version = runtime.Version
)
// versionUnknown is only used when the runtime version cannot be determined.
const versionUnknown = "UNKNOWN"
// goVersion returns a Go runtime version derived from the runtime environment
// that is modified to be suitable for reporting in a header, meaning it has no
// whitespace. If it is unable to determine the Go runtime version, it returns
// versionUnknown.
func goVersion() string {
const develPrefix = "devel +"
s := version()
if strings.HasPrefix(s, develPrefix) {
s = s[len(develPrefix):]
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
s = s[:p]
}
return s
} else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
s = s[:p]
}
notSemverRune := func(r rune) bool {
return !strings.ContainsRune("0123456789.", r)
}
if strings.HasPrefix(s, "go1") {
s = s[2:]
var prerelease string
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
s, prerelease = s[:p], s[p:]
}
if strings.HasSuffix(s, ".") {
s += "0"
} else if strings.Count(s, ".") < 2 {
s += ".0"
}
if prerelease != "" {
// Some release candidates already have a dash in them.
if !strings.HasPrefix(prerelease, "-") {
prerelease = "-" + prerelease
}
s += prerelease
}
return s
}
return versionUnknown
}

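Tracing goVersion above gives a feel for the normalization it performs; the mapping below lists a few inputs and the outputs the function would produce, derived by hand rather than taken from an exhaustive test:

package example

// Input Go runtime version -> value reported via the x-goog-api-client header.
var goVersionExamples = map[string]string{
	"go1.22.2":                  "1.22.2",
	"go1.22":                    "1.22.0",
	"go1.22rc1":                 "1.22.0-rc1",
	"devel +abc123 linux/amd64": "abc123",
}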
View file

@ -1,4 +1,4 @@
// Copyright 2022 Google LLC
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -12,12 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// This file, and the {{.RootMod}} import, won't actually become part of
// the resultant binary.
//go:build modhack
// +build modhack
package externalaccount
package metadata
import "context"
// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
import _ "cloud.google.com/go/compute/internal"
type programmaticProvider struct {
opts *RequestOptions
stp SubjectTokenProvider
}
func (pp *programmaticProvider) providerType() string {
return programmaticProviderType
}
func (pp *programmaticProvider) subjectToken(ctx context.Context) (string, error) {
return pp.stp.SubjectToken(ctx, pp.opts)
}

View file

@ -0,0 +1,93 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package externalaccount
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/credsfile"
)
const (
fileTypeText = "text"
fileTypeJSON = "json"
urlProviderType = "url"
programmaticProviderType = "programmatic"
)
type urlSubjectProvider struct {
URL string
Headers map[string]string
Format *credsfile.Format
Client *http.Client
}
func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) {
req, err := http.NewRequestWithContext(ctx, "GET", sp.URL, nil)
if err != nil {
return "", fmt.Errorf("credentials: HTTP request for URL-sourced credential failed: %w", err)
}
for key, val := range sp.Headers {
req.Header.Add(key, val)
}
resp, err := sp.Client.Do(req)
if err != nil {
return "", fmt.Errorf("credentials: invalid response when retrieving subject token: %w", err)
}
defer resp.Body.Close()
respBody, err := internal.ReadAll(resp.Body)
if err != nil {
return "", fmt.Errorf("credentials: invalid body in subject token URL query: %w", err)
}
if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
return "", fmt.Errorf("credentials: status code %d: %s", c, respBody)
}
if sp.Format == nil {
return string(respBody), nil
}
switch sp.Format.Type {
case "json":
jsonData := make(map[string]interface{})
err = json.Unmarshal(respBody, &jsonData)
if err != nil {
return "", fmt.Errorf("credentials: failed to unmarshal subject token file: %w", err)
}
val, ok := jsonData[sp.Format.SubjectTokenFieldName]
if !ok {
return "", errors.New("credentials: provided subject_token_field_name not found in credentials")
}
token, ok := val.(string)
if !ok {
return "", errors.New("credentials: improperly formatted subject token")
}
return token, nil
case fileTypeText:
return string(respBody), nil
default:
return "", errors.New("credentials: invalid credential_source file format type: " + sp.Format.Type)
}
}
func (sp *urlSubjectProvider) providerType() string {
return urlProviderType
}
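
A minimal in-package sketch of exercising the URL-sourced provider above; the test server, token value and "id_token" field name are assumptions, not part of the vendored code.

package externalaccount

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"testing"

	"cloud.google.com/go/auth/internal/credsfile"
)

func TestURLSubjectProviderSketch(t *testing.T) {
	// Fake credential URL returning a JSON document with the subject token.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, `{"id_token":"example-subject-token"}`)
	}))
	defer srv.Close()

	sp := &urlSubjectProvider{
		URL:    srv.URL,
		Client: srv.Client(),
		Format: &credsfile.Format{Type: fileTypeJSON, SubjectTokenFieldName: "id_token"},
	}
	tok, err := sp.subjectToken(context.Background())
	if err != nil || tok != "example-subject-token" {
		t.Fatalf("subjectToken() = %q, %v", tok, err)
	}
}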

View file

@ -0,0 +1,110 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package externalaccountuser
import (
"context"
"errors"
"net/http"
"time"
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/credentials/internal/stsexchange"
"cloud.google.com/go/auth/internal"
)
// Options stores the configuration for fetching tokens with external authorized
// user credentials.
type Options struct {
// Audience is the Secure Token Service (STS) audience which contains the
// resource name for the workforce pool and the provider identifier in that
// pool.
Audience string
// RefreshToken is the OAuth 2.0 refresh token.
RefreshToken string
// TokenURL is the STS token exchange endpoint for refresh.
TokenURL string
// TokenInfoURL is the STS endpoint URL for token introspection. Optional.
TokenInfoURL string
// ClientID is only required in conjunction with ClientSecret, as described
// below.
ClientID string
// ClientSecret is currently only required if the token_info endpoint also
// needs to be called with the generated cloud access token. When provided, STS

// will be called with additional basic authentication using client_id as
// username and client_secret as password.
ClientSecret string
// Scopes contains the desired scopes for the returned access token.
Scopes []string
// Client for token request.
Client *http.Client
}
func (c *Options) validate() bool {
return c.ClientID != "" && c.ClientSecret != "" && c.RefreshToken != "" && c.TokenURL != ""
}
// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider]
// configured with the provided options.
func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
if !opts.validate() {
return nil, errors.New("credentials: invalid external_account_authorized_user configuration")
}
tp := &tokenProvider{
o: opts,
}
return auth.NewCachedTokenProvider(tp, nil), nil
}
type tokenProvider struct {
o *Options
}
func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) {
opts := tp.o
clientAuth := stsexchange.ClientAuthentication{
AuthStyle: auth.StyleInHeader,
ClientID: opts.ClientID,
ClientSecret: opts.ClientSecret,
}
headers := make(http.Header)
headers.Set("Content-Type", "application/x-www-form-urlencoded")
stsResponse, err := stsexchange.RefreshAccessToken(ctx, &stsexchange.Options{
Client: opts.Client,
Endpoint: opts.TokenURL,
RefreshToken: opts.RefreshToken,
Authentication: clientAuth,
Headers: headers,
})
if err != nil {
return nil, err
}
if stsResponse.ExpiresIn < 0 {
return nil, errors.New("credentials: invalid expiry from security token service")
}
// guarded by the wrapping with CachedTokenProvider
if stsResponse.RefreshToken != "" {
opts.RefreshToken = stsResponse.RefreshToken
}
return &auth.Token{
Value: stsResponse.AccessToken,
Expiry: time.Now().UTC().Add(time.Duration(stsResponse.ExpiresIn) * time.Second),
Type: internal.TokenTypeBearer,
}, nil
}
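
A minimal in-package sketch of the external-account authorized-user flow above; all identifiers, secrets and URLs are placeholders.

package externalaccountuser

import (
	"context"
	"net/http"
)

// exampleTokenSketch builds a cached TokenProvider and performs the
// refresh-token STS exchange shown above. Values are placeholders.
func exampleTokenSketch(ctx context.Context) error {
	tp, err := NewTokenProvider(&Options{
		Audience:     "//iam.googleapis.com/locations/global/workforcePools/POOL/providers/PROVIDER",
		RefreshToken: "REFRESH_TOKEN",
		TokenURL:     "https://sts.example.com/v1/oauthtoken", // placeholder endpoint
		ClientID:     "CLIENT_ID",
		ClientSecret: "CLIENT_SECRET",
		Client:       http.DefaultClient,
	})
	if err != nil {
		return err
	}
	_, err = tp.Token(ctx)
	return err
}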

View file

@ -0,0 +1,182 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gdch
import (
"context"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"os"
"time"
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/credsfile"
"cloud.google.com/go/auth/internal/jwt"
)
const (
// GrantType is the grant type for the token request.
GrantType = "urn:ietf:params:oauth:token-type:token-exchange"
requestTokenType = "urn:ietf:params:oauth:token-type:access_token"
subjectTokenType = "urn:k8s:params:oauth:token-type:serviceaccount"
)
var (
gdchSupportFormatVersions map[string]bool = map[string]bool{
"1": true,
}
)
// Options for [NewTokenProvider].
type Options struct {
STSAudience string
Client *http.Client
}
// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] from a
// GDCH cred file.
func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.TokenProvider, error) {
if !gdchSupportFormatVersions[f.FormatVersion] {
return nil, fmt.Errorf("credentials: unsupported gdch_service_account format %q", f.FormatVersion)
}
if o.STSAudience == "" {
return nil, errors.New("credentials: STSAudience must be set for the GDCH auth flows")
}
pk, err := internal.ParseKey([]byte(f.PrivateKey))
if err != nil {
return nil, err
}
certPool, err := loadCertPool(f.CertPath)
if err != nil {
return nil, err
}
tp := gdchProvider{
serviceIdentity: fmt.Sprintf("system:serviceaccount:%s:%s", f.Project, f.Name),
tokenURL: f.TokenURL,
aud: o.STSAudience,
pk: pk,
pkID: f.PrivateKeyID,
certPool: certPool,
client: o.Client,
}
return tp, nil
}
func loadCertPool(path string) (*x509.CertPool, error) {
pool := x509.NewCertPool()
pem, err := os.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("credentials: failed to read certificate: %w", err)
}
pool.AppendCertsFromPEM(pem)
return pool, nil
}
type gdchProvider struct {
serviceIdentity string
tokenURL string
aud string
pk *rsa.PrivateKey
pkID string
certPool *x509.CertPool
client *http.Client
}
func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) {
addCertToTransport(g.client, g.certPool)
iat := time.Now()
exp := iat.Add(time.Hour)
claims := jwt.Claims{
Iss: g.serviceIdentity,
Sub: g.serviceIdentity,
Aud: g.tokenURL,
Iat: iat.Unix(),
Exp: exp.Unix(),
}
h := jwt.Header{
Algorithm: jwt.HeaderAlgRSA256,
Type: jwt.HeaderType,
KeyID: string(g.pkID),
}
payload, err := jwt.EncodeJWS(&h, &claims, g.pk)
if err != nil {
return nil, err
}
v := url.Values{}
v.Set("grant_type", GrantType)
v.Set("audience", g.aud)
v.Set("requested_token_type", requestTokenType)
v.Set("subject_token", payload)
v.Set("subject_token_type", subjectTokenType)
resp, err := g.client.PostForm(g.tokenURL, v)
if err != nil {
return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
}
defer resp.Body.Close()
body, err := internal.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
}
if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices {
return nil, &auth.Error{
Response: resp,
Body: body,
}
}
var tokenRes struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
ExpiresIn int64 `json:"expires_in"` // relative seconds from now
}
if err := json.Unmarshal(body, &tokenRes); err != nil {
return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
}
token := &auth.Token{
Value: tokenRes.AccessToken,
Type: tokenRes.TokenType,
}
raw := make(map[string]interface{})
json.Unmarshal(body, &raw) // no error checks for optional fields
token.Metadata = raw
if secs := tokenRes.ExpiresIn; secs > 0 {
token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
}
return token, nil
}
// addCertToTransport makes a best-effort attempt at adding the cert info to
// the client. It keeps all configured transport settings if the underlying
// transport is an http.Transport; otherwise it overwrites the transport with
// a clone of the default transport plus the certs.
func addCertToTransport(hc *http.Client, certPool *x509.CertPool) {
trans, ok := hc.Transport.(*http.Transport)
if !ok {
trans = http.DefaultTransport.(*http.Transport).Clone()
}
trans.TLSClientConfig = &tls.Config{
RootCAs: certPool,
}
}
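
A minimal in-package sketch of the GDCH flow above; the audience is a placeholder and the credentials file is assumed to have been parsed elsewhere with the fields NewTokenProvider expects.

package gdch

import (
	"context"
	"net/http"

	"cloud.google.com/go/auth/internal/credsfile"
)

// exampleSketch builds a provider from a parsed GDCH service-account file and
// requests a token. f must carry FormatVersion "1", Project, Name, PrivateKey,
// PrivateKeyID, CertPath and TokenURL, as used by NewTokenProvider above.
func exampleSketch(ctx context.Context, f *credsfile.GDCHServiceAccountFile) error {
	tp, err := NewTokenProvider(f, &Options{
		STSAudience: "https://service.example.com", // placeholder audience
		Client:      http.DefaultClient,
	})
	if err != nil {
		return err
	}
	_, err = tp.Token(ctx) // signs the JWS and exchanges it at f.TokenURL
	return err
}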

View file

@ -0,0 +1,151 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package impersonate
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"time"
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/internal"
)
const (
defaultTokenLifetime = "3600s"
authHeaderKey = "Authorization"
)
// generateAccessTokenReq is the request body used for service account impersonation.
type generateAccessTokenReq struct {
Delegates []string `json:"delegates,omitempty"`
Lifetime string `json:"lifetime,omitempty"`
Scope []string `json:"scope,omitempty"`
}
type impersonateTokenResponse struct {
AccessToken string `json:"accessToken"`
ExpireTime string `json:"expireTime"`
}
// NewTokenProvider uses a source credential, stored in Options.Tp, to request an access token to the provided URL.
// Scopes can be defined when the access token is requested.
func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
if err := opts.validate(); err != nil {
return nil, err
}
return opts, nil
}
// Options for [NewTokenProvider].
type Options struct {
// Tp is the source credential used to generate a token on the
// impersonated service account. Required.
Tp auth.TokenProvider
// URL is the endpoint to call to generate a token
// on behalf of the service account. Required.
URL string
// Scopes that the impersonated credential should have. Required.
Scopes []string
// Delegates are the service account email addresses in a delegation chain.
// Each service account must be granted roles/iam.serviceAccountTokenCreator
// on the next service account in the chain. Optional.
Delegates []string
// TokenLifetimeSeconds is the number of seconds the impersonation token will
// be valid for. Defaults to 1 hour if unset. Optional.
TokenLifetimeSeconds int
// Client configures the underlying client used to make network requests
// when fetching tokens. Required.
Client *http.Client
}
func (o *Options) validate() error {
if o.Tp == nil {
return errors.New("credentials: missing required 'source_credentials' field in impersonated credentials")
}
if o.URL == "" {
return errors.New("credentials: missing required 'service_account_impersonation_url' field in impersonated credentials")
}
return nil
}
// Token performs the exchange to get a temporary service account token to allow access to GCP.
func (o *Options) Token(ctx context.Context) (*auth.Token, error) {
lifetime := defaultTokenLifetime
if o.TokenLifetimeSeconds != 0 {
lifetime = fmt.Sprintf("%ds", o.TokenLifetimeSeconds)
}
reqBody := generateAccessTokenReq{
Lifetime: lifetime,
Scope: o.Scopes,
Delegates: o.Delegates,
}
b, err := json.Marshal(reqBody)
if err != nil {
return nil, fmt.Errorf("credentials: unable to marshal request: %w", err)
}
req, err := http.NewRequestWithContext(ctx, "POST", o.URL, bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("credentials: unable to create impersonation request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
if err := setAuthHeader(ctx, o.Tp, req); err != nil {
return nil, err
}
resp, err := o.Client.Do(req)
if err != nil {
return nil, fmt.Errorf("credentials: unable to generate access token: %w", err)
}
defer resp.Body.Close()
body, err := internal.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("credentials: unable to read body: %w", err)
}
if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
return nil, fmt.Errorf("credentials: status code %d: %s", c, body)
}
var accessTokenResp impersonateTokenResponse
if err := json.Unmarshal(body, &accessTokenResp); err != nil {
return nil, fmt.Errorf("credentials: unable to parse response: %w", err)
}
expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime)
if err != nil {
return nil, fmt.Errorf("credentials: unable to parse expiry: %w", err)
}
return &auth.Token{
Value: accessTokenResp.AccessToken,
Expiry: expiry,
Type: internal.TokenTypeBearer,
}, nil
}
func setAuthHeader(ctx context.Context, tp auth.TokenProvider, r *http.Request) error {
t, err := tp.Token(ctx)
if err != nil {
return err
}
typ := t.Type
if typ == "" {
typ = internal.TokenTypeBearer
}
r.Header.Set(authHeaderKey, typ+" "+t.Value)
return nil
}
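
A minimal in-package sketch of the impersonation flow above; the impersonation URL and scope are placeholders.

package impersonate

import (
	"context"
	"net/http"

	"cloud.google.com/go/auth"
)

// exampleSketch exchanges a base credential for a short-lived token of an
// impersonated service account using the Options type defined above.
func exampleSketch(ctx context.Context, base auth.TokenProvider) (*auth.Token, error) {
	tp, err := NewTokenProvider(&Options{
		Tp:     base,
		URL:    "https://iamcredentials.example.com/v1/projects/-/serviceAccounts/sa@example.iam.gserviceaccount.com:generateAccessToken", // placeholder
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
		Client: http.DefaultClient,
	})
	if err != nil {
		return nil, err
	}
	return tp.Token(ctx)
}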

View file

@ -0,0 +1,167 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stsexchange
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/internal"
)
const (
// GrantType for a sts exchange.
GrantType = "urn:ietf:params:oauth:grant-type:token-exchange"
// TokenType for a sts exchange.
TokenType = "urn:ietf:params:oauth:token-type:access_token"
jwtTokenType = "urn:ietf:params:oauth:token-type:jwt"
)
// Options stores the configuration for making an sts exchange request.
type Options struct {
Client *http.Client
Endpoint string
Request *TokenRequest
Authentication ClientAuthentication
Headers http.Header
// ExtraOpts are optional fields marshalled into the `options` field of the
// request body.
ExtraOpts map[string]interface{}
RefreshToken string
}
// RefreshAccessToken performs the token exchange using a refresh token flow.
func RefreshAccessToken(ctx context.Context, opts *Options) (*TokenResponse, error) {
data := url.Values{}
data.Set("grant_type", "refresh_token")
data.Set("refresh_token", opts.RefreshToken)
return doRequest(ctx, opts, data)
}
// ExchangeToken performs an oauth2 token exchange with the provided endpoint.
func ExchangeToken(ctx context.Context, opts *Options) (*TokenResponse, error) {
data := url.Values{}
data.Set("audience", opts.Request.Audience)
data.Set("grant_type", GrantType)
data.Set("requested_token_type", TokenType)
data.Set("subject_token_type", opts.Request.SubjectTokenType)
data.Set("subject_token", opts.Request.SubjectToken)
data.Set("scope", strings.Join(opts.Request.Scope, " "))
if opts.ExtraOpts != nil {
opts, err := json.Marshal(opts.ExtraOpts)
if err != nil {
return nil, fmt.Errorf("credentials: failed to marshal additional options: %w", err)
}
data.Set("options", string(opts))
}
return doRequest(ctx, opts, data)
}
func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenResponse, error) {
opts.Authentication.InjectAuthentication(data, opts.Headers)
encodedData := data.Encode()
req, err := http.NewRequestWithContext(ctx, "POST", opts.Endpoint, strings.NewReader(encodedData))
if err != nil {
return nil, fmt.Errorf("credentials: failed to properly build http request: %w", err)
}
for key, list := range opts.Headers {
for _, val := range list {
req.Header.Add(key, val)
}
}
req.Header.Set("Content-Length", strconv.Itoa(len(encodedData)))
resp, err := opts.Client.Do(req)
if err != nil {
return nil, fmt.Errorf("credentials: invalid response from Secure Token Server: %w", err)
}
defer resp.Body.Close()
body, err := internal.ReadAll(resp.Body)
if err != nil {
return nil, err
}
if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices {
return nil, fmt.Errorf("credentials: status code %d: %s", c, body)
}
var stsResp TokenResponse
if err := json.Unmarshal(body, &stsResp); err != nil {
return nil, fmt.Errorf("credentials: failed to unmarshal response body from Secure Token Server: %w", err)
}
return &stsResp, nil
}
// TokenRequest contains fields necessary to make an oauth2 token
// exchange.
type TokenRequest struct {
ActingParty struct {
ActorToken string
ActorTokenType string
}
GrantType string
Resource string
Audience string
Scope []string
RequestedTokenType string
SubjectToken string
SubjectTokenType string
}
// TokenResponse is used to decode the remote server response during
// an oauth2 token exchange.
type TokenResponse struct {
AccessToken string `json:"access_token"`
IssuedTokenType string `json:"issued_token_type"`
TokenType string `json:"token_type"`
ExpiresIn int `json:"expires_in"`
Scope string `json:"scope"`
RefreshToken string `json:"refresh_token"`
}
// ClientAuthentication represents an OAuth client ID and secret and the
// mechanism for passing these credentials as stated in rfc6749#2.3.1.
type ClientAuthentication struct {
AuthStyle auth.Style
ClientID string
ClientSecret string
}
// InjectAuthentication is used to add authentication to a Secure Token Service
// exchange request. It modifies either the passed url.Values or http.Header
// depending on the desired authentication format.
func (c *ClientAuthentication) InjectAuthentication(values url.Values, headers http.Header) {
if c.ClientID == "" || c.ClientSecret == "" || values == nil || headers == nil {
return
}
switch c.AuthStyle {
case auth.StyleInHeader:
plainHeader := c.ClientID + ":" + c.ClientSecret
headers.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(plainHeader)))
default:
values.Set("client_id", c.ClientID)
values.Set("client_secret", c.ClientSecret)
}
}
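
A minimal in-package sketch of an STS exchange using the helpers above; endpoint, audience, subject token and client secrets are placeholders.

package stsexchange

import (
	"context"
	"net/http"

	"cloud.google.com/go/auth"
)

// exampleExchangeSketch performs a basic OAuth 2.0 token exchange with
// client-secret authentication passed in the Authorization header.
func exampleExchangeSketch(ctx context.Context) (*TokenResponse, error) {
	headers := make(http.Header)
	headers.Set("Content-Type", "application/x-www-form-urlencoded")
	return ExchangeToken(ctx, &Options{
		Client:   http.DefaultClient,
		Endpoint: "https://sts.example.com/v1/token", // placeholder endpoint
		Request: &TokenRequest{
			Audience:         "//iam.example.com/projects/123/pools/POOL/providers/PROVIDER",
			SubjectToken:     "SUBJECT_TOKEN",
			SubjectTokenType: jwtTokenType,
			Scope:            []string{"https://www.googleapis.com/auth/cloud-platform"},
		},
		Authentication: ClientAuthentication{
			AuthStyle:    auth.StyleInHeader,
			ClientID:     "CLIENT_ID",
			ClientSecret: "CLIENT_SECRET",
		},
		Headers: headers,
	})
}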

View file

@ -0,0 +1,81 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package credentials
import (
"context"
"crypto/rsa"
"fmt"
"strings"
"time"
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/credsfile"
"cloud.google.com/go/auth/internal/jwt"
)
var (
// for testing
now func() time.Time = time.Now
)
// configureSelfSignedJWT uses the private key in the service account to create
// a JWT without making a network call.
func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
pk, err := internal.ParseKey([]byte(f.PrivateKey))
if err != nil {
return nil, fmt.Errorf("credentials: could not parse key: %w", err)
}
return &selfSignedTokenProvider{
email: f.ClientEmail,
audience: opts.Audience,
scopes: opts.scopes(),
pk: pk,
pkID: f.PrivateKeyID,
}, nil
}
type selfSignedTokenProvider struct {
email string
audience string
scopes []string
pk *rsa.PrivateKey
pkID string
}
func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) {
iat := now()
exp := iat.Add(time.Hour)
scope := strings.Join(tp.scopes, " ")
c := &jwt.Claims{
Iss: tp.email,
Sub: tp.email,
Aud: tp.audience,
Scope: scope,
Iat: iat.Unix(),
Exp: exp.Unix(),
}
h := &jwt.Header{
Algorithm: jwt.HeaderAlgRSA256,
Type: jwt.HeaderType,
KeyID: string(tp.pkID),
}
msg, err := jwt.EncodeJWS(h, c, tp.pk)
if err != nil {
return nil, fmt.Errorf("credentials: could not encode JWT: %w", err)
}
return &auth.Token{Value: msg, Type: internal.TokenTypeBearer, Expiry: exp}, nil
}
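
A minimal in-package sketch of the self-signed JWT path above; the scope is a placeholder and the service-account file is assumed to be parsed elsewhere with a valid PrivateKey.

package credentials

import (
	"context"

	"cloud.google.com/go/auth/internal/credsfile"
)

// exampleSelfSignedSketch mints a token provider directly from a parsed
// service-account file; no network call is made when Token is invoked.
func exampleSelfSignedSketch(ctx context.Context, f *credsfile.ServiceAccountFile) error {
	tp, err := configureSelfSignedJWT(f, &DetectOptions{
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	if err != nil {
		return err
	}
	_, err = tp.Token(ctx) // signs locally using f.PrivateKey
	return err
}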

View file

@ -0,0 +1,62 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build linux
// +build linux
package grpctransport
import (
"context"
"net"
"syscall"
"google.golang.org/grpc"
)
const (
// tcpUserTimeoutMilliseconds is the default TCP_USER_TIMEOUT socket option
// value: 20 seconds, expressed in milliseconds.
tcpUserTimeoutMilliseconds = 20000
// Copied from golang.org/x/sys/unix.TCP_USER_TIMEOUT.
tcpUserTimeoutOp = 0x12
)
func init() {
// timeoutDialerOption is a grpc.DialOption that contains dialer with
// socket option TCP_USER_TIMEOUT. This dialer requires go versions 1.11+.
timeoutDialerOption = grpc.WithContextDialer(dialTCPUserTimeout)
}
func dialTCPUserTimeout(ctx context.Context, addr string) (net.Conn, error) {
control := func(network, address string, c syscall.RawConn) error {
var syscallErr error
controlErr := c.Control(func(fd uintptr) {
syscallErr = syscall.SetsockoptInt(
int(fd), syscall.IPPROTO_TCP, tcpUserTimeoutOp, tcpUserTimeoutMilliseconds)
})
if syscallErr != nil {
return syscallErr
}
if controlErr != nil {
return controlErr
}
return nil
}
d := &net.Dialer{
Control: control,
}
return d.DialContext(ctx, "tcp", addr)
}
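
The same socket tweak can be sketched with the standard library alone (Linux-only, mirroring the build tag above); the target address is a placeholder.

package main

import (
	"context"
	"net"
	"syscall"
)

// dialWithUserTimeout sets TCP_USER_TIMEOUT (option 0x12) on the raw socket
// before dialing, so a dead peer is detected after ~20s rather than relying
// on the much longer kernel defaults.
func dialWithUserTimeout(ctx context.Context, addr string) (net.Conn, error) {
	d := &net.Dialer{
		Control: func(network, address string, c syscall.RawConn) error {
			var sockErr error
			if err := c.Control(func(fd uintptr) {
				sockErr = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, 0x12, 20000)
			}); err != nil {
				return err
			}
			return sockErr
		},
	}
	return d.DialContext(ctx, "tcp", addr)
}

func main() {
	if conn, err := dialWithUserTimeout(context.Background(), "example.com:443"); err == nil {
		conn.Close()
	}
}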

View file

@ -0,0 +1,123 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grpctransport
import (
"context"
"net"
"os"
"strconv"
"strings"
"cloud.google.com/go/auth"
"cloud.google.com/go/compute/metadata"
"google.golang.org/grpc"
grpcgoogle "google.golang.org/grpc/credentials/google"
)
func isDirectPathEnabled(endpoint string, opts *Options) bool {
if opts.InternalOptions != nil && !opts.InternalOptions.EnableDirectPath {
return false
}
if !checkDirectPathEndPoint(endpoint) {
return false
}
if b, _ := strconv.ParseBool(os.Getenv(disableDirectPathEnvVar)); b {
return false
}
return true
}
func checkDirectPathEndPoint(endpoint string) bool {
// Only [dns:///]host[:port] is supported, not other schemes (e.g., "tcp://" or "unix://").
// Also don't try direct path if the user has chosen an alternate name resolver
// (i.e., via ":///" prefix).
if strings.Contains(endpoint, "://") && !strings.HasPrefix(endpoint, "dns:///") {
return false
}
if endpoint == "" {
return false
}
return true
}
func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool {
if tp == nil {
return false
}
tok, err := tp.Token(context.Background())
if err != nil {
return false
}
if tok == nil {
return false
}
if source, _ := tok.Metadata["auth.google.tokenSource"].(string); source != "compute-metadata" {
return false
}
if acct, _ := tok.Metadata["auth.google.serviceAccount"].(string); acct != "default" {
return false
}
return true
}
func isDirectPathXdsUsed(o *Options) bool {
// Method 1: Enable DirectPath xDS by env;
if b, _ := strconv.ParseBool(os.Getenv(enableDirectPathXdsEnvVar)); b {
return true
}
// Method 2: Enable DirectPath xDS by option;
if o.InternalOptions != nil && o.InternalOptions.EnableDirectPathXds {
return true
}
return false
}
// configureDirectPath returns some dial options and an endpoint to use if the
// configuration allows the use of direct path. If it does not, the provided
// grpcOpts and endpoint are returned.
func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint string, creds *auth.Credentials) ([]grpc.DialOption, string) {
if isDirectPathEnabled(endpoint, opts) && metadata.OnGCE() && isTokenProviderDirectPathCompatible(creds, opts) {
// Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates.
grpcOpts = []grpc.DialOption{
grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{PerRPCCreds: &grpcCredentialsProvider{creds: creds}}))}
if timeoutDialerOption != nil {
grpcOpts = append(grpcOpts, timeoutDialerOption)
}
// Check if google-c2p resolver is enabled for DirectPath
if isDirectPathXdsUsed(opts) {
// google-c2p resolver target must not have a port number
if addr, _, err := net.SplitHostPort(endpoint); err == nil {
endpoint = "google-c2p:///" + addr
} else {
endpoint = "google-c2p:///" + endpoint
}
} else {
if !strings.HasPrefix(endpoint, "dns:///") {
endpoint = "dns:///" + endpoint
}
grpcOpts = append(grpcOpts,
// For now all DirectPath go clients will be using the following lb config, but in future
// when different services need different configs, then we should change this to a
// per-service config.
grpc.WithDisableServiceConfig(),
grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`))
}
// TODO: add support for system parameters (quota project, request reason) via chained interceptor.
}
return grpcOpts, endpoint
}
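
An in-package sketch of the endpoint rules enforced by checkDirectPathEndPoint above; the concrete endpoints are examples only.

package grpctransport

import "testing"

func TestCheckDirectPathEndPointSketch(t *testing.T) {
	cases := map[string]bool{
		"pubsub.googleapis.com:443":        true,  // plain host:port is accepted
		"dns:///pubsub.googleapis.com:443": true,  // explicit dns scheme is accepted
		"unix:///tmp/socket":               false, // other schemes are rejected
		"":                                 false, // empty endpoint is rejected
	}
	for ep, want := range cases {
		if got := checkDirectPathEndPoint(ep); got != want {
			t.Errorf("checkDirectPathEndPoint(%q) = %v, want %v", ep, got, want)
		}
	}
}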

View file

@ -0,0 +1,323 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grpctransport
import (
"context"
"errors"
"fmt"
"net/http"
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/credentials"
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/transport"
"go.opencensus.io/plugin/ocgrpc"
"google.golang.org/grpc"
grpccreds "google.golang.org/grpc/credentials"
grpcinsecure "google.golang.org/grpc/credentials/insecure"
)
const (
// Check env to disable DirectPath traffic.
disableDirectPathEnvVar = "GOOGLE_CLOUD_DISABLE_DIRECT_PATH"
// Check env to decide if using google-c2p resolver for DirectPath traffic.
enableDirectPathXdsEnvVar = "GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS"
quotaProjectHeaderKey = "X-Goog-User-Project"
)
var (
// Set at init time by dial_socketopt.go. If nil, socketopt is not supported.
timeoutDialerOption grpc.DialOption
)
// Options used to configure a [GRPCClientConnPool] from [Dial].
type Options struct {
// DisableTelemetry disables default telemetry (OpenCensus). An example
// reason to do so would be to bind custom telemetry that overrides the
// defaults.
DisableTelemetry bool
// DisableAuthentication specifies that no authentication should be used. It
// is suitable only for testing and for accessing public resources, like
// public Google Cloud Storage buckets.
DisableAuthentication bool
// Endpoint overrides the default endpoint to be used for a service.
Endpoint string
// Metadata is extra gRPC metadata that will be appended to every outgoing
// request.
Metadata map[string]string
// GRPCDialOpts are dial options that will be passed to `grpc.Dial` when
// establishing a `grpc.ClientConn`.
GRPCDialOpts []grpc.DialOption
// PoolSize specifies how many connections to balance between when making
// requests. If unset or less than 1, the value defaults to 1.
PoolSize int
// Credentials used to add Authorization metadata to all requests. If set
// DetectOpts are ignored.
Credentials *auth.Credentials
// DetectOpts configures settings for detecting Application Default
// Credentials.
DetectOpts *credentials.DetectOptions
// UniverseDomain is the default service domain for a given Cloud universe.
// The default value is "googleapis.com". This is the universe domain
// configured for the client, which will be compared to the universe domain
// that is separately configured for the credentials.
UniverseDomain string
// InternalOptions are NOT meant to be set directly by consumers of this
// package, they should only be set by generated client code.
InternalOptions *InternalOptions
}
// client returns the client a user set for the detect options or nil if one was
// not set.
func (o *Options) client() *http.Client {
if o.DetectOpts != nil && o.DetectOpts.Client != nil {
return o.DetectOpts.Client
}
return nil
}
func (o *Options) validate() error {
if o == nil {
return errors.New("grpctransport: opts required to be non-nil")
}
if o.InternalOptions != nil && o.InternalOptions.SkipValidation {
return nil
}
hasCreds := o.Credentials != nil ||
(o.DetectOpts != nil && len(o.DetectOpts.CredentialsJSON) > 0) ||
(o.DetectOpts != nil && o.DetectOpts.CredentialsFile != "")
if o.DisableAuthentication && hasCreds {
return errors.New("grpctransport: DisableAuthentication is incompatible with options that set or detect credentials")
}
return nil
}
func (o *Options) resolveDetectOptions() *credentials.DetectOptions {
io := o.InternalOptions
// soft-clone these so we are not updating a ref the user holds and may reuse
do := transport.CloneDetectOptions(o.DetectOpts)
// If scoped JWTs are enabled or the user provided an aud, allow self-signed JWT.
if (io != nil && io.EnableJWTWithScope) || do.Audience != "" {
do.UseSelfSignedJWT = true
}
// Only default scopes if user did not also set an audience.
if len(do.Scopes) == 0 && do.Audience == "" && io != nil && len(io.DefaultScopes) > 0 {
do.Scopes = make([]string, len(io.DefaultScopes))
copy(do.Scopes, io.DefaultScopes)
}
if len(do.Scopes) == 0 && do.Audience == "" && io != nil {
do.Audience = o.InternalOptions.DefaultAudience
}
return do
}
// InternalOptions are only meant to be set by generated client code. These are
// not meant to be set directly by consumers of this package. Configuration in
// this type is considered EXPERIMENTAL and may be removed at any time in the
// future without warning.
type InternalOptions struct {
// EnableNonDefaultSAForDirectPath overrides the default requirement for
// using the default service account for DirectPath.
EnableNonDefaultSAForDirectPath bool
// EnableDirectPath overrides the default attempt to use DirectPath.
EnableDirectPath bool
// EnableDirectPathXds overrides the default DirectPath type. It is only
// valid when DirectPath is enabled.
EnableDirectPathXds bool
// EnableJWTWithScope specifies if scope can be used with self-signed JWT.
EnableJWTWithScope bool
// DefaultAudience specifies a default audience to be used as the audience
// field ("aud") for the JWT token authentication.
DefaultAudience string
// DefaultEndpointTemplate combined with UniverseDomain specifies
// the default endpoint.
DefaultEndpointTemplate string
// DefaultMTLSEndpoint specifies the default mTLS endpoint.
DefaultMTLSEndpoint string
// DefaultScopes specifies the default OAuth2 scopes to be used for a
// service.
DefaultScopes []string
// SkipValidation bypasses validation on Options. It should only be used
// internally for clients that need more control over their transport.
SkipValidation bool
}
// Dial returns a GRPCClientConnPool that can be used to communicate with a
// Google cloud service, configured with the provided [Options]. It
// automatically appends Authorization metadata to all outgoing requests.
func Dial(ctx context.Context, secure bool, opts *Options) (GRPCClientConnPool, error) {
if err := opts.validate(); err != nil {
return nil, err
}
if opts.PoolSize <= 1 {
conn, err := dial(ctx, secure, opts)
if err != nil {
return nil, err
}
return &singleConnPool{conn}, nil
}
pool := &roundRobinConnPool{}
for i := 0; i < opts.PoolSize; i++ {
conn, err := dial(ctx, secure, opts)
if err != nil {
// ignore close error, if any
defer pool.Close()
return nil, err
}
pool.conns = append(pool.conns, conn)
}
return pool, nil
}
// dial creates a single *grpc.ClientConn; Dial wraps one or more of these into a GRPCClientConnPool.
func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, error) {
tOpts := &transport.Options{
Endpoint: opts.Endpoint,
Client: opts.client(),
UniverseDomain: opts.UniverseDomain,
}
if io := opts.InternalOptions; io != nil {
tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate
tOpts.DefaultMTLSEndpoint = io.DefaultMTLSEndpoint
}
transportCreds, endpoint, err := transport.GetGRPCTransportCredsAndEndpoint(tOpts)
if err != nil {
return nil, err
}
if !secure {
transportCreds = grpcinsecure.NewCredentials()
}
// Initialize gRPC dial options with transport-level security options.
grpcOpts := []grpc.DialOption{
grpc.WithTransportCredentials(transportCreds),
}
// Authentication can only be sent when communicating over a secure connection.
if !opts.DisableAuthentication {
metadata := opts.Metadata
creds, err := credentials.DetectDefault(opts.resolveDetectOptions())
if err != nil {
return nil, err
}
if opts.Credentials != nil {
creds = opts.Credentials
}
qp, err := creds.QuotaProjectID(ctx)
if err != nil {
return nil, err
}
if qp != "" {
if metadata == nil {
metadata = make(map[string]string, 1)
}
metadata[quotaProjectHeaderKey] = qp
}
grpcOpts = append(grpcOpts,
grpc.WithPerRPCCredentials(&grpcCredentialsProvider{
creds: creds,
metadata: metadata,
clientUniverseDomain: opts.UniverseDomain,
}),
)
// Attempt Direct Path
grpcOpts, endpoint = configureDirectPath(grpcOpts, opts, endpoint, creds)
}
// Add tracing, but before the other options, so that clients can override the
// gRPC stats handler.
// This assumes that gRPC options are processed in order, left to right.
grpcOpts = addOCStatsHandler(grpcOpts, opts)
grpcOpts = append(grpcOpts, opts.GRPCDialOpts...)
return grpc.DialContext(ctx, endpoint, grpcOpts...)
}
// grpcCredentialsProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials.
type grpcCredentialsProvider struct {
creds *auth.Credentials
secure bool
// Additional metadata attached as headers.
metadata map[string]string
clientUniverseDomain string
}
// getClientUniverseDomain returns the default service domain for a given Cloud universe.
// The default value is "googleapis.com". This is the universe domain
// configured for the client, which will be compared to the universe domain
// that is separately configured for the credentials.
func (c *grpcCredentialsProvider) getClientUniverseDomain() string {
if c.clientUniverseDomain == "" {
return internal.DefaultUniverseDomain
}
return c.clientUniverseDomain
}
func (c *grpcCredentialsProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
credentialsUniverseDomain, err := c.creds.UniverseDomain(ctx)
if err != nil {
return nil, err
}
if err := transport.ValidateUniverseDomain(c.getClientUniverseDomain(), credentialsUniverseDomain); err != nil {
return nil, err
}
token, err := c.creds.Token(ctx)
if err != nil {
return nil, err
}
if c.secure {
ri, _ := grpccreds.RequestInfoFromContext(ctx)
if err = grpccreds.CheckSecurityLevel(ri.AuthInfo, grpccreds.PrivacyAndIntegrity); err != nil {
return nil, fmt.Errorf("unable to transfer credentials PerRPCCredentials: %v", err)
}
}
metadata := make(map[string]string, len(c.metadata)+1)
setAuthMetadata(token, metadata)
for k, v := range c.metadata {
metadata[k] = v
}
return metadata, nil
}
// setAuthMetadata uses the provided token to set the Authorization metadata.
// If the token.Type is empty, the type is assumed to be Bearer.
func setAuthMetadata(token *auth.Token, m map[string]string) {
typ := token.Type
if typ == "" {
typ = internal.TokenTypeBearer
}
m["authorization"] = typ + " " + token.Value
}
func (c *grpcCredentialsProvider) RequireTransportSecurity() bool {
return c.secure
}
func addOCStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption {
if opts.DisableTelemetry {
return dialOpts
}
return append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
}
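
For context, a minimal usage sketch of the Dial API vendored above; the endpoint, pool size and scope are placeholders, and Application Default Credentials are assumed to be available in the environment.

package main

import (
	"context"
	"log"

	"cloud.google.com/go/auth/credentials"
	"cloud.google.com/go/auth/grpctransport"
)

func main() {
	ctx := context.Background()
	pool, err := grpctransport.Dial(ctx, true, &grpctransport.Options{
		Endpoint: "pubsub.googleapis.com:443", // placeholder service endpoint
		PoolSize: 4,
		DetectOpts: &credentials.DetectOptions{
			Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()
	// pool implements grpc.ClientConnInterface, so it can back a generated
	// gRPC client directly.
	_ = pool.Connection()
}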

119
vendor/cloud.google.com/go/auth/grpctransport/pool.go generated vendored Normal file
View file

@ -0,0 +1,119 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grpctransport
import (
"context"
"fmt"
"sync/atomic"
"google.golang.org/grpc"
)
// GRPCClientConnPool is an interface that satisfies
// [google.golang.org/grpc.ClientConnInterface] and has some utility functions
// that are needed for connection lifecycle when used in a client library. It
// may be a pool or a single connection. This interface is not intended to, and
// can't be, implemented by others.
type GRPCClientConnPool interface {
// Connection returns a [google.golang.org/grpc.ClientConn] from the pool.
//
// ClientConn aren't returned to the pool and should not be closed directly.
Connection() *grpc.ClientConn
// Len returns the number of connections in the pool. It will always return
// the same value.
Len() int
// Close closes every ClientConn in the pool. The error returned by Close
// may be a single error or multiple errors.
Close() error
grpc.ClientConnInterface
// private ensure others outside this package can't implement this type
private()
}
// singleConnPool is a special case for a single connection.
type singleConnPool struct {
*grpc.ClientConn
}
func (p *singleConnPool) Connection() *grpc.ClientConn { return p.ClientConn }
func (p *singleConnPool) Len() int { return 1 }
func (p *singleConnPool) private() {}
type roundRobinConnPool struct {
conns []*grpc.ClientConn
idx uint32 // access via sync/atomic
}
func (p *roundRobinConnPool) Len() int {
return len(p.conns)
}
func (p *roundRobinConnPool) Connection() *grpc.ClientConn {
i := atomic.AddUint32(&p.idx, 1)
return p.conns[i%uint32(len(p.conns))]
}
func (p *roundRobinConnPool) Close() error {
var errs multiError
for _, conn := range p.conns {
if err := conn.Close(); err != nil {
errs = append(errs, err)
}
}
if len(errs) == 0 {
return nil
}
return errs
}
func (p *roundRobinConnPool) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...grpc.CallOption) error {
return p.Connection().Invoke(ctx, method, args, reply, opts...)
}
func (p *roundRobinConnPool) NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
return p.Connection().NewStream(ctx, desc, method, opts...)
}
func (p *roundRobinConnPool) private() {}
// multiError represents errors from multiple conns in the group.
type multiError []error
func (m multiError) Error() string {
s, n := "", 0
for _, e := range m {
if e != nil {
if n == 0 {
s = e.Error()
}
n++
}
}
switch n {
case 0:
return "(0 errors)"
case 1:
return s
case 2:
return s + " (and 1 other error)"
}
return fmt.Sprintf("%s (and %d other errors)", s, n-1)
}
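
An in-package sketch of the round-robin rotation above; the nil connections only exercise the index logic, real pools come from Dial.

package grpctransport

import (
	"testing"

	"google.golang.org/grpc"
)

func TestRoundRobinRotationSketch(t *testing.T) {
	p := &roundRobinConnPool{conns: make([]*grpc.ClientConn, 3)}
	if p.Len() != 3 {
		t.Fatalf("Len() = %d, want 3", p.Len())
	}
	// Six calls walk the three slots twice; the atomic counter never resets.
	for i := 0; i < 6; i++ {
		_ = p.Connection()
	}
}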

View file

@ -0,0 +1,201 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httptransport
import (
"crypto/tls"
"errors"
"fmt"
"net/http"
"cloud.google.com/go/auth"
detect "cloud.google.com/go/auth/credentials"
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/transport"
)
// ClientCertProvider is a function that returns a TLS client certificate to be
// used when opening TLS connections. It follows the same semantics as
// [crypto/tls.Config.GetClientCertificate].
type ClientCertProvider = func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
// Options used to configure a [net/http.Client] from [NewClient].
type Options struct {
// DisableTelemetry disables default telemetry (OpenCensus). An example
// reason to do so would be to bind custom telemetry that overrides the
// defaults.
DisableTelemetry bool
// DisableAuthentication specifies that no authentication should be used. It
// is suitable only for testing and for accessing public resources, like
// public Google Cloud Storage buckets.
DisableAuthentication bool
// Headers are extra HTTP headers that will be appended to every outgoing
// request.
Headers http.Header
// Endpoint overrides the default endpoint to be used for a service.
Endpoint string
// APIKey specifies an API key to be used as the basis for authentication.
// If set DetectOpts are ignored.
APIKey string
// Credentials used to add Authorization header to all requests. If set
// DetectOpts are ignored.
Credentials *auth.Credentials
// ClientCertProvider is a function that returns a TLS client certificate to
// be used when opening TLS connections. It follows the same semantics as
// crypto/tls.Config.GetClientCertificate.
ClientCertProvider ClientCertProvider
// DetectOpts configures settings for detecting Application Default
// Credentials.
DetectOpts *detect.DetectOptions
// UniverseDomain is the default service domain for a given Cloud universe.
// The default value is "googleapis.com". This is the universe domain
// configured for the client, which will be compared to the universe domain
// that is separately configured for the credentials.
UniverseDomain string
// InternalOptions are NOT meant to be set directly by consumers of this
// package, they should only be set by generated client code.
InternalOptions *InternalOptions
}
func (o *Options) validate() error {
if o == nil {
return errors.New("httptransport: opts required to be non-nil")
}
if o.InternalOptions != nil && o.InternalOptions.SkipValidation {
return nil
}
hasCreds := o.APIKey != "" ||
o.Credentials != nil ||
(o.DetectOpts != nil && len(o.DetectOpts.CredentialsJSON) > 0) ||
(o.DetectOpts != nil && o.DetectOpts.CredentialsFile != "")
if o.DisableAuthentication && hasCreds {
return errors.New("httptransport: DisableAuthentication is incompatible with options that set or detect credentials")
}
return nil
}
// client returns the client a user set for the detect options or nil if one was
// not set.
func (o *Options) client() *http.Client {
if o.DetectOpts != nil && o.DetectOpts.Client != nil {
return o.DetectOpts.Client
}
return nil
}
func (o *Options) resolveDetectOptions() *detect.DetectOptions {
io := o.InternalOptions
// soft-clone these so we are not updating a ref the user holds and may reuse
do := transport.CloneDetectOptions(o.DetectOpts)
// If scoped JWTs are enabled or the user provided an aud, allow self-signed JWT.
if (io != nil && io.EnableJWTWithScope) || do.Audience != "" {
do.UseSelfSignedJWT = true
}
// Only default scopes if user did not also set an audience.
if len(do.Scopes) == 0 && do.Audience == "" && io != nil && len(io.DefaultScopes) > 0 {
do.Scopes = make([]string, len(io.DefaultScopes))
copy(do.Scopes, io.DefaultScopes)
}
if len(do.Scopes) == 0 && do.Audience == "" && io != nil {
do.Audience = o.InternalOptions.DefaultAudience
}
return do
}
// InternalOptions are only meant to be set by generated client code. These are
// not meant to be set directly by consumers of this package. Configuration in
// this type is considered EXPERIMENTAL and may be removed at any time in the
// future without warning.
type InternalOptions struct {
// EnableJWTWithScope specifies if scope can be used with self-signed JWT.
EnableJWTWithScope bool
// DefaultAudience specifies a default audience to be used as the audience
// field ("aud") for the JWT token authentication.
DefaultAudience string
// DefaultEndpointTemplate combined with UniverseDomain specifies the
// default endpoint.
DefaultEndpointTemplate string
// DefaultMTLSEndpoint specifies the default mTLS endpoint.
DefaultMTLSEndpoint string
// DefaultScopes specifies the default OAuth2 scopes to be used for a
// service.
DefaultScopes []string
// SkipValidation bypasses validation on Options. It should only be used
// internally for clients that need more control over their transport.
SkipValidation bool
}
// AddAuthorizationMiddleware adds a middleware to the provided client's
// transport that sets the Authorization header with the value produced by the
// provided [cloud.google.com/go/auth.Credentials]. An error is returned only
// if client or creds is nil.
func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) error {
if client == nil || creds == nil {
return fmt.Errorf("httptransport: client and tp must not be nil")
}
base := client.Transport
if base == nil {
base = http.DefaultTransport.(*http.Transport).Clone()
}
client.Transport = &authTransport{
creds: creds,
base: base,
// TODO(quartzmo): Somehow set clientUniverseDomain from impersonate calls.
}
return nil
}
// NewClient returns a [net/http.Client] that can be used to communicate with a
// Google cloud service, configured with the provided [Options]. It
// automatically appends Authorization headers to all outgoing requests.
func NewClient(opts *Options) (*http.Client, error) {
if err := opts.validate(); err != nil {
return nil, err
}
tOpts := &transport.Options{
Endpoint: opts.Endpoint,
ClientCertProvider: opts.ClientCertProvider,
Client: opts.client(),
UniverseDomain: opts.UniverseDomain,
}
if io := opts.InternalOptions; io != nil {
tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate
tOpts.DefaultMTLSEndpoint = io.DefaultMTLSEndpoint
}
clientCertProvider, dialTLSContext, err := transport.GetHTTPTransportConfig(tOpts)
if err != nil {
return nil, err
}
trans, err := newTransport(defaultBaseTransport(clientCertProvider, dialTLSContext), opts)
if err != nil {
return nil, err
}
return &http.Client{
Transport: trans,
}, nil
}
// SetAuthHeader uses the provided token to set the Authorization header on a
// request. If the token.Type is empty, the type is assumed to be Bearer.
func SetAuthHeader(token *auth.Token, req *http.Request) {
typ := token.Type
if typ == "" {
typ = internal.TokenTypeBearer
}
req.Header.Set("Authorization", typ+" "+token.Value)
}
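
For context, a minimal usage sketch of the HTTP helpers vendored above; the URL and scope are placeholders, and Application Default Credentials are assumed to be available in the environment.

package main

import (
	"log"
	"net/http"

	"cloud.google.com/go/auth/credentials"
	"cloud.google.com/go/auth/httptransport"
)

func main() {
	// Option 1: build an authenticated client from scratch.
	client, err := httptransport.NewClient(&httptransport.Options{
		DetectOpts: &credentials.DetectOptions{
			Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.Get("https://storage.googleapis.com/storage/v1/b?project=my-project") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()

	// Option 2: attach Authorization middleware to an existing client.
	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	if err != nil {
		log.Fatal(err)
	}
	base := &http.Client{}
	if err := httptransport.AddAuthorizationMiddleware(base, creds); err != nil {
		log.Fatal(err)
	}
}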

93
vendor/cloud.google.com/go/auth/httptransport/trace.go generated vendored Normal file
View file

@ -0,0 +1,93 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httptransport
import (
"encoding/binary"
"encoding/hex"
"fmt"
"net/http"
"strconv"
"strings"
"go.opencensus.io/trace"
"go.opencensus.io/trace/propagation"
)
const (
httpHeaderMaxSize = 200
cloudTraceHeader = `X-Cloud-Trace-Context`
)
// asserts the httpFormat fulfills this foreign interface
var _ propagation.HTTPFormat = (*httpFormat)(nil)
// httpFormat implements propagation.httpFormat to propagate
// traces in HTTP headers for Google Cloud Platform and Cloud Trace.
type httpFormat struct{}
// SpanContextFromRequest extracts a Cloud Trace span context from incoming requests.
func (f *httpFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
h := req.Header.Get(cloudTraceHeader)
// See https://cloud.google.com/trace/docs/faq for the header HTTPFormat.
// Return if the header is empty or missing, or if the header is unreasonably
// large, to avoid making unnecessary copies of a large string.
if h == "" || len(h) > httpHeaderMaxSize {
return trace.SpanContext{}, false
}
// Parse the trace id field.
slash := strings.Index(h, `/`)
if slash == -1 {
return trace.SpanContext{}, false
}
tid, h := h[:slash], h[slash+1:]
buf, err := hex.DecodeString(tid)
if err != nil {
return trace.SpanContext{}, false
}
copy(sc.TraceID[:], buf)
// Parse the span id field.
spanstr := h
semicolon := strings.Index(h, `;`)
if semicolon != -1 {
spanstr, h = h[:semicolon], h[semicolon+1:]
}
sid, err := strconv.ParseUint(spanstr, 10, 64)
if err != nil {
return trace.SpanContext{}, false
}
binary.BigEndian.PutUint64(sc.SpanID[:], sid)
// Parse the options field, options field is optional.
if !strings.HasPrefix(h, "o=") {
return sc, true
}
o, err := strconv.ParseUint(h[2:], 10, 32)
if err != nil {
return trace.SpanContext{}, false
}
sc.TraceOptions = trace.TraceOptions(o)
return sc, true
}
// SpanContextToRequest modifies the given request to include a Cloud Trace header.
func (f *httpFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
sid := binary.BigEndian.Uint64(sc.SpanID[:])
header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions))
req.Header.Set(cloudTraceHeader, header)
}
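
The propagation format produced above can be reproduced with the standard library alone; a small sketch, with placeholder trace and span IDs.

package main

import (
	"fmt"
	"net/http"
)

// The header format is "X-Cloud-Trace-Context: <hex trace id>/<decimal span id>;o=<options>".
func main() {
	req, _ := http.NewRequest("GET", "https://example.googleapis.com/", nil)
	traceID := "105445aa7843bc8bf206b12000100000" // 32 hex characters
	spanID := uint64(1)
	req.Header.Set("X-Cloud-Trace-Context", fmt.Sprintf("%s/%d;o=1", traceID, spanID))
	fmt.Println(req.Header.Get("X-Cloud-Trace-Context"))
}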

View file

@ -0,0 +1,209 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httptransport
import (
"context"
"crypto/tls"
"net"
"net/http"
"time"
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/credentials"
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/transport"
"cloud.google.com/go/auth/internal/transport/cert"
"go.opencensus.io/plugin/ochttp"
"golang.org/x/net/http2"
)
const (
quotaProjectHeaderKey = "X-Goog-User-Project"
)
func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, error) {
var headers = opts.Headers
ht := &headerTransport{
base: base,
headers: headers,
}
var trans http.RoundTripper = ht
trans = addOCTransport(trans, opts)
switch {
case opts.DisableAuthentication:
// Do nothing.
case opts.APIKey != "":
qp := internal.GetQuotaProject(nil, opts.Headers.Get(quotaProjectHeaderKey))
if qp != "" {
if headers == nil {
headers = make(map[string][]string, 1)
}
headers.Set(quotaProjectHeaderKey, qp)
}
trans = &apiKeyTransport{
Transport: trans,
Key: opts.APIKey,
}
default:
creds, err := credentials.DetectDefault(opts.resolveDetectOptions())
if err != nil {
return nil, err
}
qp, err := creds.QuotaProjectID(context.Background())
if err != nil {
return nil, err
}
if qp != "" {
if headers == nil {
headers = make(map[string][]string, 1)
}
headers.Set(quotaProjectHeaderKey, qp)
}
if opts.Credentials != nil {
creds = opts.Credentials
}
creds.TokenProvider = auth.NewCachedTokenProvider(creds.TokenProvider, nil)
trans = &authTransport{
base: trans,
creds: creds,
clientUniverseDomain: opts.UniverseDomain,
}
}
return trans, nil
}
// defaultBaseTransport returns the base HTTP transport.
// On App Engine, this is urlfetch.Transport.
// Otherwise, use a default transport, taking most defaults from
// http.DefaultTransport.
// If TLSCertificate is available, set TLSClientConfig as well.
func defaultBaseTransport(clientCertSource cert.Provider, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper {
trans := http.DefaultTransport.(*http.Transport).Clone()
trans.MaxIdleConnsPerHost = 100
if clientCertSource != nil {
trans.TLSClientConfig = &tls.Config{
GetClientCertificate: clientCertSource,
}
}
if dialTLSContext != nil {
// If DialTLSContext is set, TLSClientConfig will be ignored.
trans.DialTLSContext = dialTLSContext
}
// Configures the ReadIdleTimeout HTTP/2 option for the
// transport. This allows broken idle connections to be pruned more quickly,
// preventing the client from attempting to re-use connections that will no
// longer work.
http2Trans, err := http2.ConfigureTransports(trans)
if err == nil {
http2Trans.ReadIdleTimeout = time.Second * 31
}
return trans
}
type apiKeyTransport struct {
// Key is the API Key to set on requests.
Key string
// Transport is the underlying HTTP transport.
// If nil, http.DefaultTransport is used.
Transport http.RoundTripper
}
func (t *apiKeyTransport) RoundTrip(req *http.Request) (*http.Response, error) {
newReq := *req
args := newReq.URL.Query()
args.Set("key", t.Key)
newReq.URL.RawQuery = args.Encode()
return t.Transport.RoundTrip(&newReq)
}
type headerTransport struct {
headers http.Header
base http.RoundTripper
}
func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) {
rt := t.base
newReq := *req
newReq.Header = make(http.Header)
for k, vv := range req.Header {
newReq.Header[k] = vv
}
for k, v := range t.headers {
newReq.Header[k] = v
}
return rt.RoundTrip(&newReq)
}
func addOCTransport(trans http.RoundTripper, opts *Options) http.RoundTripper {
if opts.DisableTelemetry {
return trans
}
return &ochttp.Transport{
Base: trans,
Propagation: &httpFormat{},
}
}
type authTransport struct {
creds *auth.Credentials
base http.RoundTripper
clientUniverseDomain string
}
// getClientUniverseDomain returns the universe domain configured for the client.
// The default value is "googleapis.com".
func (t *authTransport) getClientUniverseDomain() string {
if t.clientUniverseDomain == "" {
return internal.DefaultUniverseDomain
}
return t.clientUniverseDomain
}
// RoundTrip authorizes and authenticates the request with an
// access token from Transport's Source. Per the RoundTripper contract we must
// not modify the initial request, so we clone it, and we must close the body
// on any errors that happens during our token logic.
func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) {
reqBodyClosed := false
if req.Body != nil {
defer func() {
if !reqBodyClosed {
req.Body.Close()
}
}()
}
credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context())
if err != nil {
return nil, err
}
if err := transport.ValidateUniverseDomain(t.getClientUniverseDomain(), credentialsUniverseDomain); err != nil {
return nil, err
}
token, err := t.creds.Token(req.Context())
if err != nil {
return nil, err
}
req2 := req.Clone(req.Context())
SetAuthHeader(token, req2)
reqBodyClosed = true
return t.base.RoundTrip(req2)
}
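
An in-package sketch of the API-key layer above: the key is appended as a ?key= query parameter on every outgoing request; the key value and test server are assumptions.

package httptransport

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"testing"
)

func TestAPIKeyTransportSketch(t *testing.T) {
	// Echo the received key so the rewrite done by apiKeyTransport is visible.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, r.URL.Query().Get("key"))
	}))
	defer srv.Close()

	client := &http.Client{Transport: &apiKeyTransport{Key: "EXAMPLE_KEY", Transport: http.DefaultTransport}}
	resp, err := client.Get(srv.URL)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	if body, _ := io.ReadAll(resp.Body); string(body) != "EXAMPLE_KEY" {
		t.Fatalf("echoed key = %q, want EXAMPLE_KEY", body)
	}
}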

View file

@ -0,0 +1,107 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package credsfile is meant to hide implementation details from the public
// surface of the detect package. It should not import any other packages in
// this module. It is located under the main internal package so other
// sub-packages can use these parsed types as well.
package credsfile
import (
"os"
"os/user"
"path/filepath"
"runtime"
)
const (
// GoogleAppCredsEnvVar is the environment variable for setting the
// application default credentials.
GoogleAppCredsEnvVar = "GOOGLE_APPLICATION_CREDENTIALS"
userCredsFilename = "application_default_credentials.json"
)
// CredentialType represents the different credential file types Google
// credentials can be.
type CredentialType int
const (
// UnknownCredType is an unidentified file type.
UnknownCredType CredentialType = iota
// UserCredentialsKey represents a user creds file type.
UserCredentialsKey
// ServiceAccountKey represents a service account file type.
ServiceAccountKey
// ImpersonatedServiceAccountKey represents an impersonated service account
// file type.
ImpersonatedServiceAccountKey
// ExternalAccountKey represents an external account file type.
ExternalAccountKey
// GDCHServiceAccountKey represents a GDCH file type.
GDCHServiceAccountKey
// ExternalAccountAuthorizedUserKey represents an external account authorized
// user file type.
ExternalAccountAuthorizedUserKey
)
// parseCredentialType returns the associated filetype based on the parsed
// typeString provided.
func parseCredentialType(typeString string) CredentialType {
switch typeString {
case "service_account":
return ServiceAccountKey
case "authorized_user":
return UserCredentialsKey
case "impersonated_service_account":
return ImpersonatedServiceAccountKey
case "external_account":
return ExternalAccountKey
case "external_account_authorized_user":
return ExternalAccountAuthorizedUserKey
case "gdch_service_account":
return GDCHServiceAccountKey
default:
return UnknownCredType
}
}
// GetFileNameFromEnv returns the override if provided or detects a filename
// from the environment.
func GetFileNameFromEnv(override string) string {
if override != "" {
return override
}
return os.Getenv(GoogleAppCredsEnvVar)
}
// GetWellKnownFileName tries to locate the filepath for the user credential
// file based on the environment.
func GetWellKnownFileName() string {
if runtime.GOOS == "windows" {
return filepath.Join(os.Getenv("APPDATA"), "gcloud", userCredsFilename)
}
return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", userCredsFilename)
}
// guessUnixHomeDir defaults to checking for HOME, but not all unix systems have
// this set, so we have a fallback.
func guessUnixHomeDir() string {
if v := os.Getenv("HOME"); v != "" {
return v
}
if u, err := user.Current(); err == nil {
return u.HomeDir
}
return ""
}
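Illustrative only, not part of this change: a standalone sketch of the ADC file lookup order implemented above (the GOOGLE_APPLICATION_CREDENTIALS environment variable wins, otherwise the well-known gcloud location), using only the standard library.
package main
import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)
// adcFileName mirrors GetFileNameFromEnv/GetWellKnownFileName above; the
// vendored code additionally falls back to user.Current() when HOME is unset.
func adcFileName() string {
	if v := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS"); v != "" {
		return v
	}
	if runtime.GOOS == "windows" {
		return filepath.Join(os.Getenv("APPDATA"), "gcloud", "application_default_credentials.json")
	}
	return filepath.Join(os.Getenv("HOME"), ".config", "gcloud", "application_default_credentials.json")
}
func main() {
	fmt.Println("ADC file:", adcFileName())
}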


@ -0,0 +1,149 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package credsfile
import (
"encoding/json"
)
// Config3LO is the internals of a client creds file.
type Config3LO struct {
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
RedirectURIs []string `json:"redirect_uris"`
AuthURI string `json:"auth_uri"`
TokenURI string `json:"token_uri"`
}
// ClientCredentialsFile representation.
type ClientCredentialsFile struct {
Web *Config3LO `json:"web"`
Installed *Config3LO `json:"installed"`
UniverseDomain string `json:"universe_domain"`
}
// ServiceAccountFile representation.
type ServiceAccountFile struct {
Type string `json:"type"`
ProjectID string `json:"project_id"`
PrivateKeyID string `json:"private_key_id"`
PrivateKey string `json:"private_key"`
ClientEmail string `json:"client_email"`
ClientID string `json:"client_id"`
AuthURL string `json:"auth_uri"`
TokenURL string `json:"token_uri"`
UniverseDomain string `json:"universe_domain"`
}
// UserCredentialsFile representation.
type UserCredentialsFile struct {
Type string `json:"type"`
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
QuotaProjectID string `json:"quota_project_id"`
RefreshToken string `json:"refresh_token"`
UniverseDomain string `json:"universe_domain"`
}
// ExternalAccountFile representation.
type ExternalAccountFile struct {
Type string `json:"type"`
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
Audience string `json:"audience"`
SubjectTokenType string `json:"subject_token_type"`
ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"`
TokenURL string `json:"token_url"`
CredentialSource *CredentialSource `json:"credential_source,omitempty"`
TokenInfoURL string `json:"token_info_url"`
ServiceAccountImpersonation *ServiceAccountImpersonationInfo `json:"service_account_impersonation,omitempty"`
QuotaProjectID string `json:"quota_project_id"`
WorkforcePoolUserProject string `json:"workforce_pool_user_project"`
UniverseDomain string `json:"universe_domain"`
}
// ExternalAccountAuthorizedUserFile representation.
type ExternalAccountAuthorizedUserFile struct {
Type string `json:"type"`
Audience string `json:"audience"`
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
RefreshToken string `json:"refresh_token"`
TokenURL string `json:"token_url"`
TokenInfoURL string `json:"token_info_url"`
RevokeURL string `json:"revoke_url"`
QuotaProjectID string `json:"quota_project_id"`
UniverseDomain string `json:"universe_domain"`
}
// CredentialSource stores the information necessary to retrieve the credentials for the STS exchange.
//
// One field amongst File, URL, and Executable should be filled, depending on the kind of credential in question.
// The EnvironmentID should start with AWS if being used for an AWS credential.
type CredentialSource struct {
File string `json:"file"`
URL string `json:"url"`
Headers map[string]string `json:"headers"`
Executable *ExecutableConfig `json:"executable,omitempty"`
EnvironmentID string `json:"environment_id"`
RegionURL string `json:"region_url"`
RegionalCredVerificationURL string `json:"regional_cred_verification_url"`
CredVerificationURL string `json:"cred_verification_url"`
IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"`
Format *Format `json:"format,omitempty"`
}
// Format describes the format of a [CredentialSource].
type Format struct {
// Type is either "text" or "json". When not provided "text" type is assumed.
Type string `json:"type"`
// SubjectTokenFieldName is only required for JSON format. This would be "access_token" for azure.
SubjectTokenFieldName string `json:"subject_token_field_name"`
}
// ExecutableConfig represents the command to run for an executable
// [CredentialSource].
type ExecutableConfig struct {
Command string `json:"command"`
TimeoutMillis int `json:"timeout_millis"`
OutputFile string `json:"output_file"`
}
// ServiceAccountImpersonationInfo has impersonation configuration.
type ServiceAccountImpersonationInfo struct {
TokenLifetimeSeconds int `json:"token_lifetime_seconds"`
}
// ImpersonatedServiceAccountFile representation.
type ImpersonatedServiceAccountFile struct {
Type string `json:"type"`
ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"`
Delegates []string `json:"delegates"`
CredSource json.RawMessage `json:"source_credentials"`
UniverseDomain string `json:"universe_domain"`
}
// GDCHServiceAccountFile represents the Google Distributed Cloud Hosted (GDCH) service identity file.
type GDCHServiceAccountFile struct {
Type string `json:"type"`
FormatVersion string `json:"format_version"`
Project string `json:"project"`
Name string `json:"name"`
CertPath string `json:"ca_cert_path"`
PrivateKeyID string `json:"private_key_id"`
PrivateKey string `json:"private_key"`
TokenURL string `json:"token_uri"`
UniverseDomain string `json:"universe_domain"`
}


@ -0,0 +1,98 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package credsfile
import (
"encoding/json"
)
// ParseServiceAccount parses bytes into a [ServiceAccountFile].
func ParseServiceAccount(b []byte) (*ServiceAccountFile, error) {
var f *ServiceAccountFile
if err := json.Unmarshal(b, &f); err != nil {
return nil, err
}
return f, nil
}
// ParseClientCredentials parses bytes into a
// [credsfile.ClientCredentialsFile].
func ParseClientCredentials(b []byte) (*ClientCredentialsFile, error) {
var f *ClientCredentialsFile
if err := json.Unmarshal(b, &f); err != nil {
return nil, err
}
return f, nil
}
// ParseUserCredentials parses bytes into a [UserCredentialsFile].
func ParseUserCredentials(b []byte) (*UserCredentialsFile, error) {
var f *UserCredentialsFile
if err := json.Unmarshal(b, &f); err != nil {
return nil, err
}
return f, nil
}
// ParseExternalAccount parses bytes into a [ExternalAccountFile].
func ParseExternalAccount(b []byte) (*ExternalAccountFile, error) {
var f *ExternalAccountFile
if err := json.Unmarshal(b, &f); err != nil {
return nil, err
}
return f, nil
}
// ParseExternalAccountAuthorizedUser parses bytes into a
// [ExternalAccountAuthorizedUserFile].
func ParseExternalAccountAuthorizedUser(b []byte) (*ExternalAccountAuthorizedUserFile, error) {
var f *ExternalAccountAuthorizedUserFile
if err := json.Unmarshal(b, &f); err != nil {
return nil, err
}
return f, nil
}
// ParseImpersonatedServiceAccount parses bytes into a
// [ImpersonatedServiceAccountFile].
func ParseImpersonatedServiceAccount(b []byte) (*ImpersonatedServiceAccountFile, error) {
var f *ImpersonatedServiceAccountFile
if err := json.Unmarshal(b, &f); err != nil {
return nil, err
}
return f, nil
}
// ParseGDCHServiceAccount parses bytes into a [GDCHServiceAccountFile].
func ParseGDCHServiceAccount(b []byte) (*GDCHServiceAccountFile, error) {
var f *GDCHServiceAccountFile
if err := json.Unmarshal(b, &f); err != nil {
return nil, err
}
return f, nil
}
type fileTypeChecker struct {
Type string `json:"type"`
}
// ParseFileType determines the [CredentialType] based on bytes provided.
func ParseFileType(b []byte) (CredentialType, error) {
var f fileTypeChecker
if err := json.Unmarshal(b, &f); err != nil {
return 0, err
}
return parseCredentialType(f.Type), nil
}
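Illustrative only, not part of this change: a standalone sketch of the ParseFileType approach above, probing just the "type" field of a credentials JSON to decide how the rest of the file should be parsed; the sample document is fabricated.
package main
import (
	"encoding/json"
	"fmt"
)
// credType mirrors ParseFileType/fileTypeChecker above: unmarshal only the
// "type" field and dispatch on it.
func credType(b []byte) (string, error) {
	var probe struct {
		Type string `json:"type"`
	}
	if err := json.Unmarshal(b, &probe); err != nil {
		return "", err
	}
	return probe.Type, nil
}
func main() {
	sample := []byte(`{"type": "service_account", "project_id": "demo"}`) // fabricated sample
	t, err := credType(sample)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println("credential type:", t) // prints: credential type: service_account
}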

184
vendor/cloud.google.com/go/auth/internal/internal.go generated vendored Normal file

@ -0,0 +1,184 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"context"
"crypto/rsa"
"crypto/x509"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io"
"net/http"
"os"
"sync"
"time"
"cloud.google.com/go/compute/metadata"
)
const (
// TokenTypeBearer is the auth header prefix for bearer tokens.
TokenTypeBearer = "Bearer"
// QuotaProjectEnvVar is the environment variable for setting the quota
// project.
QuotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT"
projectEnvVar = "GOOGLE_CLOUD_PROJECT"
maxBodySize = 1 << 20
// DefaultUniverseDomain is the default value for universe domain.
// Universe domain is the default service domain for a given Cloud universe.
DefaultUniverseDomain = "googleapis.com"
)
// CloneDefaultClient returns a [http.Client] with some good defaults.
func CloneDefaultClient() *http.Client {
return &http.Client{
Transport: http.DefaultTransport.(*http.Transport).Clone(),
Timeout: 30 * time.Second,
}
}
// ParseKey converts the binary contents of a private key file
// to an *rsa.PrivateKey. It detects whether the private key is in a
// PEM container or not. If so, it extracts the private key
// from the PEM container before conversion. It only supports PEM
// containers with no passphrase.
func ParseKey(key []byte) (*rsa.PrivateKey, error) {
block, _ := pem.Decode(key)
if block != nil {
key = block.Bytes
}
parsedKey, err := x509.ParsePKCS8PrivateKey(key)
if err != nil {
parsedKey, err = x509.ParsePKCS1PrivateKey(key)
if err != nil {
return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8: %w", err)
}
}
parsed, ok := parsedKey.(*rsa.PrivateKey)
if !ok {
return nil, errors.New("private key is invalid")
}
return parsed, nil
}
// GetQuotaProject retrieves quota project with precedence being: override,
// environment variable, creds json file.
func GetQuotaProject(b []byte, override string) string {
if override != "" {
return override
}
if env := os.Getenv(QuotaProjectEnvVar); env != "" {
return env
}
if b == nil {
return ""
}
var v struct {
QuotaProject string `json:"quota_project_id"`
}
if err := json.Unmarshal(b, &v); err != nil {
return ""
}
return v.QuotaProject
}
// GetProjectID retrieves project with precedence being: override,
// environment variable, creds json file.
func GetProjectID(b []byte, override string) string {
if override != "" {
return override
}
if env := os.Getenv(projectEnvVar); env != "" {
return env
}
if b == nil {
return ""
}
var v struct {
ProjectID string `json:"project_id"` // standard service account key
Project string `json:"project"` // gdch key
}
if err := json.Unmarshal(b, &v); err != nil {
return ""
}
if v.ProjectID != "" {
return v.ProjectID
}
return v.Project
}
// ReadAll consumes the whole reader and safely reads the content of its body
// with some overflow protection.
func ReadAll(r io.Reader) ([]byte, error) {
return io.ReadAll(io.LimitReader(r, maxBodySize))
}
// StaticCredentialsProperty is a helper for creating static credentials
// properties.
func StaticCredentialsProperty(s string) StaticProperty {
return StaticProperty(s)
}
// StaticProperty always returns the value of the underlying string.
type StaticProperty string
// GetProperty returns the property value for the given context.
func (p StaticProperty) GetProperty(context.Context) (string, error) {
return string(p), nil
}
// ComputeUniverseDomainProvider fetches the credentials universe domain from
// the google cloud metadata service.
type ComputeUniverseDomainProvider struct {
universeDomainOnce sync.Once
universeDomain string
universeDomainErr error
}
// GetProperty fetches the credentials universe domain from the google cloud
// metadata service.
func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string, error) {
c.universeDomainOnce.Do(func() {
c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx)
})
if c.universeDomainErr != nil {
return "", c.universeDomainErr
}
return c.universeDomain, nil
}
// httpGetMetadataUniverseDomain is a package var for unit test substitution.
var httpGetMetadataUniverseDomain = func(ctx context.Context) (string, error) {
client := metadata.NewClient(&http.Client{Timeout: time.Second})
// TODO(quartzmo): set ctx on request
return client.Get("universe/universe_domain")
}
func getMetadataUniverseDomain(ctx context.Context) (string, error) {
universeDomain, err := httpGetMetadataUniverseDomain(ctx)
if err == nil {
return universeDomain, nil
}
if _, ok := err.(metadata.NotDefinedError); ok {
// http.StatusNotFound (404)
return DefaultUniverseDomain, nil
}
return "", err
}
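Illustrative only, not part of this change: a standalone sketch of the ParseKey approach above, stripping an optional PEM wrapper and then trying PKCS#8 before falling back to PKCS#1, exercised by round-tripping a freshly generated key.
package main
import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
)
// parseRSAKey mirrors ParseKey above: PEM-decode if possible, then try PKCS#8,
// then fall back to PKCS#1.
func parseRSAKey(key []byte) (*rsa.PrivateKey, error) {
	if block, _ := pem.Decode(key); block != nil {
		key = block.Bytes
	}
	if k, err := x509.ParsePKCS8PrivateKey(key); err == nil {
		rsaKey, ok := k.(*rsa.PrivateKey)
		if !ok {
			return nil, fmt.Errorf("private key is not an RSA key")
		}
		return rsaKey, nil
	}
	return x509.ParsePKCS1PrivateKey(key)
}
func main() {
	k, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		fmt.Println("keygen failed:", err)
		return
	}
	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)})
	parsed, err := parseRSAKey(pemBytes)
	fmt.Println(err == nil && parsed.N.Cmp(k.N) == 0) // expected: true
}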

171
vendor/cloud.google.com/go/auth/internal/jwt/jwt.go generated vendored Normal file

@ -0,0 +1,171 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jwt
import (
"bytes"
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
)
const (
// HeaderAlgRSA256 is the RS256 [Header.Algorithm].
HeaderAlgRSA256 = "RS256"
// HeaderAlgES256 is the ES256 [Header.Algorithm].
HeaderAlgES256 = "ES256"
// HeaderType is the standard [Header.Type].
HeaderType = "JWT"
)
// Header represents a JWT header.
type Header struct {
Algorithm string `json:"alg"`
Type string `json:"typ"`
KeyID string `json:"kid"`
}
func (h *Header) encode() (string, error) {
b, err := json.Marshal(h)
if err != nil {
return "", err
}
return base64.RawURLEncoding.EncodeToString(b), nil
}
// Claims represents the claims set of a JWT.
type Claims struct {
// Iss is the issuer JWT claim.
Iss string `json:"iss"`
// Scope is the scope JWT claim.
Scope string `json:"scope,omitempty"`
// Exp is the expiry JWT claim. If unset, default is in one hour from now.
Exp int64 `json:"exp"`
// Iat is the subject issued at claim. If unset, default is now.
Iat int64 `json:"iat"`
// Aud is the audience JWT claim. Optional.
Aud string `json:"aud"`
// Sub is the subject JWT claim. Optional.
Sub string `json:"sub,omitempty"`
// AdditionalClaims contains any additional non-standard JWT claims. Optional.
AdditionalClaims map[string]interface{} `json:"-"`
}
func (c *Claims) encode() (string, error) {
// Compensate for skew
now := time.Now().Add(-10 * time.Second)
if c.Iat == 0 {
c.Iat = now.Unix()
}
if c.Exp == 0 {
c.Exp = now.Add(time.Hour).Unix()
}
if c.Exp < c.Iat {
return "", fmt.Errorf("jwt: invalid Exp = %d; must be later than Iat = %d", c.Exp, c.Iat)
}
b, err := json.Marshal(c)
if err != nil {
return "", err
}
if len(c.AdditionalClaims) == 0 {
return base64.RawURLEncoding.EncodeToString(b), nil
}
// Marshal private claim set and then append it to b.
prv, err := json.Marshal(c.AdditionalClaims)
if err != nil {
return "", fmt.Errorf("invalid map of additional claims %v: %w", c.AdditionalClaims, err)
}
// Concatenate public and private claim JSON objects.
if !bytes.HasSuffix(b, []byte{'}'}) {
return "", fmt.Errorf("invalid JSON %s", b)
}
if !bytes.HasPrefix(prv, []byte{'{'}) {
return "", fmt.Errorf("invalid JSON %s", prv)
}
b[len(b)-1] = ',' // Replace closing curly brace with a comma.
b = append(b, prv[1:]...) // Append private claims.
return base64.RawURLEncoding.EncodeToString(b), nil
}
// EncodeJWS encodes the data using the provided key as a JSON web signature.
func EncodeJWS(header *Header, c *Claims, key *rsa.PrivateKey) (string, error) {
head, err := header.encode()
if err != nil {
return "", err
}
claims, err := c.encode()
if err != nil {
return "", err
}
ss := fmt.Sprintf("%s.%s", head, claims)
h := sha256.New()
h.Write([]byte(ss))
sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil))
if err != nil {
return "", err
}
return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil
}
// DecodeJWS decodes a claim set from a JWS payload.
func DecodeJWS(payload string) (*Claims, error) {
// decode returned id token to get expiry
s := strings.Split(payload, ".")
if len(s) < 2 {
return nil, errors.New("invalid token received")
}
decoded, err := base64.RawURLEncoding.DecodeString(s[1])
if err != nil {
return nil, err
}
c := &Claims{}
if err := json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c); err != nil {
return nil, err
}
if err := json.NewDecoder(bytes.NewBuffer(decoded)).Decode(&c.AdditionalClaims); err != nil {
return nil, err
}
return c, err
}
// VerifyJWS tests whether the provided JWT token's signature was produced by
// the private key associated with the provided public key.
func VerifyJWS(token string, key *rsa.PublicKey) error {
parts := strings.Split(token, ".")
if len(parts) != 3 {
return errors.New("jwt: invalid token received, token must have 3 parts")
}
signedContent := parts[0] + "." + parts[1]
signatureString, err := base64.RawURLEncoding.DecodeString(parts[2])
if err != nil {
return err
}
h := sha256.New()
h.Write([]byte(signedContent))
return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), signatureString)
}
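Illustrative only, not part of this change: a standalone sketch of the RS256 signing layout that EncodeJWS implements above, joining base64url-encoded header and claims with dots and signing the SHA-256 digest of that string; the claim values are fabricated.
package main
import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"encoding/base64"
	"encoding/json"
	"fmt"
)
// signJWS mirrors EncodeJWS above: base64url(header) "." base64url(claims),
// signed with RSASSA-PKCS1-v1_5 over the SHA-256 digest of the signing input.
func signJWS(header, claims any, key *rsa.PrivateKey) (string, error) {
	h, err := json.Marshal(header)
	if err != nil {
		return "", err
	}
	c, err := json.Marshal(claims)
	if err != nil {
		return "", err
	}
	ss := base64.RawURLEncoding.EncodeToString(h) + "." + base64.RawURLEncoding.EncodeToString(c)
	digest := sha256.Sum256([]byte(ss))
	sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, digest[:])
	if err != nil {
		return "", err
	}
	return ss + "." + base64.RawURLEncoding.EncodeToString(sig), nil
}
func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		fmt.Println("keygen failed:", err)
		return
	}
	tok, err := signJWS(
		map[string]string{"alg": "RS256", "typ": "JWT"},
		map[string]any{"iss": "demo@example.com", "aud": "https://example.com/token"}, // fabricated claims
		key,
	)
	fmt.Println(len(tok) > 0, err) // expected: true <nil>
}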


@ -0,0 +1,300 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transport
import (
"context"
"crypto/tls"
"errors"
"net"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/transport/cert"
"github.com/google/s2a-go"
"github.com/google/s2a-go/fallback"
"google.golang.org/grpc/credentials"
)
const (
mTLSModeAlways = "always"
mTLSModeNever = "never"
mTLSModeAuto = "auto"
// Experimental: if true, the code will try MTLS with S2A as the default for transport security. Default value is false.
googleAPIUseS2AEnv = "EXPERIMENTAL_GOOGLE_API_USE_S2A"
googleAPIUseCertSource = "GOOGLE_API_USE_CLIENT_CERTIFICATE"
googleAPIUseMTLS = "GOOGLE_API_USE_MTLS_ENDPOINT"
googleAPIUseMTLSOld = "GOOGLE_API_USE_MTLS"
universeDomainPlaceholder = "UNIVERSE_DOMAIN"
)
var (
mdsMTLSAutoConfigSource mtlsConfigSource
errUniverseNotSupportedMTLS = errors.New("mTLS is not supported in any universe other than googleapis.com")
)
// Options duplicates information from the individual transport packages in
// order to avoid cyclic deps. It correlates 1:1 with fields on
// httptransport.Options and grpctransport.Options.
type Options struct {
Endpoint string
DefaultMTLSEndpoint string
DefaultEndpointTemplate string
ClientCertProvider cert.Provider
Client *http.Client
UniverseDomain string
}
// getUniverseDomain returns the default service domain for a given Cloud
// universe.
func (o *Options) getUniverseDomain() string {
if o.UniverseDomain == "" {
return internal.DefaultUniverseDomain
}
return o.UniverseDomain
}
// isUniverseDomainGDU returns true if the universe domain is the default Google
// universe.
func (o *Options) isUniverseDomainGDU() bool {
return o.getUniverseDomain() == internal.DefaultUniverseDomain
}
// defaultEndpoint returns the DefaultEndpointTemplate merged with the
// universe domain if the DefaultEndpointTemplate is set, otherwise returns an
// empty string.
func (o *Options) defaultEndpoint() string {
if o.DefaultEndpointTemplate == "" {
return ""
}
return strings.Replace(o.DefaultEndpointTemplate, universeDomainPlaceholder, o.getUniverseDomain(), 1)
}
// mergedEndpoint merges a user-provided Endpoint of format host[:port] with the
// default endpoint.
func (o *Options) mergedEndpoint() (string, error) {
defaultEndpoint := o.defaultEndpoint()
u, err := url.Parse(fixScheme(defaultEndpoint))
if err != nil {
return "", err
}
return strings.Replace(defaultEndpoint, u.Host, o.Endpoint, 1), nil
}
func fixScheme(baseURL string) string {
if !strings.Contains(baseURL, "://") {
baseURL = "https://" + baseURL
}
return baseURL
}
// GetGRPCTransportCredsAndEndpoint returns an instance of
// [google.golang.org/grpc/credentials.TransportCredentials], and the
// corresponding endpoint to use for GRPC client.
func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCredentials, string, error) {
config, err := getTransportConfig(opts)
if err != nil {
return nil, "", err
}
defaultTransportCreds := credentials.NewTLS(&tls.Config{
GetClientCertificate: config.clientCertSource,
})
if config.s2aAddress == "" {
return defaultTransportCreds, config.endpoint, nil
}
var fallbackOpts *s2a.FallbackOptions
// In case of S2A failure, fall back to the endpoint that would've been used without S2A.
if fallbackHandshake, err := fallback.DefaultFallbackClientHandshakeFunc(config.endpoint); err == nil {
fallbackOpts = &s2a.FallbackOptions{
FallbackClientHandshakeFunc: fallbackHandshake,
}
}
s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{
S2AAddress: config.s2aAddress,
FallbackOpts: fallbackOpts,
})
if err != nil {
// Use default if we cannot initialize S2A client transport credentials.
return defaultTransportCreds, config.endpoint, nil
}
return s2aTransportCreds, config.s2aMTLSEndpoint, nil
}
// GetHTTPTransportConfig returns a client certificate source and a function for
// dialing MTLS with S2A.
func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, string, string) (net.Conn, error), error) {
config, err := getTransportConfig(opts)
if err != nil {
return nil, nil, err
}
if config.s2aAddress == "" {
return config.clientCertSource, nil, nil
}
var fallbackOpts *s2a.FallbackOptions
// In case of S2A failure, fall back to the endpoint that would've been used without S2A.
if fallbackURL, err := url.Parse(config.endpoint); err == nil {
if fallbackDialer, fallbackServerAddr, err := fallback.DefaultFallbackDialerAndAddress(fallbackURL.Hostname()); err == nil {
fallbackOpts = &s2a.FallbackOptions{
FallbackDialer: &s2a.FallbackDialer{
Dialer: fallbackDialer,
ServerAddr: fallbackServerAddr,
},
}
}
}
dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{
S2AAddress: config.s2aAddress,
FallbackOpts: fallbackOpts,
})
return nil, dialTLSContextFunc, nil
}
func getTransportConfig(opts *Options) (*transportConfig, error) {
clientCertSource, err := getClientCertificateSource(opts)
if err != nil {
return nil, err
}
endpoint, err := getEndpoint(opts, clientCertSource)
if err != nil {
return nil, err
}
defaultTransportConfig := transportConfig{
clientCertSource: clientCertSource,
endpoint: endpoint,
}
if !shouldUseS2A(clientCertSource, opts) {
return &defaultTransportConfig, nil
}
if !opts.isUniverseDomainGDU() {
return nil, errUniverseNotSupportedMTLS
}
s2aMTLSEndpoint := opts.DefaultMTLSEndpoint
// If there is endpoint override, honor it.
if opts.Endpoint != "" {
s2aMTLSEndpoint = endpoint
}
s2aAddress := GetS2AAddress()
if s2aAddress == "" {
return &defaultTransportConfig, nil
}
return &transportConfig{
clientCertSource: clientCertSource,
endpoint: endpoint,
s2aAddress: s2aAddress,
s2aMTLSEndpoint: s2aMTLSEndpoint,
}, nil
}
// getClientCertificateSource returns a default client certificate source, if
// not provided by the user.
//
// A nil default source can be returned if the source does not exist. Any exceptions
// encountered while initializing the default source will be reported as client
// error (ex. corrupt metadata file).
//
// Important Note: For now, the environment variable GOOGLE_API_USE_CLIENT_CERTIFICATE
// must be set to "true" to allow certificates to be used (including user-provided
// certificates). For details, see AIP-4114.
func getClientCertificateSource(opts *Options) (cert.Provider, error) {
if !isClientCertificateEnabled() {
return nil, nil
} else if opts.ClientCertProvider != nil {
return opts.ClientCertProvider, nil
}
return cert.DefaultProvider()
}
func isClientCertificateEnabled() bool {
// TODO(andyrzhao): Update default to return "true" after DCA feature is fully released.
// treating a parse error as false is a good default
b, _ := strconv.ParseBool(os.Getenv(googleAPIUseCertSource))
return b
}
type transportConfig struct {
// The client certificate source.
clientCertSource cert.Provider
// The corresponding endpoint to use based on client certificate source.
endpoint string
// The S2A address if it can be used, otherwise an empty string.
s2aAddress string
// The MTLS endpoint to use with S2A.
s2aMTLSEndpoint string
}
// getEndpoint returns the endpoint for the service, taking into account the
// user-provided endpoint override "settings.Endpoint".
//
// If no endpoint override is specified, we will either return the default endpoint or
// the default mTLS endpoint if a client certificate is available.
//
// You can override the default endpoint choice (mtls vs. regular) by setting the
// GOOGLE_API_USE_MTLS_ENDPOINT environment variable.
//
// If the endpoint override is an address (host:port) rather than a full base
// URL (ex. https://...), then the user-provided address will be merged into
// the default endpoint. For example, WithEndpoint("myhost:8000") and
// DefaultEndpointTemplate("https://UNIVERSE_DOMAIN/bar/baz") will return "https://myhost:8000/bar/baz".
func getEndpoint(opts *Options, clientCertSource cert.Provider) (string, error) {
if opts.Endpoint == "" {
mtlsMode := getMTLSMode()
if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) {
if !opts.isUniverseDomainGDU() {
return "", errUniverseNotSupportedMTLS
}
return opts.DefaultMTLSEndpoint, nil
}
return opts.defaultEndpoint(), nil
}
if strings.Contains(opts.Endpoint, "://") {
// User passed in a full URL path, use it verbatim.
return opts.Endpoint, nil
}
if opts.defaultEndpoint() == "" {
// If DefaultEndpointTemplate is not configured,
// use the user provided endpoint verbatim. This allows a naked
// "host[:port]" URL to be used with GRPC Direct Path.
return opts.Endpoint, nil
}
// Assume user-provided endpoint is host[:port], merge it with the default endpoint.
return opts.mergedEndpoint()
}
func getMTLSMode() string {
mode := os.Getenv(googleAPIUseMTLS)
if mode == "" {
mode = os.Getenv(googleAPIUseMTLSOld) // Deprecated.
}
if mode == "" {
return mTLSModeAuto
}
return strings.ToLower(mode)
}
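Illustrative only, not part of this change: a standalone sketch of the host-merging behaviour implemented by fixScheme and mergedEndpoint above, replacing the host of a default endpoint with a user-supplied "host[:port]" override; the endpoint values are hypothetical.
package main
import (
	"fmt"
	"net/url"
	"strings"
)
// mergeEndpoint mirrors mergedEndpoint/fixScheme above: ensure the default
// endpoint has a scheme, parse it, then swap in the override as the host.
func mergeEndpoint(defaultEndpoint, override string) (string, error) {
	base := defaultEndpoint
	if !strings.Contains(base, "://") {
		base = "https://" + base
	}
	u, err := url.Parse(base)
	if err != nil {
		return "", err
	}
	return strings.Replace(defaultEndpoint, u.Host, override, 1), nil
}
func main() {
	merged, err := mergeEndpoint("https://storage.googleapis.com/storage/v1", "myhost:8000") // hypothetical values
	fmt.Println(merged, err) // expected: https://myhost:8000/storage/v1 <nil>
}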


@ -0,0 +1,62 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cert
import (
"crypto/tls"
"errors"
"sync"
)
// defaultCertData holds all the variables pertaining to
// the default certificate provider created by [DefaultProvider].
//
// A singleton model is used to allow the provider to be reused
// by the transport layer. As mentioned in [DefaultProvider], (nil, nil)
// may be returned to indicate a default provider could not be found, which
// will skip extra TLS config in the transport layer.
type defaultCertData struct {
once sync.Once
provider Provider
err error
}
var (
defaultCert defaultCertData
)
// Provider is a function that can be passed into crypto/tls.Config.GetClientCertificate.
type Provider func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
// errSourceUnavailable is a sentinel error to indicate certificate source is unavailable.
var errSourceUnavailable = errors.New("certificate source is unavailable")
// DefaultProvider returns a certificate source using the preferred EnterpriseCertificateProxySource.
// If EnterpriseCertificateProxySource is not available, fall back to the legacy SecureConnectSource.
//
// If neither source is available (due to missing configurations), a nil Provider and a nil error are
// returned to indicate that a default certificate source is unavailable.
func DefaultProvider() (Provider, error) {
defaultCert.once.Do(func() {
defaultCert.provider, defaultCert.err = NewEnterpriseCertificateProxyProvider("")
if errors.Is(defaultCert.err, errSourceUnavailable) {
defaultCert.provider, defaultCert.err = NewSecureConnectProvider("")
if errors.Is(defaultCert.err, errSourceUnavailable) {
defaultCert.provider, defaultCert.err = nil, nil
}
}
})
return defaultCert.provider, defaultCert.err
}


@ -0,0 +1,56 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cert
import (
"crypto/tls"
"errors"
"github.com/googleapis/enterprise-certificate-proxy/client"
)
type ecpSource struct {
key *client.Key
}
// NewEnterpriseCertificateProxyProvider creates a certificate source
// using the Enterprise Certificate Proxy client, which delegates
// certificate-related operations to an OS-specific "signer binary"
// that communicates with the native keystore (ex. keychain on MacOS).
//
// The configFilePath points to a config file containing relevant parameters
// such as the certificate issuer and the location of the signer binary.
// If configFilePath is empty, the client will attempt to load the config from
// a well-known gcloud location.
func NewEnterpriseCertificateProxyProvider(configFilePath string) (Provider, error) {
key, err := client.Cred(configFilePath)
if err != nil {
if errors.Is(err, client.ErrCredUnavailable) {
return nil, errSourceUnavailable
}
return nil, err
}
return (&ecpSource{
key: key,
}).getClientCertificate, nil
}
func (s *ecpSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) {
var cert tls.Certificate
cert.PrivateKey = s.key
cert.Certificate = s.key.CertificateChain()
return &cert, nil
}


@ -0,0 +1,124 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cert
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"os"
"os/exec"
"os/user"
"path/filepath"
"sync"
"time"
)
const (
metadataPath = ".secureConnect"
metadataFile = "context_aware_metadata.json"
)
type secureConnectSource struct {
metadata secureConnectMetadata
// Cache the cert to avoid executing helper command repeatedly.
cachedCertMutex sync.Mutex
cachedCert *tls.Certificate
}
type secureConnectMetadata struct {
Cmd []string `json:"cert_provider_command"`
}
// NewSecureConnectProvider creates a certificate source using
// the Secure Connect Helper and its associated metadata file.
//
// The configFilePath points to the location of the context aware metadata file.
// If configFilePath is empty, use the default context aware metadata location.
func NewSecureConnectProvider(configFilePath string) (Provider, error) {
if configFilePath == "" {
user, err := user.Current()
if err != nil {
// Error locating the default config means Secure Connect is not supported.
return nil, errSourceUnavailable
}
configFilePath = filepath.Join(user.HomeDir, metadataPath, metadataFile)
}
file, err := os.ReadFile(configFilePath)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
// Config file missing means Secure Connect is not supported.
return nil, errSourceUnavailable
}
return nil, err
}
var metadata secureConnectMetadata
if err := json.Unmarshal(file, &metadata); err != nil {
return nil, fmt.Errorf("cert: could not parse JSON in %q: %w", configFilePath, err)
}
if err := validateMetadata(metadata); err != nil {
return nil, fmt.Errorf("cert: invalid config in %q: %w", configFilePath, err)
}
return (&secureConnectSource{
metadata: metadata,
}).getClientCertificate, nil
}
func validateMetadata(metadata secureConnectMetadata) error {
if len(metadata.Cmd) == 0 {
return errors.New("empty cert_provider_command")
}
return nil
}
func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) {
s.cachedCertMutex.Lock()
defer s.cachedCertMutex.Unlock()
if s.cachedCert != nil && !isCertificateExpired(s.cachedCert) {
return s.cachedCert, nil
}
// Expand OS environment variables in the cert provider command such as "$HOME".
for i := 0; i < len(s.metadata.Cmd); i++ {
s.metadata.Cmd[i] = os.ExpandEnv(s.metadata.Cmd[i])
}
command := s.metadata.Cmd
data, err := exec.Command(command[0], command[1:]...).Output()
if err != nil {
return nil, err
}
cert, err := tls.X509KeyPair(data, data)
if err != nil {
return nil, err
}
s.cachedCert = &cert
return &cert, nil
}
// isCertificateExpired returns true if the given cert is expired or invalid.
func isCertificateExpired(cert *tls.Certificate) bool {
if len(cert.Certificate) == 0 {
return true
}
parsed, err := x509.ParseCertificate(cert.Certificate[0])
if err != nil {
return true
}
return time.Now().After(parsed.NotAfter)
}


@ -0,0 +1,189 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transport
import (
"encoding/json"
"log"
"os"
"strconv"
"sync"
"time"
"cloud.google.com/go/auth/internal/transport/cert"
"cloud.google.com/go/compute/metadata"
)
const (
configEndpointSuffix = "instance/platform-security/auto-mtls-configuration"
)
var (
// The period an MTLS config can be reused before needing refresh.
configExpiry = time.Hour
// mtlsEndpointEnabledForS2A checks if the endpoint is indeed MTLS-enabled, so that we can use S2A for MTLS connection.
mtlsEndpointEnabledForS2A = func() bool {
// TODO(xmenxk): determine this via discovery config.
return true
}
// mdsMTLSAutoConfigSource is an instance of reuseMTLSConfigSource, with metadataMTLSAutoConfig as its config source.
mtlsOnce sync.Once
)
// GetS2AAddress returns the S2A address to be reached via plaintext connection.
// Returns empty string if not set or invalid.
func GetS2AAddress() string {
c, err := getMetadataMTLSAutoConfig().Config()
if err != nil {
return ""
}
if !c.Valid() {
return ""
}
return c.S2A.PlaintextAddress
}
type mtlsConfigSource interface {
Config() (*mtlsConfig, error)
}
// mtlsConfig contains the configuration for establishing MTLS connections with Google APIs.
type mtlsConfig struct {
S2A *s2aAddresses `json:"s2a"`
Expiry time.Time
}
func (c *mtlsConfig) Valid() bool {
return c != nil && c.S2A != nil && !c.expired()
}
func (c *mtlsConfig) expired() bool {
return c.Expiry.Before(time.Now())
}
// s2aAddresses contains the plaintext and/or MTLS S2A addresses.
type s2aAddresses struct {
// PlaintextAddress is the plaintext address to reach S2A
PlaintextAddress string `json:"plaintext_address"`
// MTLSAddress is the MTLS address to reach S2A
MTLSAddress string `json:"mtls_address"`
}
// getMetadataMTLSAutoConfig returns mdsMTLSAutoConfigSource, which is backed by config from MDS with auto-refresh.
func getMetadataMTLSAutoConfig() mtlsConfigSource {
mtlsOnce.Do(func() {
mdsMTLSAutoConfigSource = &reuseMTLSConfigSource{
src: &metadataMTLSAutoConfig{},
}
})
return mdsMTLSAutoConfigSource
}
// reuseMTLSConfigSource caches a valid version of mtlsConfig, and uses `src` to refresh upon config expiry.
// It implements the mtlsConfigSource interface, so calling Config() on it returns an mtlsConfig.
type reuseMTLSConfigSource struct {
src mtlsConfigSource // src.Config() is called when config is expired
mu sync.Mutex // mutex guards config
config *mtlsConfig // cached config
}
func (cs *reuseMTLSConfigSource) Config() (*mtlsConfig, error) {
cs.mu.Lock()
defer cs.mu.Unlock()
if cs.config.Valid() {
return cs.config, nil
}
c, err := cs.src.Config()
if err != nil {
return nil, err
}
cs.config = c
return c, nil
}
// metadataMTLSAutoConfig is an implementation of the mtlsConfigSource interface.
// It has the logic to query MDS and return an mtlsConfig.
type metadataMTLSAutoConfig struct{}
var httpGetMetadataMTLSConfig = func() (string, error) {
return metadata.Get(configEndpointSuffix)
}
func (cs *metadataMTLSAutoConfig) Config() (*mtlsConfig, error) {
resp, err := httpGetMetadataMTLSConfig()
if err != nil {
log.Printf("querying MTLS config from MDS endpoint failed: %v", err)
return defaultMTLSConfig(), nil
}
var config mtlsConfig
err = json.Unmarshal([]byte(resp), &config)
if err != nil {
log.Printf("unmarshalling MTLS config from MDS endpoint failed: %v", err)
return defaultMTLSConfig(), nil
}
if config.S2A == nil {
log.Printf("returned MTLS config from MDS endpoint is invalid: %v", config)
return defaultMTLSConfig(), nil
}
// set new expiry
config.Expiry = time.Now().Add(configExpiry)
return &config, nil
}
func defaultMTLSConfig() *mtlsConfig {
return &mtlsConfig{
S2A: &s2aAddresses{
PlaintextAddress: "",
MTLSAddress: "",
},
Expiry: time.Now().Add(configExpiry),
}
}
func shouldUseS2A(clientCertSource cert.Provider, opts *Options) bool {
// If client cert is found, use that over S2A.
if clientCertSource != nil {
return false
}
// If EXPERIMENTAL_GOOGLE_API_USE_S2A is not set to true, skip S2A.
if !isGoogleS2AEnabled() {
return false
}
// If DefaultMTLSEndpoint is not set and no endpoint override, skip S2A.
if opts.DefaultMTLSEndpoint == "" && opts.Endpoint == "" {
return false
}
// If MTLS is not enabled for this endpoint, skip S2A.
if !mtlsEndpointEnabledForS2A() {
return false
}
// If custom HTTP client is provided, skip S2A.
if opts.Client != nil {
return false
}
return true
}
func isGoogleS2AEnabled() bool {
b, err := strconv.ParseBool(os.Getenv(googleAPIUseS2AEnv))
if err != nil {
return false
}
return b
}
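Illustrative only, not part of this change: a standalone sketch of the cache-with-expiry pattern used by reuseMTLSConfigSource above, generalized to a string value guarded by a mutex; the fetch function is hypothetical.
package main
import (
	"fmt"
	"sync"
	"time"
)
// cachedValue mirrors the reuseMTLSConfigSource pattern above: serve the
// cached value while it is fresh and call fetch again once it has expired.
type cachedValue struct {
	mu     sync.Mutex
	value  string
	expiry time.Time
	ttl    time.Duration
	fetch  func() (string, error)
}
func (c *cachedValue) Get() (string, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if time.Now().Before(c.expiry) {
		return c.value, nil
	}
	v, err := c.fetch()
	if err != nil {
		return "", err
	}
	c.value, c.expiry = v, time.Now().Add(c.ttl)
	return v, nil
}
func main() {
	c := &cachedValue{
		ttl:   time.Hour,
		fetch: func() (string, error) { return "config-v1", nil }, // hypothetical fetcher
	}
	v, _ := c.Get()
	fmt.Println(v) // expected: config-v1
}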


@ -0,0 +1,76 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package transport provides internal helpers for the two transport packages
// (grpctransport and httptransport).
package transport
import (
"fmt"
"cloud.google.com/go/auth/credentials"
)
// CloneDetectOptions clones a user set detect option into some new memory that
// we can internally manipulate before sending onto the detect package.
func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOptions {
if oldDo == nil {
// it is valid for users not to set this, but we will need to default
// some options for them in this case, so return some initialized memory
// to work with.
return &credentials.DetectOptions{}
}
newDo := &credentials.DetectOptions{
// Simple types
Audience: oldDo.Audience,
Subject: oldDo.Subject,
EarlyTokenRefresh: oldDo.EarlyTokenRefresh,
TokenURL: oldDo.TokenURL,
STSAudience: oldDo.STSAudience,
CredentialsFile: oldDo.CredentialsFile,
UseSelfSignedJWT: oldDo.UseSelfSignedJWT,
UniverseDomain: oldDo.UniverseDomain,
// These fields are pointer types that we just want to use exactly
// as the user set them, so copy the reference.
Client: oldDo.Client,
AuthHandlerOptions: oldDo.AuthHandlerOptions,
}
// Smartly size this memory and copy below.
if oldDo.CredentialsJSON != nil {
newDo.CredentialsJSON = make([]byte, len(oldDo.CredentialsJSON))
copy(newDo.CredentialsJSON, oldDo.CredentialsJSON)
}
if oldDo.Scopes != nil {
newDo.Scopes = make([]string, len(oldDo.Scopes))
copy(newDo.Scopes, oldDo.Scopes)
}
return newDo
}
// ValidateUniverseDomain verifies that the universe domain configured for the
// client matches the universe domain configured for the credentials.
func ValidateUniverseDomain(clientUniverseDomain, credentialsUniverseDomain string) error {
if clientUniverseDomain != credentialsUniverseDomain {
return fmt.Errorf(
"the configured universe domain (%q) does not match the universe "+
"domain found in the credentials (%q). If you haven't configured "+
"the universe domain explicitly, \"googleapis.com\" is the default",
clientUniverseDomain,
credentialsUniverseDomain)
}
return nil
}

33
vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md generated vendored Normal file

@ -0,0 +1,33 @@
# Changelog
## [0.2.1](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.0...auth/oauth2adapt/v0.2.1) (2024-04-18)
### Bug Fixes
* **auth/oauth2adapt:** Adapt Token Types to be translated ([#9801](https://github.com/googleapis/google-cloud-go/issues/9801)) ([70f4115](https://github.com/googleapis/google-cloud-go/commit/70f411555ebbf2b71e6d425cc8d2030644c6b438)), refs [#9800](https://github.com/googleapis/google-cloud-go/issues/9800)
## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.1.0...auth/oauth2adapt/v0.2.0) (2024-04-16)
### Features
* **auth/oauth2adapt:** Add helpers for working with credentials types ([#9694](https://github.com/googleapis/google-cloud-go/issues/9694)) ([cf33b55](https://github.com/googleapis/google-cloud-go/commit/cf33b5514423a2ac5c2a323a1cd99aac34fd4233))
### Bug Fixes
* **auth/oauth2adapt:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
## 0.1.0 (2023-10-19)
### Features
* **auth/oauth2adapt:** Adds a new module to translate types ([#8595](https://github.com/googleapis/google-cloud-go/issues/8595)) ([6933c5a](https://github.com/googleapis/google-cloud-go/commit/6933c5a0c1fc8e58cbfff8bbca439d671b94672f))
* **auth/oauth2adapt:** Fixup deps for release ([#8747](https://github.com/googleapis/google-cloud-go/issues/8747)) ([749d243](https://github.com/googleapis/google-cloud-go/commit/749d243862b025a6487a4d2d339219889b4cfe70))
### Bug Fixes
* **auth/oauth2adapt:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))

202
vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE generated vendored Normal file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -0,0 +1,164 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package oauth2adapt helps convert types used in [cloud.google.com/go/auth]
// and [golang.org/x/oauth2].
package oauth2adapt
import (
"context"
"encoding/json"
"errors"
"cloud.google.com/go/auth"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
)
// TokenProviderFromTokenSource converts any [golang.org/x/oauth2.TokenSource]
// into a [cloud.google.com/go/auth.TokenProvider].
func TokenProviderFromTokenSource(ts oauth2.TokenSource) auth.TokenProvider {
return &tokenProviderAdapter{ts: ts}
}
type tokenProviderAdapter struct {
ts oauth2.TokenSource
}
// Token fulfills the [cloud.google.com/go/auth.TokenProvider] interface. It
// is a light wrapper around the underlying TokenSource.
func (tp *tokenProviderAdapter) Token(context.Context) (*auth.Token, error) {
tok, err := tp.ts.Token()
if err != nil {
var err2 *oauth2.RetrieveError
if ok := errors.As(err, &err2); ok {
return nil, AuthErrorFromRetrieveError(err2)
}
return nil, err
}
return &auth.Token{
Value: tok.AccessToken,
Type: tok.Type(),
Expiry: tok.Expiry,
}, nil
}
// TokenSourceFromTokenProvider converts any
// [cloud.google.com/go/auth.TokenProvider] into a
// [golang.org/x/oauth2.TokenSource].
func TokenSourceFromTokenProvider(tp auth.TokenProvider) oauth2.TokenSource {
return &tokenSourceAdapter{tp: tp}
}
type tokenSourceAdapter struct {
tp auth.TokenProvider
}
// Token fulfills the [golang.org/x/oauth2.TokenSource] interface. It
// is a light wrapper around the underlying TokenProvider.
func (ts *tokenSourceAdapter) Token() (*oauth2.Token, error) {
tok, err := ts.tp.Token(context.Background())
if err != nil {
var err2 *auth.Error
if ok := errors.As(err, &err2); ok {
return nil, AddRetrieveErrorToAuthError(err2)
}
return nil, err
}
return &oauth2.Token{
AccessToken: tok.Value,
TokenType: tok.Type,
Expiry: tok.Expiry,
}, nil
}
// AuthCredentialsFromOauth2Credentials converts a [golang.org/x/oauth2/google.Credentials]
// to a [cloud.google.com/go/auth.Credentials].
func AuthCredentialsFromOauth2Credentials(creds *google.Credentials) *auth.Credentials {
if creds == nil {
return nil
}
return auth.NewCredentials(&auth.CredentialsOptions{
TokenProvider: TokenProviderFromTokenSource(creds.TokenSource),
JSON: creds.JSON,
ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
return creds.ProjectID, nil
}),
UniverseDomainProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
return creds.GetUniverseDomain()
}),
})
}
// Oauth2CredentialsFromAuthCredentials converts a [cloud.google.com/go/auth.Credentials]
// to a [golang.org/x/oauth2/google.Credentials].
func Oauth2CredentialsFromAuthCredentials(creds *auth.Credentials) *google.Credentials {
if creds == nil {
return nil
}
// Throw away errors as old credentials are not request aware. Also, no
// network requests are currently happening for this use case.
projectID, _ := creds.ProjectID(context.Background())
return &google.Credentials{
TokenSource: TokenSourceFromTokenProvider(creds.TokenProvider),
ProjectID: projectID,
JSON: creds.JSON(),
UniverseDomainProvider: func() (string, error) {
return creds.UniverseDomain(context.Background())
},
}
}
type oauth2Error struct {
ErrorCode string `json:"error"`
ErrorDescription string `json:"error_description"`
ErrorURI string `json:"error_uri"`
}
// AddRetrieveErrorToAuthError returns the same error provided and adds a
// [golang.org/x/oauth2.RetrieveError] to the error chain by setting the `Err` field on the
// [cloud.google.com/go/auth.Error].
func AddRetrieveErrorToAuthError(err *auth.Error) *auth.Error {
if err == nil {
return nil
}
e := &oauth2.RetrieveError{
Response: err.Response,
Body: err.Body,
}
err.Err = e
if len(err.Body) > 0 {
var oErr oauth2Error
// ignore the error as it only fills in extra details
json.Unmarshal(err.Body, &oErr)
e.ErrorCode = oErr.ErrorCode
e.ErrorDescription = oErr.ErrorDescription
e.ErrorURI = oErr.ErrorURI
}
return err
}
// AuthErrorFromRetrieveError returns an [cloud.google.com/go/auth.Error] that
// wraps the provided [golang.org/x/oauth2.RetrieveError].
func AuthErrorFromRetrieveError(err *oauth2.RetrieveError) *auth.Error {
if err == nil {
return nil
}
return &auth.Error{
Response: err.Response,
Body: err.Body,
Err: err,
}
}
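
For orientation, a minimal usage sketch of the adapter functions defined above; the static token source and its dummy token value are placeholders, not a real credential.

package main

import (
	"context"
	"fmt"

	"cloud.google.com/go/auth/oauth2adapt"
	"golang.org/x/oauth2"
)

func main() {
	// A static token source stands in for a real oauth2 credential here.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "placeholder-token"})

	// Wrap it so code that expects an auth.TokenProvider can consume it.
	tp := oauth2adapt.TokenProviderFromTokenSource(ts)
	tok, err := tp.Token(context.Background())
	if err != nil {
		// RetrieveError values from the source are surfaced as *auth.Error.
		fmt.Println("token error:", err)
		return
	}
	fmt.Println("token type:", tok.Type)

	// And the reverse direction, back to an oauth2.TokenSource.
	_ = oauth2adapt.TokenSourceFromTokenProvider(tp)
}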

373
vendor/cloud.google.com/go/auth/threelegged.go generated vendored Normal file
View file

@ -0,0 +1,373 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package auth
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"mime"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"cloud.google.com/go/auth/internal"
)
// AuthorizationHandler is a 3-legged-OAuth helper that prompts the user for
// OAuth consent at the specified auth code URL and returns an auth code and
// state upon approval.
type AuthorizationHandler func(authCodeURL string) (code string, state string, err error)
// Options3LO are the options for doing a 3-legged OAuth2 flow.
type Options3LO struct {
// ClientID is the application's ID.
ClientID string
// ClientSecret is the application's secret. Not required if AuthHandlerOpts
// is set.
ClientSecret string
// AuthURL is the URL for authenticating.
AuthURL string
// TokenURL is the URL for retrieving a token.
TokenURL string
// AuthStyle is used to describe how to send the client info in the token request.
AuthStyle Style
// RefreshToken is the token used to refresh the credential. Not required
// if AuthHandlerOpts is set.
RefreshToken string
// RedirectURL is the URL to redirect users to. Optional.
RedirectURL string
// Scopes specifies requested permissions for the Token. Optional.
Scopes []string
// URLParams are the set of values to apply to the token exchange. Optional.
URLParams url.Values
// Client is the client to be used to make the underlying token requests.
// Optional.
Client *http.Client
// EarlyTokenExpiry is the time before the token expires that it should be
// refreshed. If not set the default value is 10 seconds. Optional.
EarlyTokenExpiry time.Duration
// AuthHandlerOpts provides a set of options for doing a
// 3-legged OAuth2 flow with a custom [AuthorizationHandler]. Optional.
AuthHandlerOpts *AuthorizationHandlerOptions
}
func (o *Options3LO) validate() error {
if o == nil {
return errors.New("auth: options must be provided")
}
if o.ClientID == "" {
return errors.New("auth: client ID must be provided")
}
if o.AuthHandlerOpts == nil && o.ClientSecret == "" {
return errors.New("auth: client secret must be provided")
}
if o.AuthURL == "" {
return errors.New("auth: auth URL must be provided")
}
if o.TokenURL == "" {
return errors.New("auth: token URL must be provided")
}
if o.AuthStyle == StyleUnknown {
return errors.New("auth: auth style must be provided")
}
if o.AuthHandlerOpts == nil && o.RefreshToken == "" {
return errors.New("auth: refresh token must be provided")
}
return nil
}
// PKCEOptions holds parameters to support PKCE.
type PKCEOptions struct {
// Challenge is the un-padded, base64-url-encoded string of the encrypted code verifier.
Challenge string
// ChallengeMethod is the encryption method (ex. S256).
ChallengeMethod string
// Verifier is the original, non-encrypted secret.
Verifier string
}
type tokenJSON struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int `json:"expires_in"`
// error fields
ErrorCode string `json:"error"`
ErrorDescription string `json:"error_description"`
ErrorURI string `json:"error_uri"`
}
func (e *tokenJSON) expiry() (t time.Time) {
if v := e.ExpiresIn; v != 0 {
return time.Now().Add(time.Duration(v) * time.Second)
}
return
}
func (o *Options3LO) client() *http.Client {
if o.Client != nil {
return o.Client
}
return internal.CloneDefaultClient()
}
// authCodeURL returns a URL that points to an OAuth2 consent page.
func (o *Options3LO) authCodeURL(state string, values url.Values) string {
var buf bytes.Buffer
buf.WriteString(o.AuthURL)
v := url.Values{
"response_type": {"code"},
"client_id": {o.ClientID},
}
if o.RedirectURL != "" {
v.Set("redirect_uri", o.RedirectURL)
}
if len(o.Scopes) > 0 {
v.Set("scope", strings.Join(o.Scopes, " "))
}
if state != "" {
v.Set("state", state)
}
if o.AuthHandlerOpts != nil {
if o.AuthHandlerOpts.PKCEOpts != nil &&
o.AuthHandlerOpts.PKCEOpts.Challenge != "" {
v.Set(codeChallengeKey, o.AuthHandlerOpts.PKCEOpts.Challenge)
}
if o.AuthHandlerOpts.PKCEOpts != nil &&
o.AuthHandlerOpts.PKCEOpts.ChallengeMethod != "" {
v.Set(codeChallengeMethodKey, o.AuthHandlerOpts.PKCEOpts.ChallengeMethod)
}
}
for k := range values {
v.Set(k, v.Get(k))
}
if strings.Contains(o.AuthURL, "?") {
buf.WriteByte('&')
} else {
buf.WriteByte('?')
}
buf.WriteString(v.Encode())
return buf.String()
}
// New3LOTokenProvider returns a [TokenProvider] based on the 3-legged OAuth2
// configuration. The TokenProvider caches and auto-refreshes tokens by
// default.
func New3LOTokenProvider(opts *Options3LO) (TokenProvider, error) {
if err := opts.validate(); err != nil {
return nil, err
}
if opts.AuthHandlerOpts != nil {
return new3LOTokenProviderWithAuthHandler(opts), nil
}
return NewCachedTokenProvider(&tokenProvider3LO{opts: opts, refreshToken: opts.RefreshToken, client: opts.client()}, &CachedTokenProviderOptions{
ExpireEarly: opts.EarlyTokenExpiry,
}), nil
}
// AuthorizationHandlerOptions provides a set of options to specify for doing a
// 3-legged OAuth2 flow with a custom [AuthorizationHandler].
type AuthorizationHandlerOptions struct {
// AuthorizationHandler specifies the handler used for the authorization
// part of the flow.
Handler AuthorizationHandler
// State is used to verify that the "state" is identical in the request and
// response before exchanging the auth code for an OAuth2 token.
State string
// PKCEOpts allows setting configurations for PKCE. Optional.
PKCEOpts *PKCEOptions
}
func new3LOTokenProviderWithAuthHandler(opts *Options3LO) TokenProvider {
return NewCachedTokenProvider(&tokenProviderWithHandler{opts: opts, state: opts.AuthHandlerOpts.State}, &CachedTokenProviderOptions{
ExpireEarly: opts.EarlyTokenExpiry,
})
}
// exchange handles the final exchange portion of the 3lo flow. Returns a Token,
// refreshToken, and error.
func (o *Options3LO) exchange(ctx context.Context, code string) (*Token, string, error) {
// Build request
v := url.Values{
"grant_type": {"authorization_code"},
"code": {code},
}
if o.RedirectURL != "" {
v.Set("redirect_uri", o.RedirectURL)
}
if o.AuthHandlerOpts != nil &&
o.AuthHandlerOpts.PKCEOpts != nil &&
o.AuthHandlerOpts.PKCEOpts.Verifier != "" {
v.Set(codeVerifierKey, o.AuthHandlerOpts.PKCEOpts.Verifier)
}
for k := range o.URLParams {
v.Set(k, o.URLParams.Get(k))
}
return fetchToken(ctx, o, v)
}
// This struct is not safe for concurrent access alone, but the way it is used
// in this package by wrapping it with a cachedTokenProvider makes it so.
type tokenProvider3LO struct {
opts *Options3LO
client *http.Client
refreshToken string
}
func (tp *tokenProvider3LO) Token(ctx context.Context) (*Token, error) {
if tp.refreshToken == "" {
return nil, errors.New("auth: token expired and refresh token is not set")
}
v := url.Values{
"grant_type": {"refresh_token"},
"refresh_token": {tp.refreshToken},
}
for k := range tp.opts.URLParams {
v.Set(k, tp.opts.URLParams.Get(k))
}
tk, rt, err := fetchToken(ctx, tp.opts, v)
if err != nil {
return nil, err
}
if tp.refreshToken != rt && rt != "" {
tp.refreshToken = rt
}
return tk, err
}
type tokenProviderWithHandler struct {
opts *Options3LO
state string
}
func (tp tokenProviderWithHandler) Token(ctx context.Context) (*Token, error) {
url := tp.opts.authCodeURL(tp.state, nil)
code, state, err := tp.opts.AuthHandlerOpts.Handler(url)
if err != nil {
return nil, err
}
if state != tp.state {
return nil, errors.New("auth: state mismatch in 3-legged-OAuth flow")
}
tok, _, err := tp.opts.exchange(ctx, code)
return tok, err
}
// fetchToken returns a Token, refresh token, and/or an error.
func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, string, error) {
var refreshToken string
if o.AuthStyle == StyleInParams {
if o.ClientID != "" {
v.Set("client_id", o.ClientID)
}
if o.ClientSecret != "" {
v.Set("client_secret", o.ClientSecret)
}
}
req, err := http.NewRequest("POST", o.TokenURL, strings.NewReader(v.Encode()))
if err != nil {
return nil, refreshToken, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
if o.AuthStyle == StyleInHeader {
req.SetBasicAuth(url.QueryEscape(o.ClientID), url.QueryEscape(o.ClientSecret))
}
// Make request
r, err := o.client().Do(req.WithContext(ctx))
if err != nil {
return nil, refreshToken, err
}
body, err := internal.ReadAll(r.Body)
r.Body.Close()
if err != nil {
return nil, refreshToken, fmt.Errorf("auth: cannot fetch token: %w", err)
}
failureStatus := r.StatusCode < 200 || r.StatusCode > 299
tokError := &Error{
Response: r,
Body: body,
}
var token *Token
// errors ignored because of default switch on content
content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
switch content {
case "application/x-www-form-urlencoded", "text/plain":
// some endpoints return a query string
vals, err := url.ParseQuery(string(body))
if err != nil {
if failureStatus {
return nil, refreshToken, tokError
}
return nil, refreshToken, fmt.Errorf("auth: cannot parse response: %w", err)
}
tokError.code = vals.Get("error")
tokError.description = vals.Get("error_description")
tokError.uri = vals.Get("error_uri")
token = &Token{
Value: vals.Get("access_token"),
Type: vals.Get("token_type"),
Metadata: make(map[string]interface{}, len(vals)),
}
for k, v := range vals {
token.Metadata[k] = v
}
refreshToken = vals.Get("refresh_token")
e := vals.Get("expires_in")
expires, _ := strconv.Atoi(e)
if expires != 0 {
token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
}
default:
var tj tokenJSON
if err = json.Unmarshal(body, &tj); err != nil {
if failureStatus {
return nil, refreshToken, tokError
}
return nil, refreshToken, fmt.Errorf("auth: cannot parse json: %w", err)
}
tokError.code = tj.ErrorCode
tokError.description = tj.ErrorDescription
tokError.uri = tj.ErrorURI
token = &Token{
Value: tj.AccessToken,
Type: tj.TokenType,
Expiry: tj.expiry(),
Metadata: make(map[string]interface{}),
}
json.Unmarshal(body, &token.Metadata) // optional field, skip err check
refreshToken = tj.RefreshToken
}
// according to spec, servers should respond with status 400 in the error case
// https://www.rfc-editor.org/rfc/rfc6749#section-5.2
// but some unorthodox servers respond with 200 in the error case
if failureStatus || tokError.code != "" {
return nil, refreshToken, tokError
}
if token.Value == "" {
return nil, refreshToken, errors.New("auth: server response missing access_token")
}
return token, refreshToken, nil
}
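
A rough usage sketch for the 3-legged flow implemented above; every credential and URL below is a placeholder, and the Token call only succeeds against a real OAuth2 provider.

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/auth"
)

func main() {
	// All values below are placeholders for a real OAuth2 client registration
	// and a previously obtained refresh token.
	tp, err := auth.New3LOTokenProvider(&auth.Options3LO{
		ClientID:     "example-client-id",
		ClientSecret: "example-client-secret",
		AuthURL:      "https://auth.example.com/authorize",
		TokenURL:     "https://auth.example.com/token",
		AuthStyle:    auth.StyleInParams,
		RefreshToken: "example-refresh-token",
	})
	if err != nil {
		log.Fatal(err)
	}

	// The returned provider caches tokens and refreshes them ahead of expiry,
	// so Token can be called per request.
	tok, err := tp.Token(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("access token expires at:", tok.Expiry)
}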

View file

@ -1,5 +1,12 @@
# Changes
## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.3...compute/metadata/v0.3.0) (2024-04-15)
### Features
* **compute/metadata:** Add context aware functions ([#9733](https://github.com/googleapis/google-cloud-go/issues/9733)) ([e4eb5b4](https://github.com/googleapis/google-cloud-go/commit/e4eb5b46ee2aec9d2fc18300bfd66015e25a0510))
## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15)

View file

@ -23,7 +23,7 @@ import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"io"
"net"
"net/http"
"net/url"
@ -95,9 +95,9 @@ func (c *cachedValue) get(cl *Client) (v string, err error) {
return c.v, nil
}
if c.trim {
v, err = cl.getTrimmed(c.k)
v, err = cl.getTrimmed(context.Background(), c.k)
} else {
v, err = cl.Get(c.k)
v, err = cl.GetWithContext(context.Background(), c.k)
}
if err == nil {
c.v = v
@ -197,18 +197,32 @@ func systemInfoSuggestsGCE() bool {
// We don't have any non-Linux clues available, at least yet.
return false
}
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
slurp, _ := os.ReadFile("/sys/class/dmi/id/product_name")
name := strings.TrimSpace(string(slurp))
return name == "Google" || name == "Google Compute Engine"
}
// Subscribe calls Client.Subscribe on the default client.
// Subscribe calls Client.SubscribeWithContext on the default client.
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
return defaultClient.Subscribe(suffix, fn)
return defaultClient.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) })
}
// Get calls Client.Get on the default client.
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
// SubscribeWithContext calls Client.SubscribeWithContext on the default client.
func SubscribeWithContext(ctx context.Context, suffix string, fn func(ctx context.Context, v string, ok bool) error) error {
return defaultClient.SubscribeWithContext(ctx, suffix, fn)
}
// Get calls Client.GetWithContext on the default client.
//
// Deprecated: Please use the context aware variant [GetWithContext].
func Get(suffix string) (string, error) {
return defaultClient.GetWithContext(context.Background(), suffix)
}
// GetWithContext calls Client.GetWithContext on the default client.
func GetWithContext(ctx context.Context, suffix string) (string, error) {
return defaultClient.GetWithContext(ctx, suffix)
}
// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return defaultClient.ProjectID() }
@ -288,8 +302,7 @@ func NewClient(c *http.Client) *Client {
// getETag returns a value from the metadata service as well as the associated ETag.
// This func is otherwise equivalent to Get.
func (c *Client) getETag(suffix string) (value, etag string, err error) {
ctx := context.TODO()
func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string, err error) {
// Using a fixed IP makes it very difficult to spoof the metadata service in
// a container, which is an important use-case for local testing of cloud
// deployments. To enable spoofing of the metadata service, the environment
@ -306,7 +319,7 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
}
suffix = strings.TrimLeft(suffix, "/")
u := "http://" + host + "/computeMetadata/v1/" + suffix
req, err := http.NewRequest("GET", u, nil)
req, err := http.NewRequestWithContext(ctx, "GET", u, nil)
if err != nil {
return "", "", err
}
@ -336,7 +349,7 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
all, err := ioutil.ReadAll(res.Body)
all, err := io.ReadAll(res.Body)
if err != nil {
return "", "", err
}
@ -354,19 +367,33 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
//
// Deprecated: Please use the context aware variant [Client.GetWithContext].
func (c *Client) Get(suffix string) (string, error) {
val, _, err := c.getETag(suffix)
return c.GetWithContext(context.Background(), suffix)
}
// GetWithContext returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func (c *Client) GetWithContext(ctx context.Context, suffix string) (string, error) {
val, _, err := c.getETag(ctx, suffix)
return val, err
}
func (c *Client) getTrimmed(suffix string) (s string, err error) {
s, err = c.Get(suffix)
func (c *Client) getTrimmed(ctx context.Context, suffix string) (s string, err error) {
s, err = c.GetWithContext(ctx, suffix)
s = strings.TrimSpace(s)
return
}
func (c *Client) lines(suffix string) ([]string, error) {
j, err := c.Get(suffix)
j, err := c.GetWithContext(context.Background(), suffix)
if err != nil {
return nil, err
}
@ -388,7 +415,7 @@ func (c *Client) InstanceID() (string, error) { return instID.get(c) }
// InternalIP returns the instance's primary internal IP address.
func (c *Client) InternalIP() (string, error) {
return c.getTrimmed("instance/network-interfaces/0/ip")
return c.getTrimmed(context.Background(), "instance/network-interfaces/0/ip")
}
// Email returns the email address associated with the service account.
@ -398,25 +425,25 @@ func (c *Client) Email(serviceAccount string) (string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email")
return c.getTrimmed(context.Background(), "instance/service-accounts/"+serviceAccount+"/email")
}
// ExternalIP returns the instance's primary external (public) IP address.
func (c *Client) ExternalIP() (string, error) {
return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
return c.getTrimmed(context.Background(), "instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func (c *Client) Hostname() (string, error) {
return c.getTrimmed("instance/hostname")
return c.getTrimmed(context.Background(), "instance/hostname")
}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func (c *Client) InstanceTags() ([]string, error) {
var s []string
j, err := c.Get("instance/tags")
j, err := c.GetWithContext(context.Background(), "instance/tags")
if err != nil {
return nil, err
}
@ -428,12 +455,12 @@ func (c *Client) InstanceTags() ([]string, error) {
// InstanceName returns the current VM's instance ID string.
func (c *Client) InstanceName() (string, error) {
return c.getTrimmed("instance/name")
return c.getTrimmed(context.Background(), "instance/name")
}
// Zone returns the current VM's zone, such as "us-central1-b".
func (c *Client) Zone() (string, error) {
zone, err := c.getTrimmed("instance/zone")
zone, err := c.getTrimmed(context.Background(), "instance/zone")
// zone is of the form "projects/<projNum>/zones/<zoneName>".
if err != nil {
return "", err
@ -460,7 +487,7 @@ func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
return c.Get("instance/attributes/" + attr)
return c.GetWithContext(context.Background(), "instance/attributes/"+attr)
}
// ProjectAttributeValue returns the value of the provided
@ -472,7 +499,7 @@ func (c *Client) InstanceAttributeValue(attr string) (string, error) {
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
return c.Get("project/attributes/" + attr)
return c.GetWithContext(context.Background(), "project/attributes/"+attr)
}
// Scopes returns the service account scopes for the given account.
@ -489,21 +516,30 @@ func (c *Client) Scopes(serviceAccount string) ([]string, error) {
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
// Subscribe calls fn with the latest metadata value indicated by the provided
// suffix. If the metadata value is deleted, fn is called with the empty string
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
// Deprecated: Please use the context aware variant [Client.SubscribeWithContext].
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
return c.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) })
}
// SubscribeWithContext subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
// SubscribeWithContext calls fn with the latest metadata value indicated by the
// provided suffix. If the metadata value is deleted, fn is called with the
// empty string and ok false. Subscribe blocks until fn returns a non-nil error
// or the value is deleted. Subscribe returns the error value returned from the
// last call to fn, which may be nil when ok == false.
func (c *Client) SubscribeWithContext(ctx context.Context, suffix string, fn func(ctx context.Context, v string, ok bool) error) error {
const failedSubscribeSleep = time.Second * 5
// First check to see if the metadata value exists at all.
val, lastETag, err := c.getETag(suffix)
val, lastETag, err := c.getETag(ctx, suffix)
if err != nil {
return err
}
if err := fn(val, true); err != nil {
if err := fn(ctx, val, true); err != nil {
return err
}
@ -514,7 +550,7 @@ func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) erro
suffix += "?wait_for_change=true&last_etag="
}
for {
val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
val, etag, err := c.getETag(ctx, suffix+url.QueryEscape(lastETag))
if err != nil {
if _, deleted := err.(NotDefinedError); !deleted {
time.Sleep(failedSubscribeSleep)
@ -524,7 +560,7 @@ func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) erro
}
lastETag = etag
if err := fn(val, ok); err != nil || !ok {
if err := fn(ctx, val, ok); err != nil || !ok {
return err
}
}
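
The context-aware accessors introduced in the metadata hunks above can be exercised roughly as follows; this sketch assumes it runs somewhere that can reach a GCE metadata server, and the suffixes are only examples.

package main

import (
	"context"
	"errors"
	"fmt"
	"log"
	"time"

	"cloud.google.com/go/compute/metadata"
)

// errStop is a sentinel used to end the subscription after one callback.
var errStop = errors.New("stop after first notification")

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// GetWithContext replaces the deprecated Get; the lookup is bounded by ctx.
	name, err := metadata.GetWithContext(ctx, "instance/name")
	if err != nil {
		log.Fatalf("metadata lookup failed (likely not running on GCE): %v", err)
	}
	fmt.Println("instance name:", name)

	// SubscribeWithContext now passes the context through to the callback.
	// Returning a non-nil error from the callback ends the subscription.
	err = metadata.SubscribeWithContext(ctx, "instance/maintenance-event",
		func(_ context.Context, v string, ok bool) error {
			fmt.Println("maintenance event:", v, "defined:", ok)
			return errStop
		})
	if err != nil && !errors.Is(err, errStop) {
		log.Println("subscribe ended with error:", err)
	}
}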

View file

@ -27,7 +27,7 @@ const (
)
var (
syscallRetryable = func(err error) bool { return false }
syscallRetryable = func(error) bool { return false }
)
// defaultBackoff is basically equivalent to gax.Backoff without the need for

View file

@ -6905,6 +6905,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-south-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@ -6932,6 +6935,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-south-2",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@ -7044,6 +7050,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-south-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@ -7071,6 +7080,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-south-2",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@ -17098,6 +17110,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "kafka-fips.ca-central-1.amazonaws.com",
},
endpointKey{
Region: "ca-west-1",
}: endpoint{},
endpointKey{
Region: "ca-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "kafka-fips.ca-west-1.amazonaws.com",
},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@ -17131,6 +17152,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-ca-west-1",
}: endpoint{
Hostname: "kafka-fips.ca-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ca-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@ -26792,6 +26822,44 @@ var awsPartition = partition{
},
},
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
}: endpoint{
Hostname: "s3-control.af-south-1.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "af-south-1",
},
},
endpointKey{
Region: "af-south-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "s3-control.dualstack.af-south-1.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "af-south-1",
},
},
endpointKey{
Region: "ap-east-1",
}: endpoint{
Hostname: "s3-control.ap-east-1.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "ap-east-1",
},
},
endpointKey{
Region: "ap-east-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "s3-control.dualstack.ap-east-1.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "ap-east-1",
},
},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{
@ -26868,6 +26936,25 @@ var awsPartition = partition{
Region: "ap-south-1",
},
},
endpointKey{
Region: "ap-south-2",
}: endpoint{
Hostname: "s3-control.ap-south-2.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "ap-south-2",
},
},
endpointKey{
Region: "ap-south-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "s3-control.dualstack.ap-south-2.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "ap-south-2",
},
},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{
@ -26906,6 +26993,44 @@ var awsPartition = partition{
Region: "ap-southeast-2",
},
},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{
Hostname: "s3-control.ap-southeast-3.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "ap-southeast-3",
},
},
endpointKey{
Region: "ap-southeast-3",
Variant: dualStackVariant,
}: endpoint{
Hostname: "s3-control.dualstack.ap-southeast-3.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "ap-southeast-3",
},
},
endpointKey{
Region: "ap-southeast-4",
}: endpoint{
Hostname: "s3-control.ap-southeast-4.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "ap-southeast-4",
},
},
endpointKey{
Region: "ap-southeast-4",
Variant: dualStackVariant,
}: endpoint{
Hostname: "s3-control.dualstack.ap-southeast-4.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "ap-southeast-4",
},
},
endpointKey{
Region: "ca-central-1",
}: endpoint{
@ -26974,6 +27099,25 @@ var awsPartition = partition{
Region: "eu-central-1",
},
},
endpointKey{
Region: "eu-central-2",
}: endpoint{
Hostname: "s3-control.eu-central-2.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "eu-central-2",
},
},
endpointKey{
Region: "eu-central-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "s3-control.dualstack.eu-central-2.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "eu-central-2",
},
},
endpointKey{
Region: "eu-north-1",
}: endpoint{
@ -26993,6 +27137,44 @@ var awsPartition = partition{
Region: "eu-north-1",
},
},
endpointKey{
Region: "eu-south-1",
}: endpoint{
Hostname: "s3-control.eu-south-1.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "eu-south-1",
},
},
endpointKey{
Region: "eu-south-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "s3-control.dualstack.eu-south-1.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "eu-south-1",
},
},
endpointKey{
Region: "eu-south-2",
}: endpoint{
Hostname: "s3-control.eu-south-2.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "eu-south-2",
},
},
endpointKey{
Region: "eu-south-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "s3-control.dualstack.eu-south-2.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "eu-south-2",
},
},
endpointKey{
Region: "eu-west-1",
}: endpoint{
@ -27050,6 +27232,63 @@ var awsPartition = partition{
Region: "eu-west-3",
},
},
endpointKey{
Region: "il-central-1",
}: endpoint{
Hostname: "s3-control.il-central-1.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "il-central-1",
},
},
endpointKey{
Region: "il-central-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "s3-control.dualstack.il-central-1.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "il-central-1",
},
},
endpointKey{
Region: "me-central-1",
}: endpoint{
Hostname: "s3-control.me-central-1.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "me-central-1",
},
},
endpointKey{
Region: "me-central-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "s3-control.dualstack.me-central-1.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "me-central-1",
},
},
endpointKey{
Region: "me-south-1",
}: endpoint{
Hostname: "s3-control.me-south-1.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "me-south-1",
},
},
endpointKey{
Region: "me-south-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "s3-control.dualstack.me-south-1.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "me-south-1",
},
},
endpointKey{
Region: "sa-east-1",
}: endpoint{
@ -28362,21 +28601,85 @@ var awsPartition = partition{
}: endpoint{
Protocols: []string{"https"},
},
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "serverlessrepo-fips.us-east-1.amazonaws.com",
Protocols: []string{"https"},
},
endpointKey{
Region: "us-east-1-fips",
}: endpoint{
Hostname: "serverlessrepo-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-east-2",
}: endpoint{
Protocols: []string{"https"},
},
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "serverlessrepo-fips.us-east-2.amazonaws.com",
Protocols: []string{"https"},
},
endpointKey{
Region: "us-east-2-fips",
}: endpoint{
Hostname: "serverlessrepo-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-west-1",
}: endpoint{
Protocols: []string{"https"},
},
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "serverlessrepo-fips.us-west-1.amazonaws.com",
Protocols: []string{"https"},
},
endpointKey{
Region: "us-west-1-fips",
}: endpoint{
Hostname: "serverlessrepo-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-west-2",
}: endpoint{
Protocols: []string{"https"},
},
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "serverlessrepo-fips.us-west-2.amazonaws.com",
Protocols: []string{"https"},
},
endpointKey{
Region: "us-west-2-fips",
}: endpoint{
Hostname: "serverlessrepo-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
},
},
"servicecatalog": service{
@ -32908,6 +33211,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "verifiedpermissions-fips.ca-central-1.amazonaws.com",
},
endpointKey{
Region: "ca-west-1",
}: endpoint{},
endpointKey{
Region: "ca-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "verifiedpermissions-fips.ca-west-1.amazonaws.com",
},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@ -32941,6 +33253,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-ca-west-1",
}: endpoint{
Hostname: "verifiedpermissions-fips.ca-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ca-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@ -32977,6 +33298,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "il-central-1",
}: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@ -38223,13 +38547,45 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{
Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com",
Protocols: []string{"http", "https"},
},
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com",
Protocols: []string{"http", "https"},
},
endpointKey{
Region: "us-gov-east-1-fips",
}: endpoint{
Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com",
Protocols: []string{"http", "https"},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com",
Protocols: []string{"http", "https"},
},
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com",
Protocols: []string{"http", "https"},
},
endpointKey{
Region: "us-gov-west-1-fips",
}: endpoint{
Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com",
Protocols: []string{"http", "https"},
Deprecated: boxedTrue,
},
},
},
"backup": service{
@ -38943,9 +39299,39 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "controltower-fips.us-gov-east-1.amazonaws.com",
},
endpointKey{
Region: "us-gov-east-1-fips",
}: endpoint{
Hostname: "controltower-fips.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "controltower-fips.us-gov-west-1.amazonaws.com",
},
endpointKey{
Region: "us-gov-west-1-fips",
}: endpoint{
Hostname: "controltower-fips.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
Deprecated: boxedTrue,
},
},
},
"data-ats.iot": service{
@ -43927,6 +44313,46 @@ var awsusgovPartition = partition{
},
},
},
"verifiedpermissions": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "fips-us-gov-east-1",
}: endpoint{
Hostname: "verifiedpermissions-fips.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-gov-west-1",
}: endpoint{
Hostname: "verifiedpermissions-fips.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "verifiedpermissions-fips.us-gov-east-1.amazonaws.com",
},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "verifiedpermissions-fips.us-gov-west-1.amazonaws.com",
},
},
},
"waf-regional": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -45237,6 +45663,114 @@ var awsisoPartition = partition{
},
},
},
"s3-control": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
Protocols: []string{"https"},
SignatureVersions: []string{"s3v4"},
},
},
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-iso-east-1",
}: endpoint{
Hostname: "s3-control.us-iso-east-1.c2s.ic.gov",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "us-iso-east-1",
},
},
endpointKey{
Region: "us-iso-east-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "s3-control.dualstack.us-iso-east-1.c2s.ic.gov",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "us-iso-east-1",
},
},
endpointKey{
Region: "us-iso-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "s3-control-fips.us-iso-east-1.c2s.ic.gov",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "us-iso-east-1",
},
},
endpointKey{
Region: "us-iso-east-1",
Variant: fipsVariant | dualStackVariant,
}: endpoint{
Hostname: "s3-control-fips.dualstack.us-iso-east-1.c2s.ic.gov",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "us-iso-east-1",
},
},
endpointKey{
Region: "us-iso-east-1-fips",
}: endpoint{
Hostname: "s3-control-fips.us-iso-east-1.c2s.ic.gov",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "us-iso-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-iso-west-1",
}: endpoint{
Hostname: "s3-control.us-iso-west-1.c2s.ic.gov",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "us-iso-west-1",
},
},
endpointKey{
Region: "us-iso-west-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "s3-control.dualstack.us-iso-west-1.c2s.ic.gov",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "us-iso-west-1",
},
},
endpointKey{
Region: "us-iso-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "s3-control-fips.us-iso-west-1.c2s.ic.gov",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "us-iso-west-1",
},
},
endpointKey{
Region: "us-iso-west-1",
Variant: fipsVariant | dualStackVariant,
}: endpoint{
Hostname: "s3-control-fips.dualstack.us-iso-west-1.c2s.ic.gov",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "us-iso-west-1",
},
},
endpointKey{
Region: "us-iso-west-1-fips",
}: endpoint{
Hostname: "s3-control-fips.us-iso-west-1.c2s.ic.gov",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "us-iso-west-1",
},
Deprecated: boxedTrue,
},
},
},
"s3-outposts": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -45269,6 +45803,9 @@ var awsisoPartition = partition{
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
endpointKey{
Region: "us-iso-west-1",
}: endpoint{},
},
},
"sns": service{
@ -45787,6 +46324,13 @@ var awsisobPartition = partition{
}: endpoint{},
},
},
"firehose": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-isob-east-1",
}: endpoint{},
},
},
"glacier": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -46107,6 +46651,65 @@ var awsisobPartition = partition{
},
},
},
"s3-control": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
Protocols: []string{"https"},
SignatureVersions: []string{"s3v4"},
},
},
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-isob-east-1",
}: endpoint{
Hostname: "s3-control.us-isob-east-1.sc2s.sgov.gov",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "us-isob-east-1",
},
},
endpointKey{
Region: "us-isob-east-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "s3-control.dualstack.us-isob-east-1.sc2s.sgov.gov",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "us-isob-east-1",
},
},
endpointKey{
Region: "us-isob-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "s3-control-fips.us-isob-east-1.sc2s.sgov.gov",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "us-isob-east-1",
},
},
endpointKey{
Region: "us-isob-east-1",
Variant: fipsVariant | dualStackVariant,
}: endpoint{
Hostname: "s3-control-fips.dualstack.us-isob-east-1.sc2s.sgov.gov",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "us-isob-east-1",
},
},
endpointKey{
Region: "us-isob-east-1-fips",
}: endpoint{
Hostname: "s3-control-fips.us-isob-east-1.sc2s.sgov.gov",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
Region: "us-isob-east-1",
},
Deprecated: boxedTrue,
},
},
},
"s3-outposts": service{
Endpoints: serviceEndpoints{
endpointKey{

View file

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.51.19"
const SDKVersion = "1.51.25"

View file

@ -4006,6 +4006,11 @@ func (c *EC2) CancelSpotFleetRequestsRequest(input *CancelSpotFleetRequestsInput
// enters the cancelled_running state and the instances continue to run until
// they are interrupted or you terminate them manually.
//
// Restrictions
//
// - You can delete up to 100 fleets in a single request. If you exceed the
// specified number, no fleets are deleted.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@ -11804,17 +11809,22 @@ func (c *EC2) DeleteFleetsRequest(input *DeleteFleetsInput) (req *request.Reques
// manually.
//
// For instant fleets, EC2 Fleet must terminate the instances when the fleet
// is deleted. A deleted instant fleet with running instances is not supported.
// is deleted. Up to 1000 instances can be terminated in a single request to
// delete instant fleets. A deleted instant fleet with running instances is
// not supported.
//
// Restrictions
//
// - You can delete up to 25 instant fleets in a single request. If you exceed
// this number, no instant fleets are deleted and an error is returned. There
// is no restriction on the number of fleets of type maintain or request
// that can be deleted in a single request.
// - You can delete up to 25 fleets of type instant in a single request.
//
// - Up to 1000 instances can be terminated in a single request to delete
// instant fleets.
// - You can delete up to 100 fleets of type maintain or request in a single
// request.
//
// - You can delete up to 125 fleets in a single request, provided you do
// not exceed the quota for each fleet type, as specified above.
//
// - If you exceed the specified number of fleets to delete, no fleets are
// deleted.
//
// For more information, see Delete an EC2 Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/manage-ec2-fleet.html#delete-fleet)
// in the Amazon EC2 User Guide.
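
For reference, a hedged sketch of a DeleteFleets call that stays within the limits documented above; it assumes the default credential chain, and the fleet ID is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := ec2.New(sess)

	// Up to 25 instant fleets (or 100 maintain/request fleets) may be deleted
	// per call; exceeding the limit deletes nothing.
	out, err := svc.DeleteFleets(&ec2.DeleteFleetsInput{
		FleetIds:           []*string{aws.String("fleet-0123456789abcdef0")}, // placeholder ID
		TerminateInstances: aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("fleets deleted:", len(out.SuccessfulFleetDeletions))
}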
@ -23467,9 +23477,9 @@ func (c *EC2) DescribeInstanceTypeOfferingsRequest(input *DescribeInstanceTypeOf
// DescribeInstanceTypeOfferings API operation for Amazon Elastic Compute Cloud.
//
// Returns a list of all instance types offered. The results can be filtered
// by location (Region or Availability Zone). If no location is specified, the
// instance types offered in the current Region are returned.
// Lists the instance types that are offered for the specified location. If
// no location is specified, the default is to list the instance types that
// are offered in the current Region.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@ -23599,8 +23609,8 @@ func (c *EC2) DescribeInstanceTypesRequest(input *DescribeInstanceTypesInput) (r
// DescribeInstanceTypes API operation for Amazon Elastic Compute Cloud.
//
// Describes the details of the instance types that are offered in a location.
// The results can be filtered by the attributes of the instance types.
// Describes the specified instance types. By default, all instance types for
// the current Region are described. Alternatively, you can filter the results.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@ -40523,6 +40533,9 @@ func (c *EC2) GetConsoleScreenshotRequest(input *GetConsoleScreenshotInput) (req
//
// The returned content is Base64-encoded.
//
// For more information, see Instance console output (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/troubleshoot-unreachable-instance.html#instance-console-console-output)
// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@ -47291,9 +47304,9 @@ func (c *EC2) ModifyInstanceMetadataDefaultsRequest(input *ModifyInstanceMetadat
// level in the specified Amazon Web Services Region.
//
// To remove a parameter's account-level default setting, specify no-preference.
// At instance launch, the value will come from the AMI, or from the launch
// parameter if specified. For more information, see Order of precedence for
// instance metadata options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-options.html#instance-metadata-options-order-of-precedence)
// If an account-level setting is cleared with no-preference, then the instance
// launch considers the other instance metadata settings. For more information,
// see Order of precedence for instance metadata options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-options.html#instance-metadata-options-order-of-precedence)
// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@ -65615,6 +65628,8 @@ type CancelSpotFleetRequestsInput struct {
// The IDs of the Spot Fleet requests.
//
// Constraint: You can specify up to 100 IDs in a single request.
//
// SpotFleetRequestIds is a required field
SpotFleetRequestIds []*string `locationName:"spotFleetRequestId" locationNameList:"item" type:"list" required:"true"`
@ -71954,11 +71969,11 @@ type CreateFleetError struct {
_ struct{} `type:"structure"`
// The error code that indicates why the instance could not be launched. For
// more information about error codes, see Error codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html.html).
// more information about error codes, see Error codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html).
ErrorCode *string `locationName:"errorCode" type:"string"`
// The error message that describes why the instance could not be launched.
// For more information about error messages, see Error codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html.html).
// For more information about error messages, see Error codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html).
ErrorMessage *string `locationName:"errorMessage" type:"string"`
// The launch templates and overrides that were used for launching the instances.
@ -84237,6 +84252,9 @@ type DeleteFleetsInput struct {
// The IDs of the EC2 Fleets.
//
// Constraints: In a single request, you can specify up to 25 instant fleet
// IDs and up to 100 maintain or request fleet IDs.
//
// FleetIds is a required field
FleetIds []*string `locationName:"FleetId" type:"list" required:"true"`
@ -98574,14 +98592,26 @@ type DescribeInstanceTypeOfferingsInput struct {
// One or more filters. Filter names and values are case-sensitive.
//
// * location - This depends on the location type. For example, if the location
// type is region (default), the location is the Region code (for example,
// us-east-2.)
// * instance-type - The instance type. For a list of possible values, see
// Instance (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_Instance.html).
//
// * instance-type - The instance type. For example, c5.2xlarge.
// * location - The location. For a list of possible identifiers, see Regions
// and Zones (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html).
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
// The location type.
//
// * availability-zone - The Availability Zone. When you specify a location
// filter, it must be an Availability Zone for the current Region.
//
// * availability-zone-id - The AZ ID. When you specify a location filter,
// it must be an AZ ID for the current Region.
//
// * outpost - The Outpost ARN. When you specify a location filter, it must
// be an Outpost ARN for the current Region.
//
// * region - The current Region. If you specify a location filter, it must
// match the current Region.
LocationType *string `type:"string" enum:"LocationType"`
// The maximum number of items to return for this request. To get the next page
@ -98658,7 +98688,7 @@ func (s *DescribeInstanceTypeOfferingsInput) SetNextToken(v string) *DescribeIns
type DescribeInstanceTypeOfferingsOutput struct {
_ struct{} `type:"structure"`
// The instance types offered.
// The instance types offered in the location.
InstanceTypeOfferings []*InstanceTypeOffering `locationName:"instanceTypeOfferingSet" locationNameList:"item" type:"list"`
// The token to include in another request to get the next page of items. This
@ -98850,8 +98880,7 @@ type DescribeInstanceTypesInput struct {
// can be configured for the instance type. For example, "1" or "1,2".
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
// The instance types. For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
// in the Amazon EC2 User Guide.
// The instance types.
InstanceTypes []*string `locationName:"InstanceType" type:"list" enum:"InstanceType"`
// The maximum number of items to return for this request. To get the next page
@ -98928,8 +98957,7 @@ func (s *DescribeInstanceTypesInput) SetNextToken(v string) *DescribeInstanceTyp
type DescribeInstanceTypesOutput struct {
_ struct{} `type:"structure"`
// The instance type. For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
// in the Amazon EC2 User Guide.
// The instance type.
InstanceTypes []*InstanceTypeInfo `locationName:"instanceTypeSet" locationNameList:"item" type:"list"`
// The token to include in another request to get the next page of items. This
@ -150096,11 +150124,10 @@ type ModifyInstanceMetadataDefaultsInput struct {
// instance metadata can't be accessed.
HttpEndpoint *string `type:"string" enum:"DefaultInstanceMetadataEndpointState"`
// The maximum number of hops that the metadata token can travel.
// The maximum number of hops that the metadata token can travel. To indicate
// no preference, specify -1.
//
// Minimum: 1
//
// Maximum: 64
// Possible values: Integers from 1 to 64, and -1 to indicate no preference
HttpPutResponseHopLimit *int64 `type:"integer"`
// Indicates whether IMDSv2 is required.
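
A minimal, hypothetical invocation of ModifyInstanceMetadataDefaults that sets only the fields visible in this hunk; per the documentation above, -1 clears the account-level hop-limit preference.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// "enabled" keeps the IMDS endpoint reachable; -1 means no preference
	// for the PUT response hop limit.
	_, err := svc.ModifyInstanceMetadataDefaults(&ec2.ModifyInstanceMetadataDefaultsInput{
		HttpEndpoint:            aws.String("enabled"),
		HttpPutResponseHopLimit: aws.Int64(-1),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("account-level IMDS defaults updated")
}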
@ -167250,8 +167277,7 @@ type RequestLaunchTemplateData struct {
// The monitoring for the instance.
Monitoring *LaunchTemplatesMonitoringRequest `type:"structure"`
// One or more network interfaces. If you specify a network interface, you must
// specify any security groups and subnets as part of the network interface.
// The network interfaces for the instance.
NetworkInterfaces []*LaunchTemplateInstanceNetworkInterfaceSpecificationRequest `locationName:"NetworkInterface" locationNameList:"InstanceNetworkInterfaceSpecification" type:"list"`
// The placement for the instance.
@ -167268,12 +167294,17 @@ type RequestLaunchTemplateData struct {
// in the Amazon Elastic Compute Cloud User Guide.
RamDiskId *string `type:"string"`
// One or more security group IDs. You can create a security group using CreateSecurityGroup
// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateSecurityGroup.html).
// The IDs of the security groups.
//
// If you specify a network interface, you must specify any security groups
// as part of the network interface instead of using this parameter.
SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"`
// One or more security group names. For a nondefault VPC, you must use security
// The names of the security groups. For a nondefault VPC, you must use security
// group IDs instead.
//
// If you specify a network interface, you must specify any security groups
// as part of the network interface instead of using this parameter.
SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"SecurityGroup" type:"list"`
// The tags to apply to the resources that are created during instance launch.
@ -171920,26 +171951,15 @@ type RunInstancesInput struct {
// Default: false
EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"`
// Deprecated.
// An elastic GPU to associate with the instance.
//
// Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads
// that require graphics acceleration, we recommend that you use Amazon EC2
// G4ad, G4dn, or G5 instances.
// Amazon Elastic Graphics reached end of life on January 8, 2024.
ElasticGpuSpecification []*ElasticGpuSpecification `locationNameList:"item" type:"list"`
// An elastic inference accelerator to associate with the instance. Elastic
// inference accelerators are a resource you can attach to your Amazon EC2 instances
// to accelerate your Deep Learning (DL) inference workloads.
// An elastic inference accelerator to associate with the instance.
//
// You cannot specify accelerators from different generations in the same request.
//
// Starting April 15, 2023, Amazon Web Services will not onboard new customers
// to Amazon Elastic Inference (EI), and will help current customers migrate
// their workloads to options that offer better price and performance. After
// April 15, 2023, new customers will not be able to launch instances with Amazon
// EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However,
// customers who have used Amazon EI at least once during the past 30-day period
// are considered current customers and will be able to continue using the service.
// Amazon Elastic Inference (EI) is no longer available to new customers. For
// more information, see Amazon Elastic Inference FAQs (http://aws.amazon.com/machine-learning/elastic-inference/faqs/).
ElasticInferenceAccelerators []*ElasticInferenceAccelerator `locationName:"ElasticInferenceAccelerator" locationNameList:"item" type:"list"`
// If you're launching an instance into a dual-stack or IPv6-only subnet,
@ -172072,9 +172092,7 @@ type RunInstancesInput struct {
// Specifies whether detailed monitoring is enabled for the instance.
Monitoring *RunInstancesMonitoringEnabled `type:"structure"`
// The network interfaces to associate with the instance. If you specify a network
// interface, you must specify any security groups and subnets as part of the
// network interface.
// The network interfaces to associate with the instance.
NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterface" locationNameList:"item" type:"list"`
// The placement for the instance.
@ -172111,13 +172129,13 @@ type RunInstancesInput struct {
// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateSecurityGroup.html).
//
// If you specify a network interface, you must specify any security groups
// as part of the network interface.
// as part of the network interface instead of using this parameter.
SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"`
// [Default VPC] The names of the security groups.
//
// If you specify a network interface, you must specify any security groups
// as part of the network interface.
// as part of the network interface instead of using this parameter.
//
// Default: Amazon EC2 uses the default security group.
SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"SecurityGroup" type:"list"`
@ -172125,7 +172143,7 @@ type RunInstancesInput struct {
// The ID of the subnet to launch the instance into.
//
// If you specify a network interface, you must specify any subnets as part
// of the network interface.
// of the network interface instead of using this parameter.
SubnetId *string `type:"string"`
// The tags to apply to the resources that are created during instance launch.
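Editorial note: a hedged sketch of the clarified guidance above (when a network interface is specified, the subnet and security groups go on the interface rather than on SubnetId/SecurityGroupIds). The AMI, subnet, and security group IDs are placeholders, not values from this diff.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	_, err := svc.RunInstances(&ec2.RunInstancesInput{
		ImageId:      aws.String("ami-0123456789abcdef0"), // placeholder AMI ID
		InstanceType: aws.String("t3.micro"),
		MinCount:     aws.Int64(1),
		MaxCount:     aws.Int64(1),
		// Subnet and security groups are set on the interface, not via SubnetId/SecurityGroupIds.
		NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{{
			DeviceIndex: aws.Int64(0),
			SubnetId:    aws.String("subnet-0987cdef6example2"),      // placeholder
			Groups:      []*string{aws.String("sg-0abc1234def56789")}, // placeholder
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
}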
@ -174219,7 +174237,8 @@ type SearchTransitGatewayRoutesInput struct {
// Filters is a required field
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list" required:"true"`
// The maximum number of routes to return.
// The maximum number of routes to return. If a value is not provided, the default
// is 1000.
MaxResults *int64 `min:"5" type:"integer"`
// The ID of the transit gateway route table.
@ -176549,11 +176568,11 @@ type SpotFleetLaunchSpecification struct {
// Enable or disable monitoring for the instances.
Monitoring *SpotFleetMonitoring `locationName:"monitoring" type:"structure"`
// One or more network interfaces. If you specify a network interface, you must
// specify subnet IDs and security group IDs using the network interface.
// The network interfaces.
//
// SpotFleetLaunchSpecification currently does not support Elastic Fabric Adapter
// (EFA). To specify an EFA, you must use LaunchTemplateConfig (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_LaunchTemplateConfig.html).
// SpotFleetLaunchSpecification does not support Elastic Fabric Adapter (EFA).
// You must use LaunchTemplateConfig (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_LaunchTemplateConfig.html)
// instead.
NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"`
// The placement information.
@ -176566,6 +176585,9 @@ type SpotFleetLaunchSpecification struct {
RamdiskId *string `locationName:"ramdiskId" type:"string"`
// The security groups.
//
// If you specify a network interface, you must specify any security groups
// as part of the network interface instead of using this parameter.
SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"`
// The maximum price per unit hour that you are willing to pay for a Spot Instance.
@ -176580,6 +176602,9 @@ type SpotFleetLaunchSpecification struct {
// The IDs of the subnets in which to launch the instances. To specify multiple
// subnets, separate them using commas; for example, "subnet-1234abcdeexample1,
// subnet-0987cdef6example2".
//
// If you specify a network interface, you must specify any subnets as part
// of the network interface instead of using this parameter.
SubnetId *string `locationName:"subnetId" type:"string"`
// The tags to apply during creation.

View file

@ -7,7 +7,8 @@ import (
type Arch uint64
const ( // architecture enum
ARCH_AARCH64 Arch = iota
ARCH_UNSET Arch = iota
ARCH_AARCH64
ARCH_PPC64LE
ARCH_S390X
ARCH_X86_64
@ -15,6 +16,8 @@ const ( // architecture enum
func (a Arch) String() string {
switch a {
case ARCH_UNSET:
return "unset"
case ARCH_AARCH64:
return "aarch64"
case ARCH_PPC64LE:
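Editorial note: a small sketch (not from the diff) of what the new sentinel gives callers of the arch package: the zero value no longer silently reads as aarch64.

package main

import (
	"fmt"

	"github.com/osbuild/images/pkg/arch"
)

func main() {
	// The zero value is now an explicit "unset" marker.
	var a arch.Arch
	fmt.Println(a == arch.ARCH_UNSET) // true
	fmt.Println(a.String())           // "unset"
}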

View file

@ -16,8 +16,6 @@ import (
_ "github.com/containers/image/v5/oci/layout"
"golang.org/x/sys/unix"
"github.com/osbuild/images/internal/common"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/copy"
"github.com/containers/image/v5/docker"
@ -30,6 +28,9 @@ import (
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/arch"
)
const (
@ -334,6 +335,7 @@ func (cl *Client) UploadImage(ctx context.Context, from, tag string) (digest.Dig
type RawManifest struct {
Data []byte
MimeType string
Arch arch.Arch
}
// Digest computes the digest from the raw manifest data
@ -368,6 +370,17 @@ func (cl *Client) GetManifest(ctx context.Context, digest digest.Digest, local b
if err != nil {
return
}
img, err := ref.NewImage(ctx, cl.sysCtx)
if err != nil {
return r, err
}
defer img.Close()
info, err := img.Inspect(ctx)
if err != nil {
return r, err
}
r.Arch = arch.FromString(info.Architecture)
src, err := ref.NewImageSource(ctx, cl.sysCtx)
if err != nil {
@ -515,6 +528,7 @@ func (cl *Client) Resolve(ctx context.Context, name string, local bool) (Spec, e
name,
local,
)
spec.Arch = raw.Arch
return spec, nil
}

View file

@ -30,6 +30,7 @@ type SourceSpec struct {
Local bool
}
// XXX: use arch.Arch here?
func NewResolver(arch string) *Resolver {
return &Resolver{
ctx: context.Background(),

View file

@ -3,6 +3,8 @@ package container
import (
"github.com/containers/image/v5/docker/reference"
"github.com/opencontainers/go-digest"
"github.com/osbuild/images/pkg/arch"
)
// A Spec is the specification of how to get a specific
@ -18,6 +20,8 @@ type Spec struct {
LocalName string // name to use inside the image
ListDigest string // digest of the list manifest at the Source (optional)
LocalStorage bool
Arch arch.Arch // the architecture of the image
}
// NewSpec creates a new Spec from the essential information.
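Editorial note: since GetManifest now inspects the image and Resolve records the result in Spec.Arch, callers can sanity-check the resolved architecture. The helper below is hypothetical and uses only fields shown in this diff.

package main

import (
	"fmt"

	"github.com/osbuild/images/pkg/arch"
	"github.com/osbuild/images/pkg/container"
)

// checkSpecArch is a hypothetical helper: it rejects a resolved container spec
// whose architecture is known and differs from the one the image is built for.
func checkSpecArch(spec container.Spec, want arch.Arch) error {
	if spec.Arch != arch.ARCH_UNSET && spec.Arch != want {
		return fmt.Errorf("container %q resolved to %s, want %s", spec.LocalName, spec.Arch, want)
	}
	return nil
}

func main() {
	spec := container.Spec{LocalName: "example", Arch: arch.ARCH_X86_64}
	fmt.Println(checkSpecArch(spec, arch.ARCH_X86_64)) // <nil>
}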

View file

@ -41,6 +41,9 @@ type Distro interface {
// Returns the name of the distro.
Name() string
// Returns the codename of the distro.
Codename() string
// Returns the release version of the distro. This is used in repo
// files on the host system and is required for subscription support.
Releasever() string


View file

@ -469,6 +469,10 @@ func (d *distribution) Name() string {
return d.name
}
func (d *distribution) Codename() string {
return "" // Fedora does not use distro codename
}
func (d *distribution) Releasever() string {
return d.releaseVersion
}

View file

@ -80,6 +80,10 @@ type ImageConfig struct {
// if no datastream value is provided by the user.
DefaultOSCAPDatastream *string
// NoBLS configures the image bootloader with traditional menu entries
// instead of BLS. Required for legacy systems like RHEL 7.
NoBLS *bool
// OSTree specific configuration
// Read only sysroot and boot
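Editorial note: a brief sketch (not part of the diff) of how a distro-level NoBLS default might propagate to an image-type config, assuming ImageConfig.InheritFrom fills unset (nil) fields from the parent config, as the distro code relies on elsewhere in this diff.

package main

import (
	"fmt"

	"github.com/osbuild/images/pkg/distro"
)

func main() {
	noBLS := true
	distroDefaults := &distro.ImageConfig{NoBLS: &noBLS}

	// An image-type config without an explicit NoBLS value inherits the distro default.
	typeConfig := (&distro.ImageConfig{}).InheritFrom(distroDefaults)
	fmt.Println(*typeConfig.NoBLS) // true
}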

View file

@ -16,6 +16,7 @@ type DefaultDistroImageConfigFunc func(d *Distribution) *distro.ImageConfig
type Distribution struct {
name string
DistCodename string
product string
osVersion string
releaseVersion string
@ -34,6 +35,10 @@ func (d *Distribution) Name() string {
return d.name
}
func (d *Distribution) Codename() string {
return d.DistCodename
}
func (d *Distribution) Releasever() string {
return d.releaseVersion
}

View file

@ -134,6 +134,7 @@ func osCustomizations(
// Relabel the tree, unless the `NoSElinux` flag is explicitly set to `true`
if imageConfig.NoSElinux == nil || imageConfig.NoSElinux != nil && !*imageConfig.NoSElinux {
osc.SElinux = "targeted"
osc.SELinuxForceRelabel = imageConfig.SELinuxForceRelabel
}
if t.IsRHEL() && options.Facts != nil {
@ -272,6 +273,7 @@ func osCustomizations(
osc.Sysctld = imageConfig.Sysctld
osc.DNFConfig = imageConfig.DNFConfig
osc.DNFAutomaticConfig = imageConfig.DNFAutomaticConfig
osc.YUMConfig = imageConfig.YumConfig
osc.SshdConfig = imageConfig.SshdConfig
osc.AuthConfig = imageConfig.Authconfig
osc.PwQuality = imageConfig.PwQuality
@ -285,6 +287,10 @@ func osCustomizations(
osc.Files = append(osc.Files, imageConfig.Files...)
osc.Directories = append(osc.Directories, imageConfig.Directories...)
if imageConfig.NoBLS != nil {
osc.NoBLS = *imageConfig.NoBLS
}
return osc, nil
}
@ -391,6 +397,18 @@ func DiskImage(workload workload.Workload,
img.Filename = t.Filename()
img.VPCForceSize = t.DiskImageVPCForceSize
if img.OSCustomizations.NoBLS {
img.OSProduct = t.Arch().Distro().Product()
img.OSVersion = t.Arch().Distro().OsVersion()
img.OSNick = t.Arch().Distro().Codename()
}
if t.DiskImagePartTool != nil {
img.PartTool = *t.DiskImagePartTool
}
return img, nil
}

View file

@ -15,6 +15,7 @@ import (
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/image"
"github.com/osbuild/images/pkg/manifest"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
"github.com/osbuild/images/pkg/rpmmd"
)
@ -84,6 +85,10 @@ type ImageType struct {
UnsupportedPartitioningModes []disk.PartitioningMode
ISOLabelFn ISOLabelFunc
// TODO: determine a better place for these options, but for now they are here
DiskImagePartTool *osbuild.PartTool
DiskImageVPCForceSize *bool
}
func (t *ImageType) Name() string {
@ -305,6 +310,8 @@ func (t *ImageType) Manifest(bp *blueprint.Blueprint,
mf := manifest.New()
switch t.Arch().Distro().Releasever() {
case "7":
mf.Distro = manifest.DISTRO_EL7
case "8":
mf.Distro = manifest.DISTRO_EL8
case "9":

View file

@ -9,7 +9,7 @@ import (
)
// TODO: move these to the EC2 environment
const amiKernelOptions = "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295"
const amiKernelOptions = "console=tty0 console=ttyS0,115200n8 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295"
// default EC2 images config (common for all architectures)
func baseEc2ImageConfig() *distro.ImageConfig {

View file

@ -0,0 +1,289 @@
package rhel7
import (
"os"
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/customizations/fsnode"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distro/rhel"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/rpmmd"
)
const (
ec2KernelOptions = "ro console=tty0 console=ttyS0,115200n8 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 crashkernel=auto LANG=en_US.UTF-8"
)
func mkEc2ImgTypeX86_64() *rhel.ImageType {
it := rhel.NewImageType(
"ec2",
"image.raw.xz",
"application/xz",
map[string]rhel.PackageSetFunc{
rhel.OSPkgsKey: ec2PackageSet,
},
rhel.DiskImage,
[]string{"build"},
[]string{"os", "image", "xz"},
[]string{"xz"},
)
// all RHEL 7 images should use sgdisk
it.DiskImagePartTool = common.ToPtr(osbuild.PTSgdisk)
it.Compression = "xz"
it.DefaultImageConfig = ec2ImageConfig()
it.KernelOptions = ec2KernelOptions
it.Bootable = true
it.DefaultSize = 10 * common.GibiByte
it.BasePartitionTables = ec2PartitionTables
return it
}
// default EC2 images config (common for all architectures)
func ec2ImageConfig() *distro.ImageConfig {
// systemd-firstboot on el7 does not support --keymap option
vconsoleFile, err := fsnode.NewFile("/etc/vconsole.conf", nil, nil, nil, []byte("FONT=latarcyrheb-sun16\nKEYMAP=us\n"))
if err != nil {
panic(err)
}
// This is needed to disable predictable network interface names.
// The org.osbuild.udev.rules stage can't create empty files.
udevNetNameSlotRulesFile, err := fsnode.NewFile("/etc/udev/rules.d/80-net-name-slot.rules", nil, nil, nil, []byte{})
if err != nil {
panic(err)
}
// While cloud-init does this automatically on first boot for the specified user,
// the file was also part of the original KS, so it is created here as well.
ec2UserSudoers, err := fsnode.NewFile("/etc/sudoers.d/ec2-user", common.ToPtr(os.FileMode(0o440)), nil, nil, []byte("ec2-user\tALL=(ALL)\tNOPASSWD: ALL\n"))
if err != nil {
panic(err)
}
// The image built from the original KS has this file with this content.
hostnameFile, err := fsnode.NewFile("/etc/hostname", nil, nil, nil, []byte("localhost.localdomain\n"))
if err != nil {
panic(err)
}
return &distro.ImageConfig{
Timezone: common.ToPtr("UTC"),
TimeSynchronization: &osbuild.ChronyStageOptions{
Servers: []osbuild.ChronyConfigServer{
{
Hostname: "0.rhel.pool.ntp.org",
Iburst: common.ToPtr(true),
},
{
Hostname: "1.rhel.pool.ntp.org",
Iburst: common.ToPtr(true),
},
{
Hostname: "2.rhel.pool.ntp.org",
Iburst: common.ToPtr(true),
},
{
Hostname: "3.rhel.pool.ntp.org",
Iburst: common.ToPtr(true),
},
{
Hostname: "169.254.169.123",
Prefer: common.ToPtr(true),
Iburst: common.ToPtr(true),
Minpoll: common.ToPtr(4),
Maxpoll: common.ToPtr(4),
},
},
// empty string will remove any occurrences of the option from the configuration
LeapsecTz: common.ToPtr(""),
},
EnabledServices: []string{
"sshd",
"rsyslog",
},
DefaultTarget: common.ToPtr("multi-user.target"),
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel",
},
Network: &osbuild.SysconfigNetworkOptions{
Networking: true,
NoZeroConf: true,
},
NetworkScripts: &osbuild.NetworkScriptsOptions{
IfcfgFiles: map[string]osbuild.IfcfgFile{
"eth0": {
Device: "eth0",
Bootproto: osbuild.IfcfgBootprotoDHCP,
OnBoot: common.ToPtr(true),
Type: osbuild.IfcfgTypeEthernet,
UserCtl: common.ToPtr(true),
PeerDNS: common.ToPtr(true),
IPv6Init: common.ToPtr(false),
},
},
},
},
},
SystemdLogind: []*osbuild.SystemdLogindStageOptions{
{
Filename: "logind.conf",
Config: osbuild.SystemdLogindConfigDropin{
Login: osbuild.SystemdLogindConfigLoginSection{
NAutoVTs: common.ToPtr(0),
},
},
},
},
CloudInit: []*osbuild.CloudInitStageOptions{
{
Filename: "00-rhel-default-user.cfg",
Config: osbuild.CloudInitConfigFile{
SystemInfo: &osbuild.CloudInitConfigSystemInfo{
DefaultUser: &osbuild.CloudInitConfigDefaultUser{
Name: "ec2-user",
},
},
},
},
{
Filename: "99-datasource.cfg",
Config: osbuild.CloudInitConfigFile{
DatasourceList: []string{
"Ec2",
"None",
},
},
},
},
Modprobe: []*osbuild.ModprobeStageOptions{
{
Filename: "blacklist-nouveau.conf",
Commands: osbuild.ModprobeConfigCmdList{
osbuild.NewModprobeConfigCmdBlacklist("nouveau"),
},
},
},
DracutConf: []*osbuild.DracutConfStageOptions{
{
Filename: "sgdisk.conf",
Config: osbuild.DracutConfigFile{
Install: []string{"sgdisk"},
},
},
},
SshdConfig: &osbuild.SshdConfigStageOptions{
Config: osbuild.SshdConfigConfig{
PasswordAuthentication: common.ToPtr(false),
},
},
Files: []*fsnode.File{
vconsoleFile,
udevNetNameSlotRulesFile,
ec2UserSudoers,
hostnameFile,
},
SELinuxForceRelabel: common.ToPtr(true),
}
}
func ec2PackageSet(t *rhel.ImageType) rpmmd.PackageSet {
return rpmmd.PackageSet{
Include: []string{
"@core",
"authconfig",
"kernel",
"yum-utils",
"cloud-init",
"dracut-config-generic",
"dracut-norescue",
"grub2",
"tar",
"rsync",
"rh-amazon-rhui-client",
"redhat-cloud-client-configuration",
"chrony",
"cloud-utils-growpart",
"gdisk",
},
Exclude: []string{
"aic94xx-firmware",
"alsa-firmware",
"alsa-lib",
"alsa-tools-firmware",
"ivtv-firmware",
"iwl1000-firmware",
"iwl100-firmware",
"iwl105-firmware",
"iwl135-firmware",
"iwl2000-firmware",
"iwl2030-firmware",
"iwl3160-firmware",
"iwl3945-firmware",
"iwl4965-firmware",
"iwl5000-firmware",
"iwl5150-firmware",
"iwl6000-firmware",
"iwl6000g2a-firmware",
"iwl6000g2b-firmware",
"iwl6050-firmware",
"iwl7260-firmware",
"libertas-sd8686-firmware",
"libertas-sd8787-firmware",
"libertas-usb8388-firmware",
"biosdevname",
"plymouth",
// NM is excluded by the original KS, but it is in the image built from it.
// "NetworkManager",
"iprutils",
// linux-firmware is uninstalled by the original KS, but it is a direct dependency of kernel,
// so we can't exclude it.
// "linux-firmware",
"firewalld",
},
}
}
func ec2PartitionTables(t *rhel.ImageType) (disk.PartitionTable, bool) {
switch t.Arch().Name() {
case arch.ARCH_X86_64.String():
return disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Size: 10 * common.GibiByte,
Partitions: []disk.Partition{
{
Size: 1 * common.MebiByte,
Bootable: true,
Type: disk.BIOSBootPartitionGUID,
UUID: disk.BIOSBootPartitionUUID,
},
{
Size: 6144 * common.MebiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.Filesystem{
Type: "xfs",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
}, true
default:
return disk.PartitionTable{}, false
}
}

View file

@ -5,31 +5,39 @@ import (
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distro/rhel"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/rpmmd"
"github.com/osbuild/images/pkg/subscription"
)
var azureRhuiImgType = imageType{
name: "azure-rhui",
filename: "disk.vhd.xz",
mimeType: "application/xz",
compression: "xz",
packageSets: map[string]packageSetFunc{
osPkgsKey: azureRhuiCommonPackageSet,
},
packageSetChains: map[string][]string{
osPkgsKey: {osPkgsKey, blueprintPkgsKey},
},
defaultImageConfig: azureDefaultImgConfig,
kernelOptions: "ro crashkernel=auto console=tty1 console=ttyS0 earlyprintk=ttyS0 rootdelay=300 scsi_mod.use_blk_mq=y",
bootable: true,
defaultSize: 64 * common.GibiByte,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vpc", "xz"},
exports: []string{"xz"},
basePartitionTables: azureRhuiBasePartitionTables,
func mkAzureRhuiImgType() *rhel.ImageType {
it := rhel.NewImageType(
"azure-rhui",
"disk.vhd.xz",
"application/xz",
map[string]rhel.PackageSetFunc{
rhel.OSPkgsKey: azureRhuiCommonPackageSet,
},
rhel.DiskImage,
[]string{"build"},
[]string{"os", "image", "vpc", "xz"},
[]string{"xz"},
)
// all RHEL 7 images should use sgdisk
it.DiskImagePartTool = common.ToPtr(osbuild.PTSgdisk)
// RHEL 7 qemu vpc subformat does not support force_size
it.DiskImageVPCForceSize = common.ToPtr(false)
it.Compression = "xz"
it.KernelOptions = "ro crashkernel=auto console=tty1 console=ttyS0 earlyprintk=ttyS0 rootdelay=300 scsi_mod.use_blk_mq=y"
it.DefaultImageConfig = azureDefaultImgConfig
it.Bootable = true
it.DefaultSize = 64 * common.GibiByte
it.BasePartitionTables = azureRhuiBasePartitionTables
return it
}
var azureDefaultImgConfig = &distro.ImageConfig{
@ -218,7 +226,7 @@ var azureDefaultImgConfig = &distro.ImageConfig{
DefaultTarget: common.ToPtr("multi-user.target"),
}
func azureRhuiCommonPackageSet(t *imageType) rpmmd.PackageSet {
func azureRhuiCommonPackageSet(t *rhel.ImageType) rpmmd.PackageSet {
ps := rpmmd.PackageSet{
Include: []string{
"@base",
@ -261,7 +269,7 @@ func azureRhuiCommonPackageSet(t *imageType) rpmmd.PackageSet {
},
}
if t.arch.distro.isRHEL() {
if t.IsRHEL() {
ps = ps.Append(rpmmd.PackageSet{
Include: []string{
"insights-client",
@ -272,113 +280,119 @@ func azureRhuiCommonPackageSet(t *imageType) rpmmd.PackageSet {
return ps
}
var azureRhuiBasePartitionTables = distro.BasePartitionTableMap{
arch.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Size: 64 * common.GibiByte,
Partitions: []disk.Partition{
{
Size: 500 * common.MebiByte,
Type: disk.EFISystemPartitionGUID,
UUID: disk.EFISystemPartitionUUID,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
func azureRhuiBasePartitionTables(t *rhel.ImageType) (disk.PartitionTable, bool) {
switch t.Arch().Name() {
case arch.ARCH_X86_64.String():
return disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Size: 64 * common.GibiByte,
Partitions: []disk.Partition{
{
Size: 500 * common.MebiByte,
Type: disk.EFISystemPartitionGUID,
UUID: disk.EFISystemPartitionUUID,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
},
{
Size: 500 * common.MebiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.FilesystemDataUUID,
Payload: &disk.Filesystem{
Type: "xfs",
Mountpoint: "/boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
{
Size: 500 * common.MebiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.FilesystemDataUUID,
Payload: &disk.Filesystem{
Type: "xfs",
Mountpoint: "/boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
{
Size: 2 * common.MebiByte,
Bootable: true,
Type: disk.BIOSBootPartitionGUID,
UUID: disk.BIOSBootPartitionUUID,
},
{
Type: disk.LVMPartitionGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.LVMVolumeGroup{
Name: "rootvg",
Description: "built with lvm2 and osbuild",
LogicalVolumes: []disk.LVMLogicalVolume{
{
Size: 1 * common.GibiByte,
Name: "homelv",
Payload: &disk.Filesystem{
Type: "xfs",
Label: "home",
Mountpoint: "/home",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
{
Size: 2 * common.MebiByte,
Bootable: true,
Type: disk.BIOSBootPartitionGUID,
UUID: disk.BIOSBootPartitionUUID,
},
{
Type: disk.LVMPartitionGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.LVMVolumeGroup{
Name: "rootvg",
Description: "built with lvm2 and osbuild",
LogicalVolumes: []disk.LVMLogicalVolume{
{
Size: 1 * common.GibiByte,
Name: "homelv",
Payload: &disk.Filesystem{
Type: "xfs",
Label: "home",
Mountpoint: "/home",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
{
Size: 2 * common.GibiByte,
Name: "rootlv",
Payload: &disk.Filesystem{
Type: "xfs",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
{
Size: 2 * common.GibiByte,
Name: "rootlv",
Payload: &disk.Filesystem{
Type: "xfs",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
{
Size: 2 * common.GibiByte,
Name: "tmplv",
Payload: &disk.Filesystem{
Type: "xfs",
Label: "tmp",
Mountpoint: "/tmp",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
{
Size: 2 * common.GibiByte,
Name: "tmplv",
Payload: &disk.Filesystem{
Type: "xfs",
Label: "tmp",
Mountpoint: "/tmp",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
{
Size: 10 * common.GibiByte,
Name: "usrlv",
Payload: &disk.Filesystem{
Type: "xfs",
Label: "usr",
Mountpoint: "/usr",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
{
Size: 10 * common.GibiByte,
Name: "usrlv",
Payload: &disk.Filesystem{
Type: "xfs",
Label: "usr",
Mountpoint: "/usr",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
{
Size: 10 * common.GibiByte, // firedrill: 8 GB
Name: "varlv",
Payload: &disk.Filesystem{
Type: "xfs",
Label: "var",
Mountpoint: "/var",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
{
Size: 10 * common.GibiByte, // firedrill: 8 GB
Name: "varlv",
Payload: &disk.Filesystem{
Type: "xfs",
Label: "var",
Mountpoint: "/var",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
},
},
},
},
}, true
default:
return disk.PartitionTable{}, false
}
}

View file

@ -0,0 +1,120 @@
package rhel7
import (
"fmt"
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distro/rhel"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
)
// RHEL-based OS image configuration defaults
func defaultDistroImageConfig(d *rhel.Distribution) *distro.ImageConfig {
return &distro.ImageConfig{
Timezone: common.ToPtr("America/New_York"),
Locale: common.ToPtr("en_US.UTF-8"),
GPGKeyFiles: []string{
"/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release",
},
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel",
},
Network: &osbuild.SysconfigNetworkOptions{
Networking: true,
NoZeroConf: true,
},
},
},
KernelOptionsBootloader: common.ToPtr(true),
NoBLS: common.ToPtr(true), // RHEL 7 grub does not support BLS
}
}
func newDistro(name string, minor int) *rhel.Distribution {
rd, err := rhel.NewDistribution(name, 7, minor)
if err != nil {
panic(err)
}
rd.CheckOptions = checkOptions
rd.DefaultImageConfig = defaultDistroImageConfig
rd.DistCodename = "Maipo"
// Architecture definitions
x86_64 := rhel.NewArchitecture(rd, arch.ARCH_X86_64)
x86_64.AddImageTypes(
&platform.X86{
BIOS: true,
UEFIVendor: rd.Vendor(),
BasePlatform: platform.BasePlatform{
ImageFormat: platform.FORMAT_QCOW2,
QCOW2Compat: "0.10",
},
},
mkQcow2ImgType(),
)
x86_64.AddImageTypes(
&platform.X86{
BIOS: true,
UEFIVendor: rd.Vendor(),
BasePlatform: platform.BasePlatform{
ImageFormat: platform.FORMAT_VHD,
},
},
mkAzureRhuiImgType(),
)
x86_64.AddImageTypes(
&platform.X86{
BIOS: true,
BasePlatform: platform.BasePlatform{
ImageFormat: platform.FORMAT_RAW,
},
},
mkEc2ImgTypeX86_64(),
)
rd.AddArches(
x86_64,
)
return rd
}
func ParseID(idStr string) (*distro.ID, error) {
id, err := distro.ParseID(idStr)
if err != nil {
return nil, err
}
if id.Name != "rhel" {
return nil, fmt.Errorf("invalid distro name: %s", id.Name)
}
if id.MajorVersion != 7 {
return nil, fmt.Errorf("invalid distro major version: %d", id.MajorVersion)
}
// RHEL uses minor version
if id.Name == "rhel" && id.MinorVersion == -1 {
return nil, fmt.Errorf("rhel requires minor version, but got: %d", id.MinorVersion)
}
return id, nil
}
func DistroFactory(idStr string) distro.Distro {
id, err := ParseID(idStr)
if err != nil {
return nil
}
return newDistro(id.Name, id.MinorVersion)
}
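Editorial note: a brief usage sketch (not part of the diff) of the re-added RHEL 7 definitions through the generic distro interface; the values in the comments are what the definitions above suggest, not verified output.

package main

import (
	"fmt"

	"github.com/osbuild/images/pkg/distro/rhel/rhel7"
)

func main() {
	// DistroFactory returns nil for IDs it does not handle, e.g. "rhel-7" without a minor version.
	d := rhel7.DistroFactory("rhel-7.9")
	if d == nil {
		panic("unsupported distro ID")
	}

	fmt.Println(d.Releasever(), d.Codename()) // expected: 7 Maipo

	a, err := d.GetArch("x86_64")
	if err != nil {
		panic(err)
	}
	fmt.Println(a.ListImageTypes()) // expected: [azure-rhui ec2 qcow2]
}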

View file

@ -0,0 +1,63 @@
package rhel7
import (
"fmt"
"github.com/osbuild/images/pkg/blueprint"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distro/rhel"
"github.com/osbuild/images/pkg/policies"
)
// checkOptions checks the validity and compatibility of options and customizations for the image type.
// Returns ([]string, error) where []string, if non-nil, will hold any generated warnings (e.g. deprecation notices).
func checkOptions(t *rhel.ImageType, bp *blueprint.Blueprint, options distro.ImageOptions) ([]string, error) {
customizations := bp.Customizations
// holds warnings (e.g. deprecation notices)
var warnings []string
if len(bp.Containers) > 0 {
return warnings, fmt.Errorf("embedding containers is not supported for %s on %s", t.Name(), t.Arch().Distro().Name())
}
mountpoints := customizations.GetFilesystems()
err := blueprint.CheckMountpointsPolicy(mountpoints, policies.MountpointPolicies)
if err != nil {
return warnings, err
}
if osc := customizations.GetOpenSCAP(); osc != nil {
return warnings, fmt.Errorf(fmt.Sprintf("OpenSCAP unsupported os version: %s", t.Arch().Distro().OsVersion()))
}
// Check Directory/File Customizations are valid
dc := customizations.GetDirectories()
fc := customizations.GetFiles()
err = blueprint.ValidateDirFileCustomizations(dc, fc)
if err != nil {
return warnings, err
}
dcp := policies.CustomDirectoriesPolicies
fcp := policies.CustomFilesPolicies
err = blueprint.CheckDirectoryCustomizationsPolicy(dc, dcp)
if err != nil {
return warnings, err
}
err = blueprint.CheckFileCustomizationsPolicy(fc, fcp)
if err != nil {
return warnings, err
}
// check if repository customizations are valid
_, err = customizations.GetRepositories()
if err != nil {
return warnings, err
}
return warnings, nil
}

View file

@ -1,12 +1,13 @@
package rhel7
import (
"github.com/osbuild/images/pkg/distro/rhel"
"github.com/osbuild/images/pkg/rpmmd"
)
// packages that are only in some (sub)-distributions
func distroSpecificPackageSet(t *imageType) rpmmd.PackageSet {
if t.arch.distro.isRHEL() {
func distroSpecificPackageSet(t *rhel.ImageType) rpmmd.PackageSet {
if t.IsRHEL() {
return rpmmd.PackageSet{
Include: []string{"insights-client"},
}

View file

@ -0,0 +1,69 @@
package rhel7
import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/distro/rhel"
)
func defaultBasePartitionTables(t *rhel.ImageType) (disk.PartitionTable, bool) {
switch t.Arch().Name() {
case arch.ARCH_X86_64.String():
return disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Partitions: []disk.Partition{
{
Size: 1 * common.MebiByte,
Bootable: true,
Type: disk.BIOSBootPartitionGUID,
UUID: disk.BIOSBootPartitionUUID,
},
{
Size: 200 * common.MebiByte,
Type: disk.EFISystemPartitionGUID,
UUID: disk.EFISystemPartitionUUID,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 500 * common.MebiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.FilesystemDataUUID,
Payload: &disk.Filesystem{
Type: "xfs",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * common.GibiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.Filesystem{
Type: "xfs",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
}, true
default:
return disk.PartitionTable{}, false
}
}

View file

@ -3,27 +3,36 @@ package rhel7
import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distro/rhel"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/rpmmd"
"github.com/osbuild/images/pkg/subscription"
)
var qcow2ImgType = imageType{
name: "qcow2",
filename: "disk.qcow2",
mimeType: "application/x-qemu-disk",
kernelOptions: "console=tty0 console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=auto",
packageSets: map[string]packageSetFunc{
osPkgsKey: qcow2CommonPackageSet,
},
defaultImageConfig: qcow2DefaultImgConfig,
bootable: true,
defaultSize: 10 * common.GibiByte,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "qcow2"},
exports: []string{"qcow2"},
basePartitionTables: defaultBasePartitionTables,
func mkQcow2ImgType() *rhel.ImageType {
it := rhel.NewImageType(
"qcow2",
"disk.qcow2",
"application/x-qemu-disk",
map[string]rhel.PackageSetFunc{
rhel.OSPkgsKey: qcow2CommonPackageSet,
},
rhel.DiskImage,
[]string{"build"},
[]string{"os", "image", "qcow2"},
[]string{"qcow2"},
)
// all RHEL 7 images should use sgdisk
it.DiskImagePartTool = common.ToPtr(osbuild.PTSgdisk)
it.KernelOptions = "console=tty0 console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=auto"
it.Bootable = true
it.DefaultSize = 10 * common.GibiByte
it.DefaultImageConfig = qcow2DefaultImgConfig
it.BasePartitionTables = defaultBasePartitionTables
return it
}
var qcow2DefaultImgConfig = &distro.ImageConfig{
@ -68,7 +77,7 @@ var qcow2DefaultImgConfig = &distro.ImageConfig{
},
}
func qcow2CommonPackageSet(t *imageType) rpmmd.PackageSet {
func qcow2CommonPackageSet(t *rhel.ImageType) rpmmd.PackageSet {
ps := rpmmd.PackageSet{
Include: []string{
"@core",

View file

@ -10,9 +10,9 @@ import (
)
const (
amiX86KernelOptions = "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 crashkernel=auto"
amiAarch64KernelOptions = "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 iommu.strict=0 crashkernel=auto"
amiSapKernelOptions = "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 crashkernel=auto processor.max_cstate=1 intel_idle.max_cstate=1"
amiX86KernelOptions = "console=tty0 console=ttyS0,115200n8 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 crashkernel=auto"
amiAarch64KernelOptions = "console=tty0 console=ttyS0,115200n8 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 iommu.strict=0 crashkernel=auto"
amiSapKernelOptions = "console=tty0 console=ttyS0,115200n8 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295 crashkernel=auto processor.max_cstate=1 intel_idle.max_cstate=1"
)
func mkAmiImgTypeX86_64() *rhel.ImageType {

View file

@ -10,7 +10,7 @@ import (
)
// TODO: move these to the EC2 environment
const amiKernelOptions = "console=ttyS0,115200n8 console=tty0 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295"
const amiKernelOptions = "console=tty0 console=ttyS0,115200n8 net.ifnames=0 rd.blacklist=nouveau nvme_core.io_timeout=4294967295"
// default EC2 images config (common for all architectures)
func baseEc2ImageConfig() *distro.ImageConfig {

View file

@ -1,273 +0,0 @@
package rhel7
import (
"errors"
"fmt"
"sort"
"strings"
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
"github.com/osbuild/images/pkg/runner"
)
const (
// package set names
// main/common os image package set name
osPkgsKey = "os"
// blueprint package set name
blueprintPkgsKey = "blueprint"
// location for saving openscap remediation data
oscapDataDir = "/oscap_data"
)
// RHEL-based OS image configuration defaults
var defaultDistroImageConfig = &distro.ImageConfig{
Timezone: common.ToPtr("America/New_York"),
Locale: common.ToPtr("en_US.UTF-8"),
GPGKeyFiles: []string{
"/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release",
},
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel",
},
Network: &osbuild.SysconfigNetworkOptions{
Networking: true,
NoZeroConf: true,
},
},
},
KernelOptionsBootloader: common.ToPtr(true),
}
// --- Distribution ---
type distribution struct {
name string
product string
nick string
osVersion string
releaseVersion string
modulePlatformID string
vendor string
runner runner.Runner
arches map[string]distro.Arch
defaultImageConfig *distro.ImageConfig
}
func (d *distribution) Name() string {
return d.name
}
func (d *distribution) Releasever() string {
return d.releaseVersion
}
func (d *distribution) OsVersion() string {
return d.osVersion
}
func (d *distribution) Product() string {
return d.product
}
func (d *distribution) ModulePlatformID() string {
return d.modulePlatformID
}
func (d *distribution) OSTreeRef() string {
return "" // not supported
}
func (d *distribution) ListArches() []string {
archNames := make([]string, 0, len(d.arches))
for name := range d.arches {
archNames = append(archNames, name)
}
sort.Strings(archNames)
return archNames
}
func (d *distribution) GetArch(name string) (distro.Arch, error) {
arch, exists := d.arches[name]
if !exists {
return nil, errors.New("invalid architecture: " + name)
}
return arch, nil
}
func (d *distribution) addArches(arches ...architecture) {
if d.arches == nil {
d.arches = map[string]distro.Arch{}
}
// Do not make copies of architectures, as opposed to image types,
// because architecture definitions are not used by more than a single
// distro definition.
for idx := range arches {
d.arches[arches[idx].name] = &arches[idx]
}
}
func (d *distribution) isRHEL() bool {
return strings.HasPrefix(d.name, "rhel")
}
func (d *distribution) getDefaultImageConfig() *distro.ImageConfig {
return d.defaultImageConfig
}
// --- Architecture ---
type architecture struct {
distro *distribution
name string
imageTypes map[string]distro.ImageType
imageTypeAliases map[string]string
}
func (a *architecture) Name() string {
return a.name
}
func (a *architecture) ListImageTypes() []string {
itNames := make([]string, 0, len(a.imageTypes))
for name := range a.imageTypes {
itNames = append(itNames, name)
}
sort.Strings(itNames)
return itNames
}
func (a *architecture) GetImageType(name string) (distro.ImageType, error) {
t, exists := a.imageTypes[name]
if !exists {
aliasForName, exists := a.imageTypeAliases[name]
if !exists {
return nil, errors.New("invalid image type: " + name)
}
t, exists = a.imageTypes[aliasForName]
if !exists {
panic(fmt.Sprintf("image type '%s' is an alias to a non-existing image type '%s'", name, aliasForName))
}
}
return t, nil
}
func (a *architecture) addImageTypes(platform platform.Platform, imageTypes ...imageType) {
if a.imageTypes == nil {
a.imageTypes = map[string]distro.ImageType{}
}
for idx := range imageTypes {
it := imageTypes[idx]
it.arch = a
it.platform = platform
a.imageTypes[it.name] = &it
for _, alias := range it.nameAliases {
if a.imageTypeAliases == nil {
a.imageTypeAliases = map[string]string{}
}
if existingAliasFor, exists := a.imageTypeAliases[alias]; exists {
panic(fmt.Sprintf("image type alias '%s' for '%s' is already defined for another image type '%s'", alias, it.name, existingAliasFor))
}
a.imageTypeAliases[alias] = it.name
}
}
}
func (a *architecture) Distro() distro.Distro {
return a.distro
}
func newDistro(name string, minor int) *distribution {
var rd distribution
switch name {
case "rhel":
rd = distribution{
name: fmt.Sprintf("rhel-7.%d", minor),
product: "Red Hat Enterprise Linux",
osVersion: fmt.Sprintf("7.%d", minor),
nick: "Maipo",
releaseVersion: "7",
modulePlatformID: "platform:el7",
vendor: "redhat",
runner: &runner.RHEL{Major: uint64(7), Minor: uint64(minor)},
defaultImageConfig: defaultDistroImageConfig,
}
default:
panic(fmt.Sprintf("unknown distro name: %s", name))
}
// Architecture definitions
x86_64 := architecture{
name: arch.ARCH_X86_64.String(),
distro: &rd,
}
x86_64.addImageTypes(
&platform.X86{
BIOS: true,
UEFIVendor: rd.vendor,
BasePlatform: platform.BasePlatform{
ImageFormat: platform.FORMAT_QCOW2,
QCOW2Compat: "0.10",
},
},
qcow2ImgType,
)
x86_64.addImageTypes(
&platform.X86{
BIOS: true,
UEFIVendor: rd.vendor,
BasePlatform: platform.BasePlatform{
ImageFormat: platform.FORMAT_VHD,
},
},
azureRhuiImgType,
)
rd.addArches(
x86_64,
)
return &rd
}
func ParseID(idStr string) (*distro.ID, error) {
id, err := distro.ParseID(idStr)
if err != nil {
return nil, err
}
if id.Name != "rhel" {
return nil, fmt.Errorf("invalid distro name: %s", id.Name)
}
if id.MajorVersion != 7 {
return nil, fmt.Errorf("invalid distro major version: %d", id.MajorVersion)
}
// RHEL uses minor version
if id.Name == "rhel" && id.MinorVersion == -1 {
return nil, fmt.Errorf("rhel requires minor version, but got: %d", id.MinorVersion)
}
return id, nil
}
func DistroFactory(idStr string) distro.Distro {
id, err := ParseID(idStr)
if err != nil {
return nil
}
return newDistro(id.Name, id.MinorVersion)
}

View file

@ -1,269 +0,0 @@
package rhel7
import (
"fmt"
"math/rand"
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/internal/workload"
"github.com/osbuild/images/pkg/blueprint"
"github.com/osbuild/images/pkg/container"
"github.com/osbuild/images/pkg/customizations/users"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/image"
"github.com/osbuild/images/pkg/manifest"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/rpmmd"
)
func osCustomizations(
t *imageType,
osPackageSet rpmmd.PackageSet,
options distro.ImageOptions,
containers []container.SourceSpec,
c *blueprint.Customizations,
) (manifest.OSCustomizations, error) {
imageConfig := t.getDefaultImageConfig()
osc := manifest.OSCustomizations{}
if t.bootable {
osc.KernelName = c.GetKernel().Name
var kernelOptions []string
if t.kernelOptions != "" {
kernelOptions = append(kernelOptions, t.kernelOptions)
}
if bpKernel := c.GetKernel(); bpKernel.Append != "" {
kernelOptions = append(kernelOptions, bpKernel.Append)
}
osc.KernelOptionsAppend = kernelOptions
if imageConfig.KernelOptionsBootloader != nil {
osc.KernelOptionsBootloader = *imageConfig.KernelOptionsBootloader
}
}
osc.ExtraBasePackages = osPackageSet.Include
osc.ExcludeBasePackages = osPackageSet.Exclude
osc.ExtraBaseRepos = osPackageSet.Repositories
osc.Containers = containers
osc.GPGKeyFiles = imageConfig.GPGKeyFiles
if imageConfig.ExcludeDocs != nil {
osc.ExcludeDocs = *imageConfig.ExcludeDocs
}
// don't put users and groups in the payload of an installer
// add them via kickstart instead
osc.Groups = users.GroupsFromBP(c.GetGroups())
osc.Users = users.UsersFromBP(c.GetUsers())
osc.EnabledServices = imageConfig.EnabledServices
osc.DisabledServices = imageConfig.DisabledServices
osc.MaskedServices = imageConfig.MaskedServices
if imageConfig.DefaultTarget != nil {
osc.DefaultTarget = *imageConfig.DefaultTarget
}
osc.Firewall = imageConfig.Firewall
if fw := c.GetFirewall(); fw != nil {
options := osbuild.FirewallStageOptions{
Ports: fw.Ports,
}
if fw.Services != nil {
options.EnabledServices = fw.Services.Enabled
options.DisabledServices = fw.Services.Disabled
}
if fw.Zones != nil {
for _, z := range fw.Zones {
options.Zones = append(options.Zones, osbuild.FirewallZone{
Name: *z.Name,
Sources: z.Sources,
})
}
}
osc.Firewall = &options
}
language, keyboard := c.GetPrimaryLocale()
if language != nil {
osc.Language = *language
} else if imageConfig.Locale != nil {
osc.Language = *imageConfig.Locale
}
if keyboard != nil {
osc.Keyboard = keyboard
} else if imageConfig.Keyboard != nil {
osc.Keyboard = &imageConfig.Keyboard.Keymap
if imageConfig.Keyboard.X11Keymap != nil {
osc.X11KeymapLayouts = imageConfig.Keyboard.X11Keymap.Layouts
}
}
if hostname := c.GetHostname(); hostname != nil {
osc.Hostname = *hostname
}
timezone, ntpServers := c.GetTimezoneSettings()
if timezone != nil {
osc.Timezone = *timezone
} else if imageConfig.Timezone != nil {
osc.Timezone = *imageConfig.Timezone
}
if len(ntpServers) > 0 {
for _, server := range ntpServers {
osc.NTPServers = append(osc.NTPServers, osbuild.ChronyConfigServer{Hostname: server})
}
} else if imageConfig.TimeSynchronization != nil {
osc.NTPServers = imageConfig.TimeSynchronization.Servers
osc.LeapSecTZ = imageConfig.TimeSynchronization.LeapsecTz
}
// Relabel the tree, unless the `NoSElinux` flag is explicitly set to `true`
if imageConfig.NoSElinux == nil || imageConfig.NoSElinux != nil && !*imageConfig.NoSElinux {
osc.SElinux = "targeted"
osc.SELinuxForceRelabel = imageConfig.SELinuxForceRelabel
}
if oscapConfig := c.GetOpenSCAP(); oscapConfig != nil {
var datastream = oscapConfig.DataStream
if datastream == "" {
if imageConfig.DefaultOSCAPDatastream == nil {
return manifest.OSCustomizations{}, fmt.Errorf("No OSCAP datastream specified and the distro does not have any default set")
}
datastream = *imageConfig.DefaultOSCAPDatastream
}
osc.OpenSCAPConfig = osbuild.NewOscapRemediationStageOptions(
oscapDataDir,
osbuild.OscapConfig{
Datastream: datastream,
ProfileID: oscapConfig.ProfileID,
Compression: true,
},
)
}
if t.arch.distro.isRHEL() && options.Facts != nil {
osc.FactAPIType = &options.Facts.APIType
}
var err error
osc.Directories, err = blueprint.DirectoryCustomizationsToFsNodeDirectories(c.GetDirectories())
if err != nil {
// In theory this should never happen, because the blueprint directory customizations
// should have been validated before this point.
panic(fmt.Sprintf("failed to convert directory customizations to fs node directories: %v", err))
}
osc.Files, err = blueprint.FileCustomizationsToFsNodeFiles(c.GetFiles())
if err != nil {
// In theory this should never happen, because the blueprint file customizations
// should have been validated before this point.
panic(fmt.Sprintf("failed to convert file customizations to fs node files: %v", err))
}
// set yum repos first, so it doesn't get overridden by
// imageConfig.YUMRepos
osc.YUMRepos = imageConfig.YUMRepos
customRepos, err := c.GetRepositories()
if err != nil {
// This shouldn't happen and since the repos
// should have already been validated
panic(fmt.Sprintf("failed to get custom repos: %v", err))
}
// This function returns a map of filename and corresponding yum repos
// and a list of fs node files for the inline gpg keys so we can save
// them to disk. This step also swaps the inline gpg key with the path
// to the file in the os file tree
yumRepos, gpgKeyFiles, err := blueprint.RepoCustomizationsToRepoConfigAndGPGKeyFiles(customRepos)
if err != nil {
panic(fmt.Sprintf("failed to convert inline gpgkeys to fs node files: %v", err))
}
// add the gpg key files to the list of files to be added to the tree
if len(gpgKeyFiles) > 0 {
osc.Files = append(osc.Files, gpgKeyFiles...)
}
for filename, repos := range yumRepos {
osc.YUMRepos = append(osc.YUMRepos, osbuild.NewYumReposStageOptions(filename, repos))
}
osc.ShellInit = imageConfig.ShellInit
osc.Grub2Config = imageConfig.Grub2Config
osc.Sysconfig = imageConfig.Sysconfig
osc.SystemdLogind = imageConfig.SystemdLogind
osc.CloudInit = imageConfig.CloudInit
osc.Modprobe = imageConfig.Modprobe
osc.DracutConf = imageConfig.DracutConf
osc.SystemdUnit = imageConfig.SystemdUnit
osc.Authselect = imageConfig.Authselect
osc.SELinuxConfig = imageConfig.SELinuxConfig
osc.Tuned = imageConfig.Tuned
osc.Tmpfilesd = imageConfig.Tmpfilesd
osc.PamLimitsConf = imageConfig.PamLimitsConf
osc.Sysctld = imageConfig.Sysctld
osc.DNFConfig = imageConfig.DNFConfig
osc.DNFAutomaticConfig = imageConfig.DNFAutomaticConfig
osc.YUMConfig = imageConfig.YumConfig
osc.SshdConfig = imageConfig.SshdConfig
osc.AuthConfig = imageConfig.Authconfig
osc.PwQuality = imageConfig.PwQuality
osc.RHSMConfig = imageConfig.RHSMConfig
osc.Subscription = options.Subscription
osc.WAAgentConfig = imageConfig.WAAgentConfig
osc.UdevRules = imageConfig.UdevRules
osc.GCPGuestAgentConfig = imageConfig.GCPGuestAgentConfig
osc.Files = append(osc.Files, imageConfig.Files...)
osc.Directories = append(osc.Directories, imageConfig.Directories...)
return osc, nil
}
func diskImage(workload workload.Workload,
t *imageType,
customizations *blueprint.Customizations,
options distro.ImageOptions,
packageSets map[string]rpmmd.PackageSet,
containers []container.SourceSpec,
rng *rand.Rand) (image.ImageKind, error) {
img := image.NewDiskImage()
img.Platform = t.platform
var err error
img.OSCustomizations, err = osCustomizations(t, packageSets[osPkgsKey], options, containers, customizations)
if err != nil {
return nil, err
}
img.Environment = t.environment
img.Workload = workload
img.Compression = t.compression
img.PartTool = osbuild.PTSgdisk // all RHEL 7 images should use sgdisk
img.ForceSize = common.ToPtr(false) // RHEL 7 qemu vpc subformat does not support force_size
img.NoBLS = true // RHEL 7 grub does not support BLS
img.OSProduct = t.arch.distro.product
img.OSVersion = t.arch.distro.osVersion
img.OSNick = t.arch.distro.nick
// TODO: move generation into LiveImage
pt, err := t.getPartitionTable(customizations.GetFilesystems(), options, rng)
if err != nil {
return nil, err
}
img.PartitionTable = pt
img.Filename = t.Filename()
return img, nil
}

View file

@ -1,310 +0,0 @@
package rhel7
import (
"fmt"
"math/rand"
"github.com/osbuild/images/internal/environment"
"github.com/osbuild/images/internal/workload"
"github.com/osbuild/images/pkg/blueprint"
"github.com/osbuild/images/pkg/container"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/image"
"github.com/osbuild/images/pkg/manifest"
"github.com/osbuild/images/pkg/platform"
"github.com/osbuild/images/pkg/policies"
"github.com/osbuild/images/pkg/rpmmd"
"golang.org/x/exp/slices"
)
type packageSetFunc func(t *imageType) rpmmd.PackageSet
type imageFunc func(workload workload.Workload, t *imageType, customizations *blueprint.Customizations, options distro.ImageOptions, packageSets map[string]rpmmd.PackageSet, containers []container.SourceSpec, rng *rand.Rand) (image.ImageKind, error)
type isoLabelFunc func(t *imageType) string
type imageType struct {
arch *architecture
platform platform.Platform
environment environment.Environment
workload workload.Workload
name string
nameAliases []string
filename string
compression string // TODO: remove from image definition and make it a transport option
mimeType string
packageSets map[string]packageSetFunc
packageSetChains map[string][]string
defaultImageConfig *distro.ImageConfig
kernelOptions string
defaultSize uint64
buildPipelines []string
payloadPipelines []string
exports []string
image imageFunc
isoLabel isoLabelFunc
// bootISO: installable ISO
bootISO bool
// bootable image
bootable bool
// List of valid arches for the image type
basePartitionTables distro.BasePartitionTableMap
}
func (t *imageType) Name() string {
return t.name
}
func (t *imageType) Arch() distro.Arch {
return t.arch
}
func (t *imageType) Filename() string {
return t.filename
}
func (t *imageType) MIMEType() string {
return t.mimeType
}
func (t *imageType) OSTreeRef() string {
// Not supported
return ""
}
func (t *imageType) ISOLabel() (string, error) {
if !t.bootISO {
return "", fmt.Errorf("image type %q is not an ISO", t.name)
}
if t.isoLabel != nil {
return t.isoLabel(t), nil
}
return "", nil
}
func (t *imageType) Size(size uint64) uint64 {
if size == 0 {
size = t.defaultSize
}
return size
}
func (t *imageType) BuildPipelines() []string {
return t.buildPipelines
}
func (t *imageType) PayloadPipelines() []string {
return t.payloadPipelines
}
func (t *imageType) PayloadPackageSets() []string {
return []string{blueprintPkgsKey}
}
func (t *imageType) PackageSetsChains() map[string][]string {
return t.packageSetChains
}
func (t *imageType) Exports() []string {
if len(t.exports) == 0 {
panic(fmt.Sprintf("programming error: no exports for '%s'", t.name))
}
return t.exports
}
func (t *imageType) BootMode() distro.BootMode {
if t.platform.GetUEFIVendor() != "" && t.platform.GetBIOSPlatform() != "" {
return distro.BOOT_HYBRID
} else if t.platform.GetUEFIVendor() != "" {
return distro.BOOT_UEFI
} else if t.platform.GetBIOSPlatform() != "" || t.platform.GetZiplSupport() {
return distro.BOOT_LEGACY
}
return distro.BOOT_NONE
}
func (t *imageType) getPartitionTable(
mountpoints []blueprint.FilesystemCustomization,
options distro.ImageOptions,
rng *rand.Rand,
) (*disk.PartitionTable, error) {
archName := t.arch.Name()
basePartitionTable, exists := t.basePartitionTables[archName]
if !exists {
return nil, fmt.Errorf("unknown arch: " + archName)
}
imageSize := t.Size(options.Size)
return disk.NewPartitionTable(&basePartitionTable, mountpoints, imageSize, options.PartitioningMode, nil, rng)
}
func (t *imageType) getDefaultImageConfig() *distro.ImageConfig {
// ensure that image always returns non-nil default config
imageConfig := t.defaultImageConfig
if imageConfig == nil {
imageConfig = &distro.ImageConfig{}
}
return imageConfig.InheritFrom(t.arch.distro.getDefaultImageConfig())
}
func (t *imageType) PartitionType() string {
archName := t.arch.Name()
basePartitionTable, exists := t.basePartitionTables[archName]
if !exists {
return ""
}
return basePartitionTable.Type
}
func (t *imageType) Manifest(bp *blueprint.Blueprint,
options distro.ImageOptions,
repos []rpmmd.RepoConfig,
seed int64) (*manifest.Manifest, []string, error) {
warnings, err := t.checkOptions(bp, options)
if err != nil {
return nil, nil, err
}
// merge package sets that appear in the image type with the package sets
// of the same name from the distro and arch
staticPackageSets := make(map[string]rpmmd.PackageSet)
for name, getter := range t.packageSets {
staticPackageSets[name] = getter(t)
}
// amend with repository information and collect payload repos
payloadRepos := make([]rpmmd.RepoConfig, 0)
for _, repo := range repos {
if len(repo.PackageSets) > 0 {
// only apply the repo to the listed package sets
for _, psName := range repo.PackageSets {
if slices.Contains(t.PayloadPackageSets(), psName) {
payloadRepos = append(payloadRepos, repo)
}
ps := staticPackageSets[psName]
ps.Repositories = append(ps.Repositories, repo)
staticPackageSets[psName] = ps
}
}
}
w := t.workload
if w == nil {
cw := &workload.Custom{
BaseWorkload: workload.BaseWorkload{
Repos: payloadRepos,
},
Packages: bp.GetPackagesEx(false),
}
if services := bp.Customizations.GetServices(); services != nil {
cw.Services = services.Enabled
cw.DisabledServices = services.Disabled
}
w = cw
}
containerSources := make([]container.SourceSpec, len(bp.Containers))
for idx, cont := range bp.Containers {
containerSources[idx] = container.SourceSpec{
Source: cont.Source,
Name: cont.Name,
TLSVerify: cont.TLSVerify,
Local: cont.LocalStorage,
}
}
source := rand.NewSource(seed)
// math/rand is good enough in this case
/* #nosec G404 */
rng := rand.New(source)
if t.image == nil {
return nil, nil, nil
}
img, err := t.image(w, t, bp.Customizations, options, staticPackageSets, containerSources, rng)
if err != nil {
return nil, nil, err
}
mf := manifest.New()
mf.Distro = manifest.DISTRO_EL7
_, err = img.InstantiateManifest(&mf, repos, t.arch.distro.runner, rng)
if err != nil {
return nil, nil, err
}
return &mf, warnings, err
}
// checkOptions checks the validity and compatibility of options and customizations for the image type.
// Returns ([]string, error) where []string, if non-nil, will hold any generated warnings (e.g. deprecation notices).
func (t *imageType) checkOptions(bp *blueprint.Blueprint, options distro.ImageOptions) ([]string, error) {
customizations := bp.Customizations
// holds warnings (e.g. deprecation notices)
var warnings []string
if t.workload != nil {
// For now, if an image type defines its own workload, don't allow any
// user customizations.
// Soon we will have more workflows and each will define its allowed
// set of customizations. The current set of customizations defined in
// the blueprint spec corresponds to the Custom workflow.
if customizations != nil {
return warnings, fmt.Errorf(distro.NoCustomizationsAllowedError, t.name)
}
}
if len(bp.Containers) > 0 {
return warnings, fmt.Errorf("embedding containers is not supported for %s on %s", t.name, t.arch.distro.name)
}
mountpoints := customizations.GetFilesystems()
err := blueprint.CheckMountpointsPolicy(mountpoints, policies.MountpointPolicies)
if err != nil {
return warnings, err
}
if osc := customizations.GetOpenSCAP(); osc != nil {
return warnings, fmt.Errorf(fmt.Sprintf("OpenSCAP unsupported os version: %s", t.arch.distro.osVersion))
}
// Check Directory/File Customizations are valid
dc := customizations.GetDirectories()
fc := customizations.GetFiles()
err = blueprint.ValidateDirFileCustomizations(dc, fc)
if err != nil {
return warnings, err
}
dcp := policies.CustomDirectoriesPolicies
fcp := policies.CustomFilesPolicies
err = blueprint.CheckDirectoryCustomizationsPolicy(dc, dcp)
if err != nil {
return warnings, err
}
err = blueprint.CheckFileCustomizationsPolicy(fc, fcp)
if err != nil {
return warnings, err
}
// check if repository customizations are valid
_, err = customizations.GetRepositories()
if err != nil {
return warnings, err
}
return warnings, nil
}
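
A minimal caller sketch for the Manifest method above (not part of this diff): it assumes the image type was already resolved through the usual distro, arch and image type lookups, and that repos holds the resolved rpmmd repositories.

// exampleManifest is an illustrative sketch, not repository code; imports of
// blueprint, distro, manifest and rpmmd from osbuild/images are assumed.
func exampleManifest(it distro.ImageType, repos []rpmmd.RepoConfig) (*manifest.Manifest, []string, error) {
	bp := &blueprint.Blueprint{}           // empty blueprint: no customizations
	opts := distro.ImageOptions{}          // defaults for size, ostree, etc.
	return it.Manifest(bp, opts, repos, 0) // fixed seed 0 keeps the manifest reproducible
}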

View file

@ -1,63 +0,0 @@
package rhel7
import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/distro"
)
var defaultBasePartitionTables = distro.BasePartitionTableMap{
arch.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
Partitions: []disk.Partition{
{
Size: 1 * common.MebiByte,
Bootable: true,
Type: disk.BIOSBootPartitionGUID,
UUID: disk.BIOSBootPartitionUUID,
},
{
Size: 200 * common.MebiByte,
Type: disk.EFISystemPartitionGUID,
UUID: disk.EFISystemPartitionUUID,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 500 * common.MebiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.FilesystemDataUUID,
Payload: &disk.Filesystem{
Type: "xfs",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * common.GibiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.Filesystem{
Type: "xfs",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
}

View file

@ -78,6 +78,10 @@ func (d *TestDistro) Name() string {
return d.name
}
func (d *TestDistro) Codename() string {
return "" // not supported
}
func (d *TestDistro) Releasever() string {
return d.releasever
}

View file

@ -7,9 +7,9 @@ import (
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distro/fedora"
"github.com/osbuild/images/pkg/distro/rhel/rhel10"
"github.com/osbuild/images/pkg/distro/rhel/rhel7"
"github.com/osbuild/images/pkg/distro/rhel/rhel8"
"github.com/osbuild/images/pkg/distro/rhel/rhel9"
"github.com/osbuild/images/pkg/distro/rhel7"
"github.com/osbuild/images/pkg/distro/test_distro"
)

View file

@ -4,9 +4,9 @@ import (
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distro/fedora"
"github.com/osbuild/images/pkg/distro/rhel/rhel10"
"github.com/osbuild/images/pkg/distro/rhel/rhel7"
"github.com/osbuild/images/pkg/distro/rhel/rhel8"
"github.com/osbuild/images/pkg/distro/rhel/rhel9"
"github.com/osbuild/images/pkg/distro/rhel7"
)
var DefaultParser = NewDefaultParser()

View file

@ -42,6 +42,12 @@ type AnacondaContainerInstaller struct {
AdditionalAnacondaModules []string
AdditionalDrivers []string
FIPS bool
// Kernel options that will be appended to the installed system
// (not the iso)
KickstartKernelOptionsAppend []string
// Enable networking on boot in the installed system
KickstartNetworkOnBoot bool
}
func NewAnacondaContainerInstaller(container container.SourceSpec, ref string) *AnacondaContainerInstaller {
@ -114,6 +120,9 @@ func (img *AnacondaContainerInstaller) InstantiateManifest(m *manifest.Manifest,
isoTreePipeline.OSName = img.OSName
isoTreePipeline.Users = img.Users
isoTreePipeline.Groups = img.Groups
isoTreePipeline.KickstartKernelOptionsAppend = img.KickstartKernelOptionsAppend
isoTreePipeline.KickstartNetworkOnBoot = img.KickstartNetworkOnBoot
isoTreePipeline.SquashfsCompression = img.SquashfsCompression
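
A short usage sketch for the two new kickstart knobs (not part of this diff; the ostree ref and kernel arguments are illustrative assumptions).

// exampleContainerInstaller shows the new fields only; all other installer
// options are left at their defaults.
func exampleContainerInstaller(src container.SourceSpec) *image.AnacondaContainerInstaller {
	img := image.NewAnacondaContainerInstaller(src, "ostree/1/1/0") // ref is a placeholder
	// appended to the installed system's kernel command line, not to the ISO
	img.KickstartKernelOptionsAppend = []string{"console=ttyS0,115200"}
	// bring networking up on boot in the installed system
	img.KickstartNetworkOnBoot = true
	return img
}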

View file

@ -26,11 +26,14 @@ type BootcDiskImage struct {
// Customizations
KernelOptionsAppend []string
// "Users" is a bit misleading as only root and its ssh key is supported
// right now because that is all that bootc gives us by default but that
// will most likely change over time.
// See https://github.com/containers/bootc/pull/267
Users []users.User
// The users to put into the image; note that /etc/passwd (and friends)
// will become unmanaged state by bootc when used
Users []users.User
Groups []users.Group
// SELinux policy, when set it enables the labeling of the tree with the
// selected profile
SELinux string
}
func NewBootcDiskImage(container container.SourceSpec) *BootcDiskImage {
@ -52,24 +55,24 @@ func (img *BootcDiskImage) InstantiateManifestFromContainers(m *manifest.Manifes
// this is signified by passing nil to the below pipelines.
var hostPipeline manifest.Build
// TODO: no support for customization right now but minimal support
// for root ssh keys is supported
baseImage := manifest.NewRawBootcImage(buildPipeline, containers, img.Platform)
baseImage.PartitionTable = img.PartitionTable
baseImage.Users = img.Users
baseImage.KernelOptionsAppend = img.KernelOptionsAppend
rawImage := manifest.NewRawBootcImage(buildPipeline, containers, img.Platform)
rawImage.PartitionTable = img.PartitionTable
rawImage.Users = img.Users
rawImage.Groups = img.Groups
rawImage.KernelOptionsAppend = img.KernelOptionsAppend
rawImage.SELinux = img.SELinux
// In BIB, we export multiple images from the same pipeline so we use the
// filename as the basename for each export and set the extensions based on
// each file format.
fileBasename := img.Filename
baseImage.SetFilename(fmt.Sprintf("%s.raw", fileBasename))
rawImage.SetFilename(fmt.Sprintf("%s.raw", fileBasename))
qcow2Pipeline := manifest.NewQCOW2(hostPipeline, baseImage)
qcow2Pipeline := manifest.NewQCOW2(hostPipeline, rawImage)
qcow2Pipeline.Compat = img.Platform.GetQCOW2Compat()
qcow2Pipeline.SetFilename(fmt.Sprintf("%s.qcow2", fileBasename))
vmdkPipeline := manifest.NewVMDK(hostPipeline, baseImage)
vmdkPipeline := manifest.NewVMDK(hostPipeline, rawImage)
vmdkPipeline.SetFilename(fmt.Sprintf("%s.vmdk", fileBasename))
ovfPipeline := manifest.NewOVF(hostPipeline, vmdkPipeline)

View file

@ -0,0 +1,86 @@
package image
import (
"fmt"
"math/rand"
"github.com/osbuild/images/pkg/container"
"github.com/osbuild/images/pkg/manifest"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/runner"
)
// Legacy pipeline to create bootc images. This can be removed once
//
// https://github.com/containers/bootc/pull/462
//
// or
//
// https://www.mail-archive.com/qemu-devel@nongnu.org/msg1034508.html
//
// is available everywhere
type BootcLegacyDiskImage struct {
bootcImg *BootcDiskImage
}
func NewBootcLegacyDiskImage(real *BootcDiskImage) *BootcLegacyDiskImage {
return &BootcLegacyDiskImage{
bootcImg: real,
}
}
func (img *BootcLegacyDiskImage) InstantiateManifestFromContainers(m *manifest.Manifest,
containers []container.SourceSpec,
runner runner.Runner,
rng *rand.Rand) error {
// XXX: hardcoded for now
ref := "ostree/1/1/0"
ostreeImg := &OSTreeDiskImage{
Base: NewBase("bootc-raw-image"),
ContainerSource: img.bootcImg.ContainerSource,
Ref: ref,
OSName: "default",
}
ostreeImg.Platform = img.bootcImg.Platform
ostreeImg.PartitionTable = img.bootcImg.PartitionTable
ostreeImg.OSTreeDeploymentCustomizations.KernelOptionsAppend = img.bootcImg.KernelOptionsAppend
ostreeImg.OSTreeDeploymentCustomizations.Users = img.bootcImg.Users
ostreeImg.OSTreeDeploymentCustomizations.Groups = img.bootcImg.Groups
buildPipeline := manifest.NewBuildFromContainer(m, runner, containers, &manifest.BuildOptions{ContainerBuildable: true})
buildPipeline.Checkpoint()
// In the bootc flow, we reuse the host container context for tools;
// this is signified by passing nil to the below pipelines.
var hostPipeline manifest.Build
opts := &baseRawOstreeImageOpts{useBootupd: true}
fileBasename := img.bootcImg.Filename
// In BIB, we export multiple images from the same pipeline so we use the
// filename as the basename for each export and set the extensions based on
// each file format.
baseImage := baseRawOstreeImage(ostreeImg, buildPipeline, opts)
baseImage.SetFilename(fmt.Sprintf("%s.raw", fileBasename))
qcow2Pipeline := manifest.NewQCOW2(hostPipeline, baseImage)
qcow2Pipeline.Compat = ostreeImg.Platform.GetQCOW2Compat()
qcow2Pipeline.SetFilename(fmt.Sprintf("%s.qcow2", fileBasename))
vmdkPipeline := manifest.NewVMDK(hostPipeline, baseImage)
vmdkPipeline.SetFilename(fmt.Sprintf("%s.vmdk", fileBasename))
ovfPipeline := manifest.NewOVF(hostPipeline, vmdkPipeline)
tarPipeline := manifest.NewTar(hostPipeline, ovfPipeline, "archive")
tarPipeline.Format = osbuild.TarArchiveFormatUstar
tarPipeline.SetFilename(fmt.Sprintf("%s.tar", fileBasename))
// The .ovf descriptor needs to be the first file in the archive
tarPipeline.Paths = []string{
fmt.Sprintf("%s.ovf", fileBasename),
fmt.Sprintf("%s.mf", fileBasename),
fmt.Sprintf("%s.vmdk", fileBasename),
}
return nil
}
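
A sketch of how the legacy wrapper is intended to be used (not from this diff; variable names are assumptions): build the regular BootcDiskImage first, then wrap it when the target bootc is too old for the new direct flow.

func exampleLegacyFallback(m *manifest.Manifest, real *image.BootcDiskImage,
	containers []container.SourceSpec, r runner.Runner, rng *rand.Rand) error {
	legacy := image.NewBootcLegacyDiskImage(real)
	return legacy.InstantiateManifestFromContainers(m, containers, r, rng)
}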

View file

@ -27,8 +27,10 @@ type DiskImage struct {
Workload workload.Workload
Filename string
Compression string
ForceSize *bool
PartTool osbuild.PartTool
// Control the VPC subformat use of force_size
VPCForceSize *bool
PartTool osbuild.PartTool
NoBLS bool
OSProduct string
@ -59,7 +61,6 @@ func (img *DiskImage) InstantiateManifest(m *manifest.Manifest,
osPipeline.OSCustomizations = img.OSCustomizations
osPipeline.Environment = img.Environment
osPipeline.Workload = img.Workload
osPipeline.NoBLS = img.NoBLS
osPipeline.OSProduct = img.OSProduct
osPipeline.OSVersion = img.OSVersion
osPipeline.OSNick = img.OSNick
@ -80,7 +81,7 @@ func (img *DiskImage) InstantiateManifest(m *manifest.Manifest,
imagePipeline = qcow2Pipeline
case platform.FORMAT_VHD:
vpcPipeline := manifest.NewVPC(buildPipeline, rawImagePipeline)
vpcPipeline.ForceSize = img.ForceSize
vpcPipeline.ForceSize = img.VPCForceSize
imagePipeline = vpcPipeline
case platform.FORMAT_VMDK:
imagePipeline = manifest.NewVMDK(buildPipeline, rawImagePipeline)

View file

@ -34,6 +34,12 @@ type AnacondaInstallerISOTree struct {
Keyboard *string
Timezone *string
// Kernel options that will be appended to the installed system
// (not the iso)
KickstartKernelOptionsAppend []string
// Enable networking on boot in the installed system
KickstartNetworkOnBoot bool
// Create a sudoers drop-in file for each user or group to enable the
// NOPASSWD option
NoPasswd []string
@ -67,6 +73,7 @@ type AnacondaInstallerISOTree struct {
ContainerSource *container.SourceSpec
containerSpec *container.Spec
// Kernel options for the ISO image
KernelOpts []string
// Enable ISOLinux stage
@ -437,6 +444,22 @@ func (p *AnacondaInstallerISOTree) ostreeContainerStages() []*osbuild.Stage {
kickstartOptions.ClearPart = &osbuild.ClearPartOptions{
All: true,
}
if len(p.KickstartKernelOptionsAppend) > 0 {
kickstartOptions.Bootloader = &osbuild.BootloaderOptions{
// We currently leave quoting to the
// user. This is generally ok - to do better
// we will have to mimic the kernel arg
// parser, see
// https://www.kernel.org/doc/html/latest/admin-guide/kernel-parameters.html
// and lib/cmdline.c in the kernel source
Append: strings.Join(p.KickstartKernelOptionsAppend, " "),
}
}
if p.KickstartNetworkOnBoot {
kickstartOptions.Network = []osbuild.NetworkOptions{
{BootProto: "dhcp", Device: "link", Activate: common.ToPtr(true), OnBoot: "on"},
}
}
stages = append(stages, osbuild.NewKickstartStage(kickstartOptions))
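
For reference, a hand-written sketch (not output of this code) of the kickstart stage options that the two new branches above produce; the kernel arguments are illustrative and other required kickstart fields are omitted.

func exampleKickstartOptions() *osbuild.Stage {
	opts := &osbuild.KickstartStageOptions{
		Bootloader: &osbuild.BootloaderOptions{Append: "console=ttyS0,115200 quiet"},
		Network: []osbuild.NetworkOptions{
			{BootProto: "dhcp", Device: "link", Activate: common.ToPtr(true), OnBoot: "on"},
		},
	}
	return osbuild.NewKickstartStage(opts)
}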

View file

@ -136,6 +136,10 @@ type OSCustomizations struct {
Files []*fsnode.File
FIPS bool
// NoBLS configures the image bootloader with traditional menu entries
// instead of BLS. Required for legacy systems like RHEL 7.
NoBLS bool
}
// OS represents the filesystem tree of the target image. This roughly
@ -171,9 +175,6 @@ type OS struct {
platform platform.Platform
kernelVer string
// NoBLS configures the image bootloader with traditional menu entries
// instead of BLS. Required for legacy systems like RHEL 7.
NoBLS bool
OSProduct string
OSVersion string
OSNick string

View file

@ -2,7 +2,9 @@ package manifest
import (
"fmt"
"os"
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/artifact"
"github.com/osbuild/images/pkg/container"
"github.com/osbuild/images/pkg/customizations/users"
@ -31,11 +33,14 @@ type RawBootcImage struct {
KernelOptionsAppend []string
// "Users" is a bit misleading as only root and its ssh key is supported
// right now because that is all that bootc gives us by default but that
// will most likely change over time.
// See https://github.com/containers/bootc/pull/267
Users []users.User
// The users to put into the image; note that /etc/passwd (and friends)
// will become unmanaged state by bootc when used
Users []users.User
Groups []users.Group
// SELinux policy, when set it enables the labeling of the tree with the
// selected profile
SELinux string
}
func (p RawBootcImage) Filename() string {
@ -88,13 +93,6 @@ func (p *RawBootcImage) serialize() osbuild.Pipeline {
panic(fmt.Errorf("no partition table in live image"))
}
if len(p.Users) > 1 {
panic(fmt.Errorf("raw bootc image only supports a single root key for user customization, got %v", p.Users))
}
if len(p.Users) == 1 && p.Users[0].Name != "root" {
panic(fmt.Errorf("raw bootc image only supports the root user, got %v", p.Users))
}
for _, stage := range osbuild.GenImagePrepareStages(pt, p.filename, osbuild.PTSfdisk) {
pipeline.AddStage(stage)
}
@ -105,8 +103,8 @@ func (p *RawBootcImage) serialize() osbuild.Pipeline {
opts := &osbuild.BootcInstallToFilesystemOptions{
Kargs: p.KernelOptionsAppend,
}
if len(p.Users) == 1 && p.Users[0].Key != nil {
opts.RootSSHAuthorizedKeys = []string{*p.Users[0].Key}
if len(p.containers) > 0 {
opts.TargetImgref = p.containers[0].Name
}
inputs := osbuild.ContainerDeployInputs{
Images: osbuild.NewContainersInputForSingleSource(p.containerSpecs[0]),
@ -121,17 +119,72 @@ func (p *RawBootcImage) serialize() osbuild.Pipeline {
}
pipeline.AddStage(st)
// XXX: there is no way right now to support any customizations,
// we cannot touch the filesystem after bootc installed it or
// we risk messing with its selinux labels or future fsverity
// magic. Once we have a mechanism like --copy-etc from
// https://github.com/containers/bootc/pull/267 things should
// be a bit better
for _, stage := range osbuild.GenImageFinishStages(pt, p.filename) {
pipeline.AddStage(stage)
}
// all our customizations work directly on the mounted deployment
// root from the image, so generate the devices/mounts for all of them
devices, mounts, err = osbuild.GenBootupdDevicesMounts(p.filename, p.PartitionTable)
if err != nil {
panic(fmt.Sprintf("gen devices stage failed %v", err))
}
mounts = append(mounts, *osbuild.NewOSTreeDeploymentMountDefault("ostree.deployment", osbuild.OSTreeMountSourceMount))
mounts = append(mounts, *osbuild.NewBindMount("bind-ostree-deployment-to-tree", "mount://", "tree://"))
// we always include the fstab stage
fstabStage := osbuild.NewFSTabStage(osbuild.NewFSTabStageOptions(pt))
fstabStage.Mounts = mounts
fstabStage.Devices = devices
pipeline.AddStage(fstabStage)
// customize the image
if len(p.Groups) > 0 {
groupsStage := osbuild.GenGroupsStage(p.Groups)
groupsStage.Mounts = mounts
groupsStage.Devices = devices
pipeline.AddStage(groupsStage)
}
if len(p.Users) > 0 {
// ensure /var/home is available
mkdirStage := osbuild.NewMkdirStage(&osbuild.MkdirStageOptions{
Paths: []osbuild.MkdirStagePath{
{
Path: "/var/home",
Mode: common.ToPtr(os.FileMode(0755)),
ExistOk: true,
},
},
})
mkdirStage.Mounts = mounts
mkdirStage.Devices = devices
pipeline.AddStage(mkdirStage)
// add the users
usersStage, err := osbuild.GenUsersStage(p.Users, false)
if err != nil {
panic(fmt.Sprintf("user stage failed %v", err))
}
usersStage.Mounts = mounts
usersStage.Devices = devices
pipeline.AddStage(usersStage)
// add selinux
if p.SELinux != "" {
opts := &osbuild.SELinuxStageOptions{
FileContexts: fmt.Sprintf("etc/selinux/%s/contexts/files/file_contexts", p.SELinux),
ExcludePaths: []string{"/sysroot"},
}
selinuxStage := osbuild.NewSELinuxStage(opts)
selinuxStage.Mounts = mounts
selinuxStage.Devices = devices
pipeline.AddStage(selinuxStage)
}
}
return pipeline
}

View file

@ -0,0 +1,30 @@
package osbuild
import (
"fmt"
"strings"
)
type BindMountOptions struct {
Source string `json:"source,omitempty"`
}
func (BindMountOptions) isMountOptions() {}
func NewBindMount(name, source, target string) *Mount {
if !strings.HasPrefix(source, "mount://") {
panic(fmt.Errorf(`bind mount source must start with "mount://", got %q`, source))
}
if !strings.HasPrefix(target, "tree://") {
panic(fmt.Errorf(`bind mount target must start with "tree://", got %q`, target))
}
return &Mount{
Type: "org.osbuild.bind",
Name: name,
Target: target,
Options: &BindMountOptions{
Source: source,
},
}
}
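
A usage sketch mirroring the call added in rawbootc.go above; the mount name and the mount:// and tree:// prefixes are taken from that file.

func exampleBindMount() *osbuild.Mount {
	// bind the already-mounted ostree deployment back onto the pipeline tree;
	// a source or target without the required prefix makes NewBindMount panic
	return osbuild.NewBindMount("bind-ostree-deployment-to-tree", "mount://", "tree://")
}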

View file

@ -9,6 +9,9 @@ type BootcInstallToFilesystemOptions struct {
RootSSHAuthorizedKeys []string `json:"root-ssh-authorized-keys,omitempty"`
// options for --karg
Kargs []string `json:"kernel-args,omitempty"`
// option for --target-imgref
TargetImgref string `json:"target-imgref"`
}
func (BootcInstallToFilesystemOptions) isStageOptions() {}
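
A minimal options sketch for the new field (the image reference is a hypothetical example).

func exampleBootcInstallOptions() *osbuild.BootcInstallToFilesystemOptions {
	return &osbuild.BootcInstallToFilesystemOptions{
		Kargs:        []string{"console=ttyS0"},   // forwarded as --karg
		TargetImgref: "quay.io/example/os:latest", // forwarded as --target-imgref
	}
}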

View file

@ -2,6 +2,8 @@ package osbuild
import (
"fmt"
"golang.org/x/exp/slices"
)
type CloudInitStageOptions struct {
@ -86,10 +88,12 @@ func (c CloudInitConfigFile) validate() error {
return err
}
}
allowedDatasources := []string{"Azure", "Ec2", "None"}
if len(c.DatasourceList) > 0 {
for _, d := range c.DatasourceList {
if d != "Azure" {
return fmt.Errorf("only 'Azure' is allowed as an item in the datasource_list")
if !slices.Contains(allowedDatasources, d) {
return fmt.Errorf("datasource %s is not allowed, only %v are allowed", d, allowedDatasources)
}
}
}

View file

@ -37,6 +37,11 @@ type KickstartStageOptions struct {
ClearPart *ClearPartOptions `json:"clearpart,omitempty"`
AutoPart *AutoPartOptions `json:"autopart,omitempty"`
Network []NetworkOptions `json:"network,omitempty"`
Bootloader *BootloaderOptions `json:"bootloader,omitempty"`
}
type BootloaderOptions struct {
Append string `json:"append"`
}
type LiveIMGOptions struct {

View file

@ -25,6 +25,7 @@ type repository struct {
ModuleHotfixes *bool `json:"module_hotfixes,omitempty"`
MetadataExpire string `json:"metadata_expire,omitempty"`
ImageTypeTags []string `json:"image_type_tags,omitempty"`
PackageSets []string `json:"package_sets,omitempty"`
}
type RepoConfig struct {
@ -264,6 +265,7 @@ func LoadRepositoriesFromFile(filename string) (map[string][]RepoConfig, error)
MetadataExpire: repo.MetadataExpire,
ModuleHotfixes: repo.ModuleHotfixes,
ImageTypeTags: repo.ImageTypeTags,
PackageSets: repo.PackageSets,
}
repoConfigs[arch] = append(repoConfigs[arch], config)

View file

@ -3,6 +3,7 @@ project_name: govmomi
builds:
- id: govc
no_main_check: true
goos: &goos-defs
- linux
- darwin
@ -13,6 +14,7 @@ builds:
- arm
- arm64
- mips64le
- s390x
env:
- CGO_ENABLED=0
- PKGPATH=github.com/vmware/govmomi/govc/flags
@ -21,6 +23,7 @@ builds:
ldflags:
- "-X {{.Env.PKGPATH}}.BuildVersion={{.Version}} -X {{.Env.PKGPATH}}.BuildCommit={{.ShortCommit}} -X {{.Env.PKGPATH}}.BuildDate={{.Date}}"
- id: vcsim
no_main_check: true
goos: *goos-defs
goarch: *goarch-defs
env:
@ -30,6 +33,18 @@ builds:
ldflags:
- "-X main.buildVersion={{.Version}} -X main.buildCommit={{.ShortCommit}} -X main.buildDate={{.Date}}"
nfpms:
- package_name: govmomi
builds:
- govc
- vcsim
homepage: https://github.com/vmware/govmomi
maintainer: Doug MacEachern <dougm@vmware.com>
description: |-
vSphere CLI
formats:
- rpm
archives:
- id: govcbuild
builds:
@ -90,7 +105,7 @@ brews:
commit_author:
name: Alfred the Narwhal
email: cna-alfred@vmware.com
folder: Formula
directory: Formula
homepage: "https://github.com/vmware/govmomi/blob/main/govc/README.md"
description: "govc is a vSphere CLI built on top of govmomi."
test: |
@ -110,7 +125,7 @@ brews:
commit_author:
name: Alfred the Narwhal
email: cna-alfred@vmware.com
folder: Formula
directory: Formula
homepage: "https://github.com/vmware/govmomi/blob/main/vcsim/README.md"
description: "vcsim is a vSphere API simulator built on top of govmomi."
test: |

View file

@ -46,6 +46,7 @@ Parveen Chahal <parkuma@microsoft.com> <mail.chahal@gmail.com>
Pieter Noordhuis <pnoordhuis@vmware.com> <pcnoordhuis@gmail.com>
Ricardo Katz <rkatz@vmware.com> <rikatz@users.noreply.github.com>
Saad Malik <saad@spectrocloud.com> <simfox3@gmail.com>
Stoyan Zhelyazkov <stoyan.zhelyazkov@broadcom.com> <156204153+stoyanzhelyazkov@users.noreply.github.com>
Takaaki Furukawa <takaaki.frkw@gmail.com> takaaki.furukawa <takaaki.furukawa@mail.rakuten.com>
Takaaki Furukawa <takaaki.frkw@gmail.com> tkak <takaaki.frkw@gmail.com>
Uwe Bessle <Uwe.Bessle@iteratec.de> Uwe Bessle <u.bessle.extern@eos-ts.com>

File diff suppressed because it is too large

View file

@ -61,9 +61,25 @@ type Client struct {
// NewClient creates a new CNS client
func NewClient(ctx context.Context, c *vim25.Client) (*Client, error) {
sc := c.Client.NewServiceClient(Path, Namespace)
sc.Namespace = c.Namespace
sc.Version = c.Version
return &Client{sc, sc, c}, nil
// Use current vCenter vsan version by default
err := sc.UseServiceVersion(Namespace)
if err != nil {
return nil, err
}
// PropertyCollector related methods (task.Wait) need to send requests to vim25.Path (/sdk).
// This vim25.Client shares the same http.Transport and Namespace/Version, but sends requests to '/sdk'
rt := sc.NewServiceClient(vim25.Path, Namespace)
rt.Version = sc.Version
vc := &vim25.Client{
ServiceContent: c.ServiceContent,
Client: rt,
RoundTripper: rt,
}
return &Client{sc, sc, vc}, nil
}
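
A caller sketch (placeholders only): version negotiation against the vsan service now happens inside NewClient, so callers just pass an authenticated vim25.Client.

func exampleCNSClient(ctx context.Context, vc *vim25.Client) (*cns.Client, error) {
	// no separate UseServiceVersion("vsan") call is needed anymore
	// (see the govc ClientFlag change below)
	return cns.NewClient(ctx, vc)
}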
// RoundTrip dispatches to the RoundTripper field.

View file

@ -430,7 +430,6 @@ func (flag *ClientFlag) CnsClient() (*cns.Client, error) {
if err != nil {
return nil, err
}
_ = vc.UseServiceVersion("vsan")
c, err := cns.NewClient(context.Background(), vc)
if err != nil {

View file

@ -21,5 +21,5 @@ const (
ClientName = "govmomi"
// ClientVersion is the version of this SDK
ClientVersion = "0.36.3"
ClientVersion = "0.37.1"
)

View file

@ -142,9 +142,29 @@ func (p *Collector) CancelWaitForUpdates(ctx context.Context) error {
return err
}
func (p *Collector) RetrieveProperties(ctx context.Context, req types.RetrieveProperties) (*types.RetrievePropertiesResponse, error) {
req.This = p.Reference()
return methods.RetrieveProperties(ctx, p.roundTripper, &req)
// RetrieveProperties wraps RetrievePropertiesEx and ContinueRetrievePropertiesEx to collect properties in batches.
func (p *Collector) RetrieveProperties(
ctx context.Context,
req types.RetrieveProperties,
maxObjectsArgs ...int32) (*types.RetrievePropertiesResponse, error) {
var opts types.RetrieveOptions
if l := len(maxObjectsArgs); l > 1 {
return nil, fmt.Errorf("maxObjectsArgs accepts a single value")
} else if l == 1 {
opts.MaxObjects = maxObjectsArgs[0]
}
objects, err := mo.RetrievePropertiesEx(ctx, p.roundTripper, types.RetrievePropertiesEx{
This: p.Reference(),
SpecSet: req.SpecSet,
Options: opts,
})
if err != nil {
return nil, err
}
return &types.RetrievePropertiesResponse{Returnval: objects}, nil
}
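
A caller sketch for the new optional batch-size argument (names are placeholders).

func exampleRetrieveInBatches(ctx context.Context, pc *property.Collector,
	req types.RetrieveProperties) ([]types.ObjectContent, error) {
	// at most 100 objects per round trip; the RetrievePropertiesEx /
	// ContinueRetrievePropertiesEx paging is handled internally
	res, err := pc.RetrieveProperties(ctx, req, 100)
	if err != nil {
		return nil, err
	}
	return res.Returnval, nil
}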
// Retrieve loads properties for a slice of managed objects. The dst argument

View file

@ -141,7 +141,7 @@ func WaitEx(
func(updates []types.ObjectUpdate) bool {
for _, update := range updates {
// Only look at updates for the expected task object.
if update.Obj == ref {
if update.Obj.Value == ref.Value && update.Obj.Type == ref.Type {
if cb.fn(update.ChangeSet) {
return true
}

View file

@ -1,5 +1,5 @@
/*
Copyright (c) 2018-2023 VMware, Inc. All Rights Reserved.
Copyright (c) 2018-2024 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -322,3 +322,11 @@ func (c *Manager) DeleteSubscriber(ctx context.Context, library *Library, subscr
url := c.Resource(internal.Subscriptions).WithID(library.ID).WithAction("delete")
return c.Do(ctx, url.Request(http.MethodPost, &id), nil)
}
// EvictSubscribedLibrary evicts the cached content of an on-demand subscribed library.
// This operation allows the cached content of a subscribed library to be removed to free up storage capacity.
func (c *Manager) EvictSubscribedLibrary(ctx context.Context, library *Library) error {
path := internal.SubscribedLibraryPath
url := c.Resource(path).WithID(library.ID).WithAction("evict")
return c.Do(ctx, url.Request(http.MethodPost), nil)
}

View file

@ -206,3 +206,11 @@ func (c *Manager) FindLibraryItems(
var res []string
return res, c.Do(ctx, url.Request(http.MethodPost, spec), &res)
}
// EvictSubscribedLibraryItem evicts the cached content of a library item in an on-demand subscribed library.
// This operation allows the cached content of a subscribed library item to be removed to free up storage capacity.
func (c *Manager) EvictSubscribedLibraryItem(ctx context.Context, item *Item) error {
path := internal.SubscribedLibraryItem
url := c.Resource(path).WithID(item.ID).WithAction("evict")
return c.Do(ctx, url.Request(http.MethodPost), nil)
}
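
A combined usage sketch for the two new evict calls (the manager, library and item values are assumed to come from the usual library.NewManager lookups).

func exampleEvict(ctx context.Context, m *library.Manager, lib *library.Library, item *library.Item) error {
	if err := m.EvictSubscribedLibrary(ctx, lib); err != nil {
		return err
	}
	return m.EvictSubscribedLibraryItem(ctx, item)
}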

View file

@ -19,15 +19,11 @@ package vim25
import (
"context"
"encoding/json"
"fmt"
"net/http"
"path"
"strings"
"github.com/vmware/govmomi/vim25/methods"
"github.com/vmware/govmomi/vim25/soap"
"github.com/vmware/govmomi/vim25/types"
"github.com/vmware/govmomi/vim25/xml"
)
const (
@ -88,42 +84,6 @@ func NewClient(ctx context.Context, rt soap.RoundTripper) (*Client, error) {
return &c, nil
}
// UseServiceVersion sets soap.Client.Version to the current version of the service endpoint via /sdk/vimServiceVersions.xml
func (c *Client) UseServiceVersion(kind ...string) error {
ns := "vim"
if len(kind) != 0 {
ns = kind[0]
}
u := c.URL()
u.Path = path.Join(Path, ns+"ServiceVersions.xml")
res, err := c.Get(u.String())
if err != nil {
return err
}
if res.StatusCode != http.StatusOK {
return fmt.Errorf("http.Get(%s): %s", u.Path, err)
}
v := struct {
Namespace *string `xml:"namespace>name"`
Version *string `xml:"namespace>version"`
}{
&c.Namespace,
&c.Version,
}
err = xml.NewDecoder(res.Body).Decode(&v)
_ = res.Body.Close()
if err != nil {
return fmt.Errorf("xml.Decode(%s): %s", u.Path, err)
}
return nil
}
// RoundTrip dispatches to the RoundTripper field.
func (c *Client) RoundTrip(ctx context.Context, req, res soap.HasFault) error {
return c.RoundTripper.RoundTrip(ctx, req, res)

Some files were not shown because too many files have changed in this diff