go.mod,vendor: run go get -u github.com/osbuild/images

This commit updates the "images" dependency from v0.105.0 to v0.109.0 and refreshes the vendored dependencies pulled in along with it.
Michael Vogt 2025-01-10 16:00:57 +01:00 committed by Tomáš Hozza
parent a18ec71dc8
commit 9dd8b2c50f
87 changed files with 1266 additions and 706 deletions

go.mod (14 changed lines)

@ -10,11 +10,11 @@ require (
cloud.google.com/go/compute v1.31.1
cloud.google.com/go/storage v1.44.0
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0
github.com/Azure/go-autorest/autorest v0.11.29
github.com/Azure/go-autorest/autorest/azure/auth v0.5.13
github.com/BurntSushi/toml v1.4.0
@ -33,7 +33,7 @@ require (
github.com/getkin/kin-openapi v0.93.0
github.com/getsentry/sentry-go v0.28.1
github.com/gobwas/glob v0.2.3
github.com/golang-jwt/jwt/v4 v4.5.0
github.com/golang-jwt/jwt/v4 v4.5.1
github.com/google/go-cmp v0.6.0
github.com/google/uuid v1.6.0
github.com/gophercloud/gophercloud v1.14.0
@ -46,7 +46,7 @@ require (
github.com/labstack/gommon v0.4.2
github.com/openshift-online/ocm-sdk-go v0.1.438
github.com/oracle/oci-go-sdk/v54 v54.0.0
github.com/osbuild/images v0.105.0
github.com/osbuild/images v0.109.0
github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20240814102216-0239db53236d
github.com/osbuild/pulp-client v0.1.0
github.com/prometheus/client_golang v1.20.2
@ -57,9 +57,9 @@ require (
github.com/ubccr/kerby v0.0.0-20230802201021-412be7bfaee5
github.com/vmware/govmomi v0.42.0
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
golang.org/x/oauth2 v0.24.0
golang.org/x/oauth2 v0.25.0
golang.org/x/sync v0.10.0
golang.org/x/sys v0.28.0
golang.org/x/sys v0.29.0
google.golang.org/api v0.214.0
)
@ -72,7 +72,7 @@ require (
cloud.google.com/go/iam v1.2.2 // indirect
cloud.google.com/go/monitoring v1.21.2 // indirect
dario.cat/mergo v1.0.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.22 // indirect

go.sum (37 changed lines)

@ -29,10 +29,12 @@ github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
@ -47,8 +49,8 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0/go.mod h1:5kakwfW5CjC9KK+Q4wjXAg+ShuIm2mBMua0ZFj2C8PE=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1 h1:cf+OIKbkmMHBaC3u78AXomweqM0oxQSgBXRZf3WH4yM=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1/go.mod h1:ap1dmS6vQKJxSMNiGJcq4QuUQkOynyD93gLw6MDF7ek=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 h1:mlmW46Q0B79I+Aj4azKC6xDMFN9a9SyZWESlGWYXbFs=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0/go.mod h1:PXe2h+LKcWTX9afWdZoHyODqR4fBa5boUM/8uJfZ0Jo=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
@ -76,6 +78,8 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
@ -198,6 +202,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc
github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU=
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
@ -286,8 +292,9 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@ -429,6 +436,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs=
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
@ -532,8 +541,8 @@ github.com/openshift-online/ocm-sdk-go v0.1.438 h1:tsLCCUzbLCTL4RZG02y9RuopmGCXp
github.com/openshift-online/ocm-sdk-go v0.1.438/go.mod h1:CiAu2jwl3ITKOxkeV0Qnhzv4gs35AmpIzVABQLtcI2Y=
github.com/oracle/oci-go-sdk/v54 v54.0.0 h1:CDLjeSejv2aDpElAJrhKpi6zvT/zhZCZuXchUUZ+LS4=
github.com/oracle/oci-go-sdk/v54 v54.0.0/go.mod h1:+t+yvcFGVp+3ZnztnyxqXfQDsMlq8U25faBLa+mqCMc=
github.com/osbuild/images v0.105.0 h1:KVFKmBhxDzpdZuzLfM84TpfNP40feC5DjRKn+OJcOZ8=
github.com/osbuild/images v0.105.0/go.mod h1:4bNmMQOVadIKVC1q8zsLO8tdEQFH90zIp+MQBQUnCiE=
github.com/osbuild/images v0.109.0 h1:yLY1Ul6O/4fp+y+UI2XIML9CcsKdPNTARk95hiz1syY=
github.com/osbuild/images v0.109.0/go.mod h1:58tzp7jV50rjaH9gMpvmQdVati0c4TaC5Op7wmSD/tY=
github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20240814102216-0239db53236d h1:r9BFPDv0uuA9k1947Jybcxs36c/pTywWS1gjeizvtcQ=
github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20240814102216-0239db53236d/go.mod h1:zR1iu/hOuf+OQNJlk70tju9IqzzM4ycq0ectkFBm94U=
github.com/osbuild/pulp-client v0.1.0 h1:L0C4ezBJGTamN3BKdv+rKLuq/WxXJbsFwz/Hj7aEmJ8=
@ -563,6 +572,8 @@ github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4=
github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@ -752,8 +763,8 @@ golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE=
golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -791,8 +802,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=


@ -1,5 +1,30 @@
# Release History
## 1.16.0 (2024-10-17)
### Features Added
* Added field `Kind` to `runtime.StartSpanOptions` to allow a kind to be set when starting a span.
### Bugs Fixed
* `BearerTokenPolicy` now rewinds request bodies before retrying
## 1.15.0 (2024-10-14)
### Features Added
* `BearerTokenPolicy` handles CAE claims challenges
### Bugs Fixed
* Omit the `ResponseError.RawResponse` field from JSON marshaling so instances can be marshaled.
* Fixed an integer overflow in the retry policy.
### Other Changes
* Update dependencies.
## 1.14.0 (2024-08-07)
### Features Added


@ -5,7 +5,6 @@ package runtime
import (
"context"
"encoding/base64"
"fmt"
"net/http"
"strings"
@ -66,31 +65,16 @@ func NewBearerTokenPolicy(cred azcore.TokenCredential, opts *armpolicy.BearerTok
p.btp = azruntime.NewBearerTokenPolicy(cred, opts.Scopes, &azpolicy.BearerTokenOptions{
InsecureAllowCredentialWithHTTP: opts.InsecureAllowCredentialWithHTTP,
AuthorizationHandler: azpolicy.AuthorizationHandler{
OnChallenge: p.onChallenge,
OnRequest: p.onRequest,
OnRequest: p.onRequest,
},
})
return p
}
func (b *BearerTokenPolicy) onChallenge(req *azpolicy.Request, res *http.Response, authNZ func(azpolicy.TokenRequestOptions) error) error {
challenge := res.Header.Get(shared.HeaderWWWAuthenticate)
claims, err := parseChallenge(challenge)
if err != nil {
// the challenge contains claims we can't parse
return err
} else if claims != "" {
// request a new token having the specified claims, send the request again
return authNZ(azpolicy.TokenRequestOptions{Claims: claims, EnableCAE: true, Scopes: b.scopes})
}
// auth challenge didn't include claims, so this is a simple authorization failure
return azruntime.NewResponseError(res)
}
// onRequest authorizes requests with one or more bearer tokens
func (b *BearerTokenPolicy) onRequest(req *azpolicy.Request, authNZ func(azpolicy.TokenRequestOptions) error) error {
// authorize the request with a token for the primary tenant
err := authNZ(azpolicy.TokenRequestOptions{EnableCAE: true, Scopes: b.scopes})
err := authNZ(azpolicy.TokenRequestOptions{Scopes: b.scopes})
if err != nil || len(b.auxResources) == 0 {
return err
}
@ -116,31 +100,3 @@ func (b *BearerTokenPolicy) onRequest(req *azpolicy.Request, authNZ func(azpolic
func (b *BearerTokenPolicy) Do(req *azpolicy.Request) (*http.Response, error) {
return b.btp.Do(req)
}
// parseChallenge parses claims from an authentication challenge issued by ARM so a client can request a token
// that will satisfy conditional access policies. It returns a non-nil error when the given value contains
// claims it can't parse. If the value contains no claims, it returns an empty string and a nil error.
func parseChallenge(wwwAuthenticate string) (string, error) {
claims := ""
var err error
for _, param := range strings.Split(wwwAuthenticate, ",") {
if _, after, found := strings.Cut(param, "claims="); found {
if claims != "" {
// The header contains multiple challenges, at least two of which specify claims. The specs allow this
// but it's unclear what a client should do in this case and there's as yet no concrete example of it.
err = fmt.Errorf("found multiple claims challenges in %q", wwwAuthenticate)
break
}
// trim stuff that would get an error from RawURLEncoding; claims may or may not be padded
claims = strings.Trim(after, `\"=`)
// we don't return this error because it's something unhelpful like "illegal base64 data at input byte 42"
if b, decErr := base64.RawURLEncoding.DecodeString(claims); decErr == nil {
claims = string(b)
} else {
err = fmt.Errorf("failed to parse claims from %q", wwwAuthenticate)
break
}
}
}
return claims, err
}


@ -11,4 +11,7 @@ import "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
// ResponseError is returned when a request is made to a service and
// the service returns a non-success HTTP status code.
// Use errors.As() to access this type in the error chain.
//
// When marshaling instances, the RawResponse field will be omitted.
// However, the contents returned by Error() will be preserved.
type ResponseError = exported.ResponseError


@ -117,12 +117,18 @@ type ResponseError struct {
StatusCode int
// RawResponse is the underlying HTTP response.
RawResponse *http.Response
RawResponse *http.Response `json:"-"`
errMsg string
}
// Error implements the error interface for type ResponseError.
// Note that the message contents are not contractual and can change over time.
func (e *ResponseError) Error() string {
if e.errMsg != "" {
return e.errMsg
}
const separator = "--------------------------------------------------------------------------------"
// write the request method and URL with response status code
msg := &bytes.Buffer{}
@ -163,5 +169,33 @@ func (e *ResponseError) Error() string {
}
fmt.Fprintln(msg, separator)
return msg.String()
e.errMsg = msg.String()
return e.errMsg
}
// internal type used for marshaling/unmarshaling
type responseError struct {
ErrorCode string `json:"errorCode"`
StatusCode int `json:"statusCode"`
ErrorMessage string `json:"errorMessage"`
}
func (e ResponseError) MarshalJSON() ([]byte, error) {
return json.Marshal(responseError{
ErrorCode: e.ErrorCode,
StatusCode: e.StatusCode,
ErrorMessage: e.Error(),
})
}
func (e *ResponseError) UnmarshalJSON(data []byte) error {
re := responseError{}
if err := json.Unmarshal(data, &re); err != nil {
return err
}
e.ErrorCode = re.ErrorCode
e.StatusCode = re.StatusCode
e.errMsg = re.ErrorMessage
return nil
}
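Combined with the `json:"-"` tag on `RawResponse`, these marshalers let a `ResponseError` round-trip through JSON without dragging the raw HTTP response along. A rough, self-contained sketch of the effect; the response and error values below are hand-built placeholders, where real code would normally obtain the error from a client call via `errors.As`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strings"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)

func main() {
	// A hand-built response standing in for one returned by a service call.
	resp := &http.Response{
		Status:     "404 Not Found",
		StatusCode: http.StatusNotFound,
		Header:     http.Header{},
		Body:       io.NopCloser(strings.NewReader(`{"error":{"code":"ResourceNotFound"}}`)),
		Request: &http.Request{
			Method: http.MethodGet,
			URL:    &url.URL{Scheme: "https", Host: "example.invalid", Path: "/resource"},
		},
	}
	respErr := &azcore.ResponseError{ErrorCode: "ResourceNotFound", StatusCode: 404, RawResponse: resp}

	// RawResponse is omitted from the JSON; the human-readable message from
	// Error() is carried in the errorMessage field instead.
	b, err := json.Marshal(respErr)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```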


@ -40,5 +40,5 @@ const (
Module = "azcore"
// Version is the semantic version (see http://semver.org) of this module.
Version = "v1.14.0"
Version = "v1.16.0"
)


@ -161,19 +161,20 @@ type BearerTokenOptions struct {
// AuthorizationHandler allows SDK developers to insert custom logic that runs when BearerTokenPolicy must authorize a request.
type AuthorizationHandler struct {
// OnRequest is called each time the policy receives a request. Its func parameter authorizes the request with a token
// from the policy's given credential. Implementations that need to perform I/O should use the Request's context,
// available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't
// send the request. When OnRequest is nil, the policy follows its default behavior, authorizing the request with a
// token from its credential according to its configuration.
// OnRequest provides TokenRequestOptions the policy can use to acquire a token for a request. The policy calls OnRequest
// whenever it needs a token and may call it multiple times for the same request. Its func parameter authorizes the request
// with a token from the policy's credential. Implementations that need to perform I/O should use the Request's context,
// available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't send
// the request. When OnRequest is nil, the policy follows its default behavior, which is to authorize the request with a token
// from its credential according to its configuration.
OnRequest func(*Request, func(TokenRequestOptions) error) error
// OnChallenge is called when the policy receives a 401 response, allowing the AuthorizationHandler to re-authorize the
// request according to an authentication challenge (the Response's WWW-Authenticate header). OnChallenge is responsible
// for parsing parameters from the challenge. Its func parameter will authorize the request with a token from the policy's
// given credential. Implementations that need to perform I/O should use the Request's context, available from
// Request.Raw().Context(). When OnChallenge returns nil, the policy will send the request again. When OnChallenge is nil,
// the policy will return any 401 response to the client.
// OnChallenge allows clients to implement custom HTTP authentication challenge handling. BearerTokenPolicy calls it upon
// receiving a 401 response containing multiple Bearer challenges or a challenge BearerTokenPolicy itself can't handle.
// OnChallenge is responsible for parsing challenge(s) (the Response's WWW-Authenticate header) and reauthorizing the
// Request accordingly. Its func argument authorizes the Request with a token from the policy's credential using the given
// TokenRequestOptions. OnChallenge should honor the Request's context, available from Request.Raw().Context(). When
// OnChallenge returns nil, the policy will send the Request again.
OnChallenge func(*Request, *http.Response, func(TokenRequestOptions) error) error
}
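For callers that need the custom behavior this documentation describes, the handler is supplied through `BearerTokenOptions` when constructing a bearer token policy. A minimal sketch under these assumptions: the scope and the challenge-handling logic are placeholders, and `DefaultAzureCredential` merely stands in for any token credential.

```go
package main

import (
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func newPolicy() (policy.Policy, error) {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		return nil, err
	}
	scopes := []string{"https://management.azure.com/.default"} // placeholder scope
	opts := &policy.BearerTokenOptions{
		AuthorizationHandler: policy.AuthorizationHandler{
			// Called whenever the policy needs a token; may run more than once per request.
			OnRequest: func(req *policy.Request, authNZ func(policy.TokenRequestOptions) error) error {
				return authNZ(policy.TokenRequestOptions{Scopes: scopes})
			},
			// Called for Bearer challenges the policy doesn't handle itself (CAE is handled internally).
			OnChallenge: func(req *policy.Request, res *http.Response, authNZ func(policy.TokenRequestOptions) error) error {
				// Inspect res.Header.Get("WWW-Authenticate") and reauthorize accordingly.
				return authNZ(policy.TokenRequestOptions{Scopes: scopes})
			},
		},
	}
	return runtime.NewBearerTokenPolicy(cred, scopes, opts), nil
}
```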


@ -4,9 +4,12 @@
package runtime
import (
"encoding/base64"
"errors"
"net/http"
"regexp"
"strings"
"sync"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
@ -17,6 +20,11 @@ import (
)
// BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential.
// It handles [Continuous Access Evaluation] (CAE) challenges. Clients needing to handle
// additional authentication challenges, or needing more control over authorization, should
// provide a [policy.AuthorizationHandler] in [policy.BearerTokenOptions].
//
// [Continuous Access Evaluation]: https://learn.microsoft.com/entra/identity/conditional-access/concept-continuous-access-evaluation
type BearerTokenPolicy struct {
// mainResource is the resource to be retreived using the tenant specified in the credential
mainResource *temporal.Resource[exported.AccessToken, acquiringResourceState]
@ -51,8 +59,18 @@ func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts *
if opts == nil {
opts = &policy.BearerTokenOptions{}
}
ah := opts.AuthorizationHandler
if ah.OnRequest == nil {
// Set a default OnRequest that simply requests a token with the given scopes. OnChallenge
// doesn't get a default so the policy can use a nil check to determine whether the caller
// provided an implementation.
ah.OnRequest = func(_ *policy.Request, authNZ func(policy.TokenRequestOptions) error) error {
// authNZ sets EnableCAE: true in all cases, no need to duplicate that here
return authNZ(policy.TokenRequestOptions{Scopes: scopes})
}
}
return &BearerTokenPolicy{
authzHandler: opts.AuthorizationHandler,
authzHandler: ah,
cred: cred,
scopes: scopes,
mainResource: temporal.NewResource(acquire),
@ -63,6 +81,7 @@ func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts *
// authenticateAndAuthorize returns a function which authorizes req with a token from the policy's credential
func (b *BearerTokenPolicy) authenticateAndAuthorize(req *policy.Request) func(policy.TokenRequestOptions) error {
return func(tro policy.TokenRequestOptions) error {
tro.EnableCAE = true
as := acquiringResourceState{p: b, req: req, tro: tro}
tk, err := b.mainResource.Get(as)
if err != nil {
@ -86,12 +105,7 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) {
return nil, err
}
var err error
if b.authzHandler.OnRequest != nil {
err = b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req))
} else {
err = b.authenticateAndAuthorize(req)(policy.TokenRequestOptions{Scopes: b.scopes})
}
err := b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req))
if err != nil {
return nil, errorinfo.NonRetriableError(err)
}
@ -101,17 +115,54 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) {
return nil, err
}
res, err = b.handleChallenge(req, res, false)
return res, err
}
// handleChallenge handles authentication challenges either directly (for CAE challenges) or by calling
// the AuthorizationHandler. It's a no-op when the response doesn't include an authentication challenge.
// It will recurse at most once, to handle a CAE challenge following a non-CAE challenge handled by the
// AuthorizationHandler.
func (b *BearerTokenPolicy) handleChallenge(req *policy.Request, res *http.Response, recursed bool) (*http.Response, error) {
var err error
if res.StatusCode == http.StatusUnauthorized {
b.mainResource.Expire()
if res.Header.Get("WWW-Authenticate") != "" && b.authzHandler.OnChallenge != nil {
if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil {
res, err = req.Next()
if res.Header.Get(shared.HeaderWWWAuthenticate) != "" {
caeChallenge, parseErr := parseCAEChallenge(res)
if parseErr != nil {
return res, parseErr
}
switch {
case caeChallenge != nil:
authNZ := func(tro policy.TokenRequestOptions) error {
// Take the TokenRequestOptions provided by OnRequest and add the challenge claims. The value
// will be empty at time of writing because CAE is the only feature involving claims. If in
// the future some client needs to specify unrelated claims, this function may need to merge
// them with the challenge claims.
tro.Claims = caeChallenge.params["claims"]
return b.authenticateAndAuthorize(req)(tro)
}
if err = b.authzHandler.OnRequest(req, authNZ); err == nil {
if err = req.RewindBody(); err == nil {
res, err = req.Next()
}
}
case b.authzHandler.OnChallenge != nil && !recursed:
if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil {
if err = req.RewindBody(); err == nil {
if res, err = req.Next(); err == nil {
res, err = b.handleChallenge(req, res, true)
}
}
} else {
// don't retry challenge handling errors
err = errorinfo.NonRetriableError(err)
}
default:
// return the response to the pipeline
}
}
}
if err != nil {
err = errorinfo.NonRetriableError(err)
}
return res, err
}
@ -121,3 +172,65 @@ func checkHTTPSForAuth(req *policy.Request, allowHTTP bool) error {
}
return nil
}
// parseCAEChallenge returns a *authChallenge representing Response's CAE challenge (nil when Response has none).
// If Response includes a CAE challenge having invalid claims, it returns a NonRetriableError.
func parseCAEChallenge(res *http.Response) (*authChallenge, error) {
var (
caeChallenge *authChallenge
err error
)
for _, c := range parseChallenges(res) {
if c.scheme == "Bearer" {
if claims := c.params["claims"]; claims != "" && c.params["error"] == "insufficient_claims" {
if b, de := base64.StdEncoding.DecodeString(claims); de == nil {
c.params["claims"] = string(b)
caeChallenge = &c
} else {
// don't include the decoding error because it's something
// unhelpful like "illegal base64 data at input byte 42"
err = errorinfo.NonRetriableError(errors.New("authentication challenge contains invalid claims: " + claims))
}
break
}
}
}
return caeChallenge, err
}
var (
challenge, challengeParams *regexp.Regexp
once = &sync.Once{}
)
type authChallenge struct {
scheme string
params map[string]string
}
// parseChallenges assumes authentication challenges have quoted parameter values
func parseChallenges(res *http.Response) []authChallenge {
once.Do(func() {
// matches challenges having quoted parameters, capturing scheme and parameters
challenge = regexp.MustCompile(`(?:(\w+) ((?:\w+="[^"]*",?\s*)+))`)
// captures parameter names and values in a match of the above expression
challengeParams = regexp.MustCompile(`(\w+)="([^"]*)"`)
})
parsed := []authChallenge{}
// WWW-Authenticate can have multiple values, each containing multiple challenges
for _, h := range res.Header.Values(shared.HeaderWWWAuthenticate) {
for _, sm := range challenge.FindAllStringSubmatch(h, -1) {
// sm is [challenge, scheme, params] (see regexp documentation on submatches)
c := authChallenge{
params: make(map[string]string),
scheme: sm[1],
}
for _, sm := range challengeParams.FindAllStringSubmatch(sm[2], -1) {
// sm is [key="value", key, value] (see regexp documentation on submatches)
c.params[sm[1]] = sm[2]
}
parsed = append(parsed, c)
}
}
return parsed
}
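The two regular expressions above drive the new challenge parsing. A standalone sketch applying the same expressions to a made-up CAE-style `WWW-Authenticate` value; the header and claims blob are illustrative only:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same expressions as parseChallenges above.
	challenge := regexp.MustCompile(`(?:(\w+) ((?:\w+="[^"]*",?\s*)+))`)
	challengeParams := regexp.MustCompile(`(\w+)="([^"]*)"`)

	// Hypothetical header value; the claims blob is not a real token.
	header := `Bearer realm="", error="insufficient_claims", claims="eyJhY2Nlc3NfdG9rZW4iOnt9fQ=="`

	for _, sm := range challenge.FindAllStringSubmatch(header, -1) {
		fmt.Println("scheme:", sm[1])
		for _, p := range challengeParams.FindAllStringSubmatch(sm[2], -1) {
			fmt.Printf("  %s = %s\n", p[1], p[2])
		}
	}
}
```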


@ -96,6 +96,8 @@ func (h *httpTracePolicy) Do(req *policy.Request) (resp *http.Response, err erro
// StartSpanOptions contains the optional values for StartSpan.
type StartSpanOptions struct {
// Kind indicates the kind of Span.
Kind tracing.SpanKind
// Attributes contains key-value pairs of attributes for the span.
Attributes []tracing.Attribute
}
@ -115,7 +117,6 @@ func StartSpan(ctx context.Context, name string, tracer tracing.Tracer, options
// we MUST propagate the active tracer before returning so that the trace policy can access it
ctx = context.WithValue(ctx, shared.CtxWithTracingTracer{}, tracer)
const newSpanKind = tracing.SpanKindInternal
if activeSpan := ctx.Value(ctxActiveSpan{}); activeSpan != nil {
// per the design guidelines, if a SDK method Foo() calls SDK method Bar(),
// then the span for Bar() must be suppressed. however, if Bar() makes a REST
@ -131,12 +132,15 @@ func StartSpan(ctx context.Context, name string, tracer tracing.Tracer, options
if options == nil {
options = &StartSpanOptions{}
}
if options.Kind == 0 {
options.Kind = tracing.SpanKindInternal
}
ctx, span := tracer.Start(ctx, name, &tracing.SpanOptions{
Kind: newSpanKind,
Kind: options.Kind,
Attributes: options.Attributes,
})
ctx = context.WithValue(ctx, ctxActiveSpan{}, newSpanKind)
ctx = context.WithValue(ctx, ctxActiveSpan{}, options.Kind)
return ctx, func(err error) {
if err != nil {
errType := strings.Replace(fmt.Sprintf("%T", err), "*exported.", "*azcore.", 1)
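With `Kind` now exposed on `StartSpanOptions`, module code can request a span kind other than the previously hard-coded `SpanKindInternal`. A rough sketch of how that might look; the operation name and tracer wiring are assumptions, not taken from this diff:

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing"
)

func doOperation(ctx context.Context, tracer tracing.Tracer) (err error) {
	// Start a client span; leaving Kind at its zero value keeps the old
	// default of tracing.SpanKindInternal.
	ctx, endSpan := runtime.StartSpan(ctx, "example.doOperation", tracer, &runtime.StartSpanOptions{
		Kind: tracing.SpanKindClient,
	})
	defer func() { endSpan(err) }()

	_ = ctx // the derived context carries the active span
	return nil
}
```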


@ -59,13 +59,33 @@ func setDefaults(o *policy.RetryOptions) {
}
func calcDelay(o policy.RetryOptions, try int32) time.Duration { // try is >=1; never 0
delay := time.Duration((1<<try)-1) * o.RetryDelay
// avoid overflow when shifting left
factor := time.Duration(math.MaxInt64)
if try < 63 {
factor = time.Duration(int64(1<<try) - 1)
}
// Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3)
delay = time.Duration(delay.Seconds() * (rand.Float64()/2 + 0.8) * float64(time.Second)) // NOTE: We want math/rand; not crypto/rand
if delay > o.MaxRetryDelay {
delay := factor * o.RetryDelay
if delay < factor {
// overflow has happened so set to max value
delay = time.Duration(math.MaxInt64)
}
// Introduce jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3)
jitterMultiplier := rand.Float64()/2 + 0.8 // NOTE: We want math/rand; not crypto/rand
delayFloat := float64(delay) * jitterMultiplier
if delayFloat > float64(math.MaxInt64) {
// the jitter pushed us over MaxInt64, so just use MaxInt64
delay = time.Duration(math.MaxInt64)
} else {
delay = time.Duration(delayFloat)
}
if delay > o.MaxRetryDelay { // MaxRetryDelay is backfilled with non-negative value
delay = o.MaxRetryDelay
}
return delay
}
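The delay still grows roughly as (2^try - 1) * RetryDelay, jittered into the range [0.8, 1.3) and clamped to MaxRetryDelay; the change guards both the shift and the float conversion against int64 overflow. A standalone sketch of the same arithmetic, using an assumed 800 ms base delay and 60 s cap purely for illustration:

```go
package main

import (
	"fmt"
	"math"
	"math/rand"
	"time"
)

// calcDelay mirrors the fixed logic above: exponential backoff with jitter,
// guarded against int64 overflow and clamped to a maximum delay.
func calcDelay(retryDelay, maxRetryDelay time.Duration, try int32) time.Duration {
	factor := time.Duration(math.MaxInt64)
	if try < 63 {
		factor = time.Duration(int64(1<<try) - 1)
	}
	delay := factor * retryDelay
	if delay < factor {
		// multiplication overflowed, so pin to the maximum
		delay = time.Duration(math.MaxInt64)
	}
	delayFloat := float64(delay) * (rand.Float64()/2 + 0.8) // jitter in [0.8, 1.3)
	if delayFloat > float64(math.MaxInt64) {
		delay = time.Duration(math.MaxInt64)
	} else {
		delay = time.Duration(delayFloat)
	}
	if delay > maxRetryDelay {
		delay = maxRetryDelay
	}
	return delay
}

func main() {
	for try := int32(1); try <= 5; try++ {
		fmt.Printf("try %d: %v\n", try, calcDelay(800*time.Millisecond, 60*time.Second, try))
	}
}
```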


@ -0,0 +1,10 @@
# Breaking Changes
## v1.6.0
### Behavioral change to `DefaultAzureCredential` in IMDS managed identity scenarios
As of `azidentity` v1.6.0, `DefaultAzureCredential` makes a minor behavioral change when it uses IMDS managed
identity. It sends its first request to IMDS without the "Metadata" header, to expedite validating whether the endpoint
is available. This precedes the credential's first token request and is guaranteed to fail with a 400 error. This error
response can appear in logs but doesn't indicate authentication failed.


@ -1,5 +1,52 @@
# Release History
## 1.8.0 (2024-10-08)
### Other Changes
* `AzurePipelinesCredential` sets an additional OIDC request header so that it
receives a 401 instead of a 302 after presenting an invalid system access token
* Allow logging of debugging headers for `AzurePipelinesCredential` and include
them in error messages
## 1.8.0-beta.3 (2024-09-17)
### Features Added
* Added `ObjectID` type for `ManagedIdentityCredentialOptions.ID`
### Other Changes
* Removed redundant content from error messages
## 1.8.0-beta.2 (2024-08-06)
### Breaking Changes
* `NewManagedIdentityCredential` now returns an error when a user-assigned identity
is specified on a platform whose managed identity API doesn't support that.
`ManagedIdentityCredential.GetToken()` formerly logged a warning in these cases.
Returning an error instead prevents the credential authenticating an unexpected
identity, causing a client to act with unexpected privileges. The affected
platforms are:
* Azure Arc
* Azure ML (when a resource ID is specified; client IDs are supported)
* Cloud Shell
* Service Fabric
### Other Changes
* If `DefaultAzureCredential` receives a non-JSON response when probing IMDS before
attempting to authenticate a managed identity, it continues to the next credential
in the chain instead of immediately returning an error.
## 1.8.0-beta.1 (2024-07-17)
### Features Added
* Restored persistent token caching feature
### Breaking Changes
> These changes affect only code written against a beta version such as v1.7.0-beta.1
* Redesigned the persistent caching API. Encryption is now required in all cases
and persistent cache construction is separate from credential construction.
The `PersistentUserAuthentication` example in the package docs has been updated
to demonstrate the new API.
## 1.7.0 (2024-06-20)
### Features Added


@ -54,7 +54,7 @@ The `azidentity` module focuses on OAuth authentication with Microsoft Entra ID.
### DefaultAzureCredential
`DefaultAzureCredential` is appropriate for most apps that will be deployed to Azure. It combines common production credentials with development credentials. It attempts to authenticate via the following mechanisms in this order, stopping when one succeeds:
`DefaultAzureCredential` simplifies authentication while developing applications that deploy to Azure by combining credentials used in Azure hosting environments and credentials used in local development. In production, it's better to use a specific credential type so authentication is more predictable and easier to debug. `DefaultAzureCredential` attempts to authenticate via the following mechanisms in this order, stopping when one succeeds:
![DefaultAzureCredential authentication flow](img/mermaidjs/DefaultAzureCredentialAuthFlow.svg)
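The README keeps steering getting-started code toward `DefaultAzureCredential`. A minimal sketch of that pattern with the `armresources` client the README mentions; the subscription ID is a placeholder:

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// The credential chain (environment, workload identity, managed identity, CLI, ...)
	// is evaluated lazily on the first token request.
	client, err := armresources.NewResourceGroupsClient("<subscription ID>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}
```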
@ -126,12 +126,17 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
## Credential Types
### Authenticating Azure Hosted Applications
### Credential chains
|Credential|Usage
|-|-
|[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps
|[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials
### Authenticating Azure-Hosted Applications
|Credential|Usage
|-|-
|[EnvironmentCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)|Authenticate a service principal or user configured by environment variables
|[ManagedIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential)|Authenticate the managed identity of an Azure resource
|[WorkloadIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#WorkloadIdentityCredential)|Authenticate a workload identity on Kubernetes
@ -158,7 +163,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
|Credential|Usage
|-|-
|[AzureCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureCLICredential)|Authenticate as the user signed in to the Azure CLI
|[`AzureDeveloperCLICredential`](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureDeveloperCLICredential)|Authenticates as the user signed in to the Azure Developer CLI
|[AzureDeveloperCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureDeveloperCLICredential)|Authenticates as the user signed in to the Azure Developer CLI
## Environment Variables


@ -1,57 +1,40 @@
## Token caching in the Azure Identity client module
*Token caching* is a feature provided by the Azure Identity library that allows apps to:
Token caching helps apps:
- Improve their resilience and performance.
- Reduce the number of requests made to Microsoft Entra ID to obtain access tokens.
- Reduce the number of times the user is prompted to authenticate.
- Reduce the number of requests sent to Microsoft Entra ID to obtain access tokens.
- Reduce the number of times users are prompted to authenticate.
When an app needs to access a protected Azure resource, it typically needs to obtain an access token from Entra ID. Obtaining that token involves sending a request to Entra ID and may also involve prompting the user. Entra ID then validates the credentials provided in the request and issues an access token.
When an app needs to access a protected Azure resource, it typically needs to obtain an access token from Entra ID by sending an HTTP request and sometimes prompting a user to authenticate interactively. Credentials with caches (see [the below table](#credentials-supporting-token-caching) for a list) store access tokens either [in memory](#in-memory-token-caching) or, optionally, [on disk](#persistent-token-caching). These credentials return cached tokens whenever possible, to avoid unnecessary token requests or user interaction. Both cache implementations are safe for concurrent use.
Token caching, via the Azure Identity library, allows the app to store this access token [in memory](#in-memory-token-caching), where it's accessible to the current process, or [on disk](#persistent-token-caching) where it can be accessed across application or process invocations. The token can then be retrieved quickly and easily the next time the app needs to access the same resource. The app can avoid making another request to Entra ID, which reduces network traffic and improves resilience. Additionally, in scenarios where the app is authenticating users, token caching also avoids prompting the user each time new tokens are requested.
#### Caching can't be disabled
Whether a credential caches tokens isn't configurable. If a credential has a cache of either kind, it requests a new token only when it can't provide one from its cache. Azure SDK service clients have an additional, independent layer of in-memory token caching, to prevent redundant token requests. This cache works with any credential type, even a custom implementation defined outside the Azure SDK, and can't be disabled. Disabling token caching is therefore impossible when using Azure SDK clients or most `azidentity` credential types. However, in-memory caches can be cleared by constructing new credential and client instances.
### In-memory token caching
*In-memory token caching* is the default option provided by the Azure Identity library. This caching approach allows apps to store access tokens in memory. With in-memory token caching, the library first determines if a valid access token for the requested resource is already stored in memory. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. If a valid token isn't found, the library will automatically acquire a token by sending a request to Entra ID. The in-memory token cache provided by the Azure Identity library is thread-safe.
**Note:** When Azure Identity library credentials are used with Azure service libraries (for example, Azure Blob Storage), the in-memory token caching is active in the `Pipeline` layer as well. All `TokenCredential` implementations are supported there, including custom implementations external to the Azure Identity library.
#### Caching cannot be disabled
As there are many levels of caching, it's not possible disable in-memory caching. However, the in-memory cache may be cleared by creating a new credential instance.
Credential types that support caching store tokens in memory by default and require no configuration to do so. Each instance of these types has its own cache, and two credential instances never share an in-memory cache.
### Persistent token caching
> Only azidentity v1.5.0-beta versions support persistent token caching
Some credential types support opt-in persistent token caching (see [the below table](#credentials-supporting-token-caching) for a list). This feature enables credentials to store and retrieve tokens across process executions, so an application doesn't need to authenticate every time it runs.
*Persistent disk token caching* is an opt-in feature in the Azure Identity library. The feature allows apps to cache access tokens in an encrypted, persistent storage mechanism. As indicated in the following table, the storage mechanism differs across operating systems.
Persistent caches are encrypted at rest using a mechanism that depends on the operating system:
| Operating system | Storage mechanism |
| Operating system | Encryption facility |
|------------------|---------------------------------------|
| Linux | kernel key retention service (keyctl) |
| macOS | Keychain |
| Windows | DPAPI |
| Windows | Data Protection API (DPAPI) |
By default the token cache will protect any data which is persisted using the user data protection APIs available on the current platform.
However, there are cases where no data protection is available, and applications may choose to allow storing the token cache in an unencrypted state by setting `TokenCachePersistenceOptions.AllowUnencryptedStorage` to `true`. This allows a credential to fall back to unencrypted storage if it can't encrypt the cache. However, we do not recommend using this storage method due to its significantly lower security measures. In addition, tokens are not encrypted solely to the current user, which could potentially allow unauthorized access to the cache by individuals with machine access.
With persistent disk token caching enabled, the library first determines if a valid access token for the requested resource is already stored in the persistent cache. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. Additionally, the tokens are preserved across app runs, which:
- Makes the app more resilient to failures.
- Ensures the app can continue to function during an Entra ID outage or disruption.
- Avoids having to prompt users to authenticate each time the process is restarted.
>IMPORTANT! The token cache contains sensitive data and **MUST** be protected to prevent compromising accounts. All application decisions regarding the persistence of the token cache must consider that a breach of its content will fully compromise all the accounts it contains.
#### Example code
See the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.6.0-beta.2#pkg-overview) for example code demonstrating how to configure persistent caching and access cached data.
Persistent caching requires encryption. When the required encryption facility is unuseable, or the application is running on an unsupported OS, the persistent cache constructor returns an error. This doesn't mean that authentication is impossible, only that credentials can't persist authentication data and the application will need to reauthenticate the next time it runs. See the [package documentation][example] for example code showing how to configure persistent caching and access cached data.
### Credentials supporting token caching
The following table indicates the state of in-memory and persistent caching in each credential type.
**Note:** In-memory caching is activated by default. Persistent token caching needs to be enabled as shown in [this example](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.5.0-beta.1#example-package-PersistentCache).
**Note:** in-memory caching is enabled by default for every type supporting it. Persistent token caching must be enabled explicitly. See the [package documentation][user_example] for an example showing how to do this for credential types authenticating users. For types that authenticate service principals, set the `Cache` field on the constructor's options as shown in [this example][sp_example].
| Credential | In-memory token caching | Persistent token caching |
|--------------------------------|---------------------------------------------------------------------|--------------------------|
@ -66,6 +49,9 @@ The following table indicates the state of in-memory and persistent caching in e
| `EnvironmentCredential` | Supported | Not Supported |
| `InteractiveBrowserCredential` | Supported | Supported |
| `ManagedIdentityCredential` | Supported | Not Supported |
| `OnBehalfOfCredential` | Supported | Supported |
| `OnBehalfOfCredential` | Supported | Not Supported |
| `UsernamePasswordCredential` | Supported | Supported |
| `WorkloadIdentityCredential` | Supported | Supported |
[sp_example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentServicePrincipalAuthentication
[user_example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentUserAuthentication
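A short sketch of the opt-in persistent caching flow described above, assuming the `cache.New` constructor from the `azidentity/cache` subpackage and a `Cache` field on `ClientSecretCredentialOptions` (this diff shows the field on other credential option types); the tenant, client, and secret values are placeholders:

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache"
)

func main() {
	// Construct the persistent cache; this fails when the OS encryption facility is unavailable.
	c, err := cache.New(nil)
	if err != nil {
		log.Fatal(err)
	}
	// For service principal credentials, persistence is enabled by setting the Cache field.
	cred, err := azidentity.NewClientSecretCredential(
		"<tenant ID>", "<client ID>", "<client secret>",
		&azidentity.ClientSecretCredentialOptions{Cache: c},
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cred
}
```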


@ -234,7 +234,7 @@ azd auth token --output json --scope https://management.core.windows.net/.defaul
|---|---|---|
| AADSTS900023: Specified tenant identifier 'some tenant ID' is neither a valid DNS name, nor a valid external domain.|The `tenantID` argument to `NewAzurePipelinesCredential` is incorrect| Verify the tenant ID. It must identify the tenant of the user-assigned managed identity or service principal configured for the service connection.|
| No service connection found with identifier |The `serviceConnectionID` argument to `NewAzurePipelinesCredential` is incorrect| Verify the service connection ID. This parameter refers to the `resourceId` of the Azure Service Connection. It can also be found in the query string of the service connection's configuration in Azure DevOps. [Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/library/service-endpoints?view=azure-devops&tabs=yaml) has more information about service connections.|
|302 (Found) response from OIDC endpoint|The `systemAccessToken` argument to `NewAzurePipelinesCredential` is incorrect|Check pipeline configuration. This value comes from the predefined variable `System.AccessToken` [as described in Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken).|
|401 (Unauthorized) response from OIDC endpoint|The `systemAccessToken` argument to `NewAzurePipelinesCredential` is incorrect|Check pipeline configuration. This value comes from the predefined variable `System.AccessToken` [as described in Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken).|
## Get additional help


@ -2,5 +2,5 @@
"AssetsRepo": "Azure/azure-sdk-assets",
"AssetsRepoPrefixPath": "go",
"TagPrefix": "go/azidentity",
"Tag": "go/azidentity_087379b475"
"Tag": "go/azidentity_c55452bbf6"
}


@ -18,10 +18,10 @@ import (
var supportedAuthRecordVersions = []string{"1.0"}
// authenticationRecord is non-secret account information about an authenticated user that user credentials such as
// AuthenticationRecord is non-secret account information about an authenticated user that user credentials such as
// [DeviceCodeCredential] and [InteractiveBrowserCredential] can use to access previously cached authentication
// data. Call these credentials' Authenticate method to get an authenticationRecord for a user.
type authenticationRecord struct {
// data. Call these credentials' Authenticate method to get an AuthenticationRecord for a user.
type AuthenticationRecord struct {
// Authority is the URL of the authority that issued the token.
Authority string `json:"authority"`
@ -42,11 +42,11 @@ type authenticationRecord struct {
}
// UnmarshalJSON implements json.Unmarshaler for AuthenticationRecord
func (a *authenticationRecord) UnmarshalJSON(b []byte) error {
func (a *AuthenticationRecord) UnmarshalJSON(b []byte) error {
// Default unmarshaling is fine but we want to return an error if the record's version isn't supported i.e., we
// want to inspect the unmarshalled values before deciding whether to return an error. Unmarshaling a formally
// different type enables this by assigning all the fields without recursing into this method.
type r authenticationRecord
type r AuthenticationRecord
err := json.Unmarshal(b, (*r)(a))
if err != nil {
return err
@ -63,7 +63,7 @@ func (a *authenticationRecord) UnmarshalJSON(b []byte) error {
}
// account returns the AuthenticationRecord as an MSAL Account. The account is zero-valued when the AuthenticationRecord is zero-valued.
func (a *authenticationRecord) account() public.Account {
func (a *AuthenticationRecord) account() public.Account {
return public.Account{
Environment: a.Authority,
HomeAccountID: a.HomeAccountID,
@ -71,10 +71,10 @@ func (a *authenticationRecord) account() public.Account {
}
}
func newAuthenticationRecord(ar public.AuthResult) (authenticationRecord, error) {
func newAuthenticationRecord(ar public.AuthResult) (AuthenticationRecord, error) {
u, err := url.Parse(ar.IDToken.Issuer)
if err != nil {
return authenticationRecord{}, fmt.Errorf("Authenticate expected a URL issuer but got %q", ar.IDToken.Issuer)
return AuthenticationRecord{}, fmt.Errorf("Authenticate expected a URL issuer but got %q", ar.IDToken.Issuer)
}
tenant := ar.IDToken.TenantID
if tenant == "" {
@ -84,7 +84,7 @@ func newAuthenticationRecord(ar public.AuthResult) (authenticationRecord, error)
if username == "" {
username = ar.IDToken.UPN
}
return authenticationRecord{
return AuthenticationRecord{
Authority: fmt.Sprintf("%s://%s", u.Scheme, u.Host),
ClientID: ar.IDToken.Audience,
HomeAccountID: ar.Account.HomeAccountID,
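Because the record type is now exported, interactive credentials can hand it back to callers for reuse across runs. A rough sketch assuming `InteractiveBrowserCredential`'s `Authenticate` method and the corresponding `AuthenticationRecord` option field; the file path is arbitrary:

```go
package main

import (
	"context"
	"encoding/json"
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	cred, err := azidentity.NewInteractiveBrowserCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Authenticate returns a non-secret AuthenticationRecord describing the signed-in user.
	record, err := cred.Authenticate(context.TODO(), nil)
	if err != nil {
		log.Fatal(err)
	}
	// The record is JSON-serializable, so it can be stored and later passed back via
	// InteractiveBrowserCredentialOptions.AuthenticationRecord to skip a fresh login.
	b, err := json.Marshal(record)
	if err != nil {
		log.Fatal(err)
	}
	_ = os.WriteFile("auth_record.json", b, 0o600)
}
```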


@ -53,8 +53,14 @@ var (
errInvalidTenantID = errors.New("invalid tenantID. You can locate your tenantID by following the instructions listed here: https://learn.microsoft.com/partner-center/find-ids-and-domain-names")
)
// tokenCachePersistenceOptions contains options for persistent token caching
type tokenCachePersistenceOptions = internal.TokenCachePersistenceOptions
// Cache represents a persistent cache that makes authentication data available across processes.
// Construct one with [github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache.New]. This package's
// [persistent user authentication example] shows how to use a persistent cache to reuse user
// logins across application runs. For service principal credential types such as
// [ClientCertificateCredential], simply set the Cache field on the credential options.
//
// [persistent user authentication example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentUserAuthentication
type Cache = internal.Cache
// setAuthorityHost initializes the authority host for credentials. Precedence is:
// 1. cloud.Configuration.ActiveDirectoryAuthorityHost value set by user


@ -20,6 +20,8 @@ const (
credNameAzurePipelines = "AzurePipelinesCredential"
oidcAPIVersion = "7.1"
systemOIDCRequestURI = "SYSTEM_OIDCREQUESTURI"
xMsEdgeRef = "x-msedge-ref"
xVssE2eId = "x-vss-e2eid"
)
// AzurePipelinesCredential authenticates with workload identity federation in an Azure Pipeline. See
@ -40,6 +42,11 @@ type AzurePipelinesCredentialOptions struct {
// application is registered.
AdditionallyAllowedTenants []string
// Cache is a persistent cache the credential will use to store the tokens it acquires, making
// them available to other processes and credential instances. The default, zero value means the
// credential will store tokens in memory and not share them with any other credential instance.
Cache Cache
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
// private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
@ -81,8 +88,11 @@ func NewAzurePipelinesCredential(tenantID, clientID, serviceConnectionID, system
if options == nil {
options = &AzurePipelinesCredentialOptions{}
}
// these headers are useful to the DevOps team when debugging OIDC error responses
options.ClientOptions.Logging.AllowedHeaders = append(options.ClientOptions.Logging.AllowedHeaders, xMsEdgeRef, xVssE2eId)
caco := ClientAssertionCredentialOptions{
AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
Cache: options.Cache,
ClientOptions: options.ClientOptions,
DisableInstanceDiscovery: options.DisableInstanceDiscovery,
}
@ -108,33 +118,40 @@ func (a *AzurePipelinesCredential) getAssertion(ctx context.Context) (string, er
url := a.oidcURI + "?api-version=" + oidcAPIVersion + "&serviceConnectionId=" + a.connectionID
url, err := runtime.EncodeQueryParams(url)
if err != nil {
return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't encode OIDC URL: "+err.Error(), nil, nil)
return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't encode OIDC URL: "+err.Error(), nil)
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
if err != nil {
return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't create OIDC token request: "+err.Error(), nil, nil)
return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't create OIDC token request: "+err.Error(), nil)
}
req.Header.Set("Authorization", "Bearer "+a.systemAccessToken)
// instruct endpoint to return 401 instead of 302, if the system access token is invalid
req.Header.Set("X-TFS-FedAuthRedirect", "Suppress")
res, err := doForClient(a.cred.client.azClient, req)
if err != nil {
return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't send OIDC token request: "+err.Error(), nil, nil)
return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't send OIDC token request: "+err.Error(), nil)
}
if res.StatusCode != http.StatusOK {
msg := res.Status + " response from the OIDC endpoint. Check service connection ID and Pipeline configuration"
msg := res.Status + " response from the OIDC endpoint. Check service connection ID and Pipeline configuration."
for _, h := range []string{xMsEdgeRef, xVssE2eId} {
if v := res.Header.Get(h); v != "" {
msg += fmt.Sprintf("\n%s: %s", h, v)
}
}
// include the response because its body, if any, probably contains an error message.
// OK responses aren't included with errors because they probably contain secrets
return "", newAuthenticationFailedError(credNameAzurePipelines, msg, res, nil)
return "", newAuthenticationFailedError(credNameAzurePipelines, msg, res)
}
b, err := runtime.Payload(res)
if err != nil {
return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't read OIDC response content: "+err.Error(), nil, nil)
return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't read OIDC response content: "+err.Error(), nil)
}
var r struct {
OIDCToken string `json:"oidcToken"`
}
err = json.Unmarshal(b, &r)
if err != nil {
return "", newAuthenticationFailedError(credNameAzurePipelines, "unexpected response from OIDC endpoint", nil, nil)
return "", newAuthenticationFailedError(credNameAzurePipelines, "unexpected response from OIDC endpoint", nil)
}
return r.OIDCToken, nil
}
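A sketch of typical construction in a pipeline step, assuming the environment variable names shown; only `SYSTEM_ACCESSTOKEN` corresponds to the `System.AccessToken` mapping in the ci.yml change below, the others are placeholders:

```go
package main

import (
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// SYSTEM_ACCESSTOKEN must be mapped into the step's environment,
	// e.g. SYSTEM_ACCESSTOKEN: $(System.AccessToken).
	cred, err := azidentity.NewAzurePipelinesCredential(
		os.Getenv("AZURE_TENANT_ID"), // placeholder variable names
		os.Getenv("AZURE_CLIENT_ID"),
		os.Getenv("AZURE_SERVICE_CONNECTION_ID"),
		os.Getenv("SYSTEM_ACCESSTOKEN"),
		nil,
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cred
}
```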


@ -113,11 +113,19 @@ func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.Token
if err != nil {
// return credentialUnavailableError iff all sources did so; return AuthenticationFailedError otherwise
msg := createChainedErrorMessage(errs)
if errors.As(err, &unavailableErr) {
var authFailedErr *AuthenticationFailedError
switch {
case errors.As(err, &authFailedErr):
err = newAuthenticationFailedError(c.name, msg, authFailedErr.RawResponse)
if af, ok := err.(*AuthenticationFailedError); ok {
// stop Error() printing the response again; it's already in msg
af.omitResponse = true
}
case errors.As(err, &unavailableErr):
err = newCredentialUnavailableError(c.name, msg)
} else {
default:
res := getResponseFromError(err)
err = newAuthenticationFailedError(c.name, msg, res, err)
err = newAuthenticationFailedError(c.name, msg, res)
}
}
return token, err
@ -126,7 +134,7 @@ func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.Token
func createChainedErrorMessage(errs []error) string {
msg := "failed to acquire a token.\nAttempted credentials:"
for _, err := range errs {
msg += fmt.Sprintf("\n\t%s", err.Error())
msg += fmt.Sprintf("\n\t%s", strings.ReplaceAll(err.Error(), "\n", "\n\t\t"))
}
return msg
}
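
As an illustrative sketch of the chain whose error message is assembled above (placeholder sources, not taken from the vendored code), each credential is tried in order and, when all fail, the indented per-source messages are aggregated into one error.

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	managed, err := azidentity.NewManagedIdentityCredential(nil)
	if err != nil {
		panic(err)
	}
	cli, err := azidentity.NewAzureCLICredential(nil)
	if err != nil {
		panic(err)
	}
	// when every source fails, GetToken returns one error listing each
	// attempted credential with the indentation added by createChainedErrorMessage
	chain, err := azidentity.NewChainedTokenCredential([]azcore.TokenCredential{managed, cli}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T ready\n", chain)
}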

View file

@ -26,16 +26,27 @@ extends:
parameters:
CloudConfig:
Public:
ServiceConnection: azure-sdk-tests
SubscriptionConfigurationFilePaths:
- eng/common/TestResources/sub-config/AzurePublicMsft.json
SubscriptionConfigurations:
- $(sub-config-azure-cloud-test-resources)
- $(sub-config-identity-test-resources)
EnvVars:
SYSTEM_ACCESSTOKEN: $(System.AccessToken)
EnableRaceDetector: true
RunLiveTests: true
ServiceDirectory: azidentity
UsePipelineProxy: false
${{ if endsWith(variables['Build.DefinitionName'], 'weekly') }}:
PreSteps:
- task: AzureCLI@2
displayName: Set OIDC token
inputs:
addSpnToEnvironment: true
azureSubscription: azure-sdk-tests
inlineScript: Write-Host "##vso[task.setvariable variable=OIDC_TOKEN;]$($env:idToken)"
scriptLocation: inlineScript
scriptType: pscore
MatrixConfigs:
- Name: managed_identity_matrix
GenerateVMJobs: true

View file

@ -37,14 +37,16 @@ type ClientAssertionCredentialOptions struct {
// application is registered.
AdditionallyAllowedTenants []string
// Cache is a persistent cache the credential will use to store the tokens it acquires, making
// them available to other processes and credential instances. The default, zero value means the
// credential will store tokens in memory and not share them with any other credential instance.
Cache Cache
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
// private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
// tokenCachePersistenceOptions enables persistent token caching when not nil.
tokenCachePersistenceOptions *tokenCachePersistenceOptions
}
// NewClientAssertionCredential constructs a ClientAssertionCredential. The getAssertion function must be thread safe. Pass nil for options to accept defaults.
@ -61,10 +63,10 @@ func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(c
},
)
msalOpts := confidentialClientOptions{
AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
ClientOptions: options.ClientOptions,
DisableInstanceDiscovery: options.DisableInstanceDiscovery,
tokenCachePersistenceOptions: options.tokenCachePersistenceOptions,
AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
Cache: options.Cache,
ClientOptions: options.ClientOptions,
DisableInstanceDiscovery: options.DisableInstanceDiscovery,
}
c, err := newConfidentialClient(tenantID, clientID, credNameAssertion, cred, msalOpts)
if err != nil {

View file
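
A minimal usage sketch for the options shown above (editorial; the token file path and IDs are placeholders): the getAssertion callback must be thread safe, and the new Cache field can be set to share acquired tokens across credential instances.

package main

import (
	"context"
	"os"
	"strings"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// rereads an illustrative federated token file on each call
	getAssertion := func(ctx context.Context) (string, error) {
		b, err := os.ReadFile("/var/run/secrets/tokens/federated-token")
		return strings.TrimSpace(string(b)), err
	}
	cred, err := azidentity.NewClientAssertionCredential(
		"<tenant-id>", "<client-id>", getAssertion,
		&azidentity.ClientAssertionCredentialOptions{
			// Cache could be set here to persist tokens outside this process
		},
	)
	if err != nil {
		panic(err)
	}
	_ = cred
}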

@ -31,6 +31,11 @@ type ClientCertificateCredentialOptions struct {
// application is registered.
AdditionallyAllowedTenants []string
// Cache is a persistent cache the credential will use to store the tokens it acquires, making
// them available to other processes and credential instances. The default, zero value means the
// credential will store tokens in memory and not share them with any other credential instance.
Cache Cache
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
// private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
@ -41,9 +46,6 @@ type ClientCertificateCredentialOptions struct {
// header of each token request's JWT. This is required for Subject Name/Issuer (SNI) authentication.
// Defaults to False.
SendCertificateChain bool
// tokenCachePersistenceOptions enables persistent token caching when not nil.
tokenCachePersistenceOptions *tokenCachePersistenceOptions
}
// ClientCertificateCredential authenticates a service principal with a certificate.
@ -65,11 +67,11 @@ func NewClientCertificateCredential(tenantID string, clientID string, certs []*x
return nil, err
}
msalOpts := confidentialClientOptions{
AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
ClientOptions: options.ClientOptions,
DisableInstanceDiscovery: options.DisableInstanceDiscovery,
SendX5C: options.SendCertificateChain,
tokenCachePersistenceOptions: options.tokenCachePersistenceOptions,
AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
Cache: options.Cache,
ClientOptions: options.ClientOptions,
DisableInstanceDiscovery: options.DisableInstanceDiscovery,
SendX5C: options.SendCertificateChain,
}
c, err := newConfidentialClient(tenantID, clientID, credNameCert, cred, msalOpts)
if err != nil {

View file
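
For reference, a hedged sketch of constructing this credential from a PEM file (the certificate path and IDs are placeholders, not part of this commit); SendCertificateChain enables the SNI behavior described in the options above.

package main

import (
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	data, err := os.ReadFile("cert.pem") // placeholder certificate path
	if err != nil {
		panic(err)
	}
	certs, key, err := azidentity.ParseCertificates(data, nil)
	if err != nil {
		panic(err)
	}
	cred, err := azidentity.NewClientCertificateCredential(
		"<tenant-id>", "<client-id>", certs, key,
		&azidentity.ClientCertificateCredentialOptions{
			SendCertificateChain: true, // include the chain in request JWTs (SNI)
		},
	)
	if err != nil {
		panic(err)
	}
	_ = cred
}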

@ -32,8 +32,10 @@ type ClientSecretCredentialOptions struct {
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
// tokenCachePersistenceOptions enables persistent token caching when not nil.
tokenCachePersistenceOptions *tokenCachePersistenceOptions
// Cache is a persistent cache the credential will use to store the tokens it acquires, making
// them available to other processes and credential instances. The default, zero value means the
// credential will store tokens in memory and not share them with any other credential instance.
Cache Cache
}
// ClientSecretCredential authenticates an application with a client secret.
@ -51,10 +53,10 @@ func NewClientSecretCredential(tenantID string, clientID string, clientSecret st
return nil, err
}
msalOpts := confidentialClientOptions{
AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
ClientOptions: options.ClientOptions,
DisableInstanceDiscovery: options.DisableInstanceDiscovery,
tokenCachePersistenceOptions: options.tokenCachePersistenceOptions,
AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
Cache: options.Cache,
ClientOptions: options.ClientOptions,
DisableInstanceDiscovery: options.DisableInstanceDiscovery,
}
c, err := newConfidentialClient(tenantID, clientID, credNameSecret, cred, msalOpts)
if err != nil {

View file
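
A short illustrative sketch of the constructor whose options gained the Cache field above; the environment variable names are conventional placeholders rather than requirements of the API.

package main

import (
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	cred, err := azidentity.NewClientSecretCredential(
		os.Getenv("AZURE_TENANT_ID"),
		os.Getenv("AZURE_CLIENT_ID"),
		os.Getenv("AZURE_CLIENT_SECRET"),
		nil, // nil options: in-memory token caching, public cloud defaults
	)
	if err != nil {
		panic(err)
	}
	_ = cred
}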

@ -29,8 +29,8 @@ type confidentialClientOptions struct {
AdditionallyAllowedTenants []string
// Assertion for on-behalf-of authentication
Assertion string
Cache Cache
DisableInstanceDiscovery, SendX5C bool
tokenCachePersistenceOptions *tokenCachePersistenceOptions
}
// confidentialClient wraps the MSAL confidential client
@ -107,12 +107,12 @@ func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenReque
}
}
if err != nil {
// We could get a credentialUnavailableError from managed identity authentication because in that case the error comes from our code.
// We return it directly because it affects the behavior of credential chains. Otherwise, we return AuthenticationFailedError.
var unavailableErr credentialUnavailable
if !errors.As(err, &unavailableErr) {
res := getResponseFromError(err)
err = newAuthenticationFailedError(c.name, err.Error(), res, err)
var (
authFailedErr *AuthenticationFailedError
unavailableErr credentialUnavailable
)
if !(errors.As(err, &unavailableErr) || errors.As(err, &authFailedErr)) {
err = newAuthenticationFailedErrorFromMSAL(c.name, err)
}
} else {
msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", c.name, strings.Join(ar.GrantedScopes, ", "))
@ -145,7 +145,7 @@ func (c *confidentialClient) client(tro policy.TokenRequestOptions) (msalConfide
}
func (c *confidentialClient) newMSALClient(enableCAE bool) (msalConfidentialClient, error) {
cache, err := internal.NewCache(c.opts.tokenCachePersistenceOptions, enableCAE)
cache, err := internal.ExportReplace(c.opts.Cache, enableCAE)
if err != nil {
return nil, err
}

View file

@ -36,10 +36,13 @@ type DefaultAzureCredentialOptions struct {
TenantID string
}
// DefaultAzureCredential is a default credential chain for applications that will deploy to Azure.
// It combines credentials suitable for deployment with credentials suitable for local development.
// It attempts to authenticate with each of these credential types, in the following order, stopping
// when one provides a token:
// DefaultAzureCredential simplifies authentication while developing applications that deploy to Azure by
// combining credentials used in Azure hosting environments and credentials used in local development. In
// production, it's better to use a specific credential type so authentication is more predictable and easier
// to debug.
//
// DefaultAzureCredential attempts to authenticate with each of these credential types, in the following order,
// stopping when one provides a token:
//
// - [EnvironmentCredential]
// - [WorkloadIdentityCredential], if environment variable configuration is set by the Azure workload

View file
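
To make the revised doc comment concrete, a minimal sketch of the chain in use (the ARM scope is an illustrative example): whichever credential the environment supports provides the token, which is why the comment recommends a specific credential type for production.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// tries environment variables, workload identity, managed identity,
	// then developer tool logins, stopping at the first source that works
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		panic(err)
	}
	tk, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("token acquired, expires:", tk.ExpiresOn)
}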

@ -25,18 +25,26 @@ type DeviceCodeCredentialOptions struct {
// tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant.
AdditionallyAllowedTenants []string
// authenticationRecord returned by a call to a credential's Authenticate method. Set this option
// AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option
// to enable the credential to use data from a previous authentication.
authenticationRecord authenticationRecord
AuthenticationRecord AuthenticationRecord
// ClientID is the ID of the application users will authenticate to.
// Defaults to the ID of an Azure development application.
// Cache is a persistent cache the credential will use to store the tokens it acquires, making
// them available to other processes and credential instances. The default, zero value means the
// credential will store tokens in memory and not share them with any other credential instance.
Cache Cache
// ClientID is the ID of the application to which users will authenticate. When not set, users
// will authenticate to an Azure development application, which isn't recommended for production
// scenarios. In production, developers should instead register their applications and assign
// appropriate roles. See https://aka.ms/azsdk/identity/AppRegistrationAndRoleAssignment for more
// information.
ClientID string
// disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate.
// When this option is true, GetToken will return authenticationRequiredError when user interaction is necessary
// DisableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate.
// When this option is true, GetToken will return AuthenticationRequiredError when user interaction is necessary
// to acquire a token.
disableAutomaticAuthentication bool
DisableAutomaticAuthentication bool
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
// private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
@ -49,9 +57,6 @@ type DeviceCodeCredentialOptions struct {
// applications.
TenantID string
// tokenCachePersistenceOptions enables persistent token caching when not nil.
tokenCachePersistenceOptions *tokenCachePersistenceOptions
// UserPrompt controls how the credential presents authentication instructions. The credential calls
// this function with authentication details when it receives a device code. By default, the credential
// prints these details to stdout.
@ -101,12 +106,12 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC
cp.init()
msalOpts := publicClientOptions{
AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants,
Cache: cp.Cache,
ClientOptions: cp.ClientOptions,
DeviceCodePrompt: cp.UserPrompt,
DisableAutomaticAuthentication: cp.disableAutomaticAuthentication,
DisableAutomaticAuthentication: cp.DisableAutomaticAuthentication,
DisableInstanceDiscovery: cp.DisableInstanceDiscovery,
Record: cp.authenticationRecord,
TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions,
Record: cp.AuthenticationRecord,
}
c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameDeviceCode, msalOpts)
if err != nil {
@ -116,8 +121,9 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC
return &DeviceCodeCredential{client: c}, nil
}
// Authenticate a user via the device code flow. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord.
func (c *DeviceCodeCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) {
// Authenticate prompts a user to log in via the device code flow. Subsequent
// GetToken calls will automatically use the returned AuthenticationRecord.
func (c *DeviceCodeCredential) Authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (AuthenticationRecord, error) {
var err error
ctx, endSpan := runtime.StartSpan(ctx, credNameDeviceCode+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil)
defer func() { endSpan(err) }()

View file
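
A hedged sketch of the newly exported Authenticate method and DisableAutomaticAuthentication option shown above (editorial example; scopes and storage of the record are left to the application).

package main

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	cred, err := azidentity.NewDeviceCodeCredential(&azidentity.DeviceCodeCredentialOptions{
		// with automatic authentication disabled, GetToken returns
		// AuthenticationRequiredError instead of prompting the user
		DisableAutomaticAuthentication: true,
	})
	if err != nil {
		panic(err)
	}
	// Authenticate prompts once; the returned record can be persisted and set
	// as AuthenticationRecord on a future credential to skip the prompt
	record, err := cred.Authenticate(context.Background(), nil)
	if err != nil {
		panic(err)
	}
	_ = record
}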

@ -38,18 +38,30 @@ type AuthenticationFailedError struct {
// RawResponse is the HTTP response motivating the error, if available.
RawResponse *http.Response
credType string
message string
err error
credType, message string
omitResponse bool
}
func newAuthenticationFailedError(credType string, message string, resp *http.Response, err error) error {
return &AuthenticationFailedError{credType: credType, message: message, RawResponse: resp, err: err}
func newAuthenticationFailedError(credType, message string, resp *http.Response) error {
return &AuthenticationFailedError{credType: credType, message: message, RawResponse: resp}
}
// newAuthenticationFailedErrorFromMSAL creates an AuthenticationFailedError from an MSAL error.
// If the error is an MSAL CallErr, the new error includes an HTTP response and not the MSAL error
// message, because that message is redundant given the response. If the original error isn't a
// CallErr, the returned error incorporates its message.
func newAuthenticationFailedErrorFromMSAL(credType string, err error) error {
msg := ""
res := getResponseFromError(err)
if res == nil {
msg = err.Error()
}
return newAuthenticationFailedError(credType, msg, res)
}
// Error implements the error interface. Note that the message contents are not contractual and can change over time.
func (e *AuthenticationFailedError) Error() string {
if e.RawResponse == nil {
if e.RawResponse == nil || e.omitResponse {
return e.credType + ": " + e.message
}
msg := &bytes.Buffer{}
@ -62,7 +74,7 @@ func (e *AuthenticationFailedError) Error() string {
fmt.Fprintln(msg, "Request information not available")
}
fmt.Fprintln(msg, "--------------------------------------------------------------------------------")
fmt.Fprintf(msg, "RESPONSE %s\n", e.RawResponse.Status)
fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status)
fmt.Fprintln(msg, "--------------------------------------------------------------------------------")
body, err := runtime.Payload(e.RawResponse)
switch {
@ -109,17 +121,17 @@ func (*AuthenticationFailedError) NonRetriable() {
var _ errorinfo.NonRetriable = (*AuthenticationFailedError)(nil)
// authenticationRequiredError indicates a credential's Authenticate method must be called to acquire a token
// AuthenticationRequiredError indicates a credential's Authenticate method must be called to acquire a token
// because the credential requires user interaction and is configured not to request it automatically.
type authenticationRequiredError struct {
type AuthenticationRequiredError struct {
credentialUnavailableError
// TokenRequestOptions for the required token. Pass this to the credential's Authenticate method.
TokenRequestOptions policy.TokenRequestOptions
}
func newauthenticationRequiredError(credType string, tro policy.TokenRequestOptions) error {
return &authenticationRequiredError{
func newAuthenticationRequiredError(credType string, tro policy.TokenRequestOptions) error {
return &AuthenticationRequiredError{
credentialUnavailableError: credentialUnavailableError{
credType + " can't acquire a token without user interaction. Call Authenticate to authenticate a user interactively",
},
@ -128,8 +140,8 @@ func newauthenticationRequiredError(credType string, tro policy.TokenRequestOpti
}
var (
_ credentialUnavailable = (*authenticationRequiredError)(nil)
_ errorinfo.NonRetriable = (*authenticationRequiredError)(nil)
_ credentialUnavailable = (*AuthenticationRequiredError)(nil)
_ errorinfo.NonRetriable = (*AuthenticationRequiredError)(nil)
)
type credentialUnavailable interface {

View file
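
To show how the now-exported AuthenticationRequiredError is meant to be consumed, a hedged sketch (the credential type and scope are illustrative): the error carries the TokenRequestOptions the caller wanted, which can be replayed through Authenticate when user interaction is acceptable.

package main

import (
	"context"
	"errors"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	ctx := context.Background()
	cred, err := azidentity.NewInteractiveBrowserCredential(&azidentity.InteractiveBrowserCredentialOptions{
		DisableAutomaticAuthentication: true,
	})
	if err != nil {
		panic(err)
	}
	_, err = cred.GetToken(ctx, policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	var are *azidentity.AuthenticationRequiredError
	if errors.As(err, &are) {
		// replay the requested options through Authenticate at a convenient time
		_, err = cred.Authenticate(ctx, &are.TokenRequestOptions)
	}
	if err != nil {
		panic(err)
	}
}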

@ -1,60 +0,0 @@
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1 h1:ODs3brnqQM99Tq1PffODpAViYv3Bf8zOg464MU7p5ew=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/keybase/dbus v0.0.0-20220506165403-5aa21ea2c23a/go.mod h1:YPNKjjE7Ubp9dTbnWvsP3HT+hYnY6TfXzubYTBeUxc8=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View file

@ -24,18 +24,26 @@ type InteractiveBrowserCredentialOptions struct {
// tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant.
AdditionallyAllowedTenants []string
// authenticationRecord returned by a call to a credential's Authenticate method. Set this option
// AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option
// to enable the credential to use data from a previous authentication.
authenticationRecord authenticationRecord
AuthenticationRecord AuthenticationRecord
// ClientID is the ID of the application users will authenticate to.
// Defaults to the ID of an Azure development application.
// Cache is a persistent cache the credential will use to store the tokens it acquires, making
// them available to other processes and credential instances. The default, zero value means the
// credential will store tokens in memory and not share them with any other credential instance.
Cache Cache
// ClientID is the ID of the application to which users will authenticate. When not set, users
// will authenticate to an Azure development application, which isn't recommended for production
// scenarios. In production, developers should instead register their applications and assign
// appropriate roles. See https://aka.ms/azsdk/identity/AppRegistrationAndRoleAssignment for more
// information.
ClientID string
// disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate.
// When this option is true, GetToken will return authenticationRequiredError when user interaction is necessary
// DisableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate.
// When this option is true, GetToken will return AuthenticationRequiredError when user interaction is necessary
// to acquire a token.
disableAutomaticAuthentication bool
DisableAutomaticAuthentication bool
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
// private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
@ -54,9 +62,6 @@ type InteractiveBrowserCredentialOptions struct {
// TenantID is the Microsoft Entra tenant the credential authenticates in. Defaults to the
// "organizations" tenant, which can authenticate work and school accounts.
TenantID string
// tokenCachePersistenceOptions enables persistent token caching when not nil.
tokenCachePersistenceOptions *tokenCachePersistenceOptions
}
func (o *InteractiveBrowserCredentialOptions) init() {
@ -82,13 +87,13 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption
cp.init()
msalOpts := publicClientOptions{
AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants,
Cache: cp.Cache,
ClientOptions: cp.ClientOptions,
DisableAutomaticAuthentication: cp.disableAutomaticAuthentication,
DisableAutomaticAuthentication: cp.DisableAutomaticAuthentication,
DisableInstanceDiscovery: cp.DisableInstanceDiscovery,
LoginHint: cp.LoginHint,
Record: cp.authenticationRecord,
Record: cp.AuthenticationRecord,
RedirectURL: cp.RedirectURL,
TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions,
}
c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameBrowser, msalOpts)
if err != nil {
@ -97,8 +102,9 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption
return &InteractiveBrowserCredential{client: c}, nil
}
// Authenticate a user via the default browser. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord.
func (c *InteractiveBrowserCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) {
// Authenticate opens the default browser so a user can log in. Subsequent
// GetToken calls will automatically use the returned AuthenticationRecord.
func (c *InteractiveBrowserCredential) Authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (AuthenticationRecord, error) {
var err error
ctx, endSpan := runtime.StartSpan(ctx, credNameBrowser+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil)
defer func() { endSpan(err) }()

View file
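
A sketch of reusing the AuthenticationRecord returned by the exported Authenticate method (editorial; the file path is a placeholder): the record contains no secrets, so it can be stored and set as AuthenticationRecord, typically together with Cache, on a later credential to avoid prompting again.

package main

import (
	"context"
	"encoding/json"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	ctx := context.Background()
	cred, err := azidentity.NewInteractiveBrowserCredential(nil)
	if err != nil {
		panic(err)
	}
	record, err := cred.Authenticate(ctx, nil)
	if err != nil {
		panic(err)
	}
	// persist the record so a later run can skip the login prompt
	b, err := json.Marshal(record)
	if err != nil {
		panic(err)
	}
	if err := os.WriteFile("auth_record.json", b, 0600); err != nil {
		panic(err)
	}
}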

@ -0,0 +1,86 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package internal
import (
"sync"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
)
// Cache represents a persistent cache that makes authentication data available across processes.
// Construct one with [github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache.New]. This package's
// [persistent user authentication example] shows how to use a persistent cache to reuse user
// logins across application runs. For service principal credential types such as
// [ClientCertificateCredential], simply set the Cache field on the credential options.
//
// [persistent user authentication example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentUserAuthentication
type Cache struct {
// impl is a pointer so a Cache can carry persistent state across copies
impl *impl
}
// impl is a Cache's private implementation
type impl struct {
// factory constructs storage implementations
factory func(bool) (cache.ExportReplace, error)
// cae and noCAE are previously constructed storage implementations. CAE
// and non-CAE tokens must be stored separately because MSAL's cache doesn't
// observe token claims. If a single storage implementation held both kinds
// of tokens, it could create a reauthentication or error loop by returning
// a non-CAE token lacking a required claim.
cae, noCAE cache.ExportReplace
// mu synchronizes around cae and noCAE
mu *sync.RWMutex
}
func (i *impl) exportReplace(cae bool) (cache.ExportReplace, error) {
if i == nil {
// zero-value Cache: return a nil ExportReplace and MSAL will cache in memory
return nil, nil
}
var (
err error
xr cache.ExportReplace
)
i.mu.RLock()
xr = i.cae
if !cae {
xr = i.noCAE
}
i.mu.RUnlock()
if xr != nil {
return xr, nil
}
i.mu.Lock()
defer i.mu.Unlock()
if cae {
if i.cae == nil {
if xr, err = i.factory(cae); err == nil {
i.cae = xr
}
}
return i.cae, err
}
if i.noCAE == nil {
if xr, err = i.factory(cae); err == nil {
i.noCAE = xr
}
}
return i.noCAE, err
}
// NewCache is the constructor for Cache. It takes a factory instead of an instance
// because it doesn't know whether the Cache will store both CAE and non-CAE tokens.
func NewCache(factory func(cae bool) (cache.ExportReplace, error)) Cache {
return Cache{&impl{factory: factory, mu: &sync.RWMutex{}}}
}
// ExportReplace returns an implementation satisfying MSAL's ExportReplace interface.
// It's a function instead of a method on Cache so packages in azidentity and
// azidentity/cache can call it while applications can't. "cae" declares whether the
// caller intends this implementation to store CAE tokens.
func ExportReplace(c Cache, cae bool) (cache.ExportReplace, error) {
return c.impl.exportReplace(cae)
}
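
For orientation, a hedged sketch of how a sibling package inside the azidentity module tree (for example the azidentity/cache module) could wire a storage factory into this constructor; the package name and helper are hypothetical, and a factory returning a nil ExportReplace simply leaves MSAL caching in memory, as the comment in exportReplace notes.

package cache

import (
	azinternal "github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal"
	msalcache "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
)

// newInMemoryCache is a hypothetical helper: the nil ExportReplace returned by
// the factory makes MSAL keep tokens in memory for both CAE and non-CAE requests.
func newInMemoryCache() azinternal.Cache {
	factory := func(cae bool) (msalcache.ExportReplace, error) {
		return nil, nil
	}
	return azinternal.NewCache(factory)
}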

View file

@ -1,18 +0,0 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package internal
// TokenCachePersistenceOptions contains options for persistent token caching
type TokenCachePersistenceOptions struct {
// AllowUnencryptedStorage controls whether the cache should fall back to storing its data in plain text
// when encryption isn't possible. Setting this true doesn't disable encryption. The cache always attempts
// encryption before falling back to plaintext storage.
AllowUnencryptedStorage bool
// Name identifies the cache. Set this to isolate data from other applications.
Name string
}

View file

@ -1,31 +0,0 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package internal
import (
"errors"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
)
var errMissingImport = errors.New("import github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache to enable persistent caching")
// NewCache constructs a persistent token cache when "o" isn't nil. Applications that intend to
// use a persistent cache must first import the cache module, which will replace this function
// with a platform-specific implementation.
var NewCache = func(o *TokenCachePersistenceOptions, enableCAE bool) (cache.ExportReplace, error) {
if o == nil {
return nil, nil
}
return nil, errMissingImport
}
// CacheFilePath returns the path to the cache file for the given name.
// Defining it in this package makes it available to azidentity tests.
var CacheFilePath = func(name string) (string, error) {
return "", errMissingImport
}

View file

@ -143,6 +143,9 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag
if endpoint, ok := os.LookupEnv(identityEndpoint); ok {
if _, ok := os.LookupEnv(identityHeader); ok {
if _, ok := os.LookupEnv(identityServerThumbprint); ok {
if options.ID != nil {
return nil, errors.New("the Service Fabric API doesn't support specifying a user-assigned managed identity at runtime")
}
env = "Service Fabric"
c.endpoint = endpoint
c.msiType = msiTypeServiceFabric
@ -152,6 +155,9 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag
c.msiType = msiTypeAppService
}
} else if _, ok := os.LookupEnv(arcIMDSEndpoint); ok {
if options.ID != nil {
return nil, errors.New("the Azure Arc API doesn't support specifying a user-assigned managed identity at runtime")
}
env = "Azure Arc"
c.endpoint = endpoint
c.msiType = msiTypeAzureArc
@ -159,9 +165,15 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag
} else if endpoint, ok := os.LookupEnv(msiEndpoint); ok {
c.endpoint = endpoint
if _, ok := os.LookupEnv(msiSecret); ok {
if options.ID != nil && options.ID.idKind() != miClientID {
return nil, errors.New("the Azure ML API supports specifying a user-assigned managed identity by client ID only")
}
env = "Azure ML"
c.msiType = msiTypeAzureML
} else {
if options.ID != nil {
return nil, errors.New("the Cloud Shell API doesn't support user-assigned managed identities")
}
env = "Cloud Shell"
c.msiType = msiTypeCloudShell
}
@ -207,9 +219,10 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
defer cancel()
cx = policy.WithRetryOptions(cx, policy.RetryOptions{MaxRetries: -1})
req, err := azruntime.NewRequest(cx, http.MethodGet, c.endpoint)
if err == nil {
_, err = c.azClient.Pipeline().Do(req)
if err != nil {
return azcore.AccessToken{}, fmt.Errorf("failed to create IMDS probe request: %s", err)
}
res, err := c.azClient.Pipeline().Do(req)
if err != nil {
msg := err.Error()
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
@ -217,7 +230,16 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
}
return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, msg)
}
// send normal token requests from now on because something responded
// because IMDS always responds with JSON, assume a non-JSON response is from something else, such
// as a proxy, and return credentialUnavailableError so DefaultAzureCredential continues iterating
b, err := azruntime.Payload(res)
if err != nil {
return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, fmt.Sprintf("failed to read IMDS probe response: %s", err))
}
if !json.Valid(b) {
return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, "unexpected response to IMDS probe")
}
// send normal token requests from now on because IMDS responded
c.probeIMDS = false
}
@ -228,7 +250,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
resp, err := c.azClient.Pipeline().Do(msg)
if err != nil {
return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil, err)
return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil)
}
if azruntime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) {
@ -239,7 +261,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
switch resp.StatusCode {
case http.StatusBadRequest:
if id != nil {
return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp, nil)
return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp)
}
msg := "failed to authenticate a system assigned identity"
if body, err := azruntime.Payload(resp); err == nil && len(body) > 0 {
@ -256,7 +278,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
}
}
return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "authentication failed", resp, nil)
return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "", resp)
}
func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.AccessToken, error) {
@ -284,10 +306,10 @@ func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.Ac
if expiresOn, err := strconv.Atoi(v); err == nil {
return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(expiresOn), 0).UTC()}, nil
}
return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "unexpected expires_on value: "+v, res, nil)
return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "unexpected expires_on value: "+v, res)
default:
msg := fmt.Sprintf("unsupported type received in expires_on: %T, %v", v, v)
return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, msg, res, nil)
return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, msg, res)
}
}
@ -302,15 +324,15 @@ func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id Manage
key, err := c.getAzureArcSecretKey(ctx, scopes)
if err != nil {
msg := fmt.Sprintf("failed to retreive secret key from the identity endpoint: %v", err)
return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil, err)
return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil)
}
return c.createAzureArcAuthRequest(ctx, id, scopes, key)
return c.createAzureArcAuthRequest(ctx, scopes, key)
case msiTypeAzureML:
return c.createAzureMLAuthRequest(ctx, id, scopes)
case msiTypeServiceFabric:
return c.createServiceFabricAuthRequest(ctx, id, scopes)
return c.createServiceFabricAuthRequest(ctx, scopes)
case msiTypeCloudShell:
return c.createCloudShellAuthRequest(ctx, id, scopes)
return c.createCloudShellAuthRequest(ctx, scopes)
default:
return nil, newCredentialUnavailableError(credNameManagedIdentity, "managed identity isn't supported in this environment")
}
@ -323,13 +345,16 @@ func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id Ma
}
request.Raw().Header.Set(headerMetadata, "true")
q := request.Raw().URL.Query()
q.Add("api-version", imdsAPIVersion)
q.Add("resource", strings.Join(scopes, " "))
q.Set("api-version", imdsAPIVersion)
q.Set("resource", strings.Join(scopes, " "))
if id != nil {
if id.idKind() == miResourceID {
q.Add(msiResID, id.String())
} else {
q.Add(qpClientID, id.String())
switch id.idKind() {
case miClientID:
q.Set(qpClientID, id.String())
case miObjectID:
q.Set("object_id", id.String())
case miResourceID:
q.Set(msiResID, id.String())
}
}
request.Raw().URL.RawQuery = q.Encode()
@ -343,13 +368,16 @@ func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context,
}
request.Raw().Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeader))
q := request.Raw().URL.Query()
q.Add("api-version", "2019-08-01")
q.Add("resource", scopes[0])
q.Set("api-version", "2019-08-01")
q.Set("resource", scopes[0])
if id != nil {
if id.idKind() == miResourceID {
q.Add(miResID, id.String())
} else {
q.Add(qpClientID, id.String())
switch id.idKind() {
case miClientID:
q.Set(qpClientID, id.String())
case miObjectID:
q.Set("principal_id", id.String())
case miResourceID:
q.Set(miResID, id.String())
}
}
request.Raw().URL.RawQuery = q.Encode()
@ -363,23 +391,24 @@ func (c *managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id
}
request.Raw().Header.Set("secret", os.Getenv(msiSecret))
q := request.Raw().URL.Query()
q.Add("api-version", "2017-09-01")
q.Add("resource", strings.Join(scopes, " "))
q.Add("clientid", os.Getenv(defaultIdentityClientID))
q.Set("api-version", "2017-09-01")
q.Set("resource", strings.Join(scopes, " "))
q.Set("clientid", os.Getenv(defaultIdentityClientID))
if id != nil {
if id.idKind() == miResourceID {
log.Write(EventAuthentication, "WARNING: Azure ML doesn't support specifying a managed identity by resource ID")
q.Set("clientid", "")
q.Set(miResID, id.String())
} else {
switch id.idKind() {
case miClientID:
q.Set("clientid", id.String())
case miObjectID:
return nil, newAuthenticationFailedError(credNameManagedIdentity, "Azure ML doesn't support specifying a managed identity by object ID", nil)
case miResourceID:
return nil, newAuthenticationFailedError(credNameManagedIdentity, "Azure ML doesn't support specifying a managed identity by resource ID", nil)
}
}
request.Raw().URL.RawQuery = q.Encode()
return request, nil
}
func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, scopes []string) (*policy.Request, error) {
request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
if err != nil {
return nil, err
@ -387,16 +416,8 @@ func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Conte
q := request.Raw().URL.Query()
request.Raw().Header.Set("Accept", "application/json")
request.Raw().Header.Set("Secret", os.Getenv(identityHeader))
q.Add("api-version", serviceFabricAPIVersion)
q.Add("resource", strings.Join(scopes, " "))
if id != nil {
log.Write(EventAuthentication, "WARNING: Service Fabric doesn't support selecting a user-assigned identity at runtime")
if id.idKind() == miResourceID {
q.Add(miResID, id.String())
} else {
q.Add(qpClientID, id.String())
}
}
q.Set("api-version", serviceFabricAPIVersion)
q.Set("resource", strings.Join(scopes, " "))
request.Raw().URL.RawQuery = q.Encode()
return request, nil
}
@ -409,8 +430,8 @@ func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resour
}
request.Raw().Header.Set(headerMetadata, "true")
q := request.Raw().URL.Query()
q.Add("api-version", azureArcAPIVersion)
q.Add("resource", strings.Join(resources, " "))
q.Set("api-version", azureArcAPIVersion)
q.Set("resource", strings.Join(resources, " "))
request.Raw().URL.RawQuery = q.Encode()
// send the initial request to get the short-lived secret key
response, err := c.azClient.Pipeline().Do(request)
@ -421,39 +442,39 @@ func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resour
// of the secret key file. Any other status code indicates an error in the request.
if response.StatusCode != 401 {
msg := fmt.Sprintf("expected a 401 response, received %d", response.StatusCode)
return "", newAuthenticationFailedError(credNameManagedIdentity, msg, response, nil)
return "", newAuthenticationFailedError(credNameManagedIdentity, msg, response)
}
header := response.Header.Get("WWW-Authenticate")
if len(header) == 0 {
return "", newAuthenticationFailedError(credNameManagedIdentity, "HIMDS response has no WWW-Authenticate header", nil, nil)
return "", newAuthenticationFailedError(credNameManagedIdentity, "HIMDS response has no WWW-Authenticate header", nil)
}
// the WWW-Authenticate header is expected in the following format: Basic realm=/some/file/path.key
_, p, found := strings.Cut(header, "=")
if !found {
return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected WWW-Authenticate header from HIMDS: "+header, nil, nil)
return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected WWW-Authenticate header from HIMDS: "+header, nil)
}
expected, err := arcKeyDirectory()
if err != nil {
return "", err
}
if filepath.Dir(p) != expected || !strings.HasSuffix(p, ".key") {
return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected file path from HIMDS service: "+p, nil, nil)
return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected file path from HIMDS service: "+p, nil)
}
f, err := os.Stat(p)
if err != nil {
return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not stat %q: %v", p, err), nil, nil)
return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not stat %q: %v", p, err), nil)
}
if s := f.Size(); s > 4096 {
return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("key is too large (%d bytes)", s), nil, nil)
return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("key is too large (%d bytes)", s), nil)
}
key, err := os.ReadFile(p)
if err != nil {
return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not read %q: %v", p, err), nil, nil)
return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not read %q: %v", p, err), nil)
}
return string(key), nil
}
func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, id ManagedIDKind, resources []string, key string) (*policy.Request, error) {
func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, resources []string, key string) (*policy.Request, error) {
request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
if err != nil {
return nil, err
@ -461,21 +482,13 @@ func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, i
request.Raw().Header.Set(headerMetadata, "true")
request.Raw().Header.Set("Authorization", fmt.Sprintf("Basic %s", key))
q := request.Raw().URL.Query()
q.Add("api-version", azureArcAPIVersion)
q.Add("resource", strings.Join(resources, " "))
if id != nil {
log.Write(EventAuthentication, "WARNING: Azure Arc doesn't support user-assigned managed identities")
if id.idKind() == miResourceID {
q.Add(miResID, id.String())
} else {
q.Add(qpClientID, id.String())
}
}
q.Set("api-version", azureArcAPIVersion)
q.Set("resource", strings.Join(resources, " "))
request.Raw().URL.RawQuery = q.Encode()
return request, nil
}
func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, scopes []string) (*policy.Request, error) {
request, err := azruntime.NewRequest(ctx, http.MethodPost, c.endpoint)
if err != nil {
return nil, err
@ -488,14 +501,5 @@ func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context,
if err := request.SetBody(body, "application/x-www-form-urlencoded"); err != nil {
return nil, err
}
if id != nil {
log.Write(EventAuthentication, "WARNING: Cloud Shell doesn't support user-assigned managed identities")
q := request.Raw().URL.Query()
if id.idKind() == miResourceID {
q.Add(miResID, id.String())
} else {
q.Add(qpClientID, id.String())
}
}
return request, nil
}

View file

@ -22,8 +22,9 @@ const credNameManagedIdentity = "ManagedIdentityCredential"
type managedIdentityIDKind int
const (
miClientID managedIdentityIDKind = 0
miResourceID managedIdentityIDKind = 1
miClientID managedIdentityIDKind = iota
miObjectID
miResourceID
)
// ManagedIDKind identifies the ID of a managed identity as a client, object, or resource ID
@ -32,7 +33,12 @@ type ManagedIDKind interface {
idKind() managedIdentityIDKind
}
// ClientID is the client ID of a user-assigned managed identity.
// ClientID is the client ID of a user-assigned managed identity. [NewManagedIdentityCredential]
// returns an error when a ClientID is specified on the following platforms:
//
// - Azure Arc
// - Cloud Shell
// - Service Fabric
type ClientID string
func (ClientID) idKind() managedIdentityIDKind {
@ -44,7 +50,31 @@ func (c ClientID) String() string {
return string(c)
}
// ResourceID is the resource ID of a user-assigned managed identity.
// ObjectID is the object ID of a user-assigned managed identity. [NewManagedIdentityCredential]
// returns an error when an ObjectID is specified on the following platforms:
//
// - Azure Arc
// - Azure ML
// - Cloud Shell
// - Service Fabric
type ObjectID string
func (ObjectID) idKind() managedIdentityIDKind {
return miObjectID
}
// String returns the string value of the ID.
func (o ObjectID) String() string {
return string(o)
}
// ResourceID is the resource ID of a user-assigned managed identity. [NewManagedIdentityCredential]
// returns an error when a ResourceID is specified on the following platforms:
//
// - Azure Arc
// - Azure ML
// - Cloud Shell
// - Service Fabric
type ResourceID string
func (ResourceID) idKind() managedIdentityIDKind {
@ -60,9 +90,10 @@ func (r ResourceID) String() string {
type ManagedIdentityCredentialOptions struct {
azcore.ClientOptions
// ID is the ID of a managed identity the credential should authenticate. Set this field to use a specific identity
// instead of the hosting environment's default. The value may be the identity's client ID or resource ID, but note that
// some platforms don't accept resource IDs.
// ID of a managed identity the credential should authenticate. Set this field to use a specific identity instead of
// the hosting environment's default. The value may be the identity's client, object, or resource ID.
// NewManagedIdentityCredential returns an error when the hosting environment doesn't support user-assigned managed
// identities, or the specified kind of ID.
ID ManagedIDKind
// dac indicates whether the credential is part of DefaultAzureCredential. When true, and the environment doesn't have
@ -73,10 +104,11 @@ type ManagedIdentityCredentialOptions struct {
dac bool
}
// ManagedIdentityCredential authenticates an Azure managed identity in any hosting environment supporting managed identities.
// ManagedIdentityCredential authenticates an [Azure managed identity] in any hosting environment supporting managed identities.
// This credential authenticates a system-assigned identity by default. Use ManagedIdentityCredentialOptions.ID to specify a
// user-assigned identity. See Microsoft Entra ID documentation for more information about managed identities:
// https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview
// user-assigned identity.
//
// [Azure managed identity]: https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview
type ManagedIdentityCredential struct {
client *confidentialClient
mic *managedIdentityClient

View file
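
A minimal sketch of the new ObjectID option documented above (the GUID is a placeholder): the constructor now rejects the request up front on hosts that can't honor it (Azure Arc, Azure ML, Cloud Shell, Service Fabric) instead of logging a warning.

package main

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// select a user-assigned identity by object ID
	cred, err := azidentity.NewManagedIdentityCredential(&azidentity.ManagedIdentityCredentialOptions{
		ID: azidentity.ObjectID("00000000-0000-0000-0000-000000000000"),
	})
	if err != nil {
		panic(err)
	}
	_ = cred
}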

@ -30,12 +30,12 @@ type publicClientOptions struct {
azcore.ClientOptions
AdditionallyAllowedTenants []string
Cache Cache
DeviceCodePrompt func(context.Context, DeviceCodeMessage) error
DisableAutomaticAuthentication bool
DisableInstanceDiscovery bool
LoginHint, RedirectURL string
Record authenticationRecord
TokenCachePersistenceOptions *tokenCachePersistenceOptions
Record AuthenticationRecord
Username, Password string
}
@ -48,7 +48,7 @@ type publicClient struct {
host string
name string
opts publicClientOptions
record authenticationRecord
record AuthenticationRecord
azClient *azcore.Client
}
@ -107,19 +107,19 @@ func newPublicClient(tenantID, clientID, name string, o publicClientOptions) (*p
}, nil
}
func (p *publicClient) Authenticate(ctx context.Context, tro *policy.TokenRequestOptions) (authenticationRecord, error) {
func (p *publicClient) Authenticate(ctx context.Context, tro *policy.TokenRequestOptions) (AuthenticationRecord, error) {
if tro == nil {
tro = &policy.TokenRequestOptions{}
}
if len(tro.Scopes) == 0 {
if p.defaultScope == nil {
return authenticationRecord{}, errScopeRequired
return AuthenticationRecord{}, errScopeRequired
}
tro.Scopes = p.defaultScope
}
client, mu, err := p.client(*tro)
if err != nil {
return authenticationRecord{}, err
return AuthenticationRecord{}, err
}
mu.Lock()
defer mu.Unlock()
@ -152,7 +152,7 @@ func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOpti
return p.token(ar, err)
}
if p.opts.DisableAutomaticAuthentication {
return azcore.AccessToken{}, newauthenticationRequiredError(p.name, tro)
return azcore.AccessToken{}, newAuthenticationRequiredError(p.name, tro)
}
at, err := p.reqToken(ctx, client, tro)
if err == nil {
@ -222,13 +222,13 @@ func (p *publicClient) client(tro policy.TokenRequestOptions) (msalPublicClient,
}
func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) {
cache, err := internal.NewCache(p.opts.TokenCachePersistenceOptions, enableCAE)
c, err := internal.ExportReplace(p.opts.Cache, enableCAE)
if err != nil {
return nil, err
}
o := []public.Option{
public.WithAuthority(runtime.JoinPaths(p.host, p.tenantID)),
public.WithCache(cache),
public.WithCache(c),
public.WithHTTPClient(p),
}
if enableCAE {
@ -244,8 +244,7 @@ func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToke
if err == nil {
p.record, err = newAuthenticationRecord(ar)
} else {
res := getResponseFromError(err)
err = newAuthenticationFailedError(p.name, err.Error(), res, err)
err = newAuthenticationFailedErrorFromMSAL(p.name, err)
}
return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
}

View file

@ -5,7 +5,19 @@
param (
[hashtable] $AdditionalParameters = @{},
[hashtable] $DeploymentOutputs
[hashtable] $DeploymentOutputs,
[Parameter(ParameterSetName = 'Provisioner', Mandatory = $true)]
[ValidateNotNullOrEmpty()]
[string] $TenantId,
[Parameter()]
[ValidatePattern('^[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$')]
[string] $TestApplicationId,
# Captures any arguments from eng/New-TestResources.ps1 not declared here (no parameter errors).
[Parameter(ValueFromRemainingArguments = $true)]
$RemainingArguments
)
$ErrorActionPreference = 'Stop'
@ -16,14 +28,14 @@ if ($CI) {
Write-Host "Skipping post-provisioning script because resources weren't deployed"
return
}
az login --service-principal -u $DeploymentOutputs['AZIDENTITY_CLIENT_ID'] -p $DeploymentOutputs['AZIDENTITY_CLIENT_SECRET'] --tenant $DeploymentOutputs['AZIDENTITY_TENANT_ID']
az login --federated-token $env:OIDC_TOKEN --service-principal -t $TenantId -u $TestApplicationId
az account set --subscription $DeploymentOutputs['AZIDENTITY_SUBSCRIPTION_ID']
}
Write-Host "Building container"
$image = "$($DeploymentOutputs['AZIDENTITY_ACR_LOGIN_SERVER'])/azidentity-managed-id-test"
Set-Content -Path "$PSScriptRoot/Dockerfile" -Value @"
FROM mcr.microsoft.com/oss/go/microsoft/golang:latest as builder
FROM mcr.microsoft.com/oss/go/microsoft/golang:latest AS builder
ENV GOARCH=amd64 GOWORK=off
COPY . /azidentity
WORKDIR /azidentity/testdata/managed-id-test
@ -53,9 +65,11 @@ az container create -g $rg -n $aciName --image $image `
--role "Storage Blob Data Reader" `
--scope $($DeploymentOutputs['AZIDENTITY_STORAGE_ID']) `
-e AZIDENTITY_STORAGE_NAME=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME']) `
AZIDENTITY_STORAGE_NAME_USER_ASSIGNED=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) `
AZIDENTITY_USER_ASSIGNED_IDENTITY=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) `
FUNCTIONS_CUSTOMHANDLER_PORT=80
AZIDENTITY_STORAGE_NAME_USER_ASSIGNED=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) `
AZIDENTITY_USER_ASSIGNED_IDENTITY=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) `
AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID']) `
AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID']) `
FUNCTIONS_CUSTOMHANDLER_PORT=80
Write-Host "##vso[task.setvariable variable=AZIDENTITY_ACI_NAME;]$aciName"
# Azure Functions deployment: copy the Windows binary from the Docker image, deploy it in a zip

View file

@ -135,6 +135,14 @@ resource azfunc 'Microsoft.Web/sites@2021-03-01' = if (deployResources) {
name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY'
value: deployResources ? usermgdid.id : null
}
{
name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID'
value: deployResources ? usermgdid.properties.clientId : null
}
{
name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID'
value: deployResources ? usermgdid.properties.principalId : null
}
{
name: 'AzureWebJobsStorage'
value: 'DefaultEndpointsProtocol=https;AccountName=${deployResources ? sa.name : ''};EndpointSuffix=${deployResources ? environment().suffixes.storage : ''};AccountKey=${deployResources ? sa.listKeys().keys[0].value : ''}'
@ -217,3 +225,4 @@ output AZIDENTITY_STORAGE_NAME_USER_ASSIGNED string = deployResources ? saUserAs
output AZIDENTITY_USER_ASSIGNED_IDENTITY string = deployResources ? usermgdid.id : ''
output AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID string = deployResources ? usermgdid.properties.clientId : ''
output AZIDENTITY_USER_ASSIGNED_IDENTITY_NAME string = deployResources ? usermgdid.name : ''
output AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID string = deployResources ? usermgdid.properties.principalId : ''

View file

@ -25,18 +25,20 @@ type UsernamePasswordCredentialOptions struct {
// application is registered.
AdditionallyAllowedTenants []string
// authenticationRecord returned by a call to a credential's Authenticate method. Set this option
// AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option
// to enable the credential to use data from a previous authentication.
authenticationRecord authenticationRecord
AuthenticationRecord AuthenticationRecord
// Cache is a persistent cache the credential will use to store the tokens it acquires, making
// them available to other processes and credential instances. The default, zero value means the
// credential will store tokens in memory and not share them with any other credential instance.
Cache Cache
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
// private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
// tokenCachePersistenceOptions enables persistent token caching when not nil.
tokenCachePersistenceOptions *tokenCachePersistenceOptions
}
// UsernamePasswordCredential authenticates a user with a password. Microsoft doesn't recommend this kind of authentication,
@ -54,13 +56,13 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st
options = &UsernamePasswordCredentialOptions{}
}
opts := publicClientOptions{
AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
ClientOptions: options.ClientOptions,
DisableInstanceDiscovery: options.DisableInstanceDiscovery,
Password: password,
Record: options.authenticationRecord,
TokenCachePersistenceOptions: options.tokenCachePersistenceOptions,
Username: username,
AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
Cache: options.Cache,
ClientOptions: options.ClientOptions,
DisableInstanceDiscovery: options.DisableInstanceDiscovery,
Password: password,
Record: options.AuthenticationRecord,
Username: username,
}
c, err := newPublicClient(tenantID, clientID, credNameUserPassword, opts)
if err != nil {
@ -70,7 +72,7 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st
}
// Authenticate the user. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord.
func (c *UsernamePasswordCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) {
func (c *UsernamePasswordCredential) Authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (AuthenticationRecord, error) {
var err error
ctx, endSpan := runtime.StartSpan(ctx, credNameUserPassword+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil)
defer func() { endSpan(err) }()
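A rough usage sketch of the now-exported Authenticate method and the AuthenticationRecord option introduced here; the surrounding function and its parameters are placeholders, not part of the SDK.

```go
package credexample

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// authenticateAndReuse sketches the new flow: authenticate once, then seed a
// second credential with the returned record so it can reuse cached tokens.
func authenticateAndReuse(ctx context.Context, tenantID, clientID, username, password string) (*azidentity.UsernamePasswordCredential, error) {
	cred, err := azidentity.NewUsernamePasswordCredential(tenantID, clientID, username, password, nil)
	if err != nil {
		return nil, err
	}
	record, err := cred.Authenticate(ctx, nil) // exported as of azidentity v1.8.0
	if err != nil {
		return nil, err
	}
	return azidentity.NewUsernamePasswordCredential(tenantID, clientID, username, password,
		&azidentity.UsernamePasswordCredentialOptions{
			AuthenticationRecord: record,
			// Cache (the persistent token cache, new in this release) could
			// also be set here; it lives in a separate azidentity/cache module.
		})
}
```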

View file

@ -14,5 +14,5 @@ const (
module = "github.com/Azure/azure-sdk-for-go/sdk/" + component
// Version is the semantic version (see http://semver.org) of this module.
version = "v1.7.0"
version = "v1.8.0"
)

View file

@ -39,15 +39,24 @@ type WorkloadIdentityCredentialOptions struct {
// Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the
// application is registered.
AdditionallyAllowedTenants []string
// Cache is a persistent cache the credential will use to store the tokens it acquires, making
// them available to other processes and credential instances. The default, zero value means the
// credential will store tokens in memory and not share them with any other credential instance.
Cache Cache
// ClientID of the service principal. Defaults to the value of the environment variable AZURE_CLIENT_ID.
ClientID string
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
// private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
// TenantID of the service principal. Defaults to the value of the environment variable AZURE_TENANT_ID.
TenantID string
// TokenFilePath is the path of a file containing a Kubernetes service account token. Defaults to the value of the
// environment variable AZURE_FEDERATED_TOKEN_FILE.
TokenFilePath string
@ -81,6 +90,7 @@ func NewWorkloadIdentityCredential(options *WorkloadIdentityCredentialOptions) (
w := WorkloadIdentityCredential{file: file, mtx: &sync.RWMutex{}}
caco := ClientAssertionCredentialOptions{
AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
Cache: options.Cache,
ClientOptions: options.ClientOptions,
DisableInstanceDiscovery: options.DisableInstanceDiscovery,
}
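A short sketch of how the extended WorkloadIdentityCredentialOptions might be used; the wrapper function is hypothetical, the option fields and their environment-variable fallbacks are taken from the comments above.

```go
package credexample

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// newWorkloadIdentityCred shows the options documented above. All three values
// fall back to AZURE_CLIENT_ID, AZURE_TENANT_ID and AZURE_FEDERATED_TOKEN_FILE
// when left empty.
func newWorkloadIdentityCred(clientID, tenantID, tokenFile string) (*azidentity.WorkloadIdentityCredential, error) {
	return azidentity.NewWorkloadIdentityCredential(&azidentity.WorkloadIdentityCredentialOptions{
		ClientID:      clientID,
		TenantID:      tenantID,
		TokenFilePath: tokenFile,
		// Cache: a persistent token cache could be plugged in here (new in
		// v1.8.0); the zero value keeps tokens in memory only.
	})
}
```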

View file

@ -1,5 +1,17 @@
# Release History
## 1.5.0 (2024-11-13)
### Features Added
* Fix compareHeaders custom sorting algorithm for String To Sign.
* Added permissions & resource type parameters in the ListBlobs response.
## 1.5.0-beta.1 (2024-10-22)
### Other Changes
* Updated `azcore` version to `1.16.0`
* Updated `azidentity` version to `1.8.0`
## 1.4.1 (2024-09-18)
### Features Added

View file

@ -2,5 +2,5 @@
"AssetsRepo": "Azure/azure-sdk-assets",
"AssetsRepoPrefixPath": "go",
"TagPrefix": "go/storage/azblob",
"Tag": "go/storage/azblob_bbf7a929e3"
"Tag": "go/storage/azblob_e5b4fd09a3"
}

View file

@ -7,10 +7,11 @@
package container
import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"reflect"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
@ -126,7 +127,7 @@ func (o *GetPropertiesOptions) format() (*generated.ContainerClientGetProperties
// ListBlobsInclude indicates what additional information the service should return with each blob.
type ListBlobsInclude struct {
Copy, Metadata, Snapshots, UncommittedBlobs, Deleted, Tags, Versions, LegalHold, ImmutabilityPolicy, DeletedWithVersions bool
Copy, Metadata, Snapshots, UncommittedBlobs, Deleted, Tags, Versions, LegalHold, ImmutabilityPolicy, DeletedWithVersions, Permissions bool
}
func (l ListBlobsInclude) format() []generated.ListBlobsIncludeItem {
@ -166,7 +167,9 @@ func (l ListBlobsInclude) format() []generated.ListBlobsIncludeItem {
if l.Versions {
include = append(include, generated.ListBlobsIncludeItemVersions)
}
if l.Permissions {
include = append(include, generated.ListBlobsIncludeItemPermissions)
}
return include
}
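A sketch of the new Permissions include flag in use; the container client is assumed to exist already, and the helper name is illustrative. The Owner/Group/Permissions/ResourceType properties it reads are the BlobProperties additions shown further down in this diff and are typically populated only on accounts with a hierarchical namespace.

```go
package blobexample

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
)

// listWithPermissions lists blobs and asks the service to include the new
// permissions information with each item.
func listWithPermissions(ctx context.Context, client *container.Client) error {
	pager := client.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{
		Include: container.ListBlobsInclude{Permissions: true},
	})
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, b := range page.Segment.BlobItems {
			if b.Properties != nil && b.Properties.Permissions != nil {
				fmt.Println(*b.Name, *b.Properties.Permissions)
			}
		}
	}
	return nil
}
```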

View file

@ -154,13 +154,19 @@ func compareHeaders(lhs, rhs string, tables [][]int) int {
return 0
}
w1 := tables[currLevel][lhs[i]]
if i >= lhsLen {
var w1, w2 int
// Check bounds before accessing lhs[i]
if i < lhsLen {
w1 = tables[currLevel][lhs[i]]
} else {
w1 = 0x1
}
w2 := tables[currLevel][rhs[j]]
if j >= rhsLen {
// Check bounds before accessing rhs[j]
if j < rhsLen {
w2 = tables[currLevel][rhs[j]]
} else {
w2 = 0x1
}
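Restated as a standalone helper, the bounds-safe lookup this fix introduces looks roughly like the following; the function name is mine, not the SDK's.

```go
package shared

// weightAt returns the collation weight for byte i of s, falling back to 0x1
// (the weight the algorithm assigns to the implicit end-of-string position)
// when i is past the end of s. The fix applies this check to both sides of
// the comparison so shorter header names no longer index out of range.
func weightAt(table []int, s string, i int) int {
	if i < len(s) {
		return table[s[i]]
	}
	return 0x1
}
```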

View file

@ -8,5 +8,5 @@ package exported
const (
ModuleName = "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
ModuleVersion = "v1.4.1"
ModuleVersion = "v1.5.0"
)

View file

@ -22,7 +22,40 @@ export-clients: true
use: "@autorest/go@4.0.0-preview.65"
```
### Updating service version to 2024-08-04
### Add Owner,Group,Permissions,Acl,ResourceType in ListBlob Response
``` yaml
directive:
- from: swagger-document
where: $.definitions
transform: >
$.BlobPropertiesInternal.properties["Owner"] = {
"type" : "string",
};
$.BlobPropertiesInternal.properties["Group"] = {
"type" : "string",
};
$.BlobPropertiesInternal.properties["Permissions"] = {
"type" : "string",
};
$.BlobPropertiesInternal.properties["Acl"] = {
"type" : "string",
};
$.BlobPropertiesInternal.properties["ResourceType"] = {
"type" : "string",
};
```
### Add permissions in ListBlobsInclude
``` yaml
directive:
- from: swagger-document
where: $.parameters.ListBlobsInclude
transform: >
$.items.enum.push("permissions");
```
### Updating service version to 2024-11-04
```yaml
directive:
- from:
@ -35,7 +68,7 @@ directive:
where: $
transform: >-
return $.
replaceAll(`[]string{"2021-12-02"}`, `[]string{ServiceVersion}`);
replaceAll(`[]string{"2024-08-04"}`, `[]string{ServiceVersion}`);
```
### Fix CRC Response Header in PutBlob response

View file

@ -6,4 +6,4 @@
package generated
const ServiceVersion = "2024-08-04"
const ServiceVersion = "2024-11-04"

View file

@ -116,7 +116,7 @@ func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, co
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if err := req.SetBody(body, "application/octet-stream"); err != nil {
return nil, err
}
@ -310,7 +310,7 @@ func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont
if options != nil && options.SourceRange != nil {
req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -494,7 +494,7 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content
if options != nil && options.BlobTagsString != nil {
req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -615,7 +615,7 @@ func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options *
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}

View file

@ -72,7 +72,7 @@ func (client *BlobClient) abortCopyFromURLCreateRequest(ctx context.Context, cop
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -161,7 +161,7 @@ func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, duratio
if options != nil && options.ProposedLeaseID != nil {
req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -259,7 +259,7 @@ func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options *
if options != nil && options.BreakPeriod != nil {
req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -365,7 +365,7 @@ func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID
req.Raw().Header["x-ms-lease-action"] = []string{"change"}
req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -517,7 +517,7 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour
if options != nil && options.BlobTagsString != nil {
req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -659,7 +659,7 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio
}
}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -788,7 +788,7 @@ func (client *BlobClient) deleteCreateRequest(ctx context.Context, options *Blob
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -854,7 +854,7 @@ func (client *BlobClient) deleteImmutabilityPolicyCreateRequest(ctx context.Cont
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -965,7 +965,7 @@ func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *Bl
if options != nil && options.RangeGetContentMD5 != nil {
req.Raw().Header["x-ms-range-get-content-md5"] = []string{strconv.FormatBool(*options.RangeGetContentMD5)}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -1229,7 +1229,7 @@ func (client *BlobClient) getAccountInfoCreateRequest(ctx context.Context, optio
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -1343,7 +1343,7 @@ func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, option
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -1640,7 +1640,7 @@ func (client *BlobClient) getTagsCreateRequest(ctx context.Context, options *Blo
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -1742,7 +1742,7 @@ func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobC
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.QueryRequest != nil {
if err := runtime.MarshalAsXML(req, *options.QueryRequest); err != nil {
return nil, err
@ -1961,7 +1961,7 @@ func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID
}
req.Raw().Header["x-ms-lease-action"] = []string{"release"}
req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -2055,7 +2055,7 @@ func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID s
}
req.Raw().Header["x-ms-lease-action"] = []string{"renew"}
req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -2138,7 +2138,7 @@ func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOpti
if options != nil && options.ExpiresOn != nil {
req.Raw().Header["x-ms-expiry-time"] = []string{*options.ExpiresOn}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -2252,7 +2252,7 @@ func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, optio
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -2345,7 +2345,7 @@ func (client *BlobClient) setImmutabilityPolicyCreateRequest(ctx context.Context
if options != nil && options.ImmutabilityPolicyExpiry != nil {
req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -2422,7 +2422,7 @@ func (client *BlobClient) setLegalHoldCreateRequest(ctx context.Context, legalHo
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(legalHold)}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -2536,7 +2536,7 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options
}
}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -2645,7 +2645,7 @@ func (client *BlobClient) setTagsCreateRequest(ctx context.Context, tags BlobTag
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if err := runtime.MarshalAsXML(req, tags); err != nil {
return nil, err
}
@ -2735,7 +2735,7 @@ func (client *BlobClient) setTierCreateRequest(ctx context.Context, tier AccessT
if options != nil && options.RehydratePriority != nil {
req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -2861,7 +2861,7 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop
if options != nil && options.BlobTagsString != nil {
req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -2945,7 +2945,7 @@ func (client *BlobClient) undeleteCreateRequest(ctx context.Context, options *Bl
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}

View file

@ -152,7 +152,7 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context,
if options != nil && options.BlobTagsString != nil {
req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if err := runtime.MarshalAsXML(req, blocks); err != nil {
return nil, err
}
@ -273,7 +273,7 @@ func (client *BlockBlobClient) getBlockListCreateRequest(ctx context.Context, li
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -470,7 +470,7 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context,
if options != nil && options.BlobTagsString != nil {
req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -599,7 +599,7 @@ func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, bloc
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if err := req.SetBody(body, "application/octet-stream"); err != nil {
return nil, err
}
@ -748,7 +748,7 @@ func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex
if options != nil && options.SourceRange != nil {
req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -927,7 +927,7 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL
if options != nil && options.BlobTagsString != nil {
req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if err := req.SetBody(body, "application/octet-stream"); err != nil {
return nil, err
}

View file

@ -343,6 +343,7 @@ const (
ListBlobsIncludeItemImmutabilitypolicy ListBlobsIncludeItem = "immutabilitypolicy"
ListBlobsIncludeItemLegalhold ListBlobsIncludeItem = "legalhold"
ListBlobsIncludeItemMetadata ListBlobsIncludeItem = "metadata"
ListBlobsIncludeItemPermissions ListBlobsIncludeItem = "permissions"
ListBlobsIncludeItemSnapshots ListBlobsIncludeItem = "snapshots"
ListBlobsIncludeItemTags ListBlobsIncludeItem = "tags"
ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItem = "uncommittedblobs"
@ -358,6 +359,7 @@ func PossibleListBlobsIncludeItemValues() []ListBlobsIncludeItem {
ListBlobsIncludeItemImmutabilitypolicy,
ListBlobsIncludeItemLegalhold,
ListBlobsIncludeItemMetadata,
ListBlobsIncludeItemPermissions,
ListBlobsIncludeItemSnapshots,
ListBlobsIncludeItemTags,
ListBlobsIncludeItemUncommittedblobs,

View file

@ -83,7 +83,7 @@ func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, du
if options != nil && options.ProposedLeaseID != nil {
req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -174,7 +174,7 @@ func (client *ContainerClient) breakLeaseCreateRequest(ctx context.Context, opti
if options != nil && options.BreakPeriod != nil {
req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -273,7 +273,7 @@ func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, lea
req.Raw().Header["x-ms-lease-action"] = []string{"change"}
req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -369,7 +369,7 @@ func (client *ContainerClient) createCreateRequest(ctx context.Context, options
}
}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -456,7 +456,7 @@ func (client *ContainerClient) deleteCreateRequest(ctx context.Context, options
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -534,7 +534,7 @@ func (client *ContainerClient) filterBlobsCreateRequest(ctx context.Context, whe
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -609,7 +609,7 @@ func (client *ContainerClient) getAccessPolicyCreateRequest(ctx context.Context,
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -692,7 +692,7 @@ func (client *ContainerClient) getAccountInfoCreateRequest(ctx context.Context,
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -775,7 +775,7 @@ func (client *ContainerClient) getPropertiesCreateRequest(ctx context.Context, o
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -897,7 +897,7 @@ func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Cont
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -988,7 +988,7 @@ func (client *ContainerClient) ListBlobHierarchySegmentCreateRequest(ctx context
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -1071,7 +1071,7 @@ func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, le
}
req.Raw().Header["x-ms-lease-action"] = []string{"release"}
req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -1152,7 +1152,7 @@ func (client *ContainerClient) renameCreateRequest(ctx context.Context, sourceCo
if options != nil && options.SourceLeaseID != nil {
req.Raw().Header["x-ms-source-lease-id"] = []string{*options.SourceLeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -1229,7 +1229,7 @@ func (client *ContainerClient) renewLeaseCreateRequest(ctx context.Context, leas
}
req.Raw().Header["x-ms-lease-action"] = []string{"renew"}
req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -1314,7 +1314,7 @@ func (client *ContainerClient) restoreCreateRequest(ctx context.Context, options
if options != nil && options.DeletedContainerVersion != nil {
req.Raw().Header["x-ms-deleted-container-version"] = []string{*options.DeletedContainerVersion}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -1397,7 +1397,7 @@ func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context,
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
type wrapper struct {
XMLName xml.Name `xml:"SignedIdentifiers"`
ContainerACL *[]*SignedIdentifier `xml:"SignedIdentifier"`
@ -1495,7 +1495,7 @@ func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, opt
}
}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -1578,7 +1578,7 @@ func (client *ContainerClient) submitBatchCreateRequest(ctx context.Context, con
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if err := req.SetBody(body, multipartContentType); err != nil {
return nil, err
}

View file

@ -95,6 +95,7 @@ type BlobProperties struct {
// REQUIRED
LastModified *time.Time `xml:"Last-Modified"`
ACL *string `xml:"Acl"`
AccessTier *AccessTier `xml:"AccessTier"`
AccessTierChangeTime *time.Time `xml:"AccessTierChangeTime"`
AccessTierInferred *bool `xml:"AccessTierInferred"`
@ -124,6 +125,7 @@ type BlobProperties struct {
// The name of the encryption scope under which the blob is encrypted.
EncryptionScope *string `xml:"EncryptionScope"`
ExpiresOn *time.Time `xml:"Expiry-Time"`
Group *string `xml:"Group"`
ImmutabilityPolicyExpiresOn *time.Time `xml:"ImmutabilityPolicyUntilDate"`
ImmutabilityPolicyMode *ImmutabilityPolicyMode `xml:"ImmutabilityPolicyMode"`
IncrementalCopy *bool `xml:"IncrementalCopy"`
@ -133,11 +135,14 @@ type BlobProperties struct {
LeaseState *LeaseStateType `xml:"LeaseState"`
LeaseStatus *LeaseStatusType `xml:"LeaseStatus"`
LegalHold *bool `xml:"LegalHold"`
Owner *string `xml:"Owner"`
Permissions *string `xml:"Permissions"`
// If an object is in rehydrate pending state then this header is returned with priority of rehydrate. Valid values are High
// and Standard.
RehydratePriority *RehydratePriority `xml:"RehydratePriority"`
RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"`
ResourceType *string `xml:"ResourceType"`
ServerEncrypted *bool `xml:"ServerEncrypted"`
TagCount *int32 `xml:"TagCount"`
}

View file

@ -114,7 +114,7 @@ func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, conte
if options != nil && options.Range != nil {
req.Raw().Header["x-ms-range"] = []string{*options.Range}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -235,7 +235,7 @@ func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context,
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -399,7 +399,7 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe
if options != nil && options.BlobTagsString != nil {
req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -533,7 +533,7 @@ func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, op
if options != nil && options.Range != nil {
req.Raw().Header["x-ms-range"] = []string{*options.Range}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -660,7 +660,7 @@ func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context
if options != nil && options.Range != nil {
req.Raw().Header["x-ms-range"] = []string{*options.Range}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -782,7 +782,7 @@ func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobConte
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -891,7 +891,7 @@ func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Cont
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-sequence-number-action"] = []string{string(sequenceNumberAction)}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -1031,7 +1031,7 @@ func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, cont
if options != nil && options.Range != nil {
req.Raw().Header["x-ms-range"] = []string{*options.Range}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if err := req.SetBody(body, "application/octet-stream"); err != nil {
return nil, err
}
@ -1224,7 +1224,7 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex
req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
req.Raw().Header["x-ms-source-range"] = []string{sourceRange}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}

View file

@ -77,7 +77,7 @@ func (client *ServiceClient) filterBlobsCreateRequest(ctx context.Context, where
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -146,7 +146,7 @@ func (client *ServiceClient) getAccountInfoCreateRequest(ctx context.Context, op
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -226,7 +226,7 @@ func (client *ServiceClient) getPropertiesCreateRequest(ctx context.Context, opt
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -289,7 +289,7 @@ func (client *ServiceClient) getStatisticsCreateRequest(ctx context.Context, opt
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -361,7 +361,7 @@ func (client *ServiceClient) getUserDelegationKeyCreateRequest(ctx context.Conte
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if err := runtime.MarshalAsXML(req, keyInfo); err != nil {
return nil, err
}
@ -428,7 +428,7 @@ func (client *ServiceClient) ListContainersSegmentCreateRequest(ctx context.Cont
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@ -492,7 +492,7 @@ func (client *ServiceClient) setPropertiesCreateRequest(ctx context.Context, sto
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if err := runtime.MarshalAsXML(req, storageServiceProperties); err != nil {
return nil, err
}
@ -560,7 +560,7 @@ func (client *ServiceClient) submitBatchCreateRequest(ctx context.Context, conte
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["x-ms-version"] = []string{"2024-08-04"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if err := req.SetBody(body, multipartContentType); err != nil {
return nil, err
}

View file

@ -36,19 +36,21 @@ func NewParser(options ...ParserOption) *Parser {
return p
}
// Parse parses, validates, verifies the signature and returns the parsed token.
// keyFunc will receive the parsed token and should return the key for validating.
// Parse parses, validates, verifies the signature and returns the parsed token. keyFunc will
// receive the parsed token and should return the key for validating.
func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
}
// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object implementing the Claims
// interface. This provides default values which can be overridden and allows a caller to use their own type, rather
// than the default MapClaims implementation of Claims.
// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object
// implementing the Claims interface. This provides default values which can be overridden and
// allows a caller to use their own type, rather than the default MapClaims implementation of
// Claims.
//
// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims),
// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the
// proper memory for it before passing in the overall claims, otherwise you might run into a panic.
// Note: If you provide a custom claim implementation that embeds one of the standard claims (such
// as RegisteredClaims), make sure that a) you either embed a non-pointer version of the claims or
// b) if you are using a pointer, allocate the proper memory for it before passing in the overall
// claims, otherwise you might run into a panic.
func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
token, parts, err := p.ParseUnverified(tokenString, claims)
if err != nil {
@ -85,12 +87,17 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
}
// Perform validation
token.Signature = parts[2]
if err := token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
return token, &ValidationError{Inner: err, Errors: ValidationErrorSignatureInvalid}
}
vErr := &ValidationError{}
// Validate Claims
if !p.SkipClaimsValidation {
if err := token.Claims.Valid(); err != nil {
// If the Claims Valid returned an error, check if it is a validation error,
// If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
if e, ok := err.(*ValidationError); !ok {
@ -98,22 +105,14 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
} else {
vErr = e
}
return token, vErr
}
}
// Perform validation
token.Signature = parts[2]
if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
vErr.Inner = err
vErr.Errors |= ValidationErrorSignatureInvalid
}
// No errors so far, token is valid.
token.Valid = true
if vErr.valid() {
token.Valid = true
return token, nil
}
return token, vErr
return token, nil
}
// ParseUnverified parses the token but doesn't validate the signature.

View file

@ -14,13 +14,15 @@ import (
)
type DiskCustomization struct {
// TODO: Add partition table type: gpt or dos
// Type of the partition table: gpt or dos.
// Optional, the default depends on the distro and image type.
Type string
MinSize uint64
Partitions []PartitionCustomization
}
type diskCustomizationMarshaler struct {
// TODO: Add partition table type: gpt or dos
Type string `json:"type,omitempty" toml:"type,omitempty"`
MinSize datasizes.Size `json:"minsize,omitempty" toml:"minsize,omitempty"`
Partitions []PartitionCustomization `json:"partitions,omitempty" toml:"partitions,omitempty"`
}
@ -30,6 +32,7 @@ func (dc *DiskCustomization) UnmarshalJSON(data []byte) error {
if err := json.Unmarshal(data, &dcm); err != nil {
return err
}
dc.Type = dcm.Type
dc.MinSize = dcm.MinSize.Uint64()
dc.Partitions = dcm.Partitions
@ -345,6 +348,21 @@ func (p *DiskCustomization) Validate() error {
return nil
}
switch p.Type {
case "gpt", "":
case "dos":
// dos/mbr only supports 4 partitions
// Unfortunately, at this stage it's unknown whether we will need extra
// partitions (bios boot, root, esp), so this check is just to catch
// obvious invalid customizations early. The final partition table is
// checked after it's created.
if len(p.Partitions) > 4 {
return fmt.Errorf("invalid partitioning customizations: \"dos\" partition table type only supports up to 4 partitions: got %d", len(p.Partitions))
}
default:
return fmt.Errorf("unknown partition table type: %s (valid: gpt, dos)", p.Type)
}
mountpoints := make(map[string]bool)
vgnames := make(map[string]bool)
var errs []error
@ -470,7 +488,7 @@ func (p *PartitionCustomization) validatePlain(mountpoints map[string]bool) erro
}
// TODO: allow empty fstype with default from distro
if !slices.Contains(validPlainFSTypes, p.FSType) {
return fmt.Errorf("unknown or invalid filesystem type for mountpoint %q: %s", p.Mountpoint, p.FSType)
return fmt.Errorf("unknown or invalid filesystem type (fs_type) for mountpoint %q: %s", p.Mountpoint, p.FSType)
}
if err := validateFilesystemType(p.Mountpoint, p.FSType); err != nil {
return err
@ -523,7 +541,7 @@ func (p *PartitionCustomization) validateLVM(mountpoints, vgnames map[string]boo
// TODO: allow empty fstype with default from distro
if !slices.Contains(validPlainFSTypes, lv.FSType) {
return fmt.Errorf("unknown or invalid filesystem type for logical volume with mountpoint %q: %s", lv.Mountpoint, lv.FSType)
return fmt.Errorf("unknown or invalid filesystem type (fs_type) for logical volume with mountpoint %q: %s", lv.Mountpoint, lv.FSType)
}
}
return nil
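A rough sketch of how the new partition table type surfaces in a disk customization and its validation. The JSON keys (type, partitions, mountpoint, fs_type) follow the struct tags and error messages visible in this diff; whether unmarshalling succeeds exactly as written is an assumption, so treat this as illustrative.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/osbuild/images/pkg/blueprint"
)

func main() {
	// Five plain partitions on a "dos" table should now be rejected early by
	// Validate(), since dos/mbr supports at most 4 partitions.
	data := []byte(`{
		"type": "dos",
		"partitions": [
			{"type": "plain", "mountpoint": "/a", "fs_type": "ext4"},
			{"type": "plain", "mountpoint": "/b", "fs_type": "ext4"},
			{"type": "plain", "mountpoint": "/c", "fs_type": "ext4"},
			{"type": "plain", "mountpoint": "/d", "fs_type": "ext4"},
			{"type": "plain", "mountpoint": "/e", "fs_type": "ext4"}
		]
	}`)
	var dc blueprint.DiskCustomization
	if err := json.Unmarshal(data, &dc); err != nil {
		panic(err)
	}
	fmt.Println(dc.Validate())
}
```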

View file

@ -40,7 +40,7 @@ const (
// rounded to the next MiB.
DefaultGrainBytes = uint64(1048576) // 1 MiB
// UUIDs
// UUIDs for GPT disks
BIOSBootPartitionGUID = "21686148-6449-6E6F-744E-656564454649"
BIOSBootPartitionUUID = "FAC7F1FB-3E8D-4137-A512-961DE09A5549"
@ -61,31 +61,42 @@ const (
// Extended Boot Loader Partition
XBootLDRPartitionGUID = "BC13C2FF-59E6-4262-A352-B275FD6F7172"
// DosFat16B used for the ESP-System partition
DosFat16B = "06"
// Partition type IDs for DOS disks
// Partition type ID for BIOS boot partition on dos.
// Type ID is for 'empty'.
// TODO: drop this completely when we convert the bios BOOT space to a
// partitionless gap/offset.
BIOSBootPartitionDOSID = "00"
// Partition type ID for any native Linux filesystem on dos
DosLinuxTypeID = "83"
FilesystemLinuxDOSID = "83"
// Partition type ID for BIOS boot partition on dos
DosBIOSBootID = "ef02"
// FAT16BDOSID used for the ESP-System partition
FAT16BDOSID = "06"
// Partition type ID for LVM on dos
LVMPartitionDOSID = "8e"
// Partition type ID for ESP on dos
DosESPID = "ef00"
EFISystemPartitionDOSID = "ef"
// Partition type ID for swap
DosSwapID = "82"
SwapPartitionDOSID = "82"
// Partition type ID for PRep on dos
PRepPartitionDOSID = "41"
)
// pt type -> type -> ID mapping for convenience
var idMap = map[PartitionTableType]map[string]string{
PT_DOS: {
"bios": DosBIOSBootID,
"boot": DosLinuxTypeID,
"data": DosLinuxTypeID,
"esp": DosESPID,
"lvm": DosLinuxTypeID,
"swap": DosSwapID,
"bios": BIOSBootPartitionDOSID,
"boot": FilesystemLinuxDOSID,
"data": FilesystemLinuxDOSID,
"esp": EFISystemPartitionDOSID,
"lvm": LVMPartitionDOSID,
"swap": SwapPartitionDOSID,
},
PT_GPT: {
"bios": BIOSBootPartitionGUID,

View file

@ -87,7 +87,7 @@ func (p *Partition) IsBIOSBoot() bool {
return false
}
return p.Type == BIOSBootPartitionGUID
return p.Type == BIOSBootPartitionGUID || p.Type == BIOSBootPartitionDOSID
}
func (p *Partition) IsPReP() bool {
@ -95,7 +95,7 @@ func (p *Partition) IsPReP() bool {
return false
}
return p.Type == "41" || p.Type == PRePartitionGUID
return p.Type == PRepPartitionDOSID || p.Type == PRePartitionGUID
}
func (p *Partition) MarshalJSON() ([]byte, error) {

View file

@ -731,36 +731,36 @@ func (pt *PartitionTable) ensureLVM() error {
if _, ok := parent.(*LVMLogicalVolume); ok {
return nil
} else if part, ok := parent.(*Partition); ok {
filesystem := part.Payload
vg := &LVMVolumeGroup{
Name: "rootvg",
Description: "created via lvm2 and osbuild",
}
// create root logical volume on the new volume group with the same
// size and filesystem as the previous root partition
_, err := vg.CreateLogicalVolume("rootlv", part.Size, filesystem)
if err != nil {
panic(fmt.Sprintf("Could not create LV: %v", err))
}
// replace the top-level partition payload with the new volume group
part.Payload = vg
// reset the vg partition size - it will be grown later
part.Size = 0
if pt.Type == PT_GPT {
part.Type = LVMPartitionGUID
} else {
part.Type = "8e"
}
} else {
}
part, ok := parent.(*Partition)
if !ok {
return fmt.Errorf("Unsupported parent for LVM")
}
filesystem := part.Payload
vg := &LVMVolumeGroup{
Name: "rootvg",
Description: "created via lvm2 and osbuild",
}
// create root logical volume on the new volume group with the same
// size and filesystem as the previous root partition
_, err := vg.CreateLogicalVolume("rootlv", part.Size, filesystem)
if err != nil {
panic(fmt.Sprintf("Could not create LV: %v", err))
}
// replace the top-level partition payload with the new volume group
part.Payload = vg
// reset the vg partition size - it will be grown later
part.Size = 0
if pt.Type == PT_GPT {
part.Type = LVMPartitionGUID
} else {
part.Type = LVMPartitionDOSID
}
return nil
}
@ -789,44 +789,44 @@ func (pt *PartitionTable) ensureBtrfs() error {
if _, ok := parent.(*Btrfs); ok {
return nil
} else if part, ok := parent.(*Partition); ok {
rootMountable, ok := rootPath[0].(Mountable)
if !ok {
return fmt.Errorf("root entity is not mountable: %T, this is a violation of entityPath() contract", rootPath[0])
}
opts, err := rootMountable.GetFSTabOptions()
if err != nil {
return err
}
btrfs := &Btrfs{
Label: "root",
Subvolumes: []BtrfsSubvolume{
{
Name: "root",
Mountpoint: "/",
Compress: DefaultBtrfsCompression,
ReadOnly: opts.ReadOnly(),
Size: part.Size,
},
},
}
// replace the top-level partition payload with a new btrfs filesystem
part.Payload = btrfs
// reset the btrfs partition size - it will be grown later
part.Size = 0
part.Type, err = getPartitionTypeIDfor(pt.Type, "data")
if err != nil {
return fmt.Errorf("error converting partition table to btrfs: %w", err)
}
} else {
}
part, ok := parent.(*Partition)
if !ok {
return fmt.Errorf("unsupported parent for btrfs: %T", parent)
}
rootMountable, ok := rootPath[0].(Mountable)
if !ok {
return fmt.Errorf("root entity is not mountable: %T, this is a violation of entityPath() contract", rootPath[0])
}
opts, err := rootMountable.GetFSTabOptions()
if err != nil {
return err
}
btrfs := &Btrfs{
Label: "root",
Subvolumes: []BtrfsSubvolume{
{
Name: "root",
Mountpoint: "/",
Compress: DefaultBtrfsCompression,
ReadOnly: opts.ReadOnly(),
Size: part.Size,
},
},
}
// replace the top-level partition payload with a new btrfs filesystem
part.Payload = btrfs
// reset the btrfs partition size - it will be grown later
part.Size = 0
part.Type, err = getPartitionTypeIDfor(pt.Type, "data")
if err != nil {
return fmt.Errorf("error converting partition table to btrfs: %w", err)
}
return nil
}
@ -1232,15 +1232,24 @@ func NewCustomPartitionTable(customizations *blueprint.DiskCustomization, option
pt := &PartitionTable{}
// TODO: Handle partition table type in customizations
switch options.PartitionTableType {
case PT_GPT, PT_DOS:
pt.Type = options.PartitionTableType
case PT_NONE:
// default to "gpt"
switch customizations.Type {
case "dos":
pt.Type = PT_DOS
case "gpt":
pt.Type = PT_GPT
case "":
// partition table type not specified, determine the default
switch options.PartitionTableType {
case PT_GPT, PT_DOS:
pt.Type = options.PartitionTableType
case PT_NONE:
// default to "gpt"
pt.Type = PT_GPT
default:
return nil, fmt.Errorf("%s invalid partition table type enum value: %d", errPrefix, options.PartitionTableType)
}
default:
return nil, fmt.Errorf("%s invalid partition table type enum value: %d", errPrefix, options.PartitionTableType)
return nil, fmt.Errorf("%s invalid partition table type: %s", errPrefix, customizations.Type)
}
// add any partition(s) that are needed for booting (like /boot/efi)
@ -1266,7 +1275,9 @@ func NewCustomPartitionTable(customizations *blueprint.DiskCustomization, option
return nil, fmt.Errorf("%s %w", errPrefix, err)
}
case "btrfs":
addBtrfsPartition(pt, part)
if err := addBtrfsPartition(pt, part); err != nil {
return nil, fmt.Errorf("%s %w", errPrefix, err)
}
default:
return nil, fmt.Errorf("%s invalid partition type: %s", errPrefix, part.Type)
}
@ -1283,6 +1294,16 @@ func NewCustomPartitionTable(customizations *blueprint.DiskCustomization, option
pt.relayout(customizations.MinSize)
pt.GenerateUUIDs(rng)
// One thing not caught by the customization validation is whether a final "dos"
// partition table has more than 4 partitions. This cannot be predicted from the
// customizations alone because it depends on the boot type (which comes from
// the image type) and the automatic partition creation it triggers. We
// therefore always check the final partition table for this rule.
if pt.Type == PT_DOS && len(pt.Partitions) > 4 {
return nil, fmt.Errorf("%s invalid partition table: \"dos\" partition table type only supports up to 4 partitions: got %d after creating the partition table with all necessary partitions", errPrefix, len(pt.Partitions))
}
return pt, nil
}
@ -1387,8 +1408,12 @@ func addLVMPartition(pt *PartitionTable, partition blueprint.PartitionCustomizat
}
// create partition for volume group
partType, err := getPartitionTypeIDfor(pt.Type, "lvm")
if err != nil {
return fmt.Errorf("error creating lvm partition %q: %w", vgname, err)
}
newpart := Partition{
Type: LVMPartitionGUID,
Type: partType,
Size: partition.MinSize,
Bootable: false,
Payload: newvg,
@ -1397,7 +1422,7 @@ func addLVMPartition(pt *PartitionTable, partition blueprint.PartitionCustomizat
return nil
}
func addBtrfsPartition(pt *PartitionTable, partition blueprint.PartitionCustomization) {
func addBtrfsPartition(pt *PartitionTable, partition blueprint.PartitionCustomization) error {
subvols := make([]BtrfsSubvolume, len(partition.Subvolumes))
for idx, subvol := range partition.Subvolumes {
newsubvol := BtrfsSubvolume{
@ -1412,14 +1437,19 @@ func addBtrfsPartition(pt *PartitionTable, partition blueprint.PartitionCustomiz
}
// create partition for btrfs volume
partType, err := getPartitionTypeIDfor(pt.Type, "data")
if err != nil {
return fmt.Errorf("error creating btrfs partition: %w", err)
}
newpart := Partition{
Type: FilesystemDataGUID,
Type: partType,
Bootable: false,
Payload: newvol,
Size: partition.MinSize,
}
pt.Partitions = append(pt.Partitions, newpart)
return nil
}
// Determine if a boot partition is needed based on the customizations. A boot

View file

@ -1,6 +1,8 @@
package distro
import (
"time"
"github.com/osbuild/images/pkg/blueprint"
"github.com/osbuild/images/pkg/customizations/subscription"
"github.com/osbuild/images/pkg/disk"
@ -120,7 +122,9 @@ type ImageType interface {
// specified in the given blueprint; it also returns any warnings (e.g.
// deprecation notices) generated by the manifest.
// The packageSpecSets must be labelled in the same way as the originating PackageSets.
Manifest(bp *blueprint.Blueprint, options ImageOptions, repos []rpmmd.RepoConfig, seed int64) (*manifest.Manifest, []string, error)
// A custom seed for the rng can be specified; if nil, the seed will
// be random.
Manifest(bp *blueprint.Blueprint, options ImageOptions, repos []rpmmd.RepoConfig, seed *int64) (*manifest.Manifest, []string, error)
}
// The ImageOptions specify options for a specific image build
@ -155,3 +159,10 @@ func ExportsFallback() []string {
func PayloadPackageSets() []string {
return []string{}
}
func SeedFrom(p *int64) int64 {
if p == nil {
return time.Now().UnixNano()
}
return *p
}
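A short sketch of the call-site impact of the signature change; the wrapper function is hypothetical, but the Manifest signature and SeedFrom fallback match the diff above.

```go
package example

import (
	"github.com/osbuild/images/pkg/blueprint"
	"github.com/osbuild/images/pkg/distro"
	"github.com/osbuild/images/pkg/manifest"
	"github.com/osbuild/images/pkg/rpmmd"
)

// buildManifest shows the new *int64 seed parameter. Passing nil asks the
// implementation for a random seed (via distro.SeedFrom, which falls back to
// time.Now().UnixNano()); passing a pointer makes manifest generation
// reproducible.
func buildManifest(t distro.ImageType, bp *blueprint.Blueprint, opts distro.ImageOptions, repos []rpmmd.RepoConfig, reproducible bool) (*manifest.Manifest, []string, error) {
	var seed *int64
	if reproducible {
		fixed := int64(0)
		seed = &fixed
	}
	return t.Manifest(bp, opts, repos, seed)
}
```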

View file

@ -100,7 +100,7 @@ var (
// We don't know the variant of the OS pipeline being installed
isoLabel: getISOLabelFunc("Unknown"),
buildPipelines: []string{"build"},
payloadPipelines: []string{"anaconda-tree", "rootfs-image", "efiboot-tree", "os", "bootiso-tree", "bootiso"},
payloadPipelines: []string{"anaconda-tree", "efiboot-tree", "os", "bootiso-tree", "bootiso"},
exports: []string{"bootiso"},
requiredPartitionSizes: requiredDirectorySizes,
}
@ -119,7 +119,7 @@ var (
image: liveInstallerImage,
isoLabel: getISOLabelFunc("Workstation"),
buildPipelines: []string{"build"},
payloadPipelines: []string{"anaconda-tree", "rootfs-image", "efiboot-tree", "bootiso-tree", "bootiso"},
payloadPipelines: []string{"anaconda-tree", "efiboot-tree", "bootiso-tree", "bootiso"},
exports: []string{"bootiso"},
requiredPartitionSizes: requiredDirectorySizes,
}
@ -200,7 +200,7 @@ var (
image: iotInstallerImage,
isoLabel: getISOLabelFunc("IoT"),
buildPipelines: []string{"build"},
payloadPipelines: []string{"anaconda-tree", "rootfs-image", "efiboot-tree", "bootiso-tree", "bootiso"},
payloadPipelines: []string{"anaconda-tree", "efiboot-tree", "bootiso-tree", "bootiso"},
exports: []string{"bootiso"},
requiredPartitionSizes: requiredDirectorySizes,
}

View file

@ -403,6 +403,10 @@ func liveInstallerImage(workload workload.Workload,
img.Filename = t.Filename()
if common.VersionGreaterThanOrEqual(img.OSVersion, VERSION_ROOTFS_SQUASHFS) {
img.RootfsType = manifest.SquashfsRootfs
}
return img, nil
}
@ -456,8 +460,6 @@ func imageInstallerImage(workload workload.Workload,
img.ExtraBasePackages = packageSets[installerPkgsKey]
img.SquashfsCompression = "lz4"
d := t.arch.distro
img.Product = d.product
@ -477,6 +479,11 @@ func imageInstallerImage(workload workload.Workload,
img.Filename = t.Filename()
img.SquashfsCompression = "lz4"
if common.VersionGreaterThanOrEqual(img.OSVersion, VERSION_ROOTFS_SQUASHFS) {
img.RootfsType = manifest.SquashfsRootfs
}
return img, nil
}
@ -660,8 +667,6 @@ func iotInstallerImage(workload workload.Workload,
anaconda.ModuleUsers,
}...)
img.SquashfsCompression = "lz4"
img.Product = d.product
img.Variant = "IoT"
img.OSVersion = d.osVersion
@ -675,6 +680,11 @@ func iotInstallerImage(workload workload.Workload,
img.Filename = t.Filename()
img.SquashfsCompression = "lz4"
if common.VersionGreaterThanOrEqual(img.OSVersion, VERSION_ROOTFS_SQUASHFS) {
img.RootfsType = manifest.SquashfsRootfs
}
return img, nil
}

View file

@ -207,7 +207,8 @@ func (t *imageType) PartitionType() disk.PartitionTableType {
func (t *imageType) Manifest(bp *blueprint.Blueprint,
options distro.ImageOptions,
repos []rpmmd.RepoConfig,
seed int64) (*manifest.Manifest, []string, error) {
seedp *int64) (*manifest.Manifest, []string, error) {
seed := distro.SeedFrom(seedp)
warnings, err := t.checkOptions(bp, options)
if err != nil {

View file

@ -112,7 +112,7 @@ var defaultBasePartitionTables = distro.BasePartitionTableMap{
Partitions: []disk.Partition{
{
Size: 4 * datasizes.MebiByte,
Type: "41",
Type: disk.PRepPartitionDOSID,
Bootable: true,
},
{
@ -224,7 +224,7 @@ var minimalrawPartitionTables = distro.BasePartitionTableMap{
Partitions: []disk.Partition{
{
Size: 200 * datasizes.MebiByte,
Type: disk.DosFat16B,
Type: disk.FAT16BDOSID,
Bootable: true,
Payload: &disk.Filesystem{
Type: "vfat",
@ -238,7 +238,7 @@ var minimalrawPartitionTables = distro.BasePartitionTableMap{
},
{
Size: 1 * datasizes.GibiByte,
Type: disk.DosLinuxTypeID,
Type: disk.FilesystemLinuxDOSID,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
@ -250,7 +250,7 @@ var minimalrawPartitionTables = distro.BasePartitionTableMap{
},
{
Size: 2 * datasizes.GibiByte,
Type: disk.DosLinuxTypeID,
Type: disk.FilesystemLinuxDOSID,
Payload: &disk.Filesystem{
Type: "ext4",
Label: "root",
@ -319,7 +319,7 @@ var iotBasePartitionTables = distro.BasePartitionTableMap{
Partitions: []disk.Partition{
{
Size: 501 * datasizes.MebiByte,
Type: disk.DosFat16B,
Type: disk.FAT16BDOSID,
Bootable: true,
Payload: &disk.Filesystem{
Type: "vfat",
@ -333,7 +333,7 @@ var iotBasePartitionTables = distro.BasePartitionTableMap{
},
{
Size: 1 * datasizes.GibiByte,
Type: disk.DosLinuxTypeID,
Type: disk.FilesystemLinuxDOSID,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
@ -345,7 +345,7 @@ var iotBasePartitionTables = distro.BasePartitionTableMap{
},
{
Size: 2569 * datasizes.MebiByte,
Type: disk.DosLinuxTypeID,
Type: disk.FilesystemLinuxDOSID,
Payload: &disk.Filesystem{
Type: "ext4",
Label: "root",

View file

@ -2,3 +2,7 @@ package fedora
const VERSION_BRANCHED = "42"
const VERSION_RAWHIDE = "42"
// Fedora version 41 and later use a plain squashfs rootfs on the iso instead of
// compressing an ext4 filesystem.
const VERSION_ROOTFS_SQUASHFS = "41"
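This constant is what the Fedora installer constructors above compare against: from Fedora 41 on, the ISO carries a plain squashfs rootfs instead of a squashfs-compressed ext4 image. A minimal sketch of that gate, using only names that appear in this diff:

    // Select the rootfs style for the installer ISO based on the release.
    if common.VersionGreaterThanOrEqual(img.OSVersion, VERSION_ROOTFS_SQUASHFS) {
        img.RootfsType = manifest.SquashfsRootfs // plain squashfs on Fedora 41+
    }
    // Older releases keep the zero value, manifest.SquashfsExt4Rootfs.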

View file

@ -496,6 +496,9 @@ func EdgeInstallerImage(workload workload.Workload,
img.Kickstart.Timezone, _ = customizations.GetTimezoneSettings()
img.SquashfsCompression = "xz"
if t.Arch().Distro().Releasever() == "10" {
img.RootfsType = manifest.SquashfsRootfs
}
installerConfig, err := t.getDefaultInstallerConfig()
if err != nil {
@ -714,6 +717,9 @@ func ImageInstallerImage(workload workload.Workload,
img.AdditionalAnacondaModules = append(img.AdditionalAnacondaModules, anaconda.ModuleUsers)
img.SquashfsCompression = "xz"
if t.Arch().Distro().Releasever() == "10" {
img.RootfsType = manifest.SquashfsRootfs
}
// put the kickstart file in the root of the iso
img.ISORootKickstart = true

View file

@ -258,7 +258,8 @@ func (t *ImageType) PartitionType() disk.PartitionTableType {
func (t *ImageType) Manifest(bp *blueprint.Blueprint,
options distro.ImageOptions,
repos []rpmmd.RepoConfig,
seed int64) (*manifest.Manifest, []string, error) {
seedp *int64) (*manifest.Manifest, []string, error) {
seed := distro.SeedFrom(seedp)
if t.Workload != nil {
// For now, if an image type defines its own workload, don't allow any

View file

@ -40,7 +40,7 @@ func mkImageInstallerImgType() *rhel.ImageType {
},
rhel.ImageInstallerImage,
[]string{"build"},
[]string{"anaconda-tree", "rootfs-image", "efiboot-tree", "os", "bootiso-tree", "bootiso"},
[]string{"anaconda-tree", "efiboot-tree", "os", "bootiso-tree", "bootiso"},
[]string{"bootiso"},
)

View file

@ -90,7 +90,7 @@ func defaultBasePartitionTables(t *rhel.ImageType) (disk.PartitionTable, bool) {
Partitions: []disk.Partition{
{
Size: 4 * datasizes.MebiByte,
Type: "41",
Type: disk.PRepPartitionDOSID,
Bootable: true,
},
{

View file

@ -91,7 +91,7 @@ func defaultBasePartitionTables(t *rhel.ImageType) (disk.PartitionTable, bool) {
Partitions: []disk.Partition{
{
Size: 4 * datasizes.MebiByte,
Type: "41",
Type: disk.PRepPartitionDOSID,
Bootable: true,
},
{

View file

@ -173,7 +173,7 @@ func defaultBasePartitionTables(t *rhel.ImageType) (disk.PartitionTable, bool) {
Partitions: []disk.Partition{
{
Size: 4 * datasizes.MebiByte,
Type: "41",
Type: disk.PRepPartitionDOSID,
Bootable: true,
},
{

View file

@ -232,7 +232,7 @@ func (t *TestImageType) Exports() []string {
return distro.ExportsFallback()
}
func (t *TestImageType) Manifest(b *blueprint.Blueprint, options distro.ImageOptions, repos []rpmmd.RepoConfig, seed int64) (*manifest.Manifest, []string, error) {
func (t *TestImageType) Manifest(b *blueprint.Blueprint, options distro.ImageOptions, repos []rpmmd.RepoConfig, seedp *int64) (*manifest.Manifest, []string, error) {
var bpPkgs []string
if b != nil {
mountpoints := b.Customizations.GetFilesystems()

View file

@ -23,6 +23,7 @@ type AnacondaContainerInstaller struct {
ExtraBasePackages rpmmd.PackageSet
SquashfsCompression string
RootfsType manifest.RootfsType
ISOLabel string
Product string
@ -98,8 +99,13 @@ func (img *AnacondaContainerInstaller) InstantiateManifest(m *manifest.Manifest,
}
anacondaPipeline.AdditionalDrivers = img.AdditionalDrivers
rootfsImagePipeline := manifest.NewISORootfsImg(buildPipeline, anacondaPipeline)
rootfsImagePipeline.Size = 4 * datasizes.GibiByte
var rootfsImagePipeline *manifest.ISORootfsImg
switch img.RootfsType {
case manifest.SquashfsExt4Rootfs:
rootfsImagePipeline = manifest.NewISORootfsImg(buildPipeline, anacondaPipeline)
rootfsImagePipeline.Size = 4 * datasizes.GibiByte
default:
}
bootTreePipeline := manifest.NewEFIBootTree(buildPipeline, img.Product, img.OSVersion)
bootTreePipeline.Platform = img.Platform

View file

@ -23,6 +23,9 @@ type AnacondaLiveInstaller struct {
ExtraBasePackages rpmmd.PackageSet
SquashfsCompression string
RootfsType manifest.RootfsType
ISOLabel string
Product string
Variant string
@ -70,8 +73,13 @@ func (img *AnacondaLiveInstaller) InstantiateManifest(m *manifest.Manifest,
livePipeline.Checkpoint()
rootfsImagePipeline := manifest.NewISORootfsImg(buildPipeline, livePipeline)
rootfsImagePipeline.Size = 8 * datasizes.GibiByte
var rootfsImagePipeline *manifest.ISORootfsImg
switch img.RootfsType {
case manifest.SquashfsExt4Rootfs:
rootfsImagePipeline = manifest.NewISORootfsImg(buildPipeline, livePipeline)
rootfsImagePipeline.Size = 8 * datasizes.GibiByte
default:
}
bootTreePipeline := manifest.NewEFIBootTree(buildPipeline, img.Product, img.OSVersion)
bootTreePipeline.Platform = img.Platform
@ -99,6 +107,8 @@ func (img *AnacondaLiveInstaller) InstantiateManifest(m *manifest.Manifest,
isoTreePipeline.KernelOpts = kernelOpts
isoTreePipeline.ISOLinux = isoLinuxEnabled
isoTreePipeline.SquashfsCompression = img.SquashfsCompression
isoPipeline := manifest.NewISO(buildPipeline, isoTreePipeline, img.ISOLabel)
isoPipeline.SetFilename(img.Filename)
isoPipeline.ISOLinux = isoLinuxEnabled

View file

@ -29,6 +29,7 @@ type AnacondaOSTreeInstaller struct {
Subscription *subscription.ImageOptions
SquashfsCompression string
RootfsType manifest.RootfsType
ISOLabel string
Product string
@ -101,8 +102,13 @@ func (img *AnacondaOSTreeInstaller) InstantiateManifest(m *manifest.Manifest,
anacondaPipeline.DisabledAnacondaModules = img.DisabledAnacondaModules
anacondaPipeline.AdditionalDrivers = img.AdditionalDrivers
rootfsImagePipeline := manifest.NewISORootfsImg(buildPipeline, anacondaPipeline)
rootfsImagePipeline.Size = 4 * datasizes.GibiByte
var rootfsImagePipeline *manifest.ISORootfsImg
switch img.RootfsType {
case manifest.SquashfsExt4Rootfs:
rootfsImagePipeline = manifest.NewISORootfsImg(buildPipeline, anacondaPipeline)
rootfsImagePipeline.Size = 4 * datasizes.GibiByte
default:
}
bootTreePipeline := manifest.NewEFIBootTree(buildPipeline, img.Product, img.OSVersion)
bootTreePipeline.Platform = img.Platform

View file

@ -57,6 +57,7 @@ type AnacondaTarInstaller struct {
Kickstart *kickstart.Options
SquashfsCompression string
RootfsType manifest.RootfsType
ISOLabel string
Product string
@ -153,8 +154,13 @@ func (img *AnacondaTarInstaller) InstantiateManifest(m *manifest.Manifest,
anacondaPipeline.Checkpoint()
rootfsImagePipeline := manifest.NewISORootfsImg(buildPipeline, anacondaPipeline)
rootfsImagePipeline.Size = 5 * datasizes.GibiByte
var rootfsImagePipeline *manifest.ISORootfsImg
switch img.RootfsType {
case manifest.SquashfsExt4Rootfs:
rootfsImagePipeline = manifest.NewISORootfsImg(buildPipeline, anacondaPipeline)
rootfsImagePipeline.Size = 5 * datasizes.GibiByte
default:
}
bootTreePipeline := manifest.NewEFIBootTree(buildPipeline, img.Product, img.OSVersion)
bootTreePipeline.Platform = img.Platform

View file

@ -17,6 +17,14 @@ import (
"github.com/osbuild/images/pkg/rpmmd"
)
type RootfsType uint64
// These constants are used by the ISO images to control the style of the root filesystem
const ( // Rootfs type enum
SquashfsExt4Rootfs RootfsType = iota // Create an EXT4 rootfs compressed by Squashfs
SquashfsRootfs // Create a plain squashfs rootfs
)
// An AnacondaInstallerISOTree represents a tree containing the anaconda installer,
// configuration in terms of a kickstart file, as well as an embedded
// payload to be installed, this payload can either be an ostree
@ -30,7 +38,7 @@ type AnacondaInstallerISOTree struct {
PartitionTable *disk.PartitionTable
anacondaPipeline *AnacondaInstaller
rootfsPipeline *ISORootfsImg
rootfsPipeline *ISORootfsImg // May be nil for plain squashfs rootfs
bootTreePipeline *EFIBootTree
// The path where the payload (tarball, ostree repo, or container) will be stored.
@ -68,7 +76,7 @@ type AnacondaInstallerISOTree struct {
func NewAnacondaInstallerISOTree(buildPipeline Build, anacondaPipeline *AnacondaInstaller, rootfsPipeline *ISORootfsImg, bootTreePipeline *EFIBootTree) *AnacondaInstallerISOTree {
// the three pipelines should all belong to the same manifest
if anacondaPipeline.Manifest() != rootfsPipeline.Manifest() ||
if (rootfsPipeline != nil && anacondaPipeline.Manifest() != rootfsPipeline.Manifest()) ||
anacondaPipeline.Manifest() != bootTreePipeline.Manifest() {
panic("pipelines from different manifests")
}
@ -278,7 +286,14 @@ func (p *AnacondaInstallerISOTree) serialize() osbuild.Pipeline {
}
}
squashfsStage := osbuild.NewSquashfsStage(&squashfsOptions, p.rootfsPipeline.Name())
// The iso's rootfs can either be an ext4 filesystem compressed with squashfs, or
// a squashfs of the plain directory tree
var squashfsStage *osbuild.Stage
if p.rootfsPipeline != nil {
squashfsStage = osbuild.NewSquashfsStage(&squashfsOptions, p.rootfsPipeline.Name())
} else {
squashfsStage = osbuild.NewSquashfsStage(&squashfsOptions, p.anacondaPipeline.Name())
}
pipeline.AddStage(squashfsStage)
if p.ISOLinux {
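With the rootfs pipeline now optional, image definitions choose between the two styles at construction time: pass nil and the anaconda tree is squashed directly, or build an ISORootfsImg and get the old ext4-inside-squashfs layout. A hedged construction sketch reusing only names visible in this hunk (buildPipeline, anacondaPipeline and bootTreePipeline are assumed to exist in the caller):

    // Plain squashfs rootfs: no intermediate ext4 image pipeline.
    isoTree := manifest.NewAnacondaInstallerISOTree(buildPipeline, anacondaPipeline, nil, bootTreePipeline)

    // Legacy style: an ext4 rootfs image that the squashfs stage then compresses.
    rootfs := manifest.NewISORootfsImg(buildPipeline, anacondaPipeline)
    rootfs.Size = 4 * datasizes.GibiByte
    isoTree = manifest.NewAnacondaInstallerISOTree(buildPipeline, anacondaPipeline, rootfs, bootTreePipeline)
    _ = isoTree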

View file

@ -1,5 +1,11 @@
package osbuild
import (
"fmt"
"github.com/osbuild/images/pkg/disk"
)
// Partition a target using sfdisk(8)
type SfdiskStageOptions struct {
@ -36,7 +42,18 @@ type SfdiskPartition struct {
UUID string `json:"uuid,omitempty"`
}
func (o SfdiskStageOptions) validate() error {
if o.Label == disk.PT_DOS.String() && len(o.Partitions) > 4 {
return fmt.Errorf("sfdisk stage creation failed: \"dos\" partition table only supports up to 4 partitions: got %d", len(o.Partitions))
}
return nil
}
func NewSfdiskStage(options *SfdiskStageOptions, device *Device) *Stage {
if err := options.validate(); err != nil {
panic(err)
}
return &Stage{
Type: "org.osbuild.sfdisk",
Options: options,
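The new validate hook turns an over-full "dos" partition table into a panic at stage-construction time rather than a failure deep inside osbuild. A small hedged sketch of the failure mode; the field names come from validate above, while loopDevice is a placeholder for whatever *Device the caller already has:

    opts := &osbuild.SfdiskStageOptions{
        Label:      disk.PT_DOS.String(),
        Partitions: make([]osbuild.SfdiskPartition, 5), // one more than "dos" allows
    }
    // NewSfdiskStage now panics:
    //   sfdisk stage creation failed: "dos" partition table only supports up to 4 partitions: got 5
    stage := osbuild.NewSfdiskStage(opts, loopDevice)
    _ = stage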

View file

@ -28,7 +28,7 @@ import (
// AwsSecurityCredentials models AWS security credentials.
type AwsSecurityCredentials struct {
// AccessKeyId is the AWS Access Key ID - Required.
// AccessKeyID is the AWS Access Key ID - Required.
AccessKeyID string `json:"AccessKeyID"`
// SecretAccessKey is the AWS Secret Access Key - Required.
SecretAccessKey string `json:"SecretAccessKey"`

View file

@ -329,7 +329,7 @@ type SubjectTokenSupplier interface {
type AwsSecurityCredentialsSupplier interface {
// AwsRegion should return the AWS region or an error.
AwsRegion(ctx context.Context, options SupplierOptions) (string, error)
// GetAwsSecurityCredentials should return a valid set of AwsSecurityCredentials or an error.
// AwsSecurityCredentials should return a valid set of AwsSecurityCredentials or an error.
// The external account token source does not cache the returned security credentials, so caching
// logic should be implemented in the supplier to prevent multiple requests for the same security credentials.
AwsSecurityCredentials(ctx context.Context, options SupplierOptions) (*AwsSecurityCredentials, error)
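The contract these doc-comment fixes describe (no caching by the token source, so suppliers should cache themselves) is easiest to see in a tiny implementation. A hedged sketch, assuming these types live in golang.org/x/oauth2/google/externalaccount as the vendored paths suggest:

    type staticAWSSupplier struct {
        region string
        creds  externalaccount.AwsSecurityCredentials
    }

    func (s staticAWSSupplier) AwsRegion(ctx context.Context, opts externalaccount.SupplierOptions) (string, error) {
        return s.region, nil
    }

    // AwsSecurityCredentials hands back the same static credentials on every call;
    // real suppliers should cache and refresh, since the token source will not.
    func (s staticAWSSupplier) AwsSecurityCredentials(ctx context.Context, opts externalaccount.SupplierOptions) (*externalaccount.AwsSecurityCredentials, error) {
        return &s.creds, nil
    }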

View file

@ -56,7 +56,7 @@ type Config struct {
// the OAuth flow, after the resource owner's URLs.
RedirectURL string
// Scope specifies optional requested permissions.
// Scopes specifies optional requested permissions.
Scopes []string
// authStyleCache caches which auth style to use when Endpoint.AuthStyle is

View file

@ -246,6 +246,18 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
return sendfile(outfd, infd, offset, count)
}
func Dup3(oldfd, newfd, flags int) error {
if oldfd == newfd || flags&^O_CLOEXEC != 0 {
return EINVAL
}
how := F_DUP2FD
if flags&O_CLOEXEC != 0 {
how = F_DUP2FD_CLOEXEC
}
_, err := fcntl(oldfd, how, newfd)
return err
}
/*
* Exposed directly
*/
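This hunk backfills Dup3 for the Solaris/illumos port by mapping it onto fcntl's F_DUP2FD/F_DUP2FD_CLOEXEC. Usage is the same as on Linux; a small self-contained example (the log path is arbitrary):

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    func main() {
        f, err := os.Create("/tmp/dup3-demo.log")
        if err != nil {
            panic(err)
        }
        // Duplicate the file's descriptor onto stderr (fd 2) and mark the new
        // descriptor close-on-exec in the same call.
        if err := unix.Dup3(int(f.Fd()), 2, unix.O_CLOEXEC); err != nil {
            panic(err)
        }
        fmt.Fprintln(os.Stderr, "this now lands in /tmp/dup3-demo.log")
    }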

View file

@ -43,8 +43,8 @@ type DLL struct {
// LoadDLL loads DLL file into memory.
//
// Warning: using LoadDLL without an absolute path name is subject to
// DLL preloading attacks. To safely load a system DLL, use LazyDLL
// with System set to true, or use LoadLibraryEx directly.
// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL],
// or use [LoadLibraryEx] directly.
func LoadDLL(name string) (dll *DLL, err error) {
namep, err := UTF16PtrFromString(name)
if err != nil {
@ -271,6 +271,9 @@ func (d *LazyDLL) NewProc(name string) *LazyProc {
}
// NewLazyDLL creates new LazyDLL associated with DLL file.
//
// Warning: using NewLazyDLL without an absolute path name is subject to
// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL].
func NewLazyDLL(name string) *LazyDLL {
return &LazyDLL{Name: name}
}
@ -410,7 +413,3 @@ func loadLibraryEx(name string, system bool) (*DLL, error) {
}
return &DLL{Name: name, Handle: h}, nil
}
type errString string
func (s errString) Error() string { return string(s) }
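The reworked doc comments point callers at NewLazySystemDLL for OS libraries, which restricts the search to the Windows system directory and so avoids DLL preloading attacks. A short example of the recommended pattern (GetTickCount64 is just a convenient kernel32 export to call):

    package main

    import (
        "fmt"

        "golang.org/x/sys/windows"
    )

    func main() {
        // Only the system directory is searched, so a rogue kernel32.dll in the
        // current directory cannot be picked up.
        kernel32 := windows.NewLazySystemDLL("kernel32.dll")
        getTickCount64 := kernel32.NewProc("GetTickCount64")

        ticks, _, _ := getTickCount64.Call()
        fmt.Printf("milliseconds since boot: %d\n", ticks)
    }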

14
vendor/modules.txt
View file

@ -58,7 +58,7 @@ dario.cat/mergo
github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-09-01/network
github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources
github.com/Azure/azure-sdk-for-go/version
# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0
# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0
## explicit; go 1.18
github.com/Azure/azure-sdk-for-go/sdk/azcore
github.com/Azure/azure-sdk-for-go/sdk/azcore/arm
@ -81,7 +81,7 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime
github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming
github.com/Azure/azure-sdk-for-go/sdk/azcore/to
github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing
# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0
# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0
## explicit; go 1.18
github.com/Azure/azure-sdk-for-go/sdk/azidentity
github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal
@ -103,7 +103,7 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources
# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0
## explicit; go 1.18
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage
# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1
# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0
## explicit; go 1.18
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob
@ -743,7 +743,7 @@ github.com/gobwas/glob/util/strings
# github.com/gogo/protobuf v1.3.2
## explicit; go 1.15
github.com/gogo/protobuf/proto
# github.com/golang-jwt/jwt/v4 v4.5.0
# github.com/golang-jwt/jwt/v4 v4.5.1
## explicit; go 1.16
github.com/golang-jwt/jwt/v4
# github.com/golang-jwt/jwt/v5 v5.2.1
@ -1021,7 +1021,7 @@ github.com/oracle/oci-go-sdk/v54/identity
github.com/oracle/oci-go-sdk/v54/objectstorage
github.com/oracle/oci-go-sdk/v54/objectstorage/transfer
github.com/oracle/oci-go-sdk/v54/workrequests
# github.com/osbuild/images v0.105.0
# github.com/osbuild/images v0.109.0
## explicit; go 1.21.0
github.com/osbuild/images/internal/common
github.com/osbuild/images/internal/environment
@ -1374,7 +1374,7 @@ golang.org/x/net/http2/hpack
golang.org/x/net/idna
golang.org/x/net/internal/timeseries
golang.org/x/net/trace
# golang.org/x/oauth2 v0.24.0
# golang.org/x/oauth2 v0.25.0
## explicit; go 1.18
golang.org/x/oauth2
golang.org/x/oauth2/authhandler
@ -1390,7 +1390,7 @@ golang.org/x/oauth2/jwt
## explicit; go 1.18
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
# golang.org/x/sys v0.28.0
# golang.org/x/sys v0.29.0
## explicit; go 1.18
golang.org/x/sys/cpu
golang.org/x/sys/execabs