deps: update images to v0.18.0
Update the images dependency to v0.18.0. This includes a change in the Fedora IoT remote configuration, which is now installed through an RPM instead of being hard-coded in the image definitions.
parent bb76ddb2b1
commit 5b19bd6809
161 changed files with 17972 additions and 6525 deletions
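For reference, a bump like this is normally produced with the standard Go module workflow; a sketch (the exact commands used for this commit are an assumption):

    go get github.com/osbuild/images@v0.18.0
    go mod tidy
    go mod vendor

The `go mod vendor` step is what regenerates the vendored sources, which is why most of the changed files below live under vendor/.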
go.mod (50 changed lines)
@@ -12,7 +12,7 @@ require (
 	github.com/Azure/go-autorest/autorest v0.11.29
 	github.com/Azure/go-autorest/autorest/azure/auth v0.5.12
 	github.com/BurntSushi/toml v1.3.2
-	github.com/aws/aws-sdk-go v1.47.9
+	github.com/aws/aws-sdk-go v1.48.1
 	github.com/coreos/go-semver v0.3.1
 	github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
 	github.com/deepmap/oapi-codegen v1.8.2

@@ -31,7 +31,7 @@ require (
 	github.com/labstack/gommon v0.4.1
 	github.com/openshift-online/ocm-sdk-go v0.1.385
 	github.com/oracle/oci-go-sdk/v54 v54.0.0
-	github.com/osbuild/images v0.15.0
+	github.com/osbuild/images v0.18.0
 	github.com/osbuild/pulp-client v0.1.0
 	github.com/prometheus/client_golang v1.17.0
 	github.com/segmentio/ksuid v1.0.4

@@ -40,18 +40,18 @@ require (
 	github.com/stretchr/testify v1.8.4
 	github.com/ubccr/kerby v0.0.0-20170626144437-201a958fc453
 	github.com/vmware/govmomi v0.33.1
-	golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63
+	golang.org/x/exp v0.0.0-20231006140011-7918f672742d
 	golang.org/x/oauth2 v0.14.0
 	golang.org/x/sync v0.5.0
 	golang.org/x/sys v0.14.0
-	google.golang.org/api v0.150.0
+	google.golang.org/api v0.151.0
 )

 require (
 	cloud.google.com/go v0.110.8 // indirect
 	cloud.google.com/go/compute/metadata v0.2.3 // indirect
 	cloud.google.com/go/iam v1.1.3 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.2 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
 	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
 	github.com/Azure/go-autorest/autorest/adal v0.9.22 // indirect

@@ -68,22 +68,23 @@ require (
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/containers/common v0.56.0 // indirect
-	github.com/containers/image/v5 v5.28.0 // indirect
+	github.com/containers/common v0.57.0 // indirect
+	github.com/containers/image/v5 v5.29.0 // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
-	github.com/containers/ocicrypt v1.1.8 // indirect
-	github.com/containers/storage v1.50.2 // indirect
-	github.com/cyberphone/json-canonicalization v0.0.0-20230710064741-aa7fe85c7dbd // indirect
+	github.com/containers/ocicrypt v1.1.9 // indirect
+	github.com/containers/storage v1.51.0 // indirect
+	github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/dimchansky/utfbom v1.1.1 // indirect
-	github.com/docker/distribution v2.8.2+incompatible // indirect
-	github.com/docker/docker v24.0.6+incompatible // indirect
+	github.com/distribution/reference v0.5.0 // indirect
+	github.com/docker/distribution v2.8.3+incompatible // indirect
+	github.com/docker/docker v24.0.7+incompatible // indirect
 	github.com/docker/docker-credential-helpers v0.8.0 // indirect
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/dougm/pretty v0.0.0-20171025230240-2ee9d7453c02 // indirect
 	github.com/ghodss/yaml v1.0.0 // indirect
-	github.com/go-jose/go-jose/v3 v3.0.0 // indirect
+	github.com/go-jose/go-jose/v3 v3.0.1 // indirect
 	github.com/go-openapi/analysis v0.21.4 // indirect
 	github.com/go-openapi/errors v0.20.4 // indirect
 	github.com/go-openapi/jsonpointer v0.19.6 // indirect

@@ -119,7 +120,7 @@ require (
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.16.7 // indirect
+	github.com/klauspost/compress v1.17.3 // indirect
 	github.com/klauspost/pgzip v1.2.6 // indirect
 	github.com/kr/text v0.2.0 // indirect
 	github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect

@@ -127,54 +128,53 @@ require (
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/mattn/go-runewidth v0.0.15 // indirect
-	github.com/mattn/go-sqlite3 v1.14.17 // indirect
+	github.com/mattn/go-sqlite3 v1.14.18 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/microcosm-cc/bluemonday v1.0.18 // indirect
 	github.com/miekg/pkcs11 v1.1.1 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
-	github.com/moby/sys/mountinfo v0.6.2 // indirect
+	github.com/moby/sys/mountinfo v0.7.1 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/oklog/ulid v1.3.1 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/opencontainers/image-spec v1.1.0-rc5 // indirect
-	github.com/opencontainers/runc v1.1.9 // indirect
+	github.com/opencontainers/runc v1.1.10 // indirect
 	github.com/opencontainers/runtime-spec v1.1.0 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/proglottis/gpgme v0.1.3 // indirect
-	github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect
+	github.com/prometheus/client_model v0.5.0 // indirect
 	github.com/prometheus/common v0.44.0 // indirect
 	github.com/prometheus/procfs v0.11.1 // indirect
 	github.com/rivo/uniseg v0.4.4 // indirect
 	github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect
-	github.com/sigstore/fulcio v1.4.0 // indirect
+	github.com/sigstore/fulcio v1.4.3 // indirect
 	github.com/sigstore/rekor v1.2.2 // indirect
-	github.com/sigstore/sigstore v1.7.3 // indirect
+	github.com/sigstore/sigstore v1.7.5 // indirect
 	github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
-	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
 	github.com/theupdateframework/go-tuf v0.5.2 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
 	github.com/ulikunitz/xz v0.5.11 // indirect
 	github.com/valyala/bytebufferpool v1.0.0 // indirect
 	github.com/valyala/fasttemplate v1.2.2 // indirect
 	github.com/vbatts/tar-split v0.11.5 // indirect
-	github.com/vbauerster/mpb/v8 v8.6.1 // indirect
+	github.com/vbauerster/mpb/v8 v8.6.2 // indirect
 	go.mongodb.org/mongo-driver v1.11.3 // indirect
 	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
 	go.opencensus.io v0.24.0 // indirect
 	golang.org/x/crypto v0.15.0 // indirect
-	golang.org/x/mod v0.12.0 // indirect
+	golang.org/x/mod v0.13.0 // indirect
 	golang.org/x/net v0.18.0 // indirect
 	golang.org/x/term v0.14.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
-	golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect
+	golang.org/x/tools v0.14.0 // indirect
 	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
-	google.golang.org/appengine v1.6.7 // indirect
+	google.golang.org/appengine v1.6.8 // indirect
 	google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 // indirect
go.sum (123 changed lines)
@@ -12,14 +12,15 @@ cloud.google.com/go/storage v1.35.1/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYE
 dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
 github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
 github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 h1:8q4SaHjFsClSvuVne0ID/5Ka8u3fcIHyqkLjcFpNRHQ=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.2 h1:t5+QXLCK9SVi0PPdaY0PrFvYUo24KwA0QwxnaHRSVd4=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.2/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1 h1:LNHhpdK7hzUcx/k1LIcuh5k7k1LGIWLQfCjaneSj7Fc=
 github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY=
 github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0 h1:Ma67P/GGprNwsslzEH6+Kb8nybI8jpDTm4Wmzu2ReK8=
 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 h1:gggzg0SUMs6SQbEw+3LoSsYf9YMjkupeAnHMX8O9mmY=
 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgxoBDMqMO/Nvy7bZ9a0nbU3I1DtFQK3YvB4=
 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
 github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
 github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
@@ -46,7 +47,7 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z
 github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
 github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw72xHJc34BNNykqSOeEJDAWkhf0u12/Jk=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
 github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
@@ -60,8 +61,8 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat6
 github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/aws/aws-sdk-go v1.47.9 h1:rarTsos0mA16q+huicGx0e560aYRtOucV5z2Mw23JRY=
-github.com/aws/aws-sdk-go v1.47.9/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go v1.48.1 h1:OXPUVL4cLdsDsqkVIuhwY+D389tjI7e1xu0lsDYyeMk=
+github.com/aws/aws-sdk-go v1.48.1/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
 github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
 github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -75,16 +76,16 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
 github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
-github.com/containers/common v0.56.0 h1:hysHUsEai1EkMXanU26UV55wMXns/a6AYmaFqJ4fEMY=
-github.com/containers/common v0.56.0/go.mod h1:IjaDdfUtcs2CfCcJMZxuut4XlvkTkY9Nlqkso9xCOq4=
-github.com/containers/image/v5 v5.28.0 h1:H4cWbdI88UA/mDb6SxMo3IxpmS1BSs/Kifvhwt9g048=
-github.com/containers/image/v5 v5.28.0/go.mod h1:9aPnNkwHNHgGl9VlQxXEshvmOJRbdRAc1rNDD6sP2eU=
+github.com/containers/common v0.57.0 h1:5O/+6QUBafKK0/zeok9y1rLPukfWgdE0sT4nuzmyAqk=
+github.com/containers/common v0.57.0/go.mod h1:t/Z+/sFrapvFMEJe3YnecN49/Tae2wYEQShbEN6SRaU=
+github.com/containers/image/v5 v5.29.0 h1:9+nhS/ZM7c4Kuzu5tJ0NMpxrgoryOJ2HAYTgG8Ny7j4=
+github.com/containers/image/v5 v5.29.0/go.mod h1:kQ7qcDsps424ZAz24thD+x7+dJw1vgur3A9tTDsj97E=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
-github.com/containers/ocicrypt v1.1.8 h1:saSBF0/8DyPUjzcxMVzL2OBUWCkvRvqIm75pu0ADSZk=
-github.com/containers/ocicrypt v1.1.8/go.mod h1:jM362hyBtbwLMWzXQZTlkjKGAQf/BN/LFMtH0FIRt34=
-github.com/containers/storage v1.50.2 h1:Fys4BjFUVNRBEXlO70hFI48VW4EXsgnGisTpk9tTMsE=
-github.com/containers/storage v1.50.2/go.mod h1:dpspZsUrcKD8SpTofvKWhwPDHD0MkO4Q7VE+oYdWkiA=
+github.com/containers/ocicrypt v1.1.9 h1:2Csfba4jse85Raxk5HIyEk8OwZNjRvfkhEGijOjIdEM=
+github.com/containers/ocicrypt v1.1.9/go.mod h1:dTKx1918d8TDkxXvarscpNVY+lyPakPNFN4jwA9GBys=
+github.com/containers/storage v1.51.0 h1:AowbcpiWXzAjHosKz7MKvPEqpyX+ryZA/ZurytRrFNA=
+github.com/containers/storage v1.51.0/go.mod h1:ybl8a3j1PPtpyaEi/5A6TOFs+5TrEyObeKJzVtkUlfc=
 github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
 github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
@@ -94,8 +95,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
-github.com/cyberphone/json-canonicalization v0.0.0-20230710064741-aa7fe85c7dbd h1:0av0vtcjA8Hqv5gyWj79CLCFVwOOyBNWPjrfUWceMNg=
-github.com/cyberphone/json-canonicalization v0.0.0-20230710064741-aa7fe85c7dbd/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
+github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 h1:2Dx4IHfC1yHWI12AxQDJM1QbRCDfk6M+blLzlZCXdrc=
+github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -104,15 +105,19 @@ github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRk
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
 github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
+github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
+github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
 github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
-github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
-github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v24.0.6+incompatible h1:hceabKCtUgDqPu+qm0NgsaXf28Ljf4/pWFL7xjWWDgE=
-github.com/docker/docker v24.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg=
+github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
+github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM=
+github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8=
 github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/dougm/pretty v0.0.0-20171025230240-2ee9d7453c02 h1:tR3jsKPiO/mb6ntzk/dJlHZtm37CPfVp1C9KIo534+4=
@@ -131,11 +136,11 @@ github.com/getkin/kin-openapi v0.93.0/go.mod h1:LWZfzOd7PRy8GJ1dJ6mCU6tNdSfOwRac
 github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs=
-github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo=
-github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
+github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA=
+github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
 github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
 github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=
 github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc=
 github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo=
@@ -211,6 +216,7 @@ github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw
 github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
 github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
 github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
 github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
@@ -219,7 +225,6 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
 github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@@ -229,6 +234,7 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
 github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
 github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -351,8 +357,8 @@ github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaR
 github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
-github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA=
+github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
 github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
 github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b h1:iNjcivnc6lhbvJA3LD622NPrUponluJrBWPIwGG/3Bg=
@@ -406,8 +412,8 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
 github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
 github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
-github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM=
-github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/mattn/go-sqlite3 v1.14.18 h1:JL0eqdCOq6DJVNPSvArO/bIV9/P7fbGrV00LZHc+5aI=
+github.com/mattn/go-sqlite3 v1.14.18/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
 github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
 github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
 github.com/microcosm-cc/bluemonday v1.0.18 h1:6HcxvXDAi3ARt3slx6nTesbvorIc3QeTzBNRvWktHBo=
@@ -420,35 +426,38 @@ github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR
 github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
-github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
+github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g=
+github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
 github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
 github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
 github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
 github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/onsi/ginkgo/v2 v2.12.0 h1:UIVDowFPwpg6yMUpPjGkYvf06K3RAiJXUhCxEwQVHRI=
-github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
+github.com/onsi/ginkgo/v2 v2.13.1 h1:LNGfMbR2OVGBfXjvRZIZ2YCTQdGKtPLvuI1rMCCj3OU=
+github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI=
 github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
-github.com/opencontainers/runc v1.1.9 h1:XR0VIHTGce5eWPkaPesqTBrhW2yAcaraWfsEalNwQLM=
-github.com/opencontainers/runc v1.1.9/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50=
+github.com/opencontainers/runc v1.1.10 h1:EaL5WeO9lv9wmS6SASjszOeQdSctvpbu0DdBQBizE40=
+github.com/opencontainers/runc v1.1.10/go.mod h1:+/R6+KmDlh+hOO8NkjmgkG9Qzvypzk0yXxAPYYR65+M=
 github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg=
 github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/openshift-online/ocm-sdk-go v0.1.385 h1:EZs7CDfxtJEwywCERdNX6rApyFaJ+kB1W6nk3jROPwM=
 github.com/openshift-online/ocm-sdk-go v0.1.385/go.mod h1:/+VFIw1iW2H0jEkFH4GnbL/liWareyzsL0w7mDIudB4=
 github.com/oracle/oci-go-sdk/v54 v54.0.0 h1:CDLjeSejv2aDpElAJrhKpi6zvT/zhZCZuXchUUZ+LS4=
 github.com/oracle/oci-go-sdk/v54 v54.0.0/go.mod h1:+t+yvcFGVp+3ZnztnyxqXfQDsMlq8U25faBLa+mqCMc=
-github.com/osbuild/images v0.15.0 h1:9Fuaet/gWQ5WoX/nE2ooTcxgyWnlwTwk+w4+9JVjqRs=
-github.com/osbuild/images v0.15.0/go.mod h1:N+XvrV/SoMeN7i2H499tOZMK6HV6JNzlA/wH3+DF/9g=
+github.com/osbuild/images v0.18.0 h1:I/tOO7DCECciJptrXVq+oykJI5dP1rwkzJqmf2rKuqw=
+github.com/osbuild/images v0.18.0/go.mod h1:Zr+AkaX/Rpxyff6Zxh8kkwGKFtJsSukGo1Vv/j9HsxA=
 github.com/osbuild/pulp-client v0.1.0 h1:L0C4ezBJGTamN3BKdv+rKLuq/WxXJbsFwz/Hj7aEmJ8=
 github.com/osbuild/pulp-client v0.1.0/go.mod h1:rd/MLdfwwO2cQI1s056h8z32zAi3Bo90XhlAAryIvWc=
 github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
 github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
 github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -462,8 +471,8 @@ github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7G
 github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
 github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM=
-github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
 github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
 github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
 github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
@@ -474,7 +483,7 @@ github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc
 github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
 github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
 github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
 github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
@@ -487,12 +496,12 @@ github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O
 github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
 github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
 github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
-github.com/sigstore/fulcio v1.4.0 h1:05+k8BFvwTQzfCkVxESWzCN4b70KIRliGYz0Upmdrs8=
-github.com/sigstore/fulcio v1.4.0/go.mod h1:wcjlktbhoy6+ZTxO3yXpvqUxsLV+JEH4FF3a5Jz4VPI=
+github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ=
+github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og=
 github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY=
 github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg=
-github.com/sigstore/sigstore v1.7.3 h1:HVVTfrMezJeLyl2xhJ8edzkrEGBa4KxjQZB4FlQ4JLU=
-github.com/sigstore/sigstore v1.7.3/go.mod h1:cl0c7Dtg3MM3c13L8pqqrfrmBa0eM3POcdtBepjylmw=
+github.com/sigstore/sigstore v1.7.5 h1:ij55dBhLwjICmLTBJZm7SqoQLdsu/oowDanACcJNs48=
+github.com/sigstore/sigstore v1.7.5/go.mod h1:9OCmYWhzuq/G4e1cy9m297tuMRJ1LExyrXY3ZC3Zt/s=
 github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@@ -527,8 +536,6 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
-github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/theupdateframework/go-tuf v0.5.2 h1:habfDzTmpbzBLIFGWa2ZpVhYvFBoK0C1onC3a4zuPRA=
 github.com/theupdateframework/go-tuf v0.5.2/go.mod h1:SyMV5kg5n4uEclsyxXJZI2UxPFJNDc4Y+r7wv+MlvTA=
 github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
 github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
 github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
@@ -545,8 +552,8 @@ github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQ
 github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
 github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
 github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
-github.com/vbauerster/mpb/v8 v8.6.1 h1:XbBpIbJxJOO9yMcKPpI4oEFPW6tLAptefNQJNcGWri8=
-github.com/vbauerster/mpb/v8 v8.6.1/go.mod h1:S0tuIjikxlLxCeNijNhwAuD/BB3UE/d2nygG8SOldk0=
+github.com/vbauerster/mpb/v8 v8.6.2 h1:9EhnJGQRtvgDVCychJgR96EDCOqgg2NsMuk5JUcX4DA=
+github.com/vbauerster/mpb/v8 v8.6.2/go.mod h1:oVJ7T+dib99kZ/VBjoBaC8aPXiSAihnzuKmotuihyFo=
 github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
 github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
 github.com/vmware/govmomi v0.33.1 h1:qS2VpEBd/WLbzLO5McI6h5o5zaKsrezUxRY5r9jkW8A=
@@ -606,8 +613,8 @@ golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58
 golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA=
 golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ=
-golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8=
+golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
+golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@@ -615,14 +622,13 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl
 golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
 golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
-golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
+golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
@@ -692,6 +698,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
 golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
@@ -717,8 +724,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
 golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E=
-golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
+golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
+golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
 golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -727,12 +734,12 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
 golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-google.golang.org/api v0.150.0 h1:Z9k22qD289SZ8gCJrk4DrWXkNjtfvKAUo/l1ma8eBYE=
-google.golang.org/api v0.150.0/go.mod h1:ccy+MJ6nrYFgE3WgRx/AMXOxOmU8Q4hSa+jjibzhxcg=
+google.golang.org/api v0.151.0 h1:FhfXLO/NFdJIzQtCqjpysWwqKk8AzGWBUhMIx67cVDU=
+google.golang.org/api v0.151.0/go.mod h1:ccy+MJ6nrYFgE3WgRx/AMXOxOmU8Q4hSa+jjibzhxcg=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
+google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md (generated, vendored; 12 changed lines)

@@ -1,5 +1,17 @@
 # Release History

+## 1.7.2 (2023-09-06)
+
+### Bugs Fixed
+
+* Fix default HTTP transport to work in WASM modules.
+
+## 1.7.1 (2023-08-14)
+
+## Bugs Fixed
+
+* Enable TLS renegotiation in the default transport policy.
+
 ## 1.7.0 (2023-07-12)

 ### Features Added
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go (generated, vendored; 2 changed lines)

@@ -32,5 +32,5 @@ const (
 	Module = "azcore"

 	// Version is the semantic version (see http://semver.org) of this module.
-	Version = "v1.7.0"
+	Version = "v1.7.2"
 )
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go (generated, vendored, new file; 15 lines)

@@ -0,0 +1,15 @@
+//go:build !wasm
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package runtime
+
+import (
+	"context"
+	"net"
+)
+
+func defaultTransportDialContext(dialer *net.Dialer) func(context.Context, string, string) (net.Conn, error) {
+	return dialer.DialContext
+}
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go (generated, vendored, new file; 15 lines)

@@ -0,0 +1,15 @@
+//go:build (js && wasm) || wasip1
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package runtime
+
+import (
+	"context"
+	"net"
+)
+
+func defaultTransportDialContext(dialer *net.Dialer) func(context.Context, string, string) (net.Conn, error) {
+	return nil
+}
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go (generated, vendored; file name inferred from context, the scrape lost this header)

@@ -18,10 +18,10 @@ var defaultHTTPClient *http.Client
 func init() {
 	defaultTransport := &http.Transport{
 		Proxy: http.ProxyFromEnvironment,
-		DialContext: (&net.Dialer{
+		DialContext: defaultTransportDialContext(&net.Dialer{
 			Timeout:   30 * time.Second,
 			KeepAlive: 30 * time.Second,
-		}).DialContext,
+		}),
 		ForceAttemptHTTP2: true,
 		MaxIdleConns:      100,
 		IdleConnTimeout:   90 * time.Second,

@@ -29,6 +29,7 @@ func init() {
 		ExpectContinueTimeout: 1 * time.Second,
 		TLSClientConfig: &tls.Config{
 			MinVersion: tls.VersionTLS12,
+			Renegotiation: tls.RenegotiateFreelyAsClient,
 		},
 	}
 	defaultHTTPClient = &http.Client{
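Taken together, the three azcore changes above implement the WASM fix from the 1.7.2 changelog entry: the build tags select a per-platform defaultTransportDialContext, and the WASM variant returns nil so the Transport is left without a custom dialer. This presumably relies on net/http's documented behavior that a Transport with neither Dial nor DialContext set dials via the platform default, letting js/wasm and wasip1 targets fall back to the environment's own HTTP stack (that last inference is not stated in the diff itself).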
vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go (generated, vendored; 47 changed lines)

@@ -31,6 +31,8 @@ package endpointcreds

 import (
 	"encoding/json"
+	"fmt"
+	"strings"
 	"time"

 	"github.com/aws/aws-sdk-go/aws"

@@ -69,7 +71,37 @@ type Provider struct {

 	// Optional authorization token value if set will be used as the value of
 	// the Authorization header of the endpoint credential request.
+	//
+	// When constructed from environment, the provider will use the value of
+	// AWS_CONTAINER_AUTHORIZATION_TOKEN environment variable as the token
+	//
+	// Will be overridden if AuthorizationTokenProvider is configured
 	AuthorizationToken string
+
+	// Optional auth provider func to dynamically load the auth token from a file
+	// everytime a credential is retrieved
+	//
+	// When constructed from environment, the provider will read and use the content
+	// of the file pointed to by AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE environment variable
+	// as the auth token everytime credentials are retrieved
+	//
+	// Will override AuthorizationToken if configured
+	AuthorizationTokenProvider AuthTokenProvider
 }

+// AuthTokenProvider defines an interface to dynamically load a value to be passed
+// for the Authorization header of a credentials request.
+type AuthTokenProvider interface {
+	GetToken() (string, error)
+}
+
+// TokenProviderFunc is a func type implementing AuthTokenProvider interface
+// and enables customizing token provider behavior
+type TokenProviderFunc func() (string, error)
+
+// GetToken func retrieves auth token according to TokenProviderFunc implementation
+func (p TokenProviderFunc) GetToken() (string, error) {
+	return p()
+}
+
 // NewProviderClient returns a credentials Provider for retrieving AWS credentials

@@ -164,7 +196,20 @@ func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error
 	req := p.Client.NewRequest(op, nil, out)
 	req.SetContext(ctx)
 	req.HTTPRequest.Header.Set("Accept", "application/json")
-	if authToken := p.AuthorizationToken; len(authToken) != 0 {
+
+	authToken := p.AuthorizationToken
+	var err error
+	if p.AuthorizationTokenProvider != nil {
+		authToken, err = p.AuthorizationTokenProvider.GetToken()
+		if err != nil {
+			return nil, fmt.Errorf("get authorization token: %v", err)
+		}
+	}
+
+	if strings.ContainsAny(authToken, "\r\n") {
+		return nil, fmt.Errorf("authorization token contains invalid newline sequence")
+	}
+	if len(authToken) != 0 {
 		req.HTTPRequest.Header.Set("Authorization", authToken)
 	}
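The new TokenProviderFunc adapter can be wired up like this; a minimal sketch, assuming a hypothetical token file path (only the endpointcreds names come from the diff above):

package example

import (
	"fmt"
	"io/ioutil"

	"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
)

// tokenPath is a hypothetical location of a rotating auth token.
const tokenPath = "/var/run/secrets/aws/token"

// configureTokenProvider re-reads the token file on every credential
// fetch, mirroring what defaults.go below does for the
// AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE environment variable.
func configureTokenProvider(p *endpointcreds.Provider) {
	p.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) {
		contents, err := ioutil.ReadFile(tokenPath)
		if err != nil {
			return "", fmt.Errorf("read auth token: %v", err)
		}
		return string(contents), nil
	})
}

Because the func is invoked on each retrieval rather than once at construction, a token rotated on disk is picked up without rebuilding the provider.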
vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go (generated, vendored; 64 changed lines)

@@ -9,6 +9,7 @@ package defaults

 import (
 	"fmt"
+	"io/ioutil"
 	"net"
 	"net/http"
 	"net/url"

@@ -115,9 +116,31 @@ func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Pro

 const (
 	httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
+	httpProviderAuthFileEnvVar      = "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE"
 	httpProviderEnvVar              = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
 )

+// direct representation of the IPv4 address for the ECS container
+// "169.254.170.2"
+var ecsContainerIPv4 net.IP = []byte{
+	169, 254, 170, 2,
+}
+
+// direct representation of the IPv4 address for the EKS container
+// "169.254.170.23"
+var eksContainerIPv4 net.IP = []byte{
+	169, 254, 170, 23,
+}
+
+// direct representation of the IPv6 address for the EKS container
+// "fd00:ec2::23"
+var eksContainerIPv6 net.IP = []byte{
+	0xFD, 0, 0xE, 0xC2,
+	0, 0, 0, 0,
+	0, 0, 0, 0,
+	0, 0, 0, 0x23,
+}
+
 // RemoteCredProvider returns a credentials provider for the default remote
 // endpoints such as EC2 or ECS Roles.
 func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {

@@ -135,19 +158,22 @@ func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.P

 var lookupHostFn = net.LookupHost

-func isLoopbackHost(host string) (bool, error) {
-	ip := net.ParseIP(host)
-	if ip != nil {
-		return ip.IsLoopback(), nil
+// isAllowedHost allows host to be loopback or known ECS/EKS container IPs
+//
+// host can either be an IP address OR an unresolved hostname - resolution will
+// be automatically performed in the latter case
+func isAllowedHost(host string) (bool, error) {
+	if ip := net.ParseIP(host); ip != nil {
+		return isIPAllowed(ip), nil
 	}

 	// Host is not an ip, perform lookup
 	addrs, err := lookupHostFn(host)
 	if err != nil {
 		return false, err
 	}

 	for _, addr := range addrs {
-		if !net.ParseIP(addr).IsLoopback() {
+		if ip := net.ParseIP(addr); ip == nil || !isIPAllowed(ip) {
 			return false, nil
 		}
 	}

@@ -155,6 +181,13 @@ func isLoopbackHost(host string) (bool, error) {
 	return true, nil
 }

+func isIPAllowed(ip net.IP) bool {
+	return ip.IsLoopback() ||
+		ip.Equal(ecsContainerIPv4) ||
+		ip.Equal(eksContainerIPv4) ||
+		ip.Equal(eksContainerIPv6)
+}
+
 func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
 	var errMsg string

@@ -165,10 +198,12 @@ func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string)
 		host := aws.URLHostname(parsed)
 		if len(host) == 0 {
 			errMsg = "unable to parse host from local HTTP cred provider URL"
-		} else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil {
-			errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr)
-		} else if !isLoopback {
-			errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host)
+		} else if parsed.Scheme == "http" {
+			if isAllowedHost, allowHostErr := isAllowedHost(host); allowHostErr != nil {
+				errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, allowHostErr)
+			} else if !isAllowedHost {
+				errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback/ecs/eks hosts are allowed.", host)
+			}
 		}
 	}

@@ -190,6 +225,15 @@ func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) crede
 		func(p *endpointcreds.Provider) {
 			p.ExpiryWindow = 5 * time.Minute
 			p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar)
+			if authFilePath := os.Getenv(httpProviderAuthFileEnvVar); authFilePath != "" {
+				p.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) {
+					if contents, err := ioutil.ReadFile(authFilePath); err != nil {
+						return "", fmt.Errorf("failed to read authorization token from %v: %v", authFilePath, err)
+					} else {
+						return string(contents), nil
+					}
+				})
+			}
 		},
 	)
 }
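The net effect of the defaults.go change is that the container credentials endpoint may now live on the fixed ECS/EKS link-local addresses as well as loopback. A self-contained sketch of the same check (the three addresses are taken from the diff above; the sample inputs are illustrative):

package main

import (
	"fmt"
	"net"
)

// The non-loopback addresses the SDK now accepts for credential endpoints.
var allowed = []net.IP{
	net.ParseIP("169.254.170.2"),  // ECS container credentials endpoint
	net.ParseIP("169.254.170.23"), // EKS container credentials endpoint (IPv4)
	net.ParseIP("fd00:ec2::23"),   // EKS container credentials endpoint (IPv6)
}

// isIPAllowed reports whether ip is loopback or one of the fixed
// ECS/EKS container addresses, matching the vendored logic.
func isIPAllowed(ip net.IP) bool {
	if ip == nil {
		return false
	}
	if ip.IsLoopback() {
		return true
	}
	for _, a := range allowed {
		if ip.Equal(a) {
			return true
		}
	}
	return false
}

func main() {
	for _, h := range []string{"127.0.0.1", "169.254.170.2", "fd00:ec2::23", "10.0.0.1"} {
		fmt.Printf("%-15s allowed=%v\n", h, isIPAllowed(net.ParseIP(h)))
	}
}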
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go (generated, vendored; 336 changed lines)

@@ -1040,6 +1040,21 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
+			endpointKey{
+				Region:  "ca-central-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "api.detective-fips.ca-central-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "ca-central-1-fips",
+			}: endpoint{
+				Hostname: "api.detective-fips.ca-central-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "ca-central-1",
+				},
+				Deprecated: boxedTrue,
+			},
 			endpointKey{
 				Region: "eu-central-1",
 			}: endpoint{},

@@ -3977,6 +3992,12 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
+			endpointKey{
+				Region:  "ca-central-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "autoscaling-fips.ca-central-1.amazonaws.com",
+			},
 			endpointKey{
 				Region: "eu-central-1",
 			}: endpoint{},

@@ -4001,6 +4022,51 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "eu-west-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "fips-ca-central-1",
+			}: endpoint{
+				Hostname: "autoscaling-fips.ca-central-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "ca-central-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "fips-us-east-1",
+			}: endpoint{
+				Hostname: "autoscaling-fips.us-east-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-east-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "fips-us-east-2",
+			}: endpoint{
+				Hostname: "autoscaling-fips.us-east-2.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-east-2",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "fips-us-west-1",
+			}: endpoint{
+				Hostname: "autoscaling-fips.us-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-west-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "fips-us-west-2",
+			}: endpoint{
+				Hostname: "autoscaling-fips.us-west-2.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-west-2",
+				},
+				Deprecated: boxedTrue,
+			},
 			endpointKey{
 				Region: "il-central-1",
 			}: endpoint{},

@@ -4016,15 +4082,39 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "us-east-1",
 			}: endpoint{},
+			endpointKey{
+				Region:  "us-east-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "autoscaling-fips.us-east-1.amazonaws.com",
+			},
 			endpointKey{
 				Region: "us-east-2",
 			}: endpoint{},
+			endpointKey{
+				Region:  "us-east-2",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "autoscaling-fips.us-east-2.amazonaws.com",
+			},
 			endpointKey{
 				Region: "us-west-1",
 			}: endpoint{},
+			endpointKey{
+				Region:  "us-west-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "autoscaling-fips.us-west-1.amazonaws.com",
+			},
 			endpointKey{
 				Region: "us-west-2",
 			}: endpoint{},
+			endpointKey{
+				Region:  "us-west-2",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "autoscaling-fips.us-west-2.amazonaws.com",
+			},
 		},
 	},
 	"autoscaling-plans": service{

@@ -6214,15 +6304,27 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-northeast-2",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-northeast-3",
+			}: endpoint{},
 			endpointKey{
 				Region: "ap-south-1",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-south-2",
+			}: endpoint{},
 			endpointKey{
 				Region: "ap-southeast-1",
 			}: endpoint{},
 			endpointKey{
 				Region: "ap-southeast-2",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-3",
+			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},

@@ -6244,6 +6346,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "eu-south-1",
 			}: endpoint{},
+			endpointKey{
+				Region: "eu-south-2",
+			}: endpoint{},
 			endpointKey{
 				Region: "eu-west-1",
 			}: endpoint{},

@@ -6298,6 +6403,12 @@ var awsPartition = partition{
 				},
 				Deprecated: boxedTrue,
 			},
+			endpointKey{
+				Region: "il-central-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "me-central-1",
+			}: endpoint{},
 			endpointKey{
 				Region: "me-south-1",
 			}: endpoint{},

@@ -6993,6 +7104,14 @@ var awsPartition = partition{
 					Region: "ap-south-1",
 				},
 			},
+			endpointKey{
+				Region: "ap-south-2",
+			}: endpoint{
+				Hostname: "compute-optimizer.ap-south-2.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "ap-south-2",
+				},
+			},
 			endpointKey{
 				Region: "ap-southeast-1",
 			}: endpoint{

@@ -7009,6 +7128,22 @@ var awsPartition = partition{
 					Region: "ap-southeast-2",
 				},
 			},
+			endpointKey{
+				Region: "ap-southeast-3",
+			}: endpoint{
+				Hostname: "compute-optimizer.ap-southeast-3.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "ap-southeast-3",
+				},
+			},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{
+				Hostname: "compute-optimizer.ap-southeast-4.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "ap-southeast-4",
+				},
+			},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{

@@ -7025,6 +7160,14 @@ var awsPartition = partition{
 					Region: "eu-central-1",
 				},
 			},
+			endpointKey{
+				Region: "eu-central-2",
+			}: endpoint{
+				Hostname: "compute-optimizer.eu-central-2.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "eu-central-2",
+				},
+			},
 			endpointKey{
 				Region: "eu-north-1",
 			}: endpoint{

@@ -7041,6 +7184,14 @@ var awsPartition = partition{
 					Region: "eu-south-1",
 				},
 			},
+			endpointKey{
+				Region: "eu-south-2",
+			}: endpoint{
+				Hostname: "compute-optimizer.eu-south-2.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "eu-south-2",
+				},
+			},
 			endpointKey{
 				Region: "eu-west-1",
 			}: endpoint{

@@ -7065,6 +7216,22 @@ var awsPartition = partition{
 					Region: "eu-west-3",
 				},
 			},
+			endpointKey{
+				Region: "il-central-1",
+			}: endpoint{
+				Hostname: "compute-optimizer.il-central-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "il-central-1",
+				},
+			},
+			endpointKey{
+				Region: "me-central-1",
+			}: endpoint{
+				Hostname: "compute-optimizer.me-central-1.amazonaws.com",
+				CredentialScope: credentialScope{
|
||||
Region: "me-central-1",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "me-south-1",
|
||||
}: endpoint{
|
||||
|
|
@ -11595,6 +11762,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "ap-southeast-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ca-central-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -11667,6 +11837,9 @@ var awsPartition = partition{
|
|||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "me-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "me-south-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -11713,6 +11886,9 @@ var awsPartition = partition{
|
|||
},
|
||||
"emr-serverless": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "af-south-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-east-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -11722,6 +11898,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "ap-northeast-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-northeast-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-south-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -11731,6 +11910,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "ap-southeast-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ca-central-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -11746,6 +11928,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "eu-north-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-south-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-west-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -17900,6 +18085,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "eu-south-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-south-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-west-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -18193,6 +18381,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "eu-south-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-south-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-west-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -18660,46 +18851,6 @@ var awsPartition = partition{
|
|||
}: endpoint{},
|
||||
},
|
||||
},
|
||||
"macie": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "fips-us-east-1",
|
||||
}: endpoint{
|
||||
Hostname: "macie-fips.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "fips-us-west-2",
|
||||
}: endpoint{
|
||||
Hostname: "macie-fips.us-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-2",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-east-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "macie-fips.us-east-1.amazonaws.com",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-west-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-west-2",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "macie-fips.us-west-2.amazonaws.com",
|
||||
},
|
||||
},
|
||||
},
|
||||
"macie2": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
|
|
@ -21265,12 +21416,21 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "ap-northeast-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-northeast-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-south-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ca-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-central-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -26574,6 +26734,9 @@ var awsPartition = partition{
|
|||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "il-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "me-central-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -35257,12 +35420,42 @@ var awsusgovPartition = partition{
|
|||
},
|
||||
"appconfigdata": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "fips-us-gov-east-1",
|
||||
}: endpoint{
|
||||
Hostname: "appconfigdata.us-gov-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-east-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "fips-us-gov-west-1",
|
||||
}: endpoint{
|
||||
Hostname: "appconfigdata.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-gov-east-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "appconfigdata.us-gov-east-1.amazonaws.com",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "appconfigdata.us-gov-west-1.amazonaws.com",
|
||||
},
|
||||
},
|
||||
},
|
||||
"application-autoscaling": service{
|
||||
|
|
@ -40125,20 +40318,40 @@ var awsusgovPartition = partition{
|
|||
"simspaceweaver": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "us-gov-east-1",
|
||||
Region: "fips-us-gov-east-1",
|
||||
}: endpoint{
|
||||
Hostname: "simspaceweaver.us-gov-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-east-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1",
|
||||
Region: "fips-us-gov-west-1",
|
||||
}: endpoint{
|
||||
Hostname: "simspaceweaver.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-gov-east-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "simspaceweaver.us-gov-east-1.amazonaws.com",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "simspaceweaver.us-gov-west-1.amazonaws.com",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -40357,6 +40570,24 @@ var awsusgovPartition = partition{
|
|||
Region: "us-gov-east-1",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-east-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "sso.us-gov-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-east-1",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-east-1-fips",
|
||||
}: endpoint{
|
||||
Hostname: "sso.us-gov-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-east-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1",
|
||||
}: endpoint{
|
||||
|
|
@ -40365,6 +40596,24 @@ var awsusgovPartition = partition{
|
|||
Region: "us-gov-west-1",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "sso.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1-fips",
|
||||
}: endpoint{
|
||||
Hostname: "sso.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
},
|
||||
},
|
||||
"states": service{
|
||||
|
|
@ -41396,6 +41645,9 @@ var awsisoPartition = partition{
|
|||
endpointKey{
|
||||
Region: "us-iso-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-iso-west-1",
|
||||
}: endpoint{},
|
||||
},
|
||||
},
|
||||
"ec2": service{
|
||||
|
|
|
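The partition data above is consumed through the SDK's endpoint resolver. As a quick illustration, here is a minimal sketch (not part of this commit) that resolves one of the FIPS variants added above with the aws-sdk-go v1 endpoints package; the service/region pair is an arbitrary example and the expected URL comes from the data in this diff.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	resolver := endpoints.DefaultResolver()
	// Ask for the FIPS variant of the Auto Scaling endpoint in ca-central-1,
	// one of the entries this update adds to the partition data.
	ep, err := resolver.EndpointFor("autoscaling", "ca-central-1",
		func(o *endpoints.Options) {
			o.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
		})
	if err != nil {
		panic(err)
	}
	fmt.Println(ep.URL) // expected: https://autoscaling-fips.ca-central-1.amazonaws.com
}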
2 vendor/github.com/aws/aws-sdk-go/aws/version.go generated vendored
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
const SDKVersion = "1.47.9"
const SDKVersion = "1.48.1"
177 vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go generated vendored
@@ -6512,10 +6512,7 @@ func (c *AutoScaling) StartInstanceRefreshRequest(input *StartInstanceRefreshInp

// StartInstanceRefresh API operation for Auto Scaling.
//
// Starts an instance refresh. During an instance refresh, Amazon EC2 Auto Scaling
// performs a rolling update of instances in an Auto Scaling group. Instances
// are terminated first and then replaced, which temporarily reduces the capacity
// available within your Auto Scaling group.
// Starts an instance refresh.
//
// This operation is part of the instance refresh feature (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html)
// in Amazon EC2 Auto Scaling, which helps you update instances in your Auto
@@ -8302,6 +8299,11 @@ type CreateAutoScalingGroupInput struct {
// in the Amazon EC2 Auto Scaling User Guide.
InstanceId *string `min:"1" type:"string"`

// An instance maintenance policy. For more information, see Set instance maintenance
// policy (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-instance-maintenance-policy.html)
// in the Amazon EC2 Auto Scaling User Guide.
InstanceMaintenancePolicy *InstanceMaintenancePolicy `type:"structure"`

// The name of the launch configuration to use to launch instances.
//
// Conditional: You must specify either a launch template (LaunchTemplate or
@@ -8480,6 +8482,11 @@ func (s *CreateAutoScalingGroupInput) Validate() error {
if s.VPCZoneIdentifier != nil && len(*s.VPCZoneIdentifier) < 1 {
invalidParams.Add(request.NewErrParamMinLen("VPCZoneIdentifier", 1))
}
if s.InstanceMaintenancePolicy != nil {
if err := s.InstanceMaintenancePolicy.Validate(); err != nil {
invalidParams.AddNested("InstanceMaintenancePolicy", err.(request.ErrInvalidParams))
}
}
if s.LaunchTemplate != nil {
if err := s.LaunchTemplate.Validate(); err != nil {
invalidParams.AddNested("LaunchTemplate", err.(request.ErrInvalidParams))
@@ -8593,6 +8600,12 @@ func (s *CreateAutoScalingGroupInput) SetInstanceId(v string) *CreateAutoScaling
return s
}

// SetInstanceMaintenancePolicy sets the InstanceMaintenancePolicy field's value.
func (s *CreateAutoScalingGroupInput) SetInstanceMaintenancePolicy(v *InstanceMaintenancePolicy) *CreateAutoScalingGroupInput {
s.InstanceMaintenancePolicy = v
return s
}

// SetLaunchConfigurationName sets the LaunchConfigurationName field's value.
func (s *CreateAutoScalingGroupInput) SetLaunchConfigurationName(v string) *CreateAutoScalingGroupInput {
s.LaunchConfigurationName = &v
@@ -13605,6 +13618,9 @@ type Group struct {
// HealthCheckType is a required field
HealthCheckType *string `min:"1" type:"string" required:"true"`

// An instance maintenance policy.
InstanceMaintenancePolicy *InstanceMaintenancePolicy `type:"structure"`

// The EC2 instances associated with the group.
Instances []*Instance `type:"list"`

@@ -13774,6 +13790,12 @@ func (s *Group) SetHealthCheckType(v string) *Group {
return s
}

// SetInstanceMaintenancePolicy sets the InstanceMaintenancePolicy field's value.
func (s *Group) SetInstanceMaintenancePolicy(v *InstanceMaintenancePolicy) *Group {
s.InstanceMaintenancePolicy = v
return s
}

// SetInstances sets the Instances field's value.
func (s *Group) SetInstances(v []*Instance) *Group {
s.Instances = v
@@ -14164,6 +14186,78 @@ func (s *InstanceDetails) SetWeightedCapacity(v string) *InstanceDetails {
return s
}

// Describes an instance maintenance policy.
//
// For more information, see Set instance maintenance policy (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-instance-maintenance-policy.html)
// in the Amazon EC2 Auto Scaling User Guide.
type InstanceMaintenancePolicy struct {
_ struct{} `type:"structure"`

// Specifies the upper threshold as a percentage of the desired capacity of
// the Auto Scaling group. It represents the maximum percentage of the group
// that can be in service and healthy, or pending, to support your workload
// when replacing instances. Value range is 100 to 200. After it's set, a value
// of -1 will clear the previously set value.
//
// Both MinHealthyPercentage and MaxHealthyPercentage must be specified, and
// the difference between them cannot be greater than 100. A large range increases
// the number of instances that can be replaced at the same time.
MaxHealthyPercentage *int64 `type:"integer"`

// Specifies the lower threshold as a percentage of the desired capacity of
// the Auto Scaling group. It represents the minimum percentage of the group
// to keep in service, healthy, and ready to use to support your workload when
// replacing instances. Value range is 0 to 100. After it's set, a value of
// -1 will clear the previously set value.
MinHealthyPercentage *int64 `type:"integer"`
}

// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s InstanceMaintenancePolicy) String() string {
return awsutil.Prettify(s)
}

// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s InstanceMaintenancePolicy) GoString() string {
return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *InstanceMaintenancePolicy) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "InstanceMaintenancePolicy"}
if s.MaxHealthyPercentage != nil && *s.MaxHealthyPercentage < -1 {
invalidParams.Add(request.NewErrParamMinValue("MaxHealthyPercentage", -1))
}
if s.MinHealthyPercentage != nil && *s.MinHealthyPercentage < -1 {
invalidParams.Add(request.NewErrParamMinValue("MinHealthyPercentage", -1))
}

if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}

// SetMaxHealthyPercentage sets the MaxHealthyPercentage field's value.
func (s *InstanceMaintenancePolicy) SetMaxHealthyPercentage(v int64) *InstanceMaintenancePolicy {
s.MaxHealthyPercentage = &v
return s
}

// SetMinHealthyPercentage sets the MinHealthyPercentage field's value.
func (s *InstanceMaintenancePolicy) SetMinHealthyPercentage(v int64) *InstanceMaintenancePolicy {
s.MinHealthyPercentage = &v
return s
}

// The metadata options for the instances. For more information, see Configuring
// the Instance Metadata Options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html#launch-configurations-imds)
// in the Amazon EC2 Auto Scaling User Guide.
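As an aside, here is a minimal sketch (not part of this diff) showing how the new InstanceMaintenancePolicy field might be attached to an existing group; the group name and percentages are hypothetical examples chosen to satisfy the constraints documented above.

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	svc := autoscaling.New(session.Must(session.NewSession()))
	// Per the doc comments above: Min is 0-100, Max is 100-200, the two
	// must be set together with a difference of at most 100, and a value
	// of -1 clears a previously set value.
	_, err := svc.UpdateAutoScalingGroup(&autoscaling.UpdateAutoScalingGroupInput{
		AutoScalingGroupName: aws.String("example-asg"), // hypothetical group name
		InstanceMaintenancePolicy: &autoscaling.InstanceMaintenancePolicy{
			MinHealthyPercentage: aws.Int64(90),
			MaxHealthyPercentage: aws.Int64(110),
		},
	})
	if err != nil {
		panic(err)
	}
}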
@@ -19000,14 +19094,26 @@ type RefreshPreferences struct {
// in all cases), or the HealthCheckGracePeriod property otherwise.
InstanceWarmup *int64 `type:"integer"`

// The amount of capacity in the Auto Scaling group that must pass your group's
// health checks to allow the operation to continue. The value is expressed
// as a percentage of the desired capacity of the Auto Scaling group (rounded
// up to the nearest integer). The default is 90.
// Specifies the maximum percentage of the group that can be in service and
// healthy, or pending, to support your workload when replacing instances. The
// value is expressed as a percentage of the desired capacity of the Auto Scaling
// group. Value range is 100 to 200.
//
// Setting the minimum healthy percentage to 100 percent limits the rate of
// replacement to one instance at a time. In contrast, setting it to 0 percent
// has the effect of replacing all instances at the same time.
// If you specify MaxHealthyPercentage, you must also specify MinHealthyPercentage,
// and the difference between them cannot be greater than 100. A larger range
// increases the number of instances that can be replaced at the same time.
//
// If you do not specify this property, the default is 100 percent, or the percentage
// set in the instance maintenance policy for the Auto Scaling group, if defined.
MaxHealthyPercentage *int64 `min:"100" type:"integer"`

// Specifies the minimum percentage of the group to keep in service, healthy,
// and ready to use to support your workload to allow the operation to continue.
// The value is expressed as a percentage of the desired capacity of the Auto
// Scaling group. Value range is 0 to 100.
//
// If you do not specify this property, the default is 90 percent, or the percentage
// set in the instance maintenance policy for the Auto Scaling group, if defined.
MinHealthyPercentage *int64 `type:"integer"`

// Choose the behavior that you want Amazon EC2 Auto Scaling to use if instances
@@ -19081,6 +19187,19 @@ func (s RefreshPreferences) GoString() string {
return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *RefreshPreferences) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "RefreshPreferences"}
if s.MaxHealthyPercentage != nil && *s.MaxHealthyPercentage < 100 {
invalidParams.Add(request.NewErrParamMinValue("MaxHealthyPercentage", 100))
}

if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}

// SetAlarmSpecification sets the AlarmSpecification field's value.
func (s *RefreshPreferences) SetAlarmSpecification(v *AlarmSpecification) *RefreshPreferences {
s.AlarmSpecification = v
@@ -19111,6 +19230,12 @@ func (s *RefreshPreferences) SetInstanceWarmup(v int64) *RefreshPreferences {
return s
}

// SetMaxHealthyPercentage sets the MaxHealthyPercentage field's value.
func (s *RefreshPreferences) SetMaxHealthyPercentage(v int64) *RefreshPreferences {
s.MaxHealthyPercentage = &v
return s
}

// SetMinHealthyPercentage sets the MinHealthyPercentage field's value.
func (s *RefreshPreferences) SetMinHealthyPercentage(v int64) *RefreshPreferences {
s.MinHealthyPercentage = &v
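The new MaxHealthyPercentage preference is used when starting an instance refresh. A minimal sketch follows (not part of this diff); the group name is a hypothetical placeholder, and the percentages respect the ranges documented above.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	svc := autoscaling.New(session.Must(session.NewSession()))
	out, err := svc.StartInstanceRefresh(&autoscaling.StartInstanceRefreshInput{
		AutoScalingGroupName: aws.String("example-asg"), // hypothetical group name
		Preferences: &autoscaling.RefreshPreferences{
			// Min is 0-100, Max is 100-200, and they may differ by at most 100;
			// a wider range lets more instances be replaced at the same time.
			MinHealthyPercentage: aws.Int64(90),
			MaxHealthyPercentage: aws.Int64(120),
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(aws.StringValue(out.InstanceRefreshId))
}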
@@ -20170,10 +20295,11 @@ type StartInstanceRefreshInput struct {
DesiredConfiguration *DesiredConfiguration `type:"structure"`

// Sets your preferences for the instance refresh so that it performs as expected
// when you start it. Includes the instance warmup time, the minimum healthy
// percentage, and the behaviors that you want Amazon EC2 Auto Scaling to use
// if instances that are in Standby state or protected from scale in are found.
// You can also choose to enable additional features, such as the following:
// when you start it. Includes the instance warmup time, the minimum and maximum
// healthy percentages, and the behaviors that you want Amazon EC2 Auto Scaling
// to use if instances that are in Standby state or protected from scale in
// are found. You can also choose to enable additional features, such as the
// following:
//
// * Auto rollback
//
@@ -20220,6 +20346,11 @@ func (s *StartInstanceRefreshInput) Validate() error {
invalidParams.AddNested("DesiredConfiguration", err.(request.ErrInvalidParams))
}
}
if s.Preferences != nil {
if err := s.Preferences.Validate(); err != nil {
invalidParams.AddNested("Preferences", err.(request.ErrInvalidParams))
}
}

if invalidParams.Len() > 0 {
return invalidParams
@@ -21326,6 +21457,11 @@ type UpdateAutoScalingGroupInput struct {
// Only specify EC2 if you must clear a value that was previously set.
HealthCheckType *string `min:"1" type:"string"`

// An instance maintenance policy. For more information, see Set instance maintenance
// policy (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-instance-maintenance-policy.html)
// in the Amazon EC2 Auto Scaling User Guide.
InstanceMaintenancePolicy *InstanceMaintenancePolicy `type:"structure"`

// The name of the launch configuration. If you specify LaunchConfigurationName
// in your update request, you can't specify LaunchTemplate or MixedInstancesPolicy.
LaunchConfigurationName *string `min:"1" type:"string"`
@@ -21443,6 +21579,11 @@ func (s *UpdateAutoScalingGroupInput) Validate() error {
if s.VPCZoneIdentifier != nil && len(*s.VPCZoneIdentifier) < 1 {
invalidParams.Add(request.NewErrParamMinLen("VPCZoneIdentifier", 1))
}
if s.InstanceMaintenancePolicy != nil {
if err := s.InstanceMaintenancePolicy.Validate(); err != nil {
invalidParams.AddNested("InstanceMaintenancePolicy", err.(request.ErrInvalidParams))
}
}
if s.LaunchTemplate != nil {
if err := s.LaunchTemplate.Validate(); err != nil {
invalidParams.AddNested("LaunchTemplate", err.(request.ErrInvalidParams))
@@ -21520,6 +21661,12 @@ func (s *UpdateAutoScalingGroupInput) SetHealthCheckType(v string) *UpdateAutoSc
return s
}

// SetInstanceMaintenancePolicy sets the InstanceMaintenancePolicy field's value.
func (s *UpdateAutoScalingGroupInput) SetInstanceMaintenancePolicy(v *InstanceMaintenancePolicy) *UpdateAutoScalingGroupInput {
s.InstanceMaintenancePolicy = v
return s
}

// SetLaunchConfigurationName sets the LaunchConfigurationName field's value.
func (s *UpdateAutoScalingGroupInput) SetLaunchConfigurationName(v string) *UpdateAutoScalingGroupInput {
s.LaunchConfigurationName = &v
4287 vendor/github.com/aws/aws-sdk-go/service/ec2/api.go generated vendored
File diff suppressed because it is too large
2 vendor/github.com/aws/aws-sdk-go/service/s3/api.go generated vendored
@@ -10246,7 +10246,7 @@ func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req
// or an object, it checks the PublicAccessBlock configuration for both the
// bucket (or the bucket that contains the object) and the bucket owner's account.
// If the PublicAccessBlock configurations are different between the bucket
// and the account, S3 uses the most restrictive combination of the bucket-level
// and the account, Amazon S3 uses the most restrictive combination of the bucket-level
// and account-level settings.
//
// For more information about when Amazon S3 considers a bucket or an object
664 vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go generated vendored
@@ -56,9 +56,10 @@ func (c *SSOOIDC) CreateTokenRequest(input *CreateTokenInput) (req *request.Requ

// CreateToken API operation for AWS SSO OIDC.
//
// Creates and returns an access token for the authorized client. The access
// token issued will be used to fetch short-term credentials for the assigned
// roles in the AWS account.
// Creates and returns access and refresh tokens for clients that are authenticated
// using client secrets. The access token can be used to fetch short-term credentials
// for the assigned AWS accounts or to access application APIs using bearer
// authentication.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -133,6 +134,131 @@ func (c *SSOOIDC) CreateTokenWithContext(ctx aws.Context, input *CreateTokenInpu
return out, req.Send()
}

const opCreateTokenWithIAM = "CreateTokenWithIAM"

// CreateTokenWithIAMRequest generates a "aws/request.Request" representing the
// client's request for the CreateTokenWithIAM operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See CreateTokenWithIAM for more information on using the CreateTokenWithIAM
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
// // Example sending a request using the CreateTokenWithIAMRequest method.
// req, resp := client.CreateTokenWithIAMRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateTokenWithIAM
func (c *SSOOIDC) CreateTokenWithIAMRequest(input *CreateTokenWithIAMInput) (req *request.Request, output *CreateTokenWithIAMOutput) {
op := &request.Operation{
Name: opCreateTokenWithIAM,
HTTPMethod: "POST",
HTTPPath: "/token?aws_iam=t",
}

if input == nil {
input = &CreateTokenWithIAMInput{}
}

output = &CreateTokenWithIAMOutput{}
req = c.newRequest(op, input, output)
return
}

// CreateTokenWithIAM API operation for AWS SSO OIDC.
//
// Creates and returns access and refresh tokens for clients and applications
// that are authenticated using IAM entities. The access token can be used to
// fetch short-term credentials for the assigned AWS accounts or to access application
// APIs using bearer authentication.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS SSO OIDC's
// API operation CreateTokenWithIAM for usage and error information.
//
// Returned Error Types:
//
// - InvalidRequestException
// Indicates that something is wrong with the input to the request. For example,
// a required parameter might be missing or out of range.
//
// - InvalidClientException
// Indicates that the clientId or clientSecret in the request is invalid. For
// example, this can occur when a client sends an incorrect clientId or an expired
// clientSecret.
//
// - InvalidGrantException
// Indicates that a request contains an invalid grant. This can occur if a client
// makes a CreateToken request with an invalid grant type.
//
// - UnauthorizedClientException
// Indicates that the client is not currently authorized to make the request.
// This can happen when a clientId is not issued for a public client.
//
// - UnsupportedGrantTypeException
// Indicates that the grant type in the request is not supported by the service.
//
// - InvalidScopeException
// Indicates that the scope provided in the request is invalid.
//
// - AuthorizationPendingException
// Indicates that a request to authorize a client with an access user session
// token is pending.
//
// - SlowDownException
// Indicates that the client is making the request too frequently and is more
// than the service can handle.
//
// - AccessDeniedException
// You do not have sufficient access to perform this action.
//
// - ExpiredTokenException
// Indicates that the token issued by the service is expired and is no longer
// valid.
//
// - InternalServerException
// Indicates that an error from the service occurred while trying to process
// a request.
//
// - InvalidRequestRegionException
// Indicates that a token provided as input to the request was issued by and
// is only usable by calling IAM Identity Center endpoints in another region.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateTokenWithIAM
func (c *SSOOIDC) CreateTokenWithIAM(input *CreateTokenWithIAMInput) (*CreateTokenWithIAMOutput, error) {
req, out := c.CreateTokenWithIAMRequest(input)
return out, req.Send()
}

// CreateTokenWithIAMWithContext is the same as CreateTokenWithIAM with the addition of
// the ability to pass a context and additional request options.
//
// See CreateTokenWithIAM for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *SSOOIDC) CreateTokenWithIAMWithContext(ctx aws.Context, input *CreateTokenWithIAMInput, opts ...request.Option) (*CreateTokenWithIAMOutput, error) {
req, out := c.CreateTokenWithIAMRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}

const opRegisterClient = "RegisterClient"

// RegisterClientRequest generates a "aws/request.Request" representing the
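For orientation, here is a minimal sketch (not part of this diff) of calling the new CreateTokenWithIAM operation with the Authorization Code grant type documented below; the client ARN, code, and redirect URI are hypothetical placeholders.

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssooidc"
)

func main() {
	svc := ssooidc.New(session.Must(session.NewSession()))
	// Exchange an authorization code for tokens. For this operation the
	// ClientId is an application ARN with OAuth grants configured.
	out, err := svc.CreateTokenWithIAMWithContext(context.Background(),
		&ssooidc.CreateTokenWithIAMInput{
			ClientId:    aws.String("arn:aws:sso::123456789012:application/example"), // placeholder
			GrantType:   aws.String("authorization_code"),
			Code:        aws.String("example-code"),                 // placeholder
			RedirectUri: aws.String("https://example.com/callback"), // placeholder
		})
	if err != nil {
		panic(err)
	}
	fmt.Println(aws.StringValue(out.TokenType))
}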
@@ -331,8 +457,11 @@ type AccessDeniedException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

// Single error code. For this exception the value will be access_denied.
Error_ *string `locationName:"error" type:"string"`

// Human-readable text providing additional information, used to assist the
// client developer in understanding the error that occurred.
Error_description *string `locationName:"error_description" type:"string"`

Message_ *string `locationName:"message" type:"string"`
@@ -400,8 +529,11 @@ type AuthorizationPendingException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

// Single error code. For this exception the value will be authorization_pending.
Error_ *string `locationName:"error" type:"string"`

// Human-readable text providing additional information, used to assist the
// client developer in understanding the error that occurred.
Error_description *string `locationName:"error_description" type:"string"`

Message_ *string `locationName:"message" type:"string"`
@@ -466,8 +598,8 @@ func (s *AuthorizationPendingException) RequestID() string {
type CreateTokenInput struct {
_ struct{} `type:"structure"`

// The unique identifier string for each client. This value should come from
// the persisted result of the RegisterClient API.
// The unique identifier string for the client or application. This value comes
// from the result of the RegisterClient API.
//
// ClientId is a required field
ClientId *string `locationName:"clientId" type:"string" required:"true"`
@@ -475,23 +607,30 @@ type CreateTokenInput struct {
// A secret string generated for the client. This value should come from the
// persisted result of the RegisterClient API.
//
// ClientSecret is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by CreateTokenInput's
// String and GoString methods.
//
// ClientSecret is a required field
ClientSecret *string `locationName:"clientSecret" type:"string" required:"true"`
ClientSecret *string `locationName:"clientSecret" type:"string" required:"true" sensitive:"true"`

// The authorization code received from the authorization service. This parameter
// is required to perform an authorization grant request to get access to a
// token.
// Used only when calling this API for the Authorization Code grant type. The
// short-term code is used to identify this authorization request. This grant
// type is currently unsupported for the CreateToken API.
Code *string `locationName:"code" type:"string"`

// Used only when calling this API for the device code grant type. This short-term
// code is used to identify this authentication attempt. This should come from
// an in-memory reference to the result of the StartDeviceAuthorization API.
// Used only when calling this API for the Device Code grant type. This short-term
// code is used to identify this authorization request. This comes from the
// result of the StartDeviceAuthorization API.
DeviceCode *string `locationName:"deviceCode" type:"string"`

// Supports grant types for the authorization code, refresh token, and device
// code request. For device code requests, specify the following value:
// Supports the following OAuth grant types: Device Code and Refresh Token.
// Specify either of the following values, depending on the grant type that
// you want:
//
// urn:ietf:params:oauth:grant-type:device_code
// * Device Code - urn:ietf:params:oauth:grant-type:device_code
//
// * Refresh Token - refresh_token
//
// For information about how to obtain the device code, see the StartDeviceAuthorization
// topic.
@@ -499,21 +638,28 @@ type CreateTokenInput struct {
// GrantType is a required field
GrantType *string `locationName:"grantType" type:"string" required:"true"`

// The location of the application that will receive the authorization code.
// Users authorize the service to send the request to this location.
// Used only when calling this API for the Authorization Code grant type. This
// value specifies the location of the client or application that has registered
// to receive the authorization code.
RedirectUri *string `locationName:"redirectUri" type:"string"`

// Currently, refreshToken is not yet implemented and is not supported. For
// more information about the features and limitations of the current IAM Identity
// Center OIDC implementation, see Considerations for Using this Guide in the
// IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
// Used only when calling this API for the Refresh Token grant type. This token
// is used to refresh short-term tokens, such as the access token, that might
// expire.
//
// The token used to obtain an access token in the event that the access token
// is invalid or expired.
RefreshToken *string `locationName:"refreshToken" type:"string"`
// For more information about the features and limitations of the current IAM
// Identity Center OIDC implementation, see Considerations for Using this Guide
// in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
//
// RefreshToken is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by CreateTokenInput's
// String and GoString methods.
RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"`

// The list of scopes that is defined by the client. Upon authorization, this
// list is used to restrict permissions when granting an access token.
// The list of scopes for which authorization is requested. The access token
// that is issued is limited to the scopes that are granted. If this value is
// not specified, IAM Identity Center authorizes all scopes that are configured
// for the client during the call to RegisterClient.
Scope []*string `locationName:"scope" type:"list"`
}

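A minimal sketch (not part of this diff) of the Device Code grant described above; the client identifier, secret, and device code would come from RegisterClient and StartDeviceAuthorization, and the literals here are placeholders.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssooidc"
)

func main() {
	svc := ssooidc.New(session.Must(session.NewSession()))
	// GrantType uses the Device Code value documented in CreateTokenInput.
	out, err := svc.CreateToken(&ssooidc.CreateTokenInput{
		ClientId:     aws.String("example-client-id"),     // placeholder
		ClientSecret: aws.String("example-client-secret"), // placeholder
		GrantType:    aws.String("urn:ietf:params:oauth:grant-type:device_code"),
		DeviceCode:   aws.String("example-device-code"), // placeholder
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(aws.Int64Value(out.ExpiresIn)) // seconds until the access token expires
}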
@ -605,31 +751,43 @@ func (s *CreateTokenInput) SetScope(v []*string) *CreateTokenInput {
|
|||
type CreateTokenOutput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// An opaque token to access IAM Identity Center resources assigned to a user.
|
||||
AccessToken *string `locationName:"accessToken" type:"string"`
|
||||
// A bearer token to access AWS accounts and applications assigned to a user.
|
||||
//
|
||||
// AccessToken is a sensitive parameter and its value will be
|
||||
// replaced with "sensitive" in string returned by CreateTokenOutput's
|
||||
// String and GoString methods.
|
||||
AccessToken *string `locationName:"accessToken" type:"string" sensitive:"true"`
|
||||
|
||||
// Indicates the time in seconds when an access token will expire.
|
||||
ExpiresIn *int64 `locationName:"expiresIn" type:"integer"`
|
||||
|
||||
// Currently, idToken is not yet implemented and is not supported. For more
|
||||
// information about the features and limitations of the current IAM Identity
|
||||
// Center OIDC implementation, see Considerations for Using this Guide in the
|
||||
// IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
|
||||
// The idToken is not implemented or supported. For more information about the
|
||||
// features and limitations of the current IAM Identity Center OIDC implementation,
|
||||
// see Considerations for Using this Guide in the IAM Identity Center OIDC API
|
||||
// Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
|
||||
//
|
||||
// The identifier of the user that associated with the access token, if present.
|
||||
IdToken *string `locationName:"idToken" type:"string"`
|
||||
// A JSON Web Token (JWT) that identifies who is associated with the issued
|
||||
// access token.
|
||||
//
|
||||
// IdToken is a sensitive parameter and its value will be
|
||||
// replaced with "sensitive" in string returned by CreateTokenOutput's
|
||||
// String and GoString methods.
|
||||
IdToken *string `locationName:"idToken" type:"string" sensitive:"true"`
|
||||
|
||||
// Currently, refreshToken is not yet implemented and is not supported. For
|
||||
// more information about the features and limitations of the current IAM Identity
|
||||
// Center OIDC implementation, see Considerations for Using this Guide in the
|
||||
// IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
|
||||
//
|
||||
// A token that, if present, can be used to refresh a previously issued access
|
||||
// token that might have expired.
|
||||
RefreshToken *string `locationName:"refreshToken" type:"string"`
|
||||
//
|
||||
// For more information about the features and limitations of the current IAM
|
||||
// Identity Center OIDC implementation, see Considerations for Using this Guide
|
||||
// in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
|
||||
//
|
||||
// RefreshToken is a sensitive parameter and its value will be
|
||||
// replaced with "sensitive" in string returned by CreateTokenOutput's
|
||||
// String and GoString methods.
|
||||
RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"`
|
||||
|
||||
// Used to notify the client that the returned token is an access token. The
|
||||
// supported type is BearerToken.
|
||||
// supported token type is Bearer.
|
||||
TokenType *string `locationName:"tokenType" type:"string"`
|
||||
}
|
||||
|
||||
|
|
@ -681,14 +839,312 @@ func (s *CreateTokenOutput) SetTokenType(v string) *CreateTokenOutput {
|
|||
return s
|
||||
}
|
||||
|
||||
type CreateTokenWithIAMInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Used only when calling this API for the JWT Bearer grant type. This value
|
||||
// specifies the JSON Web Token (JWT) issued by a trusted token issuer. To authorize
|
||||
// a trusted token issuer, configure the JWT Bearer GrantOptions for the application.
|
||||
//
|
||||
// Assertion is a sensitive parameter and its value will be
|
||||
// replaced with "sensitive" in string returned by CreateTokenWithIAMInput's
|
||||
// String and GoString methods.
|
||||
Assertion *string `locationName:"assertion" type:"string" sensitive:"true"`
|
||||
|
||||
// The unique identifier string for the client or application. This value is
|
||||
// an application ARN that has OAuth grants configured.
|
||||
//
|
||||
// ClientId is a required field
|
||||
ClientId *string `locationName:"clientId" type:"string" required:"true"`
|
||||
|
||||
// Used only when calling this API for the Authorization Code grant type. This
|
||||
// short-term code is used to identify this authorization request. The code
|
||||
// is obtained through a redirect from IAM Identity Center to a redirect URI
|
||||
// persisted in the Authorization Code GrantOptions for the application.
|
||||
Code *string `locationName:"code" type:"string"`
|
||||
|
||||
// Supports the following OAuth grant types: Authorization Code, Refresh Token,
|
||||
// JWT Bearer, and Token Exchange. Specify one of the following values, depending
|
||||
// on the grant type that you want:
|
||||
//
|
||||
// * Authorization Code - authorization_code
|
||||
//
|
||||
// * Refresh Token - refresh_token
|
||||
//
|
||||
// * JWT Bearer - urn:ietf:params:oauth:grant-type:jwt-bearer
|
||||
//
|
||||
// * Token Exchange - urn:ietf:params:oauth:grant-type:token-exchange
|
||||
//
|
||||
// GrantType is a required field
|
||||
GrantType *string `locationName:"grantType" type:"string" required:"true"`
|
||||
|
||||
// Used only when calling this API for the Authorization Code grant type. This
|
||||
// value specifies the location of the client or application that has registered
|
||||
// to receive the authorization code.
|
||||
RedirectUri *string `locationName:"redirectUri" type:"string"`
|
||||
|
||||
// Used only when calling this API for the Refresh Token grant type. This token
|
||||
// is used to refresh short-term tokens, such as the access token, that might
|
||||
// expire.
|
||||
//
|
||||
// For more information about the features and limitations of the current IAM
|
||||
// Identity Center OIDC implementation, see Considerations for Using this Guide
|
||||
// in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
|
||||
//
|
||||
// RefreshToken is a sensitive parameter and its value will be
|
||||
// replaced with "sensitive" in string returned by CreateTokenWithIAMInput's
|
||||
// String and GoString methods.
|
||||
RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"`
|
||||
|
||||
// Used only when calling this API for the Token Exchange grant type. This value
|
||||
// specifies the type of token that the requester can receive. The following
|
||||
// values are supported:
|
||||
//
|
||||
// * Access Token - urn:ietf:params:oauth:token-type:access_token
|
||||
//
|
||||
// * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token
|
||||
RequestedTokenType *string `locationName:"requestedTokenType" type:"string"`
|
||||
|
||||
// The list of scopes for which authorization is requested. The access token
|
||||
// that is issued is limited to the scopes that are granted. If the value is
|
||||
// not specified, IAM Identity Center authorizes all scopes configured for the
|
||||
// application, including the following default scopes: openid, aws, sts:identity_context.
|
||||
Scope []*string `locationName:"scope" type:"list"`
|
||||
|
||||
// Used only when calling this API for the Token Exchange grant type. This value
|
||||
// specifies the subject of the exchange. The value of the subject token must
|
||||
// be an access token issued by IAM Identity Center to a different client or
|
||||
// application. The access token must have authorized scopes that indicate the
|
||||
// requested application as a target audience.
|
||||
//
|
||||
// SubjectToken is a sensitive parameter and its value will be
|
||||
// replaced with "sensitive" in string returned by CreateTokenWithIAMInput's
|
||||
// String and GoString methods.
|
||||
SubjectToken *string `locationName:"subjectToken" type:"string" sensitive:"true"`
|
||||
|
||||
// Used only when calling this API for the Token Exchange grant type. This value
|
||||
// specifies the type of token that is passed as the subject of the exchange.
|
||||
// The following value is supported:
|
||||
//
|
||||
// * Access Token - urn:ietf:params:oauth:token-type:access_token
|
||||
SubjectTokenType *string `locationName:"subjectTokenType" type:"string"`
|
||||
}
|
||||
|
||||
// String returns the string representation.
|
||||
//
|
||||
// API parameter values that are decorated as "sensitive" in the API will not
|
||||
// be included in the string output. The member name will be present, but the
|
||||
// value will be replaced with "sensitive".
|
||||
func (s CreateTokenWithIAMInput) String() string {
|
||||
return awsutil.Prettify(s)
|
||||
}
|
||||
|
||||
// GoString returns the string representation.
|
||||
//
|
||||
// API parameter values that are decorated as "sensitive" in the API will not
|
||||
// be included in the string output. The member name will be present, but the
|
||||
// value will be replaced with "sensitive".
|
||||
func (s CreateTokenWithIAMInput) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// Validate inspects the fields of the type to determine if they are valid.
|
||||
func (s *CreateTokenWithIAMInput) Validate() error {
|
||||
invalidParams := request.ErrInvalidParams{Context: "CreateTokenWithIAMInput"}
|
||||
if s.ClientId == nil {
|
||||
invalidParams.Add(request.NewErrParamRequired("ClientId"))
|
||||
}
|
||||
if s.GrantType == nil {
|
||||
invalidParams.Add(request.NewErrParamRequired("GrantType"))
|
||||
}
|
||||
|
||||
if invalidParams.Len() > 0 {
|
||||
return invalidParams
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetAssertion sets the Assertion field's value.
|
||||
func (s *CreateTokenWithIAMInput) SetAssertion(v string) *CreateTokenWithIAMInput {
|
||||
s.Assertion = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetClientId sets the ClientId field's value.
|
||||
func (s *CreateTokenWithIAMInput) SetClientId(v string) *CreateTokenWithIAMInput {
|
||||
s.ClientId = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetCode sets the Code field's value.
|
||||
func (s *CreateTokenWithIAMInput) SetCode(v string) *CreateTokenWithIAMInput {
|
||||
s.Code = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetGrantType sets the GrantType field's value.
|
||||
func (s *CreateTokenWithIAMInput) SetGrantType(v string) *CreateTokenWithIAMInput {
|
||||
s.GrantType = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetRedirectUri sets the RedirectUri field's value.
func (s *CreateTokenWithIAMInput) SetRedirectUri(v string) *CreateTokenWithIAMInput {
    s.RedirectUri = &v
    return s
}

// SetRefreshToken sets the RefreshToken field's value.
func (s *CreateTokenWithIAMInput) SetRefreshToken(v string) *CreateTokenWithIAMInput {
    s.RefreshToken = &v
    return s
}

// SetRequestedTokenType sets the RequestedTokenType field's value.
func (s *CreateTokenWithIAMInput) SetRequestedTokenType(v string) *CreateTokenWithIAMInput {
    s.RequestedTokenType = &v
    return s
}

// SetScope sets the Scope field's value.
func (s *CreateTokenWithIAMInput) SetScope(v []*string) *CreateTokenWithIAMInput {
    s.Scope = v
    return s
}

// SetSubjectToken sets the SubjectToken field's value.
func (s *CreateTokenWithIAMInput) SetSubjectToken(v string) *CreateTokenWithIAMInput {
    s.SubjectToken = &v
    return s
}

// SetSubjectTokenType sets the SubjectTokenType field's value.
func (s *CreateTokenWithIAMInput) SetSubjectTokenType(v string) *CreateTokenWithIAMInput {
    s.SubjectTokenType = &v
    return s
}

type CreateTokenWithIAMOutput struct {
    _ struct{} `type:"structure"`

    // A bearer token to access AWS accounts and applications assigned to a user.
    //
    // AccessToken is a sensitive parameter and its value will be
    // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's
    // String and GoString methods.
    AccessToken *string `locationName:"accessToken" type:"string" sensitive:"true"`

    // Indicates the time in seconds when an access token will expire.
    ExpiresIn *int64 `locationName:"expiresIn" type:"integer"`

    // A JSON Web Token (JWT) that identifies the user associated with the issued
    // access token.
    //
    // IdToken is a sensitive parameter and its value will be
    // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's
    // String and GoString methods.
    IdToken *string `locationName:"idToken" type:"string" sensitive:"true"`

    // Indicates the type of tokens that are issued by IAM Identity Center. The
    // following values are supported:
    //
    //    * Access Token - urn:ietf:params:oauth:token-type:access_token
    //
    //    * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token
    IssuedTokenType *string `locationName:"issuedTokenType" type:"string"`

    // A token that, if present, can be used to refresh a previously issued access
    // token that might have expired.
    //
    // For more information about the features and limitations of the current IAM
    // Identity Center OIDC implementation, see Considerations for Using this Guide
    // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
    //
    // RefreshToken is a sensitive parameter and its value will be
    // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's
    // String and GoString methods.
    RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"`

    // The list of scopes for which authorization is granted. The access token that
    // is issued is limited to the scopes that are granted.
    Scope []*string `locationName:"scope" type:"list"`

    // Used to notify the requester that the returned token is an access token.
    // The supported token type is Bearer.
    TokenType *string `locationName:"tokenType" type:"string"`
}

// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CreateTokenWithIAMOutput) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CreateTokenWithIAMOutput) GoString() string {
    return s.String()
}

// SetAccessToken sets the AccessToken field's value.
func (s *CreateTokenWithIAMOutput) SetAccessToken(v string) *CreateTokenWithIAMOutput {
    s.AccessToken = &v
    return s
}

// SetExpiresIn sets the ExpiresIn field's value.
func (s *CreateTokenWithIAMOutput) SetExpiresIn(v int64) *CreateTokenWithIAMOutput {
    s.ExpiresIn = &v
    return s
}

// SetIdToken sets the IdToken field's value.
func (s *CreateTokenWithIAMOutput) SetIdToken(v string) *CreateTokenWithIAMOutput {
    s.IdToken = &v
    return s
}

// SetIssuedTokenType sets the IssuedTokenType field's value.
func (s *CreateTokenWithIAMOutput) SetIssuedTokenType(v string) *CreateTokenWithIAMOutput {
    s.IssuedTokenType = &v
    return s
}

// SetRefreshToken sets the RefreshToken field's value.
func (s *CreateTokenWithIAMOutput) SetRefreshToken(v string) *CreateTokenWithIAMOutput {
    s.RefreshToken = &v
    return s
}

// SetScope sets the Scope field's value.
func (s *CreateTokenWithIAMOutput) SetScope(v []*string) *CreateTokenWithIAMOutput {
    s.Scope = v
    return s
}

// SetTokenType sets the TokenType field's value.
func (s *CreateTokenWithIAMOutput) SetTokenType(v string) *CreateTokenWithIAMOutput {
    s.TokenType = &v
    return s
}
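
A minimal usage sketch (not part of the vendored code): the generated setters above return their receiver, so a request can be built fluently. The client ARN and token values below are hypothetical placeholders.

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ssooidc"
)

func main() {
    sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
    client := ssooidc.New(sess)

    // Chained setters build the request in one expression.
    input := new(ssooidc.CreateTokenWithIAMInput).
        SetClientId("arn:aws:sso::123456789012:application/example"). // hypothetical
        SetGrantType("refresh_token").
        SetRefreshToken("example-refresh-token") // hypothetical

    out, err := client.CreateTokenWithIAM(input)
    if err != nil {
        log.Fatal(err)
    }
    // ExpiresIn counts the seconds until the returned access token expires.
    fmt.Println(aws.Int64Value(out.ExpiresIn))
}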

// Indicates that the token issued by the service is expired and is no longer
// valid.
type ExpiredTokenException struct {
    _ struct{} `type:"structure"`
    RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

    // Single error code. For this exception the value will be expired_token.
    Error_ *string `locationName:"error" type:"string"`

    // Human-readable text providing additional information, used to assist the
    // client developer in understanding the error that occurred.
    Error_description *string `locationName:"error_description" type:"string"`

    Message_ *string `locationName:"message" type:"string"`

@ -756,8 +1212,11 @@ type InternalServerException struct {
    _ struct{} `type:"structure"`
    RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

    // Single error code. For this exception the value will be server_error.
    Error_ *string `locationName:"error" type:"string"`

    // Human-readable text providing additional information, used to assist the
    // client developer in understanding the error that occurred.
    Error_description *string `locationName:"error_description" type:"string"`

    Message_ *string `locationName:"message" type:"string"`

@ -826,8 +1285,11 @@ type InvalidClientException struct {
    _ struct{} `type:"structure"`
    RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

    // Single error code. For this exception the value will be invalid_client.
    Error_ *string `locationName:"error" type:"string"`

    // Human-readable text providing additional information, used to assist the
    // client developer in understanding the error that occurred.
    Error_description *string `locationName:"error_description" type:"string"`

    Message_ *string `locationName:"message" type:"string"`

@ -895,8 +1357,11 @@ type InvalidClientMetadataException struct {
    _ struct{} `type:"structure"`
    RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

    // Single error code. For this exception the value will be invalid_client_metadata.
    Error_ *string `locationName:"error" type:"string"`

    // Human-readable text providing additional information, used to assist the
    // client developer in understanding the error that occurred.
    Error_description *string `locationName:"error_description" type:"string"`

    Message_ *string `locationName:"message" type:"string"`

@ -964,8 +1429,11 @@ type InvalidGrantException struct {
    _ struct{} `type:"structure"`
    RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

    // Single error code. For this exception the value will be invalid_grant.
    Error_ *string `locationName:"error" type:"string"`

    // Human-readable text providing additional information, used to assist the
    // client developer in understanding the error that occurred.
    Error_description *string `locationName:"error_description" type:"string"`

    Message_ *string `locationName:"message" type:"string"`

@ -1033,8 +1501,11 @@ type InvalidRequestException struct {
    _ struct{} `type:"structure"`
    RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

    // Single error code. For this exception the value will be invalid_request.
    Error_ *string `locationName:"error" type:"string"`

    // Human-readable text providing additional information, used to assist the
    // client developer in understanding the error that occurred.
    Error_description *string `locationName:"error_description" type:"string"`

    Message_ *string `locationName:"message" type:"string"`

@ -1096,13 +1567,95 @@ func (s *InvalidRequestException) RequestID() string {
    return s.RespMetadata.RequestID
}

// Indicates that a token provided as input to the request was issued by and
// is only usable by calling IAM Identity Center endpoints in another region.
type InvalidRequestRegionException struct {
    _ struct{} `type:"structure"`
    RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

    // Indicates the IAM Identity Center endpoint which the requester may call with
    // this token.
    Endpoint *string `locationName:"endpoint" type:"string"`

    // Single error code. For this exception the value will be invalid_request.
    Error_ *string `locationName:"error" type:"string"`

    // Human-readable text providing additional information, used to assist the
    // client developer in understanding the error that occurred.
    Error_description *string `locationName:"error_description" type:"string"`

    Message_ *string `locationName:"message" type:"string"`

    // Indicates the region which the requester may call with this token.
    Region *string `locationName:"region" type:"string"`
}

// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s InvalidRequestRegionException) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s InvalidRequestRegionException) GoString() string {
    return s.String()
}

func newErrorInvalidRequestRegionException(v protocol.ResponseMetadata) error {
    return &InvalidRequestRegionException{
        RespMetadata: v,
    }
}

// Code returns the exception type name.
func (s *InvalidRequestRegionException) Code() string {
    return "InvalidRequestRegionException"
}

// Message returns the exception's message.
func (s *InvalidRequestRegionException) Message() string {
    if s.Message_ != nil {
        return *s.Message_
    }
    return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *InvalidRequestRegionException) OrigErr() error {
    return nil
}

func (s *InvalidRequestRegionException) Error() string {
    return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
}

// Status code returns the HTTP status code for the request's response error.
func (s *InvalidRequestRegionException) StatusCode() int {
    return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for request.
func (s *InvalidRequestRegionException) RequestID() string {
    return s.RespMetadata.RequestID
}
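
A sketch of how a caller might detect this new exception and surface the endpoint and region hints it carries. The request values are hypothetical, and the direct errors.As match assumes the SDK returns the modeled exception type unwrapped, as it does for this service's REST-JSON errors.

package main

import (
    "errors"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ssooidc"
)

func main() {
    sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
    client := ssooidc.New(sess)

    _, err := client.CreateTokenWithIAM(new(ssooidc.CreateTokenWithIAMInput).
        SetGrantType("refresh_token").            // hypothetical
        SetRefreshToken("example-refresh-token")) // hypothetical
    var regionErr *ssooidc.InvalidRequestRegionException
    if errors.As(err, &regionErr) {
        // The exception names the endpoint and region that can accept this token.
        log.Printf("token is bound to %s (%s); retry there",
            aws.StringValue(regionErr.Endpoint), aws.StringValue(regionErr.Region))
        return
    }
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("token created")
}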

// Indicates that the scope provided in the request is invalid.
type InvalidScopeException struct {
    _ struct{} `type:"structure"`
    RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

    // Single error code. For this exception the value will be invalid_scope.
    Error_ *string `locationName:"error" type:"string"`

    // Human-readable text providing additional information, used to assist the
    // client developer in understanding the error that occurred.
    Error_description *string `locationName:"error_description" type:"string"`

    Message_ *string `locationName:"message" type:"string"`

@ -1238,7 +1791,7 @@ func (s *RegisterClientInput) SetScopes(v []*string) *RegisterClientInput {
type RegisterClientOutput struct {
    _ struct{} `type:"structure"`

    // The endpoint where the client can request authorization.
    // An endpoint that the client can use to request authorization.
    AuthorizationEndpoint *string `locationName:"authorizationEndpoint" type:"string"`

    // The unique identifier string for each client. This client uses this identifier

@ -1250,12 +1803,16 @@ type RegisterClientOutput struct {

    // A secret string generated for the client. The client will use this string
    // to get authenticated by the service in subsequent calls.
    ClientSecret *string `locationName:"clientSecret" type:"string"`
    //
    // ClientSecret is a sensitive parameter and its value will be
    // replaced with "sensitive" in string returned by RegisterClientOutput's
    // String and GoString methods.
    ClientSecret *string `locationName:"clientSecret" type:"string" sensitive:"true"`

    // Indicates the time at which the clientId and clientSecret will become invalid.
    ClientSecretExpiresAt *int64 `locationName:"clientSecretExpiresAt" type:"long"`

    // The endpoint where the client can get an access token.
    // An endpoint that the client can use to create tokens.
    TokenEndpoint *string `locationName:"tokenEndpoint" type:"string"`
}

@ -1319,8 +1876,11 @@ type SlowDownException struct {
    _ struct{} `type:"structure"`
    RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

    // Single error code. For this exception the value will be slow_down.
    Error_ *string `locationName:"error" type:"string"`

    // Human-readable text providing additional information, used to assist the
    // client developer in understanding the error that occurred.
    Error_description *string `locationName:"error_description" type:"string"`

    Message_ *string `locationName:"message" type:"string"`

@ -1395,11 +1955,15 @@ type StartDeviceAuthorizationInput struct {
    // A secret string that is generated for the client. This value should come
    // from the persisted result of the RegisterClient API operation.
    //
    // ClientSecret is a sensitive parameter and its value will be
    // replaced with "sensitive" in string returned by StartDeviceAuthorizationInput's
    // String and GoString methods.
    //
    // ClientSecret is a required field
    ClientSecret *string `locationName:"clientSecret" type:"string" required:"true"`
    ClientSecret *string `locationName:"clientSecret" type:"string" required:"true" sensitive:"true"`

    // The URL for the AWS access portal. For more information, see Using the AWS
    // access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html)
    // The URL for the Amazon Web Services access portal. For more information,
    // see Using the Amazon Web Services access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html)
    // in the IAM Identity Center User Guide.
    //
    // StartUrl is a required field

@ -1550,8 +2114,11 @@ type UnauthorizedClientException struct {
    _ struct{} `type:"structure"`
    RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

    // Single error code. For this exception the value will be unauthorized_client.
    Error_ *string `locationName:"error" type:"string"`

    // Human-readable text providing additional information, used to assist the
    // client developer in understanding the error that occurred.
    Error_description *string `locationName:"error_description" type:"string"`

    Message_ *string `locationName:"message" type:"string"`

@ -1618,8 +2185,11 @@ type UnsupportedGrantTypeException struct {
    _ struct{} `type:"structure"`
    RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

    // Single error code. For this exception the value will be unsupported_grant_type.
    Error_ *string `locationName:"error" type:"string"`

    // Human-readable text providing additional information, used to assist the
    // client developer in understanding the error that occurred.
    Error_description *string `locationName:"error_description" type:"string"`

    Message_ *string `locationName:"message" type:"string"`

39 vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go (generated, vendored)
@ -3,15 +3,13 @@
// Package ssooidc provides the client and types for making API
// requests to AWS SSO OIDC.
//
// AWS IAM Identity Center (successor to AWS Single Sign-On) OpenID Connect
// (OIDC) is a web service that enables a client (such as AWS CLI or a native
// application) to register with IAM Identity Center. The service also enables
// the client to fetch the user’s access token upon successful authentication
// and authorization with IAM Identity Center.
// IAM Identity Center OpenID Connect (OIDC) is a web service that enables a
// client (such as CLI or a native application) to register with IAM Identity
// Center. The service also enables the client to fetch the user’s access
// token upon successful authentication and authorization with IAM Identity
// Center.
//
// Although AWS Single Sign-On was renamed, the sso and identitystore API namespaces
// will continue to retain their original name for backward compatibility purposes.
// For more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed).
// IAM Identity Center uses the sso and identitystore API namespaces.
//
// # Considerations for Using This Guide
//

@ -22,21 +20,24 @@
//   - The IAM Identity Center OIDC service currently implements only the portions
//     of the OAuth 2.0 Device Authorization Grant standard (https://tools.ietf.org/html/rfc8628
//     (https://tools.ietf.org/html/rfc8628)) that are necessary to enable single
//     sign-on authentication with the AWS CLI. Support for other OIDC flows
//     frequently needed for native applications, such as Authorization Code
//     Flow (+ PKCE), will be addressed in future releases.
//     sign-on authentication with the CLI.
//
//   - The service emits only OIDC access tokens, such that obtaining a new
//     token (For example, token refresh) requires explicit user re-authentication.
//   - With older versions of the CLI, the service only emits OIDC access tokens,
//     so to obtain a new token, users must explicitly re-authenticate. To access
//     the OIDC flow that supports token refresh and doesn’t require re-authentication,
//     update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI
//     V2) with support for OIDC token refresh and configurable IAM Identity
//     Center session durations. For more information, see Configure Amazon Web
//     Services access portal session duration (https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html).
//
//   - The access tokens provided by this service grant access to all AWS account
//     entitlements assigned to an IAM Identity Center user, not just a particular
//     application.
//   - The access tokens provided by this service grant access to all Amazon
//     Web Services account entitlements assigned to an IAM Identity Center user,
//     not just a particular application.
//
//   - The documentation in this guide does not describe the mechanism to convert
//     the access token into AWS Auth (“sigv4”) credentials for use with
//     IAM-protected AWS service endpoints. For more information, see GetRoleCredentials
//     (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html)
//     the access token into Amazon Web Services Auth (“sigv4”) credentials
//     for use with IAM-protected Amazon Web Services service endpoints. For
//     more information, see GetRoleCredentials (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html)
//     in the IAM Identity Center Portal API Reference Guide.
//
// For general information about IAM Identity Center, see What is IAM Identity
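The device grant described above maps onto three generated operations. A compressed sketch of that flow, with hypothetical client name and start URL, and polling simplified to the essentials (a real client must stop on errors other than authorization pending and honor the slow_down semantics):

package main

import (
    "fmt"
    "log"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ssooidc"
)

func main() {
    sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
    client := ssooidc.New(sess)

    // 1. Register this program as a public OIDC client.
    reg, err := client.RegisterClient(new(ssooidc.RegisterClientInput).
        SetClientName("example-client"). // hypothetical
        SetClientType("public"))
    if err != nil {
        log.Fatal(err)
    }

    // 2. Start the device authorization and send the user to the verification URI.
    auth, err := client.StartDeviceAuthorization(new(ssooidc.StartDeviceAuthorizationInput).
        SetClientId(aws.StringValue(reg.ClientId)).
        SetClientSecret(aws.StringValue(reg.ClientSecret)).
        SetStartUrl("https://example.awsapps.com/start")) // hypothetical
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("visit:", aws.StringValue(auth.VerificationUriComplete))

    // 3. Poll CreateToken until the user approves the device (simplified).
    for {
        tok, err := client.CreateToken(new(ssooidc.CreateTokenInput).
            SetClientId(aws.StringValue(reg.ClientId)).
            SetClientSecret(aws.StringValue(reg.ClientSecret)).
            SetGrantType("urn:ietf:params:oauth:grant-type:device_code").
            SetDeviceCode(aws.StringValue(auth.DeviceCode)))
        if err == nil {
            fmt.Println("access token expires in", aws.Int64Value(tok.ExpiresIn), "seconds")
            return
        }
        time.Sleep(time.Duration(aws.Int64Value(auth.Interval)) * time.Second)
    }
}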
8 vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go (generated, vendored)

@ -64,6 +64,13 @@ const (
    // a required parameter might be missing or out of range.
    ErrCodeInvalidRequestException = "InvalidRequestException"

    // ErrCodeInvalidRequestRegionException for service response error code
    // "InvalidRequestRegionException".
    //
    // Indicates that a token provided as input to the request was issued by and
    // is only usable by calling IAM Identity Center endpoints in another region.
    ErrCodeInvalidRequestRegionException = "InvalidRequestRegionException"

    // ErrCodeInvalidScopeException for service response error code
    // "InvalidScopeException".
    //

@ -100,6 +107,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
    "InvalidClientMetadataException": newErrorInvalidClientMetadataException,
    "InvalidGrantException":          newErrorInvalidGrantException,
    "InvalidRequestException":        newErrorInvalidRequestException,
    "InvalidRequestRegionException":  newErrorInvalidRequestRegionException,
    "InvalidScopeException":          newErrorInvalidScopeException,
    "SlowDownException":              newErrorSlowDownException,
    "UnauthorizedClientException":    newErrorUnauthorizedClientException,

2 vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go (generated, vendored)

@ -51,7 +51,7 @@ const (
func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSOOIDC {
    c := p.ClientConfig(EndpointsID, cfgs...)
    if c.SigningNameDerived || len(c.SigningName) == 0 {
        c.SigningName = "awsssooidc"
        c.SigningName = "sso-oauth"
    }
    return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
}

20 vendor/github.com/aws/aws-sdk-go/service/sts/api.go (generated, vendored)

@ -1460,7 +1460,15 @@ type AssumeRoleInput struct {
    // in the IAM User Guide.
    PolicyArns []*PolicyDescriptorType `type:"list"`

    // Reserved for future use.
    // A list of previously acquired trusted context assertions in the format of
    // a JSON array. The trusted context assertion is signed and encrypted by Amazon
    // Web Services STS.
    //
    // The following is an example of a ProvidedContext value that includes a single
    // trusted context assertion and the ARN of the context provider from which
    // the trusted context assertion was generated.
    //
    // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/identitycenter","ContextAssertion":"trusted-context-assertion"}]
    ProvidedContexts []*ProvidedContext `type:"list"`

    // The Amazon Resource Name (ARN) of the role to assume.

@ -3405,14 +3413,18 @@ func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType {
    return s
}

// Reserved for future use.
// Contains information about the provided context. This includes the signed
// and encrypted trusted context assertion and the context provider ARN from
// which the trusted context assertion was generated.
type ProvidedContext struct {
    _ struct{} `type:"structure"`

    // Reserved for future use.
    // The signed and encrypted trusted context assertion generated by the context
    // provider. The trusted context assertion is signed and encrypted by Amazon
    // Web Services STS.
    ContextAssertion *string `min:"4" type:"string"`

    // Reserved for future use.
    // The context provider ARN from which the trusted context assertion was generated.
    ProviderArn *string `min:"20" type:"string"`
}
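
A sketch of populating the newly documented ProvidedContexts field on AssumeRoleInput. The role ARN is a hypothetical placeholder; the provider ARN and assertion value mirror the doc comment's own example.

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/sts"
)

func main() {
    sess := session.Must(session.NewSession())
    client := sts.New(sess)

    out, err := client.AssumeRole(&sts.AssumeRoleInput{
        RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // hypothetical
        RoleSessionName: aws.String("example-session"),
        ProvidedContexts: []*sts.ProvidedContext{{
            ProviderArn:      aws.String("arn:aws:iam::aws:contextProvider/identitycenter"),
            ContextAssertion: aws.String("trusted-context-assertion"), // placeholder from the doc comment
        }},
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(aws.StringValue(out.AssumedRoleUser.Arn))
}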

13 vendor/github.com/containers/image/v5/copy/compression.go (generated, vendored)

@ -284,12 +284,25 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
        }
    }
    if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
        if d.uploadedCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
            // HACK: Don’t record zstd:chunked algorithms.
            // There is already a similar hack in internal/imagedestination/impl/helpers.BlobMatchesRequiredCompression,
            // and that one prevents reusing zstd:chunked blobs, so recording the algorithm here would be mostly harmless.
            //
            // We skip that here anyway to work around the inability of blobPipelineDetectCompressionStep to differentiate
            // between zstd and zstd:chunked; so we could, in varying situations over time, call RecordDigestCompressorName
            // with the same digest and both ZstdAlgorithmName and ZstdChunkedAlgorithmName, which causes warnings about
            // inconsistent data to be logged.
            c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName)
        }
    }
    if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest &&
        d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression {
        if d.srcCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
            // HACK: Don’t record zstd:chunked algorithms, see above.
            c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName)
        }
    }
    return nil
}

2 vendor/github.com/containers/image/v5/copy/encryption.go (generated, vendored)

@ -70,7 +70,7 @@ func (d *bpDecryptionStepData) updateCryptoOperation(operation *types.LayerCrypt
    }
}

// bpdData contains data that the copy pipeline needs about the encryption step.
// bpEncryptionStepData contains data that the copy pipeline needs about the encryption step.
type bpEncryptionStepData struct {
    encrypting bool // We are actually encrypting the stream
    finalizer  ocicrypt.EncryptLayerFinalizer

2 vendor/github.com/containers/image/v5/copy/multiple.go (generated, vendored)

@ -340,7 +340,7 @@ func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte,
    if err != nil {
        return nil, err
    }
    sigs = append(sigs, newSigs...)
    sigs = append(slices.Clone(sigs), newSigs...)

    c.Printf("Storing list signatures\n")
    if err := c.dest.PutSignaturesWithFormat(ctx, sigs, nil); err != nil {
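Why this and the following hunks wrap the existing slice in slices.Clone() before appending: append can write through a shared backing array when the first operand has spare capacity, so a caller-visible slice could be mutated at a distance. A standalone demonstration of the aliasing the change avoids:

package main

import (
    "fmt"

    "golang.org/x/exp/slices"
)

func main() {
    base := make([]string, 2, 4) // spare capacity, as a caller-supplied slice might have
    base[0], base[1] = "sig1", "sig2"
    window := base[:2]

    // Without Clone, the append reuses base's backing array...
    a := append(window, "sig3")
    // ...so a later append through another alias clobbers a[2].
    _ = append(window, "clobbered")
    fmt.Println(a[2]) // "clobbered"

    // Cloning first gives the result a private backing array.
    b := append(slices.Clone(window), "sig3")
    _ = append(window, "clobbered-again")
    fmt.Println(b[2]) // still "sig3"
}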
11 vendor/github.com/containers/image/v5/copy/single.go (generated, vendored)

@ -277,7 +277,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
    if err != nil {
        return copySingleImageResult{}, err
    }
    sigs = append(sigs, newSigs...)
    sigs = append(slices.Clone(sigs), newSigs...)

    if len(sigs) > 0 {
        c.Printf("Storing signatures\n")

@ -380,8 +380,9 @@ func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context,

    compressionAlgos := set.New[string]()
    for _, srcInfo := range ic.src.LayerInfos() {
        compression := compressionAlgorithmFromMIMEType(srcInfo)
        compressionAlgos.Add(compression.Name())
        if c := compressionAlgorithmFromMIMEType(srcInfo); c != nil {
            compressionAlgos.Add(c.Name())
        }
    }

    algos, err := algorithmsByNames(compressionAlgos.Values())

@ -743,7 +744,9 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
    uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache)
    if err == nil {
        if srcInfo.Size != -1 {
            bar.SetRefill(srcInfo.Size - bar.Current())
            refill := srcInfo.Size - bar.Current()
            bar.SetCurrent(srcInfo.Size)
            bar.SetRefill(refill)
        }
        bar.mark100PercentComplete()
        hideProgressBar = false

3 vendor/github.com/containers/image/v5/docker/distribution_error.go (generated, vendored)

@ -24,6 +24,7 @@ import (

    "github.com/docker/distribution/registry/api/errcode"
    dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge"
    "golang.org/x/exp/slices"
)

// errNoErrorsInBody is returned when an HTTP response body parses to an empty

@ -105,7 +106,7 @@ func makeErrorList(err error) []error {
}

func mergeErrors(err1, err2 error) error {
    return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
    return errcode.Errors(append(slices.Clone(makeErrorList(err1)), makeErrorList(err2)...))
}

// handleErrorResponse returns error parsed from HTTP response for an

5 vendor/github.com/containers/image/v5/docker/docker_client.go (generated, vendored)

@ -363,6 +363,11 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
    hostname := registry
    if registry == dockerHostname {
        hostname = dockerV1Hostname
        // A search term of library/foo does not find the library/foo image on the docker.io servers,
        // which is surprising; Docker itself modifies the search term client-side in this same way,
        // so it seems convenient to do the same thing.
        // Read more here: https://github.com/containers/image/pull/2133#issue-1928524334
        image = strings.TrimPrefix(image, "library/")
    }

    client, err := newDockerClient(sys, hostname, registry)

41 vendor/github.com/containers/image/v5/docker/docker_image_dest.go (generated, vendored)

@ -137,7 +137,7 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
    // If requested, precompute the blob digest to prevent uploading layers that already exist on the registry.
    // This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests,
    // the source blob is uncompressed, and the destination blob is being compressed "on the fly".
    if inputInfo.Digest == "" && d.c.sys.DockerRegistryPushPrecomputeDigests {
    if inputInfo.Digest == "" && d.c.sys != nil && d.c.sys.DockerRegistryPushPrecomputeDigests {
        logrus.Debugf("Precomputing digest layer for %s", reference.Path(d.ref.ref))
        streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.c.sys, stream, &inputInfo)
        if err != nil {

@ -341,30 +341,38 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
    // Then try reusing blobs from other locations.
    candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, options.CanSubstitute)
    for _, candidate := range candidates {
        candidateRepo, err := parseBICLocationReference(candidate.Location)
        if err != nil {
            logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
            continue
        }
        var err error
        compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName)
        if err != nil {
            logrus.Debugf("OperationAndAlgorithmForCompressor Failed: %v", err)
            continue
        }
        var candidateRepo reference.Named
        if !candidate.UnknownLocation {
            candidateRepo, err = parseBICLocationReference(candidate.Location)
            if err != nil {
                logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
                continue
            }
        }
        if !impl.BlobMatchesRequiredCompression(options, compressionAlgorithm) {
            requiredCompression := "nil"
            if compressionAlgorithm != nil {
                requiredCompression = compressionAlgorithm.Name()
            }
            if !candidate.UnknownLocation {
                logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) in %s", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression, candidateRepo.Name())
            } else {
                logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) with no location match, checking current repo", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression)
            }
            continue
        }
        if !candidate.UnknownLocation {
            if candidate.CompressorName != blobinfocache.Uncompressed {
                logrus.Debugf("Trying to reuse cached location %s compressed with %s in %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name())
                logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name())
            } else {
                logrus.Debugf("Trying to reuse cached location %s with no compression in %s", candidate.Digest.String(), candidateRepo.Name())
                logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo %s", candidate.Digest.String(), candidateRepo.Name())
            }

            // Sanity checks:
            if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
                // OCI distribution spec 1.1 allows mounting blobs without specifying the source repo

@ -375,6 +383,17 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
                logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
                continue
            }
        } else {
            if candidate.CompressorName != blobinfocache.Uncompressed {
                logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s with no location match, checking current repo", candidate.Digest.String(), candidate.CompressorName)
            } else {
                logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo with no location match, checking current repo", candidate.Digest.String())
            }
            // This digest is a known variant of this blob but we don’t
            // have a recorded location in this registry, let’s try looking
            // for it in the current repo.
            candidateRepo = reference.TrimNamed(d.ref.ref)
        }
        if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
            logrus.Debug("... Already tried the primary destination")
            continue

@ -688,6 +707,10 @@ func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context.
        }
    }

    // To make sure we can safely append to the slices of ociManifest, without adding a remote dependency on the code that creates it.
    ociManifest.Layers = slices.Clone(ociManifest.Layers)
    // We don’t need to ^^^ for ociConfig.RootFS.DiffIDs because we have created it empty ourselves, and json.Unmarshal is documented to append() to
    // the slice in the original object (or in a newly allocated object).
    for _, sig := range signatures {
        mimeType := sig.UntrustedMIMEType()
        payloadBlob := sig.UntrustedPayload()

2 vendor/github.com/containers/image/v5/docker/errors.go (generated, vendored)

@ -88,7 +88,7 @@ func registryHTTPResponseToError(res *http.Response) error {
        response = response[:50] + "..."
    }
    // %.0w makes e visible to error.Unwrap() without including any text
    err = fmt.Errorf("StatusCode: %d, %s%.0w", e.StatusCode, response, e)
    err = fmt.Errorf("StatusCode: %d, %q%.0w", e.StatusCode, response, e)
case errcode.Error:
    // e.Error() is fmt.Sprintf("%s: %s", e.Code.Error(), e.Message, which is usually
    // rather redundant. So reword it without using e.Code.Error() if e.Message is the default.
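The %.0w trick in this hunk wraps the underlying error for errors.Is/As without appending its text, and the %s-to-%q change quotes the possibly messy response body. A standalone check of that behavior:

package main

import (
    "errors"
    "fmt"
)

func main() {
    underlying := errors.New("unauthorized: authentication required")
    // %.0w keeps `underlying` in the unwrap chain but contributes no visible text;
    // %q quotes the body snippet so control characters stay readable.
    err := fmt.Errorf("StatusCode: %d, %q%.0w", 401, "body <snip>", underlying)
    fmt.Println(err)                        // StatusCode: 401, "body <snip>"
    fmt.Println(errors.Is(err, underlying)) // true
}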
5 vendor/github.com/containers/image/v5/internal/blobinfocache/types.go (generated, vendored)

@ -32,7 +32,7 @@ type BlobInfoCache2 interface {
    // otherwise the cache could be poisoned and cause us to make incorrect edits to type
    // information in a manifest.
    RecordDigestCompressorName(anyDigest digest.Digest, compressorName string)
    // CandidateLocations2 returns a prioritized, limited, number of blobs and their locations
    // CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known)
    // that could possibly be reused within the specified (transport scope) (if they still
    // exist, which is not guaranteed).
    //

@ -48,5 +48,6 @@ type BlobInfoCache2 interface {
type BICReplacementCandidate2 struct {
    Digest          digest.Digest
    CompressorName  string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression
    Location        types.BICLocationReference
    UnknownLocation bool // is true when `Location` for this blob is not set
    Location        types.BICLocationReference // not set if UnknownLocation is set to `true`
}

57 vendor/github.com/containers/image/v5/internal/image/oci.go (generated, vendored)

@ -196,14 +196,12 @@ func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, opti
    return m.convertToManifestSchema2(ctx, options)
}

// prepareLayerDecryptEditsIfNecessary checks if options requires layer decryptions.
// layerEditsOfOCIOnlyFeatures checks if options requires some layer edits to be done before converting to a Docker format.
// If not, it returns (nil, nil).
// If decryption is required, it returns a set of edits to provide to OCI1.UpdateLayerInfos,
// and edits *options to not try decryption again.
func (m *manifestOCI1) prepareLayerDecryptEditsIfNecessary(options *types.ManifestUpdateOptions) ([]types.BlobInfo, error) {
    if options == nil || !slices.ContainsFunc(options.LayerInfos, func(info types.BlobInfo) bool {
        return info.CryptoOperation == types.Decrypt
    }) {
func (m *manifestOCI1) layerEditsOfOCIOnlyFeatures(options *types.ManifestUpdateOptions) ([]types.BlobInfo, error) {
    if options == nil || options.LayerInfos == nil {
        return nil, nil
    }

@ -212,19 +210,35 @@ func (m *manifestOCI1) prepareLayerDecryptEditsIfNecessary(options *types.Manife
        return nil, fmt.Errorf("preparing to decrypt before conversion: %d layers vs. %d layer edits", len(originalInfos), len(options.LayerInfos))
    }

    res := slices.Clone(originalInfos) // Start with a full copy so that we don't forget to copy anything: use the current data in full unless we intentionaly deviate.
    updatedEdits := slices.Clone(options.LayerInfos)
    for i, info := range options.LayerInfos {
        if info.CryptoOperation == types.Decrypt {
            res[i].CryptoOperation = types.Decrypt
            updatedEdits[i].CryptoOperation = types.PreserveOriginalCrypto // Don't try to decrypt in a schema[12] manifest later, that would fail.
    ociOnlyEdits := slices.Clone(originalInfos) // Start with a full copy so that we don't forget to copy anything: use the current data in full unless we intentionally deviate.
    laterEdits := slices.Clone(options.LayerInfos)
    needsOCIOnlyEdits := false
    for i, edit := range options.LayerInfos {
        // Unless determined otherwise, don't do any compression-related MIME type conversions. m.LayerInfos() should not set these edit instructions, but be explicit.
        ociOnlyEdits[i].CompressionOperation = types.PreserveOriginal
        ociOnlyEdits[i].CompressionAlgorithm = nil

        if edit.CryptoOperation == types.Decrypt {
            needsOCIOnlyEdits = true // Encrypted types must be removed before conversion because they can’t be represented in Docker schemas
            ociOnlyEdits[i].CryptoOperation = types.Decrypt
            laterEdits[i].CryptoOperation = types.PreserveOriginalCrypto // Don't try to decrypt in a schema[12] manifest later, that would fail.
        }
        // Don't do any compression-related MIME type conversions. m.LayerInfos() should not set these edit instructions, but be explicit.
        res[i].CompressionOperation = types.PreserveOriginal
        res[i].CompressionAlgorithm = nil

        if originalInfos[i].MediaType == imgspecv1.MediaTypeImageLayerZstd ||
            originalInfos[i].MediaType == imgspecv1.MediaTypeImageLayerNonDistributableZstd { //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
            needsOCIOnlyEdits = true // Zstd MIME types must be removed before conversion because they can’t be represented in Docker schemas.
            ociOnlyEdits[i].CompressionOperation = edit.CompressionOperation
            ociOnlyEdits[i].CompressionAlgorithm = edit.CompressionAlgorithm
            laterEdits[i].CompressionOperation = types.PreserveOriginal
            laterEdits[i].CompressionAlgorithm = nil
        }
    options.LayerInfos = updatedEdits
    return res, nil
}
    }
    if !needsOCIOnlyEdits {
        return nil, nil
    }

    options.LayerInfos = laterEdits
    return ociOnlyEdits, nil
}

// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.

@ -238,15 +252,15 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, options *type

    // Mostly we first make a format conversion, and _afterwards_ do layer edits. But first we need to do the layer edits
    // which remove OCI-specific features, because trying to convert those layers would fail.
    // So, do the layer updates for decryption.
    // So, do the layer updates for decryption, and for conversions from Zstd.
    ociManifest := m.m
    layerDecryptEdits, err := m.prepareLayerDecryptEditsIfNecessary(options)
    ociOnlyEdits, err := m.layerEditsOfOCIOnlyFeatures(options)
    if err != nil {
        return nil, err
    }
    if layerDecryptEdits != nil {
    if ociOnlyEdits != nil {
        ociManifest = manifest.OCI1Clone(ociManifest)
        if err := ociManifest.UpdateLayerInfos(layerDecryptEdits); err != nil {
        if err := ociManifest.UpdateLayerInfos(ociOnlyEdits); err != nil {
            return nil, err
        }
    }

@ -275,9 +289,8 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, options *type
            layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType
        case imgspecv1.MediaTypeImageLayerZstd:
            return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
        // FIXME: s/Zsdt/Zstd/ after ocicrypt with https://github.com/containers/ocicrypt/pull/91 is released
        case ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc, ociencspec.MediaTypeLayerZstdEnc,
            ociencspec.MediaTypeLayerNonDistributableEnc, ociencspec.MediaTypeLayerNonDistributableGzipEnc, ociencspec.MediaTypeLayerNonDistributableZsdtEnc:
            ociencspec.MediaTypeLayerNonDistributableEnc, ociencspec.MediaTypeLayerNonDistributableGzipEnc, ociencspec.MediaTypeLayerNonDistributableZstdEnc:
            return nil, fmt.Errorf("during manifest conversion: encrypted layers (%q) are not supported in docker images", layers[idx].MediaType)
        default:
            return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType)

5 vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go (generated, vendored)

@ -12,6 +12,11 @@ func BlobMatchesRequiredCompression(options private.TryReusingBlobOptions, candi
    if options.RequiredCompression == nil {
        return true // no requirement imposed
    }
    if options.RequiredCompression.Name() == compression.ZstdChunkedAlgorithmName {
        // HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs.
        // The caller must re-compress to build those annotations.
        return false
    }
    return candidateCompression != nil && (options.RequiredCompression.Name() == candidateCompression.Name())
}

4 vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go (generated, vendored)

@ -133,7 +133,9 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
        }
    }
    if len(addedEntries) != 0 {
        index.Manifests = append(index.Manifests, addedEntries...)
        // slices.Clone() here to ensure a private backing array;
        // an external caller could have manually created Schema2ListPublic with a slice with extra capacity.
        index.Manifests = append(slices.Clone(index.Manifests), addedEntries...)
    }
    return nil
}

6 vendor/github.com/containers/image/v5/internal/manifest/oci_index.go (generated, vendored)

@ -167,7 +167,9 @@ func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error {
        }
    }
    if len(addedEntries) != 0 {
        index.Manifests = append(index.Manifests, addedEntries...)
        // slices.Clone() here to ensure the slice uses a private backing array;
        // an external caller could have manually created OCI1IndexPublic with a slice with extra capacity.
        index.Manifests = append(slices.Clone(index.Manifests), addedEntries...)
    }
    if len(addedEntries) != 0 || updatedAnnotations {
        slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) int {

@ -220,7 +222,7 @@ func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip
    case ic.manifestPosition != other.manifestPosition:
        return ic.manifestPosition < other.manifestPosition
    }
    panic("internal error: invalid comparision between two candidates") // This should not be reachable because in all calls we make, the two candidates differ at least in manifestPosition.
    panic("internal error: invalid comparison between two candidates") // This should not be reachable because in all calls we make, the two candidates differ at least in manifestPosition.
}

// chooseInstance is a private equivalent to ChooseInstanceByCompression,

12 vendor/github.com/containers/image/v5/oci/archive/oci_src.go (generated, vendored)

@ -28,6 +28,18 @@ func (e ImageNotFoundError) Error() string {
    return fmt.Sprintf("no descriptor found for reference %q", e.ref.image)
}

// ArchiveFileNotFoundError occurs when the archive file does not exist.
type ArchiveFileNotFoundError struct {
    // ref is the image reference
    ref ociArchiveReference
    // path is the file path that was not present
    path string
}

func (e ArchiveFileNotFoundError) Error() string {
    return fmt.Sprintf("archive file not found: %q", e.path)
}

type ociArchiveImageSource struct {
    impl.Compat

19 vendor/github.com/containers/image/v5/oci/archive/oci_transport.go (generated, vendored)

@ -4,6 +4,7 @@ import (
    "context"
    "errors"
    "fmt"
    "io/fs"
    "os"
    "strings"

@ -171,18 +172,24 @@ func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error)

// creates the temporary directory and copies the tarred content to it
func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (tempDirOCIRef, error) {
    src := ref.resolvedFile
    arch, err := os.Open(src)
    if err != nil {
        if errors.Is(err, fs.ErrNotExist) {
            return tempDirOCIRef{}, ArchiveFileNotFoundError{ref: ref, path: src}
        } else {
            return tempDirOCIRef{}, err
        }
    }
    defer arch.Close()

    tempDirRef, err := createOCIRef(sys, ref.image)
    if err != nil {
        return tempDirOCIRef{}, fmt.Errorf("creating oci reference: %w", err)
    }
    src := ref.resolvedFile
    dst := tempDirRef.tempDirectory

    // TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
    arch, err := os.Open(src)
    if err != nil {
        return tempDirOCIRef{}, err
    }
    defer arch.Close()
    if err := archive.NewDefaultArchiver().Untar(arch, dst, &archive.TarOptions{NoLchown: true}); err != nil {
        if err := tempDirRef.deleteTempDir(); err != nil {
            return tempDirOCIRef{}, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err)

240 vendor/github.com/containers/image/v5/oci/layout/oci_delete.go (generated, vendored, new file)

@ -0,0 +1,240 @@
package layout

import (
    "context"
    "encoding/json"
    "fmt"
    "io/fs"
    "os"

    "github.com/containers/image/v5/internal/set"
    "github.com/containers/image/v5/types"
    digest "github.com/opencontainers/go-digest"
    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/sirupsen/logrus"
    "golang.org/x/exp/slices"
)

// DeleteImage deletes the named image from the directory, if supported.
func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
    sharedBlobsDir := ""
    if sys != nil && sys.OCISharedBlobDirPath != "" {
        sharedBlobsDir = sys.OCISharedBlobDirPath
    }

    descriptor, descriptorIndex, err := ref.getManifestDescriptor()
    if err != nil {
        return err
    }

    var blobsUsedByImage map[digest.Digest]int

    switch descriptor.MediaType {
    case imgspecv1.MediaTypeImageManifest:
        blobsUsedByImage, err = ref.getBlobsUsedInSingleImage(&descriptor, sharedBlobsDir)
    case imgspecv1.MediaTypeImageIndex:
        blobsUsedByImage, err = ref.getBlobsUsedInImageIndex(&descriptor, sharedBlobsDir)
    default:
        return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType)
    }
    if err != nil {
        return err
    }

    blobsToDelete, err := ref.getBlobsToDelete(blobsUsedByImage, sharedBlobsDir)
    if err != nil {
        return err
    }

    err = ref.deleteBlobs(blobsToDelete)
    if err != nil {
        return err
    }

    return ref.deleteReferenceFromIndex(descriptorIndex)
}

func (ref ociReference) getBlobsUsedInSingleImage(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (map[digest.Digest]int, error) {
    manifest, err := ref.getManifest(descriptor, sharedBlobsDir)
    if err != nil {
        return nil, err
    }
    blobsUsedInManifest := ref.getBlobsUsedInManifest(manifest)
    blobsUsedInManifest[descriptor.Digest]++ // Add the current manifest to the list of blobs used by this reference

    return blobsUsedInManifest, nil
}

func (ref ociReference) getBlobsUsedInImageIndex(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (map[digest.Digest]int, error) {
    blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir)
    if err != nil {
        return nil, err
    }
    index, err := parseIndex(blobPath)
    if err != nil {
        return nil, err
    }

    blobsUsedInImageRefIndex := make(map[digest.Digest]int)
    err = ref.addBlobsUsedInIndex(blobsUsedInImageRefIndex, index, sharedBlobsDir)
    if err != nil {
        return nil, err
    }
    blobsUsedInImageRefIndex[descriptor.Digest]++ // Add the nested index in the list of blobs used by this reference

    return blobsUsedInImageRefIndex, nil
}

// Updates a map of digests with their usage counts, so a blob that is referenced three times will map to 3
func (ref ociReference) addBlobsUsedInIndex(destination map[digest.Digest]int, index *imgspecv1.Index, sharedBlobsDir string) error {
    for _, descriptor := range index.Manifests {
        destination[descriptor.Digest]++
        switch descriptor.MediaType {
        case imgspecv1.MediaTypeImageManifest:
            manifest, err := ref.getManifest(&descriptor, sharedBlobsDir)
            if err != nil {
                return err
            }
            for digest, count := range ref.getBlobsUsedInManifest(manifest) {
                destination[digest] += count
            }
        case imgspecv1.MediaTypeImageIndex:
            blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir)
            if err != nil {
                return err
            }
            index, err := parseIndex(blobPath)
            if err != nil {
                return err
            }
            err = ref.addBlobsUsedInIndex(destination, index, sharedBlobsDir)
            if err != nil {
                return err
            }
        default:
            return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType)
        }
    }

    return nil
}

func (ref ociReference) getBlobsUsedInManifest(manifest *imgspecv1.Manifest) map[digest.Digest]int {
    blobsUsedInManifest := make(map[digest.Digest]int, 0)

    blobsUsedInManifest[manifest.Config.Digest]++
    for _, layer := range manifest.Layers {
        blobsUsedInManifest[layer.Digest]++
    }

    return blobsUsedInManifest
}

// Takes a map of digests to their usage counts in the manifest being deleted,
// compares it with the digest usage in the root index, and returns the set of blobs that can be safely deleted
func (ref ociReference) getBlobsToDelete(blobsUsedByDescriptorToDelete map[digest.Digest]int, sharedBlobsDir string) (*set.Set[digest.Digest], error) {
    rootIndex, err := ref.getIndex()
    if err != nil {
        return nil, err
    }
    blobsUsedInRootIndex := make(map[digest.Digest]int)
    err = ref.addBlobsUsedInIndex(blobsUsedInRootIndex, rootIndex, sharedBlobsDir)
    if err != nil {
        return nil, err
    }

    blobsToDelete := set.New[digest.Digest]()

    for digest, count := range blobsUsedInRootIndex {
        if count-blobsUsedByDescriptorToDelete[digest] == 0 {
            blobsToDelete.Add(digest)
        }
    }

    return blobsToDelete, nil
}

// This transport never generates layouts where blobs for an image are both in the local blobs directory
// and the shared one; it’s either one or the other, depending on how OCISharedBlobDirPath is set.
//
// But we can’t correctly compute use counts for OCISharedBlobDirPath (because we don't know what
// the other layouts sharing that directory are, and we might not even have permission to read them),
// so we can’t really delete any blobs in that case.
// Checking the _local_ blobs directory, and deleting blobs from there, doesn't really hurt,
// in case the layout was created using some other tool or without OCISharedBlobDirPath set, so let's silently
// check for local blobs (but we should make no noise if the blobs are actually in the shared directory).
//
// So, NOTE: the blobPath() call below hard-codes "" even in calls where OCISharedBlobDirPath is set
func (ref ociReference) deleteBlobs(blobsToDelete *set.Set[digest.Digest]) error {
    for _, digest := range blobsToDelete.Values() {
        blobPath, err := ref.blobPath(digest, "") // Only delete in the local directory, see comment above
        if err != nil {
            return err
        }
        err = deleteBlob(blobPath)
        if err != nil {
            return err
        }
    }

    return nil
}

func deleteBlob(blobPath string) error {
    logrus.Debug(fmt.Sprintf("Deleting blob at %q", blobPath))

    err := os.Remove(blobPath)
    if err != nil && !os.IsNotExist(err) {
        return err
    } else {
        return nil
    }
}

func (ref ociReference) deleteReferenceFromIndex(referenceIndex int) error {
    index, err := ref.getIndex()
    if err != nil {
        return err
    }

    index.Manifests = slices.Delete(index.Manifests, referenceIndex, referenceIndex+1)

    return saveJSON(ref.indexPath(), index)
}

func saveJSON(path string, content any) error {
    // If the file already exists, get its mode to preserve it
    var mode fs.FileMode
    existingfi, err := os.Stat(path)
    if err != nil {
        if !os.IsNotExist(err) {
            return err
        } else { // File does not exist, use default mode
            mode = 0644
        }
    } else {
        mode = existingfi.Mode()
    }

    file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode)
    if err != nil {
        return err
    }
    defer file.Close()

    return json.NewEncoder(file).Encode(content)
}

func (ref ociReference) getManifest(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (*imgspecv1.Manifest, error) {
    manifestPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir)
    if err != nil {
        return nil, err
    }

    manifest, err := parseJSON[imgspecv1.Manifest](manifestPath)
    if err != nil {
        return nil, err
    }

    return manifest, nil
}
|
||||
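Aside, not part of the diff: the deletion logic above is a reference-count subtraction. A minimal, self-contained sketch of the same idea, with made-up digests:

package main

import "fmt"

// deletable mirrors getBlobsToDelete: a blob may be removed only when the
// image being deleted accounts for every recorded use of it in the root index.
func deletable(rootIndex, usedByImage map[string]int) []string {
    var out []string
    for digest, count := range rootIndex {
        if count-usedByImage[digest] == 0 {
            out = append(out, digest)
        }
    }
    return out
}

func main() {
    root := map[string]int{"sha256:aaa": 2, "sha256:bbb": 1}
    img := map[string]int{"sha256:aaa": 1, "sha256:bbb": 1}
    fmt.Println(deletable(root, img)) // [sha256:bbb]; sha256:aaa is still referenced elsewhere
}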
15 vendor/github.com/containers/image/v5/oci/layout/oci_dest.go generated vendored

@@ -19,6 +19,7 @@ import (
     digest "github.com/opencontainers/go-digest"
     imgspec "github.com/opencontainers/image-spec/specs-go"
     imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+    "golang.org/x/exp/slices"
 )

 type ociImageDestination struct {
@@ -84,7 +85,7 @@ func newImageDestination(sys *types.SystemContext, ref ociReference) (private.Im
     // Per the OCI image specification, layouts MUST have a "blobs" subdirectory,
     // but it MAY be empty (e.g. if we never end up calling PutBlob)
     // https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19
-    if err := ensureDirectoryExists(filepath.Join(d.ref.dir, "blobs")); err != nil {
+    if err := ensureDirectoryExists(filepath.Join(d.ref.dir, imgspecv1.ImageBlobsDir)); err != nil {
         return nil, err
     }
     return d, nil
@@ -271,8 +272,8 @@ func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) {
             return
         }
     }
-    // It's a new entry to be added to the index.
-    d.index.Manifests = append(d.index.Manifests, *desc)
+    // It's a new entry to be added to the index. Use slices.Clone() to avoid a remote dependency on how d.index was created.
+    d.index.Manifests = append(slices.Clone(d.index.Manifests), *desc)
 }

 // Commit marks the process of storing the image as successful and asks for the image to be persisted.
@@ -283,7 +284,13 @@ func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) {
 // - Uploaded data MAY be visible to others before Commit() is called
 // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
 func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error {
-    if err := os.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
+    layoutBytes, err := json.Marshal(imgspecv1.ImageLayout{
+        Version: imgspecv1.ImageLayoutVersion,
+    })
+    if err != nil {
+        return err
+    }
+    if err := os.WriteFile(d.ref.ociLayoutPath(), layoutBytes, 0644); err != nil {
         return err
     }
     indexJSON, err := json.Marshal(d.index)
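Aside, not part of the diff: both slices.Clone() changes in this commit (here and in tlsclientconfig further down) guard against the append-aliasing pitfall in Go, where appending to a slice with spare capacity mutates a backing array that other slices may share. A standalone sketch of the failure mode and the fix, assuming golang.org/x/exp/slices is available:

package main

import (
    "fmt"

    "golang.org/x/exp/slices"
)

func main() {
    base := make([]int, 2, 4) // spare capacity: plain append will reuse it
    a := append(base, 10)     // a shares base's backing array
    b := append(base, 20)     // overwrites the element a just wrote
    fmt.Println(a[2], b[2])   // 20 20, the aliasing bug

    c := append(slices.Clone(base), 30) // fresh backing array, no aliasing
    d := append(slices.Clone(base), 40)
    fmt.Println(c[2], d[2]) // 30 40
}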
2 vendor/github.com/containers/image/v5/oci/layout/oci_src.go generated vendored

@@ -60,7 +60,7 @@ func newImageSource(sys *types.SystemContext, ref ociReference) (private.ImageSo

     client := &http.Client{}
     client.Transport = tr
-    descriptor, err := ref.getManifestDescriptor()
+    descriptor, _, err := ref.getManifestDescriptor()
     if err != nil {
         return nil, err
     }
62 vendor/github.com/containers/image/v5/oci/layout/oci_transport.go generated vendored

@@ -160,48 +160,56 @@ func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext)
 // getIndex returns a pointer to the index references by this ociReference. If an error occurs opening an index nil is returned together
 // with an error.
 func (ref ociReference) getIndex() (*imgspecv1.Index, error) {
-    indexJSON, err := os.Open(ref.indexPath())
-    if err != nil {
-        return nil, err
-    }
-    defer indexJSON.Close()
-
-    index := &imgspecv1.Index{}
-    if err := json.NewDecoder(indexJSON).Decode(index); err != nil {
-        return nil, err
-    }
-    return index, nil
+    return parseIndex(ref.indexPath())
 }

-func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) {
+func parseIndex(path string) (*imgspecv1.Index, error) {
+    return parseJSON[imgspecv1.Index](path)
+}
+
+func parseJSON[T any](path string) (*T, error) {
+    content, err := os.Open(path)
+    if err != nil {
+        return nil, err
+    }
+    defer content.Close()
+
+    obj := new(T)
+    if err := json.NewDecoder(content).Decode(obj); err != nil {
+        return nil, err
+    }
+    return obj, nil
+}
+
+func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, error) {
     index, err := ref.getIndex()
     if err != nil {
-        return imgspecv1.Descriptor{}, err
+        return imgspecv1.Descriptor{}, -1, err
     }

     if ref.image == "" {
         // return manifest if only one image is in the oci directory
         if len(index.Manifests) != 1 {
             // ask user to choose image when more than one image in the oci directory
-            return imgspecv1.Descriptor{}, ErrMoreThanOneImage
+            return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage
         }
-        return index.Manifests[0], nil
+        return index.Manifests[0], 0, nil
     } else {
         // if image specified, look through all manifests for a match
         var unsupportedMIMETypes []string
-        for _, md := range index.Manifests {
+        for i, md := range index.Manifests {
             if refName, ok := md.Annotations[imgspecv1.AnnotationRefName]; ok && refName == ref.image {
                 if md.MediaType == imgspecv1.MediaTypeImageManifest || md.MediaType == imgspecv1.MediaTypeImageIndex {
-                    return md, nil
+                    return md, i, nil
                 }
                 unsupportedMIMETypes = append(unsupportedMIMETypes, md.MediaType)
             }
         }
         if len(unsupportedMIMETypes) != 0 {
-            return imgspecv1.Descriptor{}, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes)
+            return imgspecv1.Descriptor{}, -1, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes)
         }
     }
-    return imgspecv1.Descriptor{}, ImageNotFoundError{ref}
+    return imgspecv1.Descriptor{}, -1, ImageNotFoundError{ref}
 }

 // LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name
@@ -211,7 +219,8 @@ func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor,
     if !ok {
         return imgspecv1.Descriptor{}, errors.New("error typecasting, need type ociRef")
     }
-    return ociRef.getManifestDescriptor()
+    md, _, err := ociRef.getManifestDescriptor()
+    return md, err
 }

 // NewImageSource returns a types.ImageSource for this reference.
@@ -226,19 +235,14 @@ func (ref ociReference) NewImageDestination(ctx context.Context, sys *types.Syst
     return newImageDestination(sys, ref)
 }

-// DeleteImage deletes the named image from the registry, if supported.
-func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
-    return errors.New("Deleting images not implemented for oci: images")
-}
-
 // ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions.
 func (ref ociReference) ociLayoutPath() string {
-    return filepath.Join(ref.dir, "oci-layout")
+    return filepath.Join(ref.dir, imgspecv1.ImageLayoutFile)
 }

 // indexPath returns a path for the index.json within a directory using OCI conventions.
 func (ref ociReference) indexPath() string {
-    return filepath.Join(ref.dir, "index.json")
+    return filepath.Join(ref.dir, imgspecv1.ImageIndexFile)
 }

 // blobPath returns a path for a blob within a directory using OCI image-layout conventions.
@@ -246,9 +250,11 @@ func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (st
     if err := digest.Validate(); err != nil {
         return "", fmt.Errorf("unexpected digest reference %s: %w", digest, err)
     }
-    blobDir := filepath.Join(ref.dir, "blobs")
+    var blobDir string
     if sharedBlobDir != "" {
         blobDir = sharedBlobDir
+    } else {
+        blobDir = filepath.Join(ref.dir, imgspecv1.ImageBlobsDir)
     }
     return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil
 }
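Aside, not part of the diff: the new parseJSON[T] helper above replaces per-type open/decode boilerplate with one generic function. A self-contained sketch of the same pattern (the layout type and scratch file here are illustrative, not from the commit); it writes a scratch file in the current directory:

package main

import (
    "encoding/json"
    "fmt"
    "os"
)

// parseJSON opens path and decodes its JSON content into a freshly allocated T.
func parseJSON[T any](path string) (*T, error) {
    f, err := os.Open(path)
    if err != nil {
        return nil, err
    }
    defer f.Close()

    obj := new(T)
    if err := json.NewDecoder(f).Decode(obj); err != nil {
        return nil, err
    }
    return obj, nil
}

type layout struct {
    ImageLayoutVersion string `json:"imageLayoutVersion"`
}

func main() {
    if err := os.WriteFile("oci-layout", []byte(`{"imageLayoutVersion":"1.0.0"}`), 0o644); err != nil {
        panic(err)
    }
    l, err := parseJSON[layout]("oci-layout")
    if err != nil {
        panic(err)
    }
    fmt.Println(l.ImageLayoutVersion) // 1.0.0
}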
@@ -10,15 +10,20 @@ import (
     "github.com/opencontainers/go-digest"
 )

-// replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates,
+// replacementAttempts is the number of blob replacement candidates with known location returned by destructivelyPrioritizeReplacementCandidates,
 // and therefore ultimately by types.BlobInfoCache.CandidateLocations.
 // This is a heuristic/guess, and could well use a different value.
 const replacementAttempts = 5

+// replacementUnknownLocationAttempts is the number of blob replacement candidates with unknown Location returned by destructivelyPrioritizeReplacementCandidates,
+// and therefore ultimately by blobinfocache.BlobInfoCache2.CandidateLocations2.
+// This is a heuristic/guess, and could well use a different value.
+const replacementUnknownLocationAttempts = 2
+
 // CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
 type CandidateWithTime struct {
     Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate
-    LastSeen  time.Time                              // Time the candidate was last known to exist (either read or written)
+    LastSeen  time.Time                              // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation)
 }

 // candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
@@ -77,9 +82,22 @@ func (css *candidateSortState) Swap(i, j int) {
     css.cs[i], css.cs[j] = css.cs[j], css.cs[i]
 }

-// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the
-// number of entries to limit, only to make testing simpler.
-func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []blobinfocache.BICReplacementCandidate2 {
+func min(a, b int) int {
+    if a < b {
+        return a
+    }
+    return b
+}
+
+// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the
+// number of entries to limit for known and unknown location separately, only to make testing simpler.
+// TODO: following function is not destructive any more in the nature instead priortized result is actually copies of the original
+// candidate set, so In future we might wanna re-name this public API and remove the destructive prefix.
+func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, totalLimit int, noLocationLimit int) []blobinfocache.BICReplacementCandidate2 {
+    // split unknown candidates and known candidates
+    // and limit them seperately.
+    var knownLocationCandidates []CandidateWithTime
+    var unknownLocationCandidates []CandidateWithTime
     // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
     // compare equal.
     // FIXME: Use slices.SortFunc after we update to Go 1.20 (Go 1.21?) and Time.Compare and cmp.Compare are available.
@@ -88,24 +106,34 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime,
         primaryDigest:      primaryDigest,
         uncompressedDigest: uncompressedDigest,
     })

-    resLength := len(cs)
-    if resLength > maxCandidates {
-        resLength = maxCandidates
-    }
-    res := make([]blobinfocache.BICReplacementCandidate2, resLength)
-    for i := range res {
-        res[i] = cs[i].Candidate
+    for _, candidate := range cs {
+        if candidate.Candidate.UnknownLocation {
+            unknownLocationCandidates = append(unknownLocationCandidates, candidate)
+        } else {
+            knownLocationCandidates = append(knownLocationCandidates, candidate)
+        }
     }
+
+    knownLocationCandidatesUsed := min(len(knownLocationCandidates), totalLimit)
+    remainingCapacity := totalLimit - knownLocationCandidatesUsed
+    unknownLocationCandidatesUsed := min(noLocationLimit, min(remainingCapacity, len(unknownLocationCandidates)))
+    res := make([]blobinfocache.BICReplacementCandidate2, knownLocationCandidatesUsed)
+    for i := 0; i < knownLocationCandidatesUsed; i++ {
+        res[i] = knownLocationCandidates[i].Candidate
+    }
+    // If candidates with unknown location are found, lets add them to final list
+    for i := 0; i < unknownLocationCandidatesUsed; i++ {
+        res = append(res, unknownLocationCandidates[i].Candidate)
+    }
     return res
 }

 // DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
-// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest),
-// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
+// the primary digest the user actually asked for, the corresponding uncompressed digest (if known, possibly equal to the primary digest) returns an
+// appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
 //
 // WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
 // make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
 func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []blobinfocache.BICReplacementCandidate2 {
-    return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
+    return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts, replacementUnknownLocationAttempts)
 }
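Aside, not part of the diff: a quick worked example of the new two-bucket limiting arithmetic, using the constants above (totalLimit = 5, noLocationLimit = 2) and assumed candidate counts:

package main

import "fmt"

func min(a, b int) int {
    if a < b {
        return a
    }
    return b
}

func main() {
    const totalLimit, noLocationLimit = 5, 2 // replacementAttempts, replacementUnknownLocationAttempts
    known, unknown := 4, 3                   // candidate counts by location knowledge

    knownUsed := min(known, totalLimit)                          // 4
    remaining := totalLimit - knownUsed                          // 1
    unknownUsed := min(noLocationLimit, min(remaining, unknown)) // 1
    fmt.Println(knownUsed, unknownUsed)                          // 4 1: never more than 5 total, at most 2 unknown
}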
46 vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go generated vendored

@@ -133,17 +133,21 @@ func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compresso
     mem.compressors[blobDigest] = compressorName
 }

-// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
-func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime {
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory
+// with corresponding compression info from mem.compressors, and returns the result of appending
+// them to candidates. v2Output allows including candidates with unknown location, and filters out
+// candidates with unknown compression.
+func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) []prioritize.CandidateWithTime {
+    compressorName := blobinfocache.UnknownCompression
+    if v, ok := mem.compressors[digest]; ok {
+        compressorName = v
+    }
+    if compressorName == blobinfocache.UnknownCompression && v2Output {
+        return candidates
+    }
     locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
+    if len(locations) > 0 {
         for l, t := range locations {
-            compressorName, compressorKnown := mem.compressors[digest]
-            if !compressorKnown {
-                if requireCompressionInfo {
-                    continue
-                }
-                compressorName = blobinfocache.UnknownCompression
-            }
             candidates = append(candidates, prioritize.CandidateWithTime{
                 Candidate: blobinfocache.BICReplacementCandidate2{
                     Digest: digest,
@@ -153,6 +157,17 @@ func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
                 LastSeen: t,
             })
         }
+    } else if v2Output {
+        candidates = append(candidates, prioritize.CandidateWithTime{
+            Candidate: blobinfocache.BICReplacementCandidate2{
+                Digest:          digest,
+                CompressorName:  compressorName,
+                UnknownLocation: true,
+                Location:        types.BICLocationReference{Opaque: ""},
+            },
+            LastSeen: time.Time{},
+        })
+    }
     return candidates
 }

@@ -166,7 +181,7 @@ func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types
     return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, false))
 }

-// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused
+// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) that could possibly be reused
 // within the specified (transport scope) (if they still exist, which is not guaranteed).
 //
 // If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute,
@@ -176,23 +191,24 @@ func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope type
     return mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, true)
 }

-func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 {
+func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 {
     mem.mutex.Lock()
     defer mem.mutex.Unlock()
     res := []prioritize.CandidateWithTime{}
-    res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, requireCompressionInfo)
+    res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, v2Output)
     var uncompressedDigest digest.Digest // = ""
     if canSubstitute {
         if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
-            if otherDigests, ok := mem.digestsByUncompressed[uncompressedDigest]; ok {
+            otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
+            if otherDigests != nil {
                 for _, d := range otherDigests.Values() {
                     if d != primaryDigest && d != uncompressedDigest {
-                        res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo)
+                        res = mem.appendReplacementCandidates(res, transport, scope, d, v2Output)
                     }
                 }
             }
             if uncompressedDigest != primaryDigest {
-                res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, requireCompressionInfo)
+                res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, v2Output)
             }
         }
     }
48 vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go generated vendored

@@ -57,7 +57,7 @@ type cache struct {

     // The database/sql package says “It is rarely necessary to close a DB.”, and steers towards a long-term *sql.DB connection pool.
     // That’s probably very applicable for database-backed services, where the database is the primary data store. That’s not necessarily
-    // the case for callers of c/image, where image operations might be a small proportion of hte total runtime, and the cache is fairly
+    // the case for callers of c/image, where image operations might be a small proportion of the total runtime, and the cache is fairly
     // incidental even to the image operations. It’s also hard for us to use that model, because the public BlobInfoCache object doesn’t have
     // a Close method, so creating a lot of single-use caches could leak data.
     //
@@ -117,7 +117,7 @@ func (sqc *cache) Open() {
     if sqc.refCount == 0 {
         db, err := rawOpen(sqc.path)
         if err != nil {
-            logrus.Warnf("Error opening (previously-succesfully-opened) blob info cache at %q: %v", sqc.path, err)
+            logrus.Warnf("Error opening (previously-successfully-opened) blob info cache at %q: %v", sqc.path, err)
             db = nil // But still increase sqc.refCount, because a .Close() will happen
         }
         sqc.db = db
@@ -171,7 +171,7 @@ func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (T, error) {

 // dbTransaction calls fn within a read-write transaction in db.
 func dbTransaction[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (T, error) {
-    // Ideally we should be able to distinguish between read-only and read-write transctions, see the _txlock=exclusive dicussion.
+    // Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive dicussion.

     var zeroRes T // A zero value of T

@@ -249,7 +249,7 @@ func ensureDBHasCurrentSchema(db *sql.DB) error {
 // * Joins (the two that exist in appendReplacementCandidates) are based on the text representation of digests.
 //
 // Using integer primary keys might make the joins themselves a bit more efficient, but then we would need to involve an extra
-// join to translate from/to the user-provided digests anyway. If anything, that extra join (potentialy more btree lookups)
+// join to translate from/to the user-provided digests anyway. If anything, that extra join (potentially more btree lookups)
 // is probably costlier than comparing a few more bytes of data.
 //
 // Perhaps more importantly, storing digest texts directly makes the database dumps much easier to read for humans without
@@ -427,11 +427,13 @@ func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressor
     }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
 }

-// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
-func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) ([]prioritize.CandidateWithTime, error) {
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest),
+// and returns the result of appending them to candidates. v2Output allows including candidates with unknown
+// location, and filters out candidates with unknown compression.
+func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) ([]prioritize.CandidateWithTime, error) {
     var rows *sql.Rows
     var err error
-    if requireCompressionInfo {
+    if v2Output {
         rows, err = tx.Query("SELECT location, time, compressor FROM KnownLocations JOIN DigestCompressors "+
             "ON KnownLocations.digest = DigestCompressors.digest "+
             "WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
@@ -448,6 +450,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
     }
     defer rows.Close()

+    res := []prioritize.CandidateWithTime{}
     for rows.Next() {
         var location string
         var time time.Time
@@ -455,7 +458,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
         if err := rows.Scan(&location, &time, &compressorName); err != nil {
             return nil, fmt.Errorf("scanning candidate: %w", err)
         }
-        candidates = append(candidates, prioritize.CandidateWithTime{
+        res = append(res, prioritize.CandidateWithTime{
             Candidate: blobinfocache.BICReplacementCandidate2{
                 Digest:         digest,
                 CompressorName: compressorName,
@@ -467,10 +470,29 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
     if err := rows.Err(); err != nil {
         return nil, fmt.Errorf("iterating through locations: %w", err)
     }
+
+    if len(res) == 0 && v2Output {
+        compressor, found, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", digest.String())
+        if err != nil {
+            return nil, fmt.Errorf("scanning compressorName: %w", err)
+        }
+        if found {
+            res = append(res, prioritize.CandidateWithTime{
+                Candidate: blobinfocache.BICReplacementCandidate2{
+                    Digest:          digest,
+                    CompressorName:  compressor,
+                    UnknownLocation: true,
+                    Location:        types.BICLocationReference{Opaque: ""},
+                },
+                LastSeen: time.Time{},
+            })
+        }
+    }
+    candidates = append(candidates, res...)
     return candidates, nil
 }

-// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations
+// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known)
 // that could possibly be reused within the specified (transport scope) (if they still
 // exist, which is not guaranteed).
 //
@@ -483,11 +505,11 @@ func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope type
     return sqc.candidateLocations(transport, scope, digest, canSubstitute, true)
 }

-func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 {
+func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 {
     var uncompressedDigest digest.Digest // = ""
     res, err := transaction(sqc, func(tx *sql.Tx) ([]prioritize.CandidateWithTime, error) {
         res := []prioritize.CandidateWithTime{}
-        res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, requireCompressionInfo)
+        res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, v2Output)
         if err != nil {
             return nil, err
         }
@@ -516,7 +538,7 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
                 return nil, err
             }
             if otherDigest != primaryDigest && otherDigest != uncompressedDigest {
-                res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, requireCompressionInfo)
+                res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Output)
                 if err != nil {
                     return nil, err
                 }
@@ -527,7 +549,7 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
         }

         if uncompressedDigest != primaryDigest {
-            res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, requireCompressionInfo)
+            res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Output)
             if err != nil {
                 return nil, err
             }
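Aside, not part of the diff: the v2Output flow above boils down to "prefer candidates with known locations; if there are none but the compressor is on record, emit a single location-less candidate rather than nothing". A minimal sketch of that control flow with stand-in types (candidate here is a stand-in for blobinfocache.BICReplacementCandidate2):

package main

import "fmt"

type candidate struct {
    digest          string
    compressor      string
    unknownLocation bool
    location        string
}

// lookup mimics the fallback in appendReplacementCandidates.
func lookup(locations []string, compressor string, v2Output bool) []candidate {
    var res []candidate
    for _, loc := range locations {
        res = append(res, candidate{digest: "sha256:aaa", compressor: compressor, location: loc})
    }
    if len(res) == 0 && v2Output && compressor != "" {
        // No known location, but the compressor is recorded: still worth returning.
        res = append(res, candidate{digest: "sha256:aaa", compressor: compressor, unknownLocation: true})
    }
    return res
}

func main() {
    fmt.Println(lookup(nil, "gzip", true)) // one UnknownLocation candidate
    fmt.Println(lookup(nil, "gzip", false)) // [] (v1 callers get nothing)
}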
281 vendor/github.com/containers/image/v5/pkg/docker/config/config.go generated vendored

@@ -5,6 +5,7 @@ import (
     "encoding/json"
     "errors"
     "fmt"
+    "io/fs"
     "os"
     "os/exec"
     "path/filepath"
@@ -61,78 +62,6 @@ func newAuthPathDefault(path string) authPath {
     return authPath{path: path, legacyFormat: false}
 }

-// SetCredentials stores the username and password in a location
-// appropriate for sys and the users’ configuration.
-// A valid key is a repository, a namespace within a registry, or a registry hostname;
-// using forms other than just a registry may fail depending on configuration.
-// Returns a human-readable description of the location that was updated.
-// NOTE: The return value is only intended to be read by humans; its form is not an API,
-// it may change (or new forms can be added) any time.
-func SetCredentials(sys *types.SystemContext, key, username, password string) (string, error) {
-    isNamespaced, err := validateKey(key)
-    if err != nil {
-        return "", err
-    }
-
-    helpers, err := sysregistriesv2.CredentialHelpers(sys)
-    if err != nil {
-        return "", err
-    }
-
-    // Make sure to collect all errors.
-    var multiErr error
-    for _, helper := range helpers {
-        var desc string
-        var err error
-        switch helper {
-        // Special-case the built-in helpers for auth files.
-        case sysregistriesv2.AuthenticationFileHelper:
-            desc, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
-                if ch, exists := fileContents.CredHelpers[key]; exists {
-                    if isNamespaced {
-                        return false, "", unsupportedNamespaceErr(ch)
-                    }
-                    desc, err := setCredsInCredHelper(ch, key, username, password)
-                    if err != nil {
-                        return false, "", err
-                    }
-                    return false, desc, nil
-                }
-                creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
-                newCreds := dockerAuthConfig{Auth: creds}
-                fileContents.AuthConfigs[key] = newCreds
-                return true, "", nil
-            })
-        // External helpers.
-        default:
-            if isNamespaced {
-                err = unsupportedNamespaceErr(helper)
-            } else {
-                desc, err = setCredsInCredHelper(helper, key, username, password)
-            }
-        }
-        if err != nil {
-            multiErr = multierror.Append(multiErr, err)
-            logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", key, helper, err)
-            continue
-        }
-        logrus.Debugf("Stored credentials for %s in credential helper %s", key, helper)
-        return desc, nil
-    }
-    return "", multiErr
-}
-
-func unsupportedNamespaceErr(helper string) error {
-    return fmt.Errorf("namespaced key is not supported for credential helper %s", helper)
-}
-
-// SetAuthentication stores the username and password in the credential helper or file
-// See the documentation of SetCredentials for format of "key"
-func SetAuthentication(sys *types.SystemContext, key, username, password string) error {
-    _, err := SetCredentials(sys, key, username, password)
-    return err
-}
-
 // GetAllCredentials returns the registry credentials for all registries stored
 // in any of the configured credential helpers.
 func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthConfig, error) {
@@ -370,17 +299,79 @@ func getAuthenticationWithHomeDir(sys *types.SystemContext, key, homeDir string)
     return creds.Username, creds.Password, nil
 }

+// SetCredentials stores the username and password in a location
+// appropriate for sys and the users’ configuration.
+// A valid key is a repository, a namespace within a registry, or a registry hostname;
+// using forms other than just a registry may fail depending on configuration.
+// Returns a human-readable description of the location that was updated.
+// NOTE: The return value is only intended to be read by humans; its form is not an API,
+// it may change (or new forms can be added) any time.
+func SetCredentials(sys *types.SystemContext, key, username, password string) (string, error) {
+    helpers, jsonEditor, key, isNamespaced, err := prepareForEdit(sys, key, true)
+    if err != nil {
+        return "", err
+    }
+
+    // Make sure to collect all errors.
+    var multiErr error
+    for _, helper := range helpers {
+        var desc string
+        var err error
+        switch helper {
+        // Special-case the built-in helpers for auth files.
+        case sysregistriesv2.AuthenticationFileHelper:
+            desc, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
+                if ch, exists := fileContents.CredHelpers[key]; exists {
+                    if isNamespaced {
+                        return false, "", unsupportedNamespaceErr(ch)
+                    }
+                    desc, err := setCredsInCredHelper(ch, key, username, password)
+                    if err != nil {
+                        return false, "", err
+                    }
+                    return false, desc, nil
+                }
+                creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
+                newCreds := dockerAuthConfig{Auth: creds}
+                fileContents.AuthConfigs[key] = newCreds
+                return true, "", nil
+            })
+        // External helpers.
+        default:
+            if isNamespaced {
+                err = unsupportedNamespaceErr(helper)
+            } else {
+                desc, err = setCredsInCredHelper(helper, key, username, password)
+            }
+        }
+        if err != nil {
+            multiErr = multierror.Append(multiErr, err)
+            logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", key, helper, err)
+            continue
+        }
+        logrus.Debugf("Stored credentials for %s in credential helper %s", key, helper)
+        return desc, nil
+    }
+    return "", multiErr
+}
+
+func unsupportedNamespaceErr(helper string) error {
+    return fmt.Errorf("namespaced key is not supported for credential helper %s", helper)
+}
+
+// SetAuthentication stores the username and password in the credential helper or file
+// See the documentation of SetCredentials for format of "key"
+func SetAuthentication(sys *types.SystemContext, key, username, password string) error {
+    _, err := SetCredentials(sys, key, username, password)
+    return err
+}
+
 // RemoveAuthentication removes credentials for `key` from all possible
 // sources such as credential helpers and auth files.
 // A valid key is a repository, a namespace within a registry, or a registry hostname;
 // using forms other than just a registry may fail depending on configuration.
 func RemoveAuthentication(sys *types.SystemContext, key string) error {
-    isNamespaced, err := validateKey(key)
-    if err != nil {
-        return err
-    }
-
-    helpers, err := sysregistriesv2.CredentialHelpers(sys)
+    helpers, jsonEditor, key, isNamespaced, err := prepareForEdit(sys, key, true)
     if err != nil {
         return err
     }
@@ -411,7 +402,7 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error {
         switch helper {
         // Special-case the built-in helper for auth files.
         case sysregistriesv2.AuthenticationFileHelper:
-            _, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
+            _, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
                 if innerHelper, exists := fileContents.CredHelpers[key]; exists {
                     removeFromCredHelper(innerHelper)
                 }
@@ -443,7 +434,7 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error {
 // RemoveAllAuthentication deletes all the credentials stored in credential
 // helpers and auth files.
 func RemoveAllAuthentication(sys *types.SystemContext) error {
-    helpers, err := sysregistriesv2.CredentialHelpers(sys)
+    helpers, jsonEditor, _, _, err := prepareForEdit(sys, "", false)
     if err != nil {
         return err
     }
@@ -454,7 +445,7 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
         switch helper {
         // Special-case the built-in helper for auth files.
         case sysregistriesv2.AuthenticationFileHelper:
-            _, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
+            _, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
                 for registry, helper := range fileContents.CredHelpers {
                     // Helpers in auth files are expected
                     // to exist, so no special treatment
@@ -497,6 +488,46 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
     return multiErr
 }

+// prepareForEdit processes sys and key (if keyRelevant) to return:
+// - a list of credential helpers
+// - a function which can be used to edit the JSON file
+// - the key value to actually use in credential helpers / JSON
+// - a boolean which is true if key is namespaced (and should not be used with credential helpers).
+func prepareForEdit(sys *types.SystemContext, key string, keyRelevant bool) ([]string, func(*types.SystemContext, func(*dockerConfigFile) (bool, string, error)) (string, error), string, bool, error) {
+    var isNamespaced bool
+    if keyRelevant {
+        ns, err := validateKey(key)
+        if err != nil {
+            return nil, nil, "", false, err
+        }
+        isNamespaced = ns
+    }
+
+    if sys != nil && sys.DockerCompatAuthFilePath != "" {
+        if sys.AuthFilePath != "" {
+            return nil, nil, "", false, errors.New("AuthFilePath and DockerCompatAuthFilePath can not be set simultaneously")
+        }
+        if keyRelevant {
+            if isNamespaced {
+                return nil, nil, "", false, fmt.Errorf("Credentials cannot be recorded in Docker-compatible format with namespaced key %q", key)
+            }
+            if key == "docker.io" {
+                key = "https://index.docker.io/v1/"
+            }
+        }
+
+        // Do not use helpers defined in sysregistriesv2 because Docker isn’t aware of them.
+        return []string{sysregistriesv2.AuthenticationFileHelper}, modifyDockerConfigJSON, key, false, nil
+    }
+
+    helpers, err := sysregistriesv2.CredentialHelpers(sys)
+    if err != nil {
+        return nil, nil, "", false, err
+    }
+
+    return helpers, modifyJSON, key, isNamespaced, nil
+}
+
 func listCredsInCredHelper(credHelper string) (map[string]string, error) {
     helperName := fmt.Sprintf("docker-credential-%s", credHelper)
     p := helperclient.NewShellProgramFunc(helperName)
@@ -513,9 +544,17 @@ func getPathToAuth(sys *types.SystemContext) (authPath, bool, error) {
 // it exists only to allow testing it with an artificial runtime.GOOS.
 func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (authPath, bool, error) {
     if sys != nil {
+        if sys.AuthFilePath != "" && sys.DockerCompatAuthFilePath != "" {
+            return authPath{}, false, errors.New("AuthFilePath and DockerCompatAuthFilePath can not be set simultaneously")
+        }
         if sys.AuthFilePath != "" {
             return newAuthPathDefault(sys.AuthFilePath), true, nil
         }
+        // When reading, we can process auth.json and Docker’s config.json with the same code.
+        // When writing, prepareForEdit chooses an appropriate jsonEditor implementation.
+        if sys.DockerCompatAuthFilePath != "" {
+            return newAuthPathDefault(sys.DockerCompatAuthFilePath), true, nil
+        }
         if sys.LegacyFormatAuthFilePath != "" {
             return authPath{path: sys.LegacyFormatAuthFilePath, legacyFormat: true}, true, nil
         }
@@ -626,6 +665,86 @@ func modifyJSON(sys *types.SystemContext, editor func(fileContents *dockerConfig
     return description, nil
 }

+// modifyDockerConfigJSON finds a docker config.json file, calls editor on the contents, and
+// writes it back if editor returns true.
+// Returns a human-readable description of the file, to be returned by SetCredentials.
+//
+// The editor may also return a human-readable description of the updated location; if it is "",
+// the file itself is used.
+func modifyDockerConfigJSON(sys *types.SystemContext, editor func(fileContents *dockerConfigFile) (bool, string, error)) (string, error) {
+    if sys == nil || sys.DockerCompatAuthFilePath == "" {
+        return "", errors.New("internal error: modifyDockerConfigJSON called with DockerCompatAuthFilePath not set")
+    }
+    path := sys.DockerCompatAuthFilePath
+
+    dir := filepath.Dir(path)
+    if err := os.MkdirAll(dir, 0700); err != nil {
+        return "", err
+    }
+
+    // Try hard not to clobber fields we don’t understand, even fields which may be added in future Docker versions.
+    var rawContents map[string]json.RawMessage
+    originalBytes, err := os.ReadFile(path)
+    switch {
+    case err == nil:
+        if err := json.Unmarshal(originalBytes, &rawContents); err != nil {
+            return "", fmt.Errorf("unmarshaling JSON at %q: %w", path, err)
+        }
+    case errors.Is(err, fs.ErrNotExist):
+        rawContents = map[string]json.RawMessage{}
+    default: // err != nil
+        return "", err
+    }
+
+    syntheticContents := dockerConfigFile{
+        AuthConfigs: map[string]dockerAuthConfig{},
+        CredHelpers: map[string]string{},
+    }
+    // json.Unmarshal also falls back to case-insensitive field matching; this code does not do that. Presumably
+    // config.json is mostly maintained by machines doing `docker login`, so the files should, hopefully, not contain field names with
+    // unexpected case.
+    if rawAuths, ok := rawContents["auths"]; ok {
+        // This conversion will lose fields we don’t know about; when updating an entry, we can’t tell whether an unknown field
+        // should be preserved or discarded (because it is made obsolete/unwanted with the new credentials).
+        // It might make sense to track which entries of "auths" we actually modified, and to not touch any others.
+        if err := json.Unmarshal(rawAuths, &syntheticContents.AuthConfigs); err != nil {
+            return "", fmt.Errorf(`unmarshaling "auths" in JSON at %q: %w`, path, err)
+        }
+    }
+    if rawCH, ok := rawContents["credHelpers"]; ok {
+        if err := json.Unmarshal(rawCH, &syntheticContents.CredHelpers); err != nil {
+            return "", fmt.Errorf(`unmarshaling "credHelpers" in JSON at %q: %w`, path, err)
+        }
+    }
+
+    updated, description, err := editor(&syntheticContents)
+    if err != nil {
+        return "", fmt.Errorf("updating %q: %w", path, err)
+    }
+    if updated {
+        rawAuths, err := json.MarshalIndent(syntheticContents.AuthConfigs, "", "\t")
+        if err != nil {
+            return "", fmt.Errorf("marshaling JSON %q: %w", path, err)
+        }
+        rawContents["auths"] = rawAuths
+        // We never modify syntheticContents.CredHelpers, so we don’t need to update it.
+        newData, err := json.MarshalIndent(rawContents, "", "\t")
+        if err != nil {
+            return "", fmt.Errorf("marshaling JSON %q: %w", path, err)
+        }
+
+        if err = ioutils.AtomicWriteFile(path, newData, 0600); err != nil {
+            return "", fmt.Errorf("writing to file %q: %w", path, err)
+        }
+    }
+
+    if description == "" {
+        description = path
+    }
+    return description, nil
+}
+
 func getCredsFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) {
     helperName := fmt.Sprintf("docker-credential-%s", credHelper)
     p := helperclient.NewShellProgramFunc(helperName)
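Aside, not part of the diff: modifyDockerConfigJSON relies on a round-trip trick worth spelling out, decoding the whole file into map[string]json.RawMessage so unknown top-level fields survive, and re-marshaling only the keys that were touched. A minimal standalone sketch of that technique (the input document and field names here are made up):

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    original := []byte(`{"auths":{"example.com":{"auth":"old"}},"experimental":"enabled"}`)

    // Keep every top-level key as raw bytes; this program only understands "auths".
    var raw map[string]json.RawMessage
    if err := json.Unmarshal(original, &raw); err != nil {
        panic(err)
    }

    auths := map[string]map[string]string{}
    if err := json.Unmarshal(raw["auths"], &auths); err != nil {
        panic(err)
    }
    auths["example.com"] = map[string]string{"auth": "new"} // the edit

    updated, err := json.Marshal(auths)
    if err != nil {
        panic(err)
    }
    raw["auths"] = updated // write back only the key that was modified

    out, _ := json.Marshal(raw)
    fmt.Println(string(out)) // "experimental":"enabled" is preserved untouched
}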
2 vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go generated vendored

@@ -66,7 +66,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
             if err != nil {
                 return err
             }
-            tlsc.Certificates = append(tlsc.Certificates, cert)
+            tlsc.Certificates = append(slices.Clone(tlsc.Certificates), cert)
         }
         if strings.HasSuffix(f.Name(), ".key") {
             keyName := f.Name()
8 vendor/github.com/containers/image/v5/types/types.go generated vendored

@@ -445,7 +445,7 @@ type ImageCloser interface {
     Close() error
 }

-// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedManifest
+// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedImage
 type ManifestUpdateOptions struct {
     LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored.
     EmbeddedDockerReference reference.Named
@@ -457,7 +457,7 @@ type ManifestUpdateOptions struct {
 // ManifestUpdateInformation is a component of ManifestUpdateOptions, named here
 // only to make writing struct literals possible.
 type ManifestUpdateInformation struct {
-    Destination ImageDestination // and yes, UpdatedManifest may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go)
+    Destination ImageDestination // and yes, UpdatedImage may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go)
     LayerInfos []BlobInfo // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers)
     LayerDiffIDs []digest.Digest // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order.
 }
@@ -594,6 +594,10 @@ type SystemContext struct {
     // this field is ignored if `AuthFilePath` is set (we favor the newer format);
     // only reading of this data is supported;
     LegacyFormatAuthFilePath string
+    // If set, a path to a Docker-compatible "config.json" file containing credentials; and no other files are processed.
+    // This must not be set if AuthFilePath is set.
+    // Only credentials and credential helpers in this file apre processed, not any other configuration in this file.
+    DockerCompatAuthFilePath string
     // If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match.
     ArchitectureChoice string
     // If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
2 vendor/github.com/containers/image/v5/version/version.go generated vendored

@@ -6,7 +6,7 @@ const (
     // VersionMajor is for an API incompatible changes
     VersionMajor = 5
     // VersionMinor is for functionality in a backwards-compatible manner
-    VersionMinor = 28
+    VersionMinor = 29
     // VersionPatch is for backwards-compatible bug fixes
     VersionPatch = 0
1 vendor/github.com/containers/ocicrypt/Makefile generated vendored

@@ -28,6 +28,7 @@ vendor:
     go mod tidy

 test:
+    go clean -testcache
     go test ./... -test.v

 generate-protobuf:
6 vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go generated vendored

@@ -41,7 +41,11 @@ func NewKeyWrapper() keywrap.KeyWrapper {
 // WrapKeys wraps the session key for recpients and encrypts the optsData, which
 // describe the symmetric key used for encrypting the layer
 func (kw *pkcs11KeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) {
-    pkcs11Recipients, err := addPubKeys(&ec.DecryptConfig, append(ec.Parameters["pkcs11-pubkeys"], ec.Parameters["pkcs11-yamls"]...))
+    // append({}, ...) allocates a fresh backing array, and that's necessary to guarantee concurrent calls to WrapKeys (as in c/image/copy.Image)
+    // can't race writing to the same backing array.
+    pubKeys := append([][]byte{}, ec.Parameters["pkcs11-pubkeys"]...) // In Go 1.21, slices.Clone(ec.Parameters["pkcs11-pubkeys"])
+    pubKeys = append(pubKeys, ec.Parameters["pkcs11-yamls"]...)
+    pkcs11Recipients, err := addPubKeys(&ec.DecryptConfig, pubKeys)
     if err != nil {
         return nil, err
     }
10 vendor/github.com/containers/ocicrypt/spec/spec.go generated vendored

@@ -9,8 +9,12 @@ const (
     MediaTypeLayerZstdEnc = "application/vnd.oci.image.layer.v1.tar+zstd+encrypted"
     // MediaTypeLayerNonDistributableEnc is MIME type used for non distributable encrypted layers.
     MediaTypeLayerNonDistributableEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+encrypted"
-    // MediaTypeLayerGzipEnc is MIME type used for non distributable encrypted gzip-compressed layers.
+    // MediaTypeLayerNonDistributableGzipEnc is MIME type used for non distributable encrypted gzip-compressed layers.
     MediaTypeLayerNonDistributableGzipEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip+encrypted"
-    // MediaTypeLayerZstdEnc is MIME type used for non distributable encrypted zstd-compressed layers.
-    MediaTypeLayerNonDistributableZsdtEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd+encrypted"
+    // MediaTypeLayerNonDistributableZstdEnc is MIME type used for non distributable encrypted zstd-compressed layers.
+    MediaTypeLayerNonDistributableZstdEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd+encrypted"
+    // MediaTypeLayerNonDistributableZsdtEnc is MIME type used for non distributable encrypted zstd-compressed layers.
+    //
+    // Deprecated: Use [MediaTypeLayerNonDistributableZstdEnc].
+    MediaTypeLayerNonDistributableZsdtEnc = MediaTypeLayerNonDistributableZstdEnc
 )
19 vendor/github.com/containers/storage/pkg/archive/archive.go generated vendored

@@ -955,14 +955,8 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
     if options.ForceMask != nil {
         // if ForceMask is in place, make sure lchown is disabled.
         doChown = false
-        uid, gid, mode, err := GetFileOwner(dest)
-        if err == nil {
-            value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
-            if err := system.Lsetxattr(dest, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil {
-                return err
-            }
-        }
     }
+    var rootHdr *tar.Header

     // Iterate through the files in the archive.
 loop:
@@ -1007,6 +1001,9 @@ loop:
         if err != nil {
             return err
         }
+        if rel == "." {
+            rootHdr = hdr
+        }
         if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
             return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
         }
@@ -1080,6 +1077,14 @@ loop:
             return err
         }
     }
+
+    if options.ForceMask != nil && rootHdr != nil {
+        value := fmt.Sprintf("%d:%d:0%o", rootHdr.Uid, rootHdr.Gid, rootHdr.Mode)
+        if err := system.Lsetxattr(dest, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil {
+            return err
+        }
+    }
+
     return nil
 }
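Aside, not part of the diff: the override value written above is a plain-text uid:gid:0mode triple with the mode rendered in octal; the change sources it from the archive's root-directory header rather than from the pre-existing owner of dest. A tiny illustration with assumed header values:

package main

import "fmt"

func main() {
    // Values as they might appear in a tar root-directory header.
    uid, gid, mode := 1000, 1000, int64(0o700)
    value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
    fmt.Println(value) // 1000:1000:0700
}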
6 vendor/github.com/containers/storage/pkg/system/rm.go generated vendored

@@ -28,7 +28,7 @@ func EnsureRemoveAll(dir string) error {

     // track retries
     exitOnErr := make(map[string]int)
-    maxRetry := 100
+    maxRetry := 1000

     // Attempt a simple remove all first, this avoids the more expensive
     // RecursiveUnmount call if not needed.
@@ -38,7 +38,7 @@ func EnsureRemoveAll(dir string) error {

     // Attempt to unmount anything beneath this dir first
     if err := mount.RecursiveUnmount(dir); err != nil {
-        logrus.Debugf("RecusiveUnmount on %s failed: %v", dir, err)
+        logrus.Debugf("RecursiveUnmount on %s failed: %v", dir, err)
     }

     for {
@@ -94,6 +94,6 @@ func EnsureRemoveAll(dir string) error {
             return err
         }
         exitOnErr[pe.Path]++
-        time.Sleep(100 * time.Millisecond)
+        time.Sleep(10 * time.Millisecond)
     }
 }
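Aside, not part of the diff: the two constants above move together, so the total retry budget is unchanged while the polling gets finer-grained. A quick check of the arithmetic:

package main

import (
    "fmt"
    "time"
)

func main() {
    // Old budget: 100 retries x 100ms; new budget: 1000 retries x 10ms.
    // Same ~10s worst-case ceiling, but success is noticed up to 10x sooner.
    fmt.Println(100*100*time.Millisecond, 1000*10*time.Millisecond) // 10s 10s
}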
5 vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go generated vendored

@@ -25,6 +25,11 @@ func GetRootlessUID() int {
     return os.Getuid()
 }

+// GetRootlessGID returns the GID of the user in the parent userNS
+func GetRootlessGID() int {
+    return os.Getgid()
+}
+
 // RootlessEnv returns the environment settings for the rootless containers
 func RootlessEnv() []string {
     return append(os.Environ(), UsernsEnvName+"=")
vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go (generated, vendored; 10 added lines)

@@ -441,6 +441,16 @@ func GetRootlessUID() int {
 	return os.Getuid()
 }

+// GetRootlessGID returns the GID of the user in the parent userNS
+func GetRootlessGID() int {
+	gidEnv := getenv("_CONTAINERS_ROOTLESS_GID")
+	if gidEnv != "" {
+		u, _ := strconv.Atoi(gidEnv)
+		return u
+	}
+	return os.Getgid()
+}
+
 // RootlessEnv returns the environment settings for the rootless containers
 func RootlessEnv() []string {
 	return append(os.Environ(), UsernsEnvName+"=done")
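On Linux the new GetRootlessGID prefers the _CONTAINERS_ROOTLESS_GID environment variable (set by the parent process before it re-execs into the user namespace, mirroring the existing UID handling) and only falls back to os.Getgid(). A self-contained sketch of the same lookup pattern, using os.Getenv in place of the package's internal getenv helper:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// rootlessGID mirrors the pattern added in unshare_linux.go: a GID handed
// down via the environment wins over the current process's GID. os.Getenv
// stands in for the package-internal getenv helper.
func rootlessGID() int {
	if gidEnv := os.Getenv("_CONTAINERS_ROOTLESS_GID"); gidEnv != "" {
		if gid, err := strconv.Atoi(gidEnv); err == nil {
			return gid
		}
	}
	return os.Getgid()
}

func main() {
	fmt.Println("rootless GID:", rootlessGID())
}
```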
vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go (generated, vendored; 5 added lines)

@@ -25,6 +25,11 @@ func GetRootlessUID() int {
 	return os.Getuid()
 }

+// GetRootlessGID returns the GID of the user in the parent userNS
+func GetRootlessGID() int {
+	return os.Getgid()
+}
+
 // RootlessEnv returns the environment settings for the rootless containers
 func RootlessEnv() []string {
 	return append(os.Environ(), UsernsEnvName+"=")
vendor/github.com/distribution/reference/.gitattributes (generated, vendored; new file, 1 line)

@@ -0,0 +1 @@
*.go text eol=lf
vendor/github.com/distribution/reference/.gitignore (generated, vendored; new file, 2 lines)

@@ -0,0 +1,2 @@
# Cover profiles
*.out
vendor/github.com/distribution/reference/.golangci.yml (generated, vendored; new file, 18 lines)

@@ -0,0 +1,18 @@
linters:
  enable:
    - bodyclose
    - dupword # Checks for duplicate words in the source code
    - gofmt
    - goimports
    - ineffassign
    - misspell
    - revive
    - staticcheck
    - unconvert
    - unused
    - vet
  disable:
    - errcheck

run:
  deadline: 2m
vendor/github.com/distribution/reference/CODE-OF-CONDUCT.md (generated, vendored; new file, 5 lines)

@@ -0,0 +1,5 @@
# Code of Conduct

We follow the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).

Please contact the [CNCF Code of Conduct Committee](mailto:conduct@cncf.io) in order to report violations of the Code of Conduct.
vendor/github.com/distribution/reference/CONTRIBUTING.md (generated, vendored; new file, 114 lines)

@@ -0,0 +1,114 @@
# Contributing to the reference library

## Community help

If you need help, please ask in the [#distribution](https://cloud-native.slack.com/archives/C01GVR8SY4R) channel on CNCF community slack.
[Click here for an invite to the CNCF community slack](https://slack.cncf.io/)

## Reporting security issues

The maintainers take security seriously. If you discover a security
issue, please bring it to their attention right away!

Please **DO NOT** file a public issue, instead send your report privately to
[cncf-distribution-security@lists.cncf.io](mailto:cncf-distribution-security@lists.cncf.io).

## Reporting an issue properly

By following these simple rules you will get better and faster feedback on your issue.

- search the bugtracker for an already reported issue

### If you found an issue that describes your problem:

- please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments
- please refrain from adding "same thing here" or "+1" comments
- you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button
- comment if you have some new, technical and relevant information to add to the case
- __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue.

### If you have not found an existing issue that describes your problem:

1. create a new issue, with a succinct title that describes your issue:
   - bad title: "It doesn't work with my docker"
   - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST"
2. copy the output of (or similar for other container tools):
   - `docker version`
   - `docker info`
   - `docker exec <registry-container> registry --version`
3. copy the command line you used to launch your Registry
4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments)
5. reproduce your problem and get your docker daemon logs showing the error
6. if relevant, copy your registry logs that show the error
7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used)
8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry

## Contributing Code

Contributions should be made via pull requests. Pull requests will be reviewed
by one or more maintainers or reviewers and merged when acceptable.

You should follow the basic GitHub workflow:

1. Use your own [fork](https://help.github.com/en/articles/about-forks)
2. Create your [change](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#successful-changes)
3. Test your code
4. [Commit](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#commit-messages) your work, always [sign your commits](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#commit-messages)
5. Push your change to your fork and create a [Pull Request](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork)

Refer to [containerd's contribution guide](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#successful-changes)
for tips on creating a successful contribution.

## Sign your work

The sign-off is a simple line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are pretty simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):

```
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.

Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
```

Then you just add a line to every git commit message:

    Signed-off-by: Joe Smith <joe.smith@email.com>

Use your real name (sorry, no pseudonyms or anonymous contributions.)

If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.
vendor/github.com/distribution/reference/GOVERNANCE.md (generated, vendored; new file, 144 lines)

@@ -0,0 +1,144 @@
# distribution/reference Project Governance

Distribution [Code of Conduct](./CODE-OF-CONDUCT.md) can be found here.

For specific guidance on practical contribution steps please
see our [CONTRIBUTING.md](./CONTRIBUTING.md) guide.

## Maintainership

There are different types of maintainers, with different responsibilities, but
all maintainers have 3 things in common:

1) They share responsibility in the project's success.
2) They have made a long-term, recurring time investment to improve the project.
3) They spend that time doing whatever needs to be done, not necessarily what
is the most interesting or fun.

Maintainers are often under-appreciated, because their work is harder to appreciate.
It's easy to appreciate a really cool and technically advanced feature. It's harder
to appreciate the absence of bugs, the slow but steady improvement in stability,
or the reliability of a release process. But those things distinguish a good
project from a great one.

## Reviewers

A reviewer is a core role within the project.
They share in reviewing issues and pull requests and their LGTM counts towards the
required LGTM count to merge a code change into the project.

Reviewers are part of the organization but do not have write access.
Becoming a reviewer is a core aspect in the journey to becoming a maintainer.

## Adding maintainers

Maintainers are first and foremost contributors that have shown they are
committed to the long term success of a project. Contributors wanting to become
maintainers are expected to be deeply involved in contributing code, pull
request review, and triage of issues in the project for more than three months.

Just contributing does not make you a maintainer, it is about building trust
with the current maintainers of the project and being a person that they can
depend on and trust to make decisions in the best interest of the project.

Periodically, the existing maintainers curate a list of contributors that have
shown regular activity on the project over the prior months. From this list,
maintainer candidates are selected and proposed in a pull request or a
maintainers communication channel.

After a candidate has been announced to the maintainers, the existing
maintainers are given five business days to discuss the candidate, raise
objections and cast their vote. Votes may take place on the communication
channel or via pull request comment. Candidates must be approved by at least 66%
of the current maintainers by adding their vote on the mailing list. The
reviewer role has the same process but only requires 33% of current maintainers.
Only maintainers of the repository that the candidate is proposed for are
allowed to vote.

If a candidate is approved, a maintainer will contact the candidate to invite
the candidate to open a pull request that adds the contributor to the
MAINTAINERS file. The voting process may take place inside a pull request if a
maintainer has already discussed the candidacy with the candidate and a
maintainer is willing to be a sponsor by opening the pull request. The candidate
becomes a maintainer once the pull request is merged.

## Stepping down policy

Life priorities, interests, and passions can change. If you're a maintainer but
feel you must remove yourself from the list, inform other maintainers that you
intend to step down, and if possible, help find someone to pick up your work.
At the very least, ensure your work can be continued where you left off.

After you've informed other maintainers, create a pull request to remove
yourself from the MAINTAINERS file.

## Removal of inactive maintainers

Similar to the procedure for adding new maintainers, existing maintainers can
be removed from the list if they do not show significant activity on the
project. Periodically, the maintainers review the list of maintainers and their
activity over the last three months.

If a maintainer has shown insufficient activity over this period, a neutral
person will contact the maintainer to ask if they want to continue being
a maintainer. If the maintainer decides to step down as a maintainer, they
open a pull request to be removed from the MAINTAINERS file.

If the maintainer wants to remain a maintainer, but is unable to perform the
required duties they can be removed with a vote of at least 66% of the current
maintainers. In this case, maintainers should first propose the change to
maintainers via the maintainers communication channel, then open a pull request
for voting. The voting period is five business days. The voting pull request
should not come as a surpise to any maintainer and any discussion related to
performance must not be discussed on the pull request.

## How are decisions made?

Docker distribution is an open-source project with an open design philosophy.
This means that the repository is the source of truth for EVERY aspect of the
project, including its philosophy, design, road map, and APIs. *If it's part of
the project, it's in the repo. If it's in the repo, it's part of the project.*

As a result, all decisions can be expressed as changes to the repository. An
implementation change is a change to the source code. An API change is a change
to the API specification. A philosophy change is a change to the philosophy
manifesto, and so on.

All decisions affecting distribution, big and small, follow the same 3 steps:

* Step 1: Open a pull request. Anyone can do this.

* Step 2: Discuss the pull request. Anyone can do this.

* Step 3: Merge or refuse the pull request. Who does this depends on the nature
of the pull request and which areas of the project it affects.

## Helping contributors with the DCO

The [DCO or `Sign your work`](./CONTRIBUTING.md#sign-your-work)
requirement is not intended as a roadblock or speed bump.

Some contributors are not as familiar with `git`, or have used a web
based editor, and thus asking them to `git commit --amend -s` is not the best
way forward.

In this case, maintainers can update the commits based on clause (c) of the DCO.
The most trivial way for a contributor to allow the maintainer to do this, is to
add a DCO signature in a pull requests's comment, or a maintainer can simply
note that the change is sufficiently trivial that it does not substantially
change the existing contribution - i.e., a spelling change.

When you add someone's DCO, please also add your own to keep a log.

## I'm a maintainer. Should I make pull requests too?

Yes. Nobody should ever push to master directly. All changes should be
made through a pull request.

## Conflict Resolution

If you have a technical dispute that you feel has reached an impasse with a
subset of the community, any contributor may open an issue, specifically
calling for a resolution vote of the current core maintainers to resolve the
dispute. The same voting quorums required (2/3) for adding and removing
maintainers will apply to conflict resolution.
vendor/github.com/distribution/reference/LICENSE (generated, vendored; new file, 202 lines)

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright {yyyy} {name of copyright owner}

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
vendor/github.com/distribution/reference/MAINTAINERS (generated, vendored; new file, 26 lines)

@@ -0,0 +1,26 @@
# Distribution project maintainers & reviewers
#
# See GOVERNANCE.md for maintainer versus reviewer roles
#
# MAINTAINERS (cncf-distribution-maintainers@lists.cncf.io)
# GitHub ID, Name, Email address
"chrispat","Chris Patterson","chrispat@github.com"
"clarkbw","Bryan Clark","clarkbw@github.com"
"corhere","Cory Snider","csnider@mirantis.com"
"deleteriousEffect","Hayley Swimelar","hswimelar@gitlab.com"
"heww","He Weiwei","hweiwei@vmware.com"
"joaodrp","João Pereira","jpereira@gitlab.com"
"justincormack","Justin Cormack","justin.cormack@docker.com"
"squizzi","Kyle Squizzato","ksquizzato@mirantis.com"
"milosgajdos","Milos Gajdos","milosthegajdos@gmail.com"
"sargun","Sargun Dhillon","sargun@sargun.me"
"wy65701436","Wang Yan","wangyan@vmware.com"
"stevelasker","Steve Lasker","steve.lasker@microsoft.com"
#
# REVIEWERS
# GitHub ID, Name, Email address
"dmcgowan","Derek McGowan","derek@mcgstyle.net"
"stevvooe","Stephen Day","stevvooe@gmail.com"
"thajeztah","Sebastiaan van Stijn","github@gone.nl"
"DavidSpek", "David van der Spek", "vanderspek.david@gmail.com"
"Jamstah", "James Hewitt", "james.hewitt@gmail.com"
vendor/github.com/distribution/reference/Makefile (generated, vendored; new file, 25 lines)

@@ -0,0 +1,25 @@
# Project packages.
PACKAGES=$(shell go list ./...)

# Flags passed to `go test`
BUILDFLAGS ?=
TESTFLAGS ?=

.PHONY: all build test coverage
.DEFAULT: all

all: build

build: ## no binaries to build, so just check compilation suceeds
	go build ${BUILDFLAGS} ./...

test: ## run tests
	go test ${TESTFLAGS} ./...

coverage: ## generate coverprofiles from the unit tests
	rm -f coverage.txt
	go test ${TESTFLAGS} -cover -coverprofile=cover.out ./...

.PHONY: help
help:
	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make \033[36m<target>\033[0m\n"} /^[a-zA-Z_\/%-]+:.*?##/ { printf "  \033[36m%-27s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
vendor/github.com/distribution/reference/README.md (generated, vendored; new file, 30 lines)

@@ -0,0 +1,30 @@
# Distribution reference

Go library to handle references to container images.

<img src="/distribution-logo.svg" width="200px" />

[Build Status](https://github.com/distribution/reference/actions?query=workflow%3ACI)
[GoDoc](https://pkg.go.dev/github.com/distribution/reference)
[License: Apache-2.0](LICENSE)
[codecov](https://codecov.io/gh/distribution/reference)
[FOSSA Status](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference?ref=badge_shield)

This repository contains a library for handling refrences to container images held in container registries. Please see [godoc](https://pkg.go.dev/github.com/distribution/reference) for details.

## Contribution

Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
issues, fixes, and patches to this project.

## Communication

For async communication and long running discussions please use issues and pull requests on the github repo.
This will be the best place to discuss design and implementation.

For sync communication we have a #distribution channel in the [CNCF Slack](https://slack.cncf.io/)
that everyone is welcome to join and chat about development.

## Licenses

The distribution codebase is released under the [Apache 2.0 license](LICENSE).
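The vendored library's core job is turning "familiar" Docker-style names into fully qualified references and back. A minimal usage sketch, assuming the vendored github.com/distribution/reference package:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	named, err := reference.ParseNormalizedNamed("redis")
	if err != nil {
		panic(err)
	}
	// The canonical form gains the default domain and "library/" namespace...
	fmt.Println(named.String()) // docker.io/library/redis
	// ...TagNameOnly adds the default tag when none was given...
	fmt.Println(reference.TagNameOnly(named).String()) // docker.io/library/redis:latest
	// ...and FamiliarName shortens it back for display.
	fmt.Println(reference.FamiliarName(named)) // redis
}
```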
vendor/github.com/distribution/reference/SECURITY.md (generated, vendored; new file, 7 lines)

@@ -0,0 +1,7 @@
# Security Policy

## Reporting a Vulnerability

The maintainers take security seriously. If you discover a security issue, please bring it to their attention right away!

Please DO NOT file a public issue, instead send your report privately to cncf-distribution-security@lists.cncf.io.
vendor/github.com/distribution/reference/distribution-logo.svg (generated, vendored; new file, 8.6 KiB)

File diff suppressed because one or more lines are too long.
@@ -32,7 +32,7 @@ func FamiliarString(ref Reference) string {
 }

 // FamiliarMatch reports whether ref matches the specified pattern.
-// See https://godoc.org/path#Match for supported patterns.
+// See [path.Match] for supported patterns.
 func FamiliarMatch(pattern string, ref Reference) (bool, error) {
 	matched, err := path.Match(pattern, FamiliarString(ref))
 	if namedRef, isNamed := ref.(Named); isNamed && !matched {
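FamiliarMatch applies a path.Match-style glob to the familiar (shortened) form of a reference, so patterns are written against short names like "ubuntu" rather than fully qualified ones. A small usage sketch, assuming the vendored github.com/distribution/reference package:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	ref, err := reference.ParseNormalizedNamed("docker.io/library/ubuntu")
	if err != nil {
		panic(err)
	}
	// The familiar form of this reference is just "ubuntu", so both
	// patterns below are matched against that short string.
	for _, pattern := range []string{"ubuntu", "ubun*"} {
		ok, err := reference.FamiliarMatch(pattern, ref)
		fmt.Println(pattern, ok, err) // ubuntu true <nil>; ubun* true <nil>
	}
}
```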
@@ -1,18 +1,41 @@
 package reference

 import (
-	"errors"
 	"fmt"
 	"strings"

-	"github.com/docker/distribution/digestset"
 	"github.com/opencontainers/go-digest"
 )

-var (
+const (
+	// legacyDefaultDomain is the legacy domain for Docker Hub (which was
+	// originally named "the Docker Index"). This domain is still used for
+	// authentication and image search, which were part of the "v1" Docker
+	// registry specification.
+	//
+	// This domain will continue to be supported, but there are plans to consolidate
+	// legacy domains to new "canonical" domains. Once those domains are decided
+	// on, we must update the normalization functions, but preserve compatibility
+	// with existing installs, clients, and user configuration.
 	legacyDefaultDomain = "index.docker.io"
+
+	// defaultDomain is the default domain used for images on Docker Hub.
+	// It is used to normalize "familiar" names to canonical names, for example,
+	// to convert "ubuntu" to "docker.io/library/ubuntu:latest".
+	//
+	// Note that actual domain of Docker Hub's registry is registry-1.docker.io.
+	// This domain will continue to be supported, but there are plans to consolidate
+	// legacy domains to new "canonical" domains. Once those domains are decided
+	// on, we must update the normalization functions, but preserve compatibility
+	// with existing installs, clients, and user configuration.
 	defaultDomain = "docker.io"
-	officialRepoName = "library"
+
+	// officialRepoPrefix is the namespace used for official images on Docker Hub.
+	// It is used to normalize "familiar" names to canonical names, for example,
+	// to convert "ubuntu" to "docker.io/library/ubuntu:latest".
+	officialRepoPrefix = "library/"
+
+	// defaultTag is the default tag if no tag is provided.
 	defaultTag = "latest"
 )

@@ -35,14 +58,14 @@ func ParseNormalizedNamed(s string) (Named, error) {
 		return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
 	}
 	domain, remainder := splitDockerDomain(s)
-	var remoteName string
+	var remote string
 	if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
-		remoteName = remainder[:tagSep]
+		remote = remainder[:tagSep]
 	} else {
-		remoteName = remainder
+		remote = remainder
 	}
-	if strings.ToLower(remoteName) != remoteName {
-		return nil, errors.New("invalid reference format: repository name must be lowercase")
+	if strings.ToLower(remote) != remote {
+		return nil, fmt.Errorf("invalid reference format: repository name (%s) must be lowercase", remote)
 	}

 	ref, err := Parse(domain + "/" + remainder)
@@ -56,41 +79,53 @@ func ParseNormalizedNamed(s string) (Named, error) {
 	return named, nil
 }

-// ParseDockerRef normalizes the image reference following the docker convention. This is added
-// mainly for backward compatibility.
-// The reference returned can only be either tagged or digested. For reference contains both tag
-// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@
-// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
-// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
+// namedTaggedDigested is a reference that has both a tag and a digest.
+type namedTaggedDigested interface {
+	NamedTagged
+	Digested
+}
+
+// ParseDockerRef normalizes the image reference following the docker convention,
+// which allows for references to contain both a tag and a digest. It returns a
+// reference that is either tagged or digested. For references containing both
+// a tag and a digest, it returns a digested reference. For example, the following
+// reference:
+//
+//	docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
+//
+// Is returned as a digested reference (with the ":latest" tag removed):
+//
+//	docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
+//
+// References that are already "tagged" or "digested" are returned unmodified:
+//
+//	// Already a digested reference
+//	docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
+//
+//	// Already a named reference
+//	docker.io/library/busybox:latest
 func ParseDockerRef(ref string) (Named, error) {
 	named, err := ParseNormalizedNamed(ref)
 	if err != nil {
 		return nil, err
 	}
-	if _, ok := named.(NamedTagged); ok {
-		if canonical, ok := named.(Canonical); ok {
-			// The reference is both tagged and digested, only
-			// return digested.
+	if canonical, ok := named.(namedTaggedDigested); ok {
+		// The reference is both tagged and digested; only return digested.
 		newNamed, err := WithName(canonical.Name())
 		if err != nil {
 			return nil, err
 		}
-		newCanonical, err := WithDigest(newNamed, canonical.Digest())
-		if err != nil {
-			return nil, err
-		}
-		return newCanonical, nil
-	}
+		return WithDigest(newNamed, canonical.Digest())
 	}
 	return TagNameOnly(named), nil
 }

-// splitDockerDomain splits a repository name to domain and remotename string.
+// splitDockerDomain splits a repository name to domain and remote-name.
 // If no valid domain is found, the default domain is used. Repository name
 // needs to be already validated before.
 func splitDockerDomain(name string) (domain, remainder string) {
 	i := strings.IndexRune(name, '/')
-	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
+	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != localhost && strings.ToLower(name[:i]) == name[:i]) {
 		domain, remainder = defaultDomain, name
 	} else {
 		domain, remainder = name[:i], name[i+1:]
@@ -99,13 +134,13 @@ func splitDockerDomain(name string) (domain, remainder string) {
 		domain = defaultDomain
 	}
 	if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
-		remainder = officialRepoName + "/" + remainder
+		remainder = officialRepoPrefix + remainder
 	}
 	return
 }

 // familiarizeName returns a shortened version of the name familiar
-// to to the Docker UI. Familiar names have the default domain
+// to the Docker UI. Familiar names have the default domain
 // "docker.io" and "library/" repository prefix removed.
 // For example, "docker.io/library/redis" will have the familiar
 // name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
@@ -119,8 +154,15 @@ func familiarizeName(named namedRepository) repository {
 	if repo.domain == defaultDomain {
 		repo.domain = ""
 		// Handle official repositories which have the pattern "library/<official repo name>"
-		if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
-			repo.path = split[1]
+		if strings.HasPrefix(repo.path, officialRepoPrefix) {
+			// TODO(thaJeztah): this check may be too strict, as it assumes the
+			// "library/" namespace does not have nested namespaces. While this
+			// is true (currently), technically it would be possible for Docker
+			// Hub to use those (e.g. "library/distros/ubuntu:latest").
+			// See https://github.com/distribution/distribution/pull/3769#issuecomment-1302031785.
+			if remainder := strings.TrimPrefix(repo.path, officialRepoPrefix); !strings.ContainsRune(remainder, '/') {
+				repo.path = remainder
+			}
 		}
 	}
 	return repo
@@ -180,20 +222,3 @@ func ParseAnyReference(ref string) (Reference, error) {

 	return ParseNormalizedNamed(ref)
 }
-
-// ParseAnyReferenceWithSet parses a reference string as a possible short
-// identifier to be matched in a digest set, a full digest, or familiar name.
-func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
-	if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
-		dgst, err := ds.Lookup(ref)
-		if err == nil {
-			return digestReference(dgst), nil
-		}
-	} else {
-		if dgst, err := digest.Parse(ref); err == nil {
-			return digestReference(dgst), nil
-		}
-	}
-
-	return ParseNormalizedNamed(ref)
-}
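These hunks also tighten splitDockerDomain: a first path segment containing upper-case letters is now treated as a registry host, since repository paths must be lowercase anyway. The most visible behavior, though, is ParseDockerRef's tag-plus-digest collapsing, sketched below assuming the vendored github.com/distribution/reference package:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	// When a reference carries both a tag and a digest, ParseDockerRef
	// keeps only the digest; the ":latest" tag is dropped.
	ref, err := reference.ParseDockerRef(
		"docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.String())
	// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
}
```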
@@ -4,11 +4,14 @@
 // Grammar
 //
 // reference := name [ ":" tag ] [ "@" digest ]
-// name := [domain '/'] path-component ['/' path-component]*
-// domain := domain-component ['.' domain-component]* [':' port-number]
+// name := [domain '/'] remote-name
+// domain := host [':' port-number]
+// host := domain-name | IPv4address | \[ IPv6address \] ; rfc3986 appendix-A
+// domain-name := domain-component ['.' domain-component]*
 // domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
 // port-number := /[0-9]+/
 // path-component := alpha-numeric [separator alpha-numeric]*
+// path (or "remote-name") := path-component ['/' path-component]*
 // alpha-numeric := /[a-z0-9]+/
 // separator := /[_.]|__|[-]*/
 //
@@ -21,7 +24,6 @@
 // digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
 //
 // identifier := /[a-f0-9]{64}/
-// short-identifier := /[a-f0-9]{6,64}/
 package reference

 import (
@@ -145,7 +147,7 @@ type namedRepository interface {
 	Path() string
 }

-// Domain returns the domain part of the Named reference
+// Domain returns the domain part of the [Named] reference.
 func Domain(named Named) string {
 	if r, ok := named.(namedRepository); ok {
 		return r.Domain()
@@ -154,7 +156,7 @@ func Domain(named Named) string {
 	return domain
 }

-// Path returns the name without the domain part of the Named reference
+// Path returns the name without the domain part of the [Named] reference.
 func Path(named Named) (name string) {
 	if r, ok := named.(namedRepository); ok {
 		return r.Path()
@@ -175,7 +177,8 @@ func splitDomain(name string) (string, string) {
 // hostname and name string. If no valid hostname is
 // found, the hostname is empty and the full value
 // is returned as name
-// DEPRECATED: Use Domain or Path
+//
+// Deprecated: Use [Domain] or [Path].
 func SplitHostname(named Named) (string, string) {
 	if r, ok := named.(namedRepository); ok {
 		return r.Domain(), r.Path()
@@ -185,7 +188,6 @@ func SplitHostname(named Named) (string, string) {

 // Parse parses s and returns a syntactically valid Reference.
 // If an error was encountered it is returned, along with a nil Reference.
-// NOTE: Parse will not handle short digests.
 func Parse(s string) (Reference, error) {
 	matches := ReferenceRegexp.FindStringSubmatch(s)
 	if matches == nil {
@@ -237,7 +239,6 @@ func Parse(s string) (Reference, error) {
 // the Named interface. The reference must have a name and be in the canonical
 // form, otherwise an error is returned.
 // If an error was encountered it is returned, along with a nil Reference.
-// NOTE: ParseNamed will not handle short digests.
 func ParseNamed(s string) (Named, error) {
 	named, err := ParseNormalizedNamed(s)
 	if err != nil {
@@ -320,11 +321,13 @@ func WithDigest(name Named, digest digest.Digest) (Canonical, error) {

 // TrimNamed removes any tag or digest from the named reference.
 func TrimNamed(ref Named) Named {
-	domain, path := SplitHostname(ref)
-	return repository{
-		domain: domain,
-		path:   path,
+	repo := repository{}
+	if r, ok := ref.(namedRepository); ok {
+		repo.domain, repo.path = r.Domain(), r.Path()
+	} else {
+		repo.domain, repo.path = splitDomain(ref.Name())
 	}
+	return repo
 }

 func getBestReferenceType(ref reference) Reference {
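The TrimNamed rewrite stops relying on the now-deprecated SplitHostname and handles any Named value, not just the package's internal repository type. A usage sketch of the related accessors, assuming the vendored github.com/distribution/reference package:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	named, err := reference.ParseNormalizedNamed("registry.example.com:5000/team/app:v1.2")
	if err != nil {
		panic(err)
	}
	fmt.Println(reference.Domain(named)) // registry.example.com:5000
	fmt.Println(reference.Path(named))   // team/app
	// TrimNamed drops the tag (and any digest), leaving just the name.
	fmt.Println(reference.TrimNamed(named).String()) // registry.example.com:5000/team/app
}
```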
vendor/github.com/distribution/reference/regexp.go (generated, vendored; new file, 163 lines)

@@ -0,0 +1,163 @@
package reference

import (
	"regexp"
	"strings"
)

// DigestRegexp matches well-formed digests, including algorithm (e.g. "sha256:<encoded>").
var DigestRegexp = regexp.MustCompile(digestPat)

// DomainRegexp matches hostname or IP-addresses, optionally including a port
// number. It defines the structure of potential domain components that may be
// part of image names. This is purposely a subset of what is allowed by DNS to
// ensure backwards compatibility with Docker image names. It may be a subset of
// DNS domain name, an IPv4 address in decimal format, or an IPv6 address between
// square brackets (excluding zone identifiers as defined by [RFC 6874] or special
// addresses such as IPv4-Mapped).
//
// [RFC 6874]: https://www.rfc-editor.org/rfc/rfc6874.
var DomainRegexp = regexp.MustCompile(domainAndPort)

// IdentifierRegexp is the format for string identifier used as a
// content addressable identifier using sha256. These identifiers
// are like digests without the algorithm, since sha256 is used.
var IdentifierRegexp = regexp.MustCompile(identifier)

// NameRegexp is the format for the name component of references, including
// an optional domain and port, but without tag or digest suffix.
var NameRegexp = regexp.MustCompile(namePat)

// ReferenceRegexp is the full supported format of a reference. The regexp
// is anchored and has capturing groups for name, tag, and digest
// components.
var ReferenceRegexp = regexp.MustCompile(referencePat)

// TagRegexp matches valid tag names. From [docker/docker:graph/tags.go].
//
// [docker/docker:graph/tags.go]: https://github.com/moby/moby/blob/v1.6.0/graph/tags.go#L26-L28
var TagRegexp = regexp.MustCompile(tag)

const (
	// alphanumeric defines the alphanumeric atom, typically a
	// component of names. This only allows lower case characters and digits.
	alphanumeric = `[a-z0-9]+`

	// separator defines the separators allowed to be embedded in name
	// components. This allows one period, one or two underscore and multiple
	// dashes. Repeated dashes and underscores are intentionally treated
	// differently. In order to support valid hostnames as name components,
	// supporting repeated dash was added. Additionally double underscore is
	// now allowed as a separator to loosen the restriction for previously
	// supported names.
	separator = `(?:[._]|__|[-]+)`

	// localhost is treated as a special value for domain-name. Any other
	// domain-name without a "." or a ":port" are considered a path component.
	localhost = `localhost`

	// domainNameComponent restricts the registry domain component of a
	// repository name to start with a component as defined by DomainRegexp.
	domainNameComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`

	// optionalPort matches an optional port-number including the port separator
	// (e.g. ":80").
	optionalPort = `(?::[0-9]+)?`

	// tag matches valid tag names. From docker/docker:graph/tags.go.
	tag = `[\w][\w.-]{0,127}`

	// digestPat matches well-formed digests, including algorithm (e.g. "sha256:<encoded>").
	//
	// TODO(thaJeztah): this should follow the same rules as https://pkg.go.dev/github.com/opencontainers/go-digest@v1.0.0#DigestRegexp
	// so that go-digest defines the canonical format. Note that the go-digest is
	// more relaxed:
	//   - it allows multiple algorithms (e.g. "sha256+b64:<encoded>") to allow
	//     future expansion of supported algorithms.
	//   - it allows the "<encoded>" value to use urlsafe base64 encoding as defined
	//     in [rfc4648, section 5].
	//
	// [rfc4648, section 5]: https://www.rfc-editor.org/rfc/rfc4648#section-5.
	digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`

	// identifier is the format for a content addressable identifier using sha256.
	// These identifiers are like digests without the algorithm, since sha256 is used.
	identifier = `([a-f0-9]{64})`

	// ipv6address are enclosed between square brackets and may be represented
	// in many ways, see rfc5952. Only IPv6 in compressed or uncompressed format
	// are allowed, IPv6 zone identifiers (rfc6874) or Special addresses such as
	// IPv4-Mapped are deliberately excluded.
	ipv6address = `\[(?:[a-fA-F0-9:]+)\]`
)

var (
	// domainName defines the structure of potential domain components
	// that may be part of image names. This is purposely a subset of what is
	// allowed by DNS to ensure backwards compatibility with Docker image
	// names. This includes IPv4 addresses on decimal format.
	domainName = domainNameComponent + anyTimes(`\.`+domainNameComponent)

	// host defines the structure of potential domains based on the URI
	// Host subcomponent on rfc3986. It may be a subset of DNS domain name,
	// or an IPv4 address in decimal format, or an IPv6 address between square
	// brackets (excluding zone identifiers as defined by rfc6874 or special
	// addresses such as IPv4-Mapped).
	host = `(?:` + domainName + `|` + ipv6address + `)`

	// allowed by the URI Host subcomponent on rfc3986 to ensure backwards
	// compatibility with Docker image names.
	domainAndPort = host + optionalPort

	// anchoredTagRegexp matches valid tag names, anchored at the start and
	// end of the matched string.
	anchoredTagRegexp = regexp.MustCompile(anchored(tag))

	// anchoredDigestRegexp matches valid digests, anchored at the start and
	// end of the matched string.
	anchoredDigestRegexp = regexp.MustCompile(anchored(digestPat))

	// pathComponent restricts path-components to start with an alphanumeric
	// character, with following parts able to be separated by a separator
	// (one period, one or two underscore and multiple dashes).
	pathComponent = alphanumeric + anyTimes(separator+alphanumeric)

	// remoteName matches the remote-name of a repository. It consists of one
	// or more forward slash (/) delimited path-components:
	//
	//	pathComponent[[/pathComponent] ...] // e.g., "library/ubuntu"
	remoteName = pathComponent + anyTimes(`/`+pathComponent)
	namePat    = optional(domainAndPort+`/`) + remoteName

	// anchoredNameRegexp is used to parse a name value, capturing the
	// domain and trailing components.
	anchoredNameRegexp = regexp.MustCompile(anchored(optional(capture(domainAndPort), `/`), capture(remoteName)))

	referencePat = anchored(capture(namePat), optional(`:`, capture(tag)), optional(`@`, capture(digestPat)))

	// anchoredIdentifierRegexp is used to check or match an
	// identifier value, anchored at start and end of string.
	anchoredIdentifierRegexp = regexp.MustCompile(anchored(identifier))
)

// optional wraps the expression in a non-capturing group and makes the
// production optional.
func optional(res ...string) string {
	return `(?:` + strings.Join(res, "") + `)?`
}

// anyTimes wraps the expression in a non-capturing group that can occur
// any number of times.
func anyTimes(res ...string) string {
	return `(?:` + strings.Join(res, "") + `)*`
}

// capture wraps the expression in a capturing group.
func capture(res ...string) string {
	return `(` + strings.Join(res, "") + `)`
}

// anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...string) string {
	return `^` + strings.Join(res, "") + `$`
}
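The combinators at the bottom of regexp.go (optional, anyTimes, capture, anchored) assemble the exported patterns from small string atoms. The resulting ReferenceRegexp is anchored and exposes the name, tag, and digest capture groups directly; a usage sketch assuming the vendored package:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	// ReferenceRegexp has capture groups for name, tag, and digest,
	// in that order.
	m := reference.ReferenceRegexp.FindStringSubmatch("quay.io/podman/hello:latest")
	if m == nil {
		panic("no match")
	}
	fmt.Printf("name=%q tag=%q digest=%q\n", m[1], m[2], m[3])
	// name="quay.io/podman/hello" tag="latest" digest=""
}
```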
vendor/github.com/distribution/reference/sort.go (generated, vendored; new file, 75 lines)

@@ -0,0 +1,75 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package reference

import (
	"sort"
)

// Sort sorts string references preferring higher information references.
//
// The precedence is as follows:
//
//  1. [Named] + [Tagged] + [Digested] (e.g., "docker.io/library/busybox:latest@sha256:<digest>")
//  2. [Named] + [Tagged] (e.g., "docker.io/library/busybox:latest")
//  3. [Named] + [Digested] (e.g., "docker.io/library/busybo@sha256:<digest>")
//  4. [Named] (e.g., "docker.io/library/busybox")
//  5. [Digested] (e.g., "docker.io@sha256:<digest>")
//  6. Parse error
func Sort(references []string) []string {
	var prefs []Reference
	var bad []string

	for _, ref := range references {
		pref, err := ParseAnyReference(ref)
		if err != nil {
			bad = append(bad, ref)
		} else {
			prefs = append(prefs, pref)
		}
	}
	sort.Slice(prefs, func(a, b int) bool {
		ar := refRank(prefs[a])
		br := refRank(prefs[b])
		if ar == br {
			return prefs[a].String() < prefs[b].String()
		}
		return ar < br
	})
	sort.Strings(bad)
	var refs []string
	for _, pref := range prefs {
		refs = append(refs, pref.String())
	}
	return append(refs, bad...)
}

func refRank(ref Reference) uint8 {
	if _, ok := ref.(Named); ok {
		if _, ok = ref.(Tagged); ok {
			if _, ok = ref.(Digested); ok {
				return 1
			}
			return 2
		}
		if _, ok = ref.(Digested); ok {
			return 3
		}
		return 4
	}
	return 5
}
247
vendor/github.com/docker/distribution/digestset/set.go
generated
vendored
247
vendor/github.com/docker/distribution/digestset/set.go
generated
vendored
|
|
@ -1,247 +0,0 @@
|
|||
package digestset
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrDigestNotFound is used when a matching digest
|
||||
// could not be found in a set.
|
||||
ErrDigestNotFound = errors.New("digest not found")
|
||||
|
||||
// ErrDigestAmbiguous is used when multiple digests
|
||||
// are found in a set. None of the matching digests
|
||||
// should be considered valid matches.
|
||||
ErrDigestAmbiguous = errors.New("ambiguous digest string")
|
||||
)
|
||||
|
||||
// Set is used to hold a unique set of digests which
|
||||
// may be easily referenced by easily referenced by a string
|
||||
// representation of the digest as well as short representation.
|
||||
// The uniqueness of the short representation is based on other
|
||||
// digests in the set. If digests are omitted from this set,
|
||||
// collisions in a larger set may not be detected, therefore it
|
||||
// is important to always do short representation lookups on
|
||||
// the complete set of digests. To mitigate collisions, an
|
||||
// appropriately long short code should be used.
|
||||
type Set struct {
|
||||
mutex sync.RWMutex
|
||||
entries digestEntries
|
||||
}
|
||||
|
||||
// NewSet creates an empty set of digests
|
||||
// which may have digests added.
|
||||
func NewSet() *Set {
|
||||
return &Set{
|
||||
entries: digestEntries{},
|
||||
}
|
||||
}
|
||||
|
||||
// checkShortMatch checks whether two digests match as either whole
|
||||
// values or short values. This function does not test equality,
|
||||
// rather whether the second value could match against the first
|
||||
// value.
|
||||
func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
|
||||
if len(hex) == len(shortHex) {
|
||||
if hex != shortHex {
|
||||
return false
|
||||
}
|
||||
if len(shortAlg) > 0 && string(alg) != shortAlg {
|
||||
return false
|
||||
}
|
||||
} else if !strings.HasPrefix(hex, shortHex) {
|
||||
return false
|
||||
} else if len(shortAlg) > 0 && string(alg) != shortAlg {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Lookup looks for a digest matching the given string representation.
|
||||
// If no digests could be found ErrDigestNotFound will be returned
|
||||
// with an empty digest value. If multiple matches are found
|
||||
// ErrDigestAmbiguous will be returned with an empty digest value.
|
||||
func (dst *Set) Lookup(d string) (digest.Digest, error) {
|
||||
dst.mutex.RLock()
|
||||
defer dst.mutex.RUnlock()
|
||||
if len(dst.entries) == 0 {
|
||||
return "", ErrDigestNotFound
|
||||
}
|
||||
var (
|
||||
searchFunc func(int) bool
|
||||
alg digest.Algorithm
|
||||
hex string
|
||||
)
|
||||
dgst, err := digest.Parse(d)
|
||||
if err == digest.ErrDigestInvalidFormat {
|
||||
hex = d
|
||||
searchFunc = func(i int) bool {
|
||||
return dst.entries[i].val >= d
|
||||
}
|
||||
} else {
|
||||
hex = dgst.Hex()
|
||||
alg = dgst.Algorithm()
|
||||
searchFunc = func(i int) bool {
|
||||
if dst.entries[i].val == hex {
|
||||
return dst.entries[i].alg >= alg
|
||||
}
|
||||
return dst.entries[i].val >= hex
|
||||
}
|
||||
}
|
||||
idx := sort.Search(len(dst.entries), searchFunc)
|
||||
if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
|
||||
return "", ErrDigestNotFound
|
||||
}
|
||||
if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
|
||||
return dst.entries[idx].digest, nil
|
||||
}
|
||||
if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
|
||||
return "", ErrDigestAmbiguous
|
||||
}
|
||||
|
||||
return dst.entries[idx].digest, nil
|
||||
}
|
||||
|
||||
// Add adds the given digest to the set. An error will be returned
|
||||
// if the given digest is invalid. If the digest already exists in the
|
||||
// set, this operation will be a no-op.
|
||||
func (dst *Set) Add(d digest.Digest) error {
|
||||
if err := d.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
dst.mutex.Lock()
|
||||
defer dst.mutex.Unlock()
|
||||
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
|
||||
searchFunc := func(i int) bool {
|
||||
if dst.entries[i].val == entry.val {
|
||||
return dst.entries[i].alg >= entry.alg
|
||||
}
|
||||
return dst.entries[i].val >= entry.val
|
||||
}
|
||||
idx := sort.Search(len(dst.entries), searchFunc)
|
||||
if idx == len(dst.entries) {
|
||||
dst.entries = append(dst.entries, entry)
|
||||
return nil
|
||||
} else if dst.entries[idx].digest == d {
|
||||
return nil
|
||||
}
|
||||
|
||||
entries := append(dst.entries, nil)
|
||||
copy(entries[idx+1:], entries[idx:len(entries)-1])
|
||||
entries[idx] = entry
|
||||
dst.entries = entries
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove removes the given digest from the set. An err will be
|
||||
// returned if the given digest is invalid. If the digest does
|
||||
// not exist in the set, this operation will be a no-op.
|
||||
func (dst *Set) Remove(d digest.Digest) error {
|
||||
if err := d.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
dst.mutex.Lock()
|
||||
defer dst.mutex.Unlock()
|
||||
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
|
||||
searchFunc := func(i int) bool {
|
||||
if dst.entries[i].val == entry.val {
|
||||
return dst.entries[i].alg >= entry.alg
|
||||
}
|
||||
return dst.entries[i].val >= entry.val
|
||||
}
|
||||
idx := sort.Search(len(dst.entries), searchFunc)
|
||||
// Not found if idx is after or value at idx is not digest
|
||||
if idx == len(dst.entries) || dst.entries[idx].digest != d {
|
||||
return nil
|
||||
}
|
||||
|
||||
entries := dst.entries
|
||||
copy(entries[idx:], entries[idx+1:])
|
||||
entries = entries[:len(entries)-1]
|
||||
dst.entries = entries
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// All returns all the digests in the set
|
||||
func (dst *Set) All() []digest.Digest {
|
||||
dst.mutex.RLock()
|
||||
defer dst.mutex.RUnlock()
|
||||
retValues := make([]digest.Digest, len(dst.entries))
|
||||
for i := range dst.entries {
|
||||
retValues[i] = dst.entries[i].digest
|
||||
}
|
||||
|
||||
return retValues
|
||||
}
|
||||
|
||||
// ShortCodeTable returns a map of Digest to unique short codes. The
|
||||
// length represents the minimum value, the maximum length may be the
|
||||
// entire value of digest if uniqueness cannot be achieved without the
|
||||
// full value. This function will attempt to make short codes as short
|
||||
// as possible to be unique.
|
||||
func ShortCodeTable(dst *Set, length int) map[digest.Digest]string {
|
||||
dst.mutex.RLock()
|
||||
defer dst.mutex.RUnlock()
|
||||
m := make(map[digest.Digest]string, len(dst.entries))
|
||||
l := length
|
||||
resetIdx := 0
|
||||
for i := 0; i < len(dst.entries); i++ {
|
||||
var short string
|
||||
extended := true
|
||||
for extended {
|
||||
extended = false
|
||||
if len(dst.entries[i].val) <= l {
|
||||
short = dst.entries[i].digest.String()
|
||||
} else {
|
||||
short = dst.entries[i].val[:l]
|
||||
for j := i + 1; j < len(dst.entries); j++ {
|
||||
if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
|
||||
if j > resetIdx {
|
||||
resetIdx = j
|
||||
}
|
||||
extended = true
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
if extended {
|
||||
l++
|
||||
}
|
||||
}
|
||||
}
|
||||
m[dst.entries[i].digest] = short
|
||||
if i >= resetIdx {
|
||||
l = length
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
type digestEntry struct {
|
||||
alg digest.Algorithm
|
||||
val string
|
||||
digest digest.Digest
|
||||
}
|
||||
|
||||
type digestEntries []*digestEntry
|
||||
|
||||
func (d digestEntries) Len() int {
|
||||
return len(d)
|
||||
}
|
||||
|
||||
func (d digestEntries) Less(i, j int) bool {
|
||||
if d[i].val != d[j].val {
|
||||
return d[i].val < d[j].val
|
||||
}
|
||||
return d[i].alg < d[j].alg
|
||||
}
|
||||
|
||||
func (d digestEntries) Swap(i, j int) {
|
||||
d[i], d[j] = d[j], d[i]
|
||||
}
|
||||
143
vendor/github.com/docker/distribution/reference/regexp.go
generated
vendored
143
vendor/github.com/docker/distribution/reference/regexp.go
generated
vendored
|
|
@ -1,143 +0,0 @@
|
|||
package reference
|
||||
|
||||
import "regexp"
|
||||
|
||||
var (
|
||||
// alphaNumericRegexp defines the alpha numeric atom, typically a
|
||||
// component of names. This only allows lower case characters and digits.
|
||||
alphaNumericRegexp = match(`[a-z0-9]+`)
|
||||
|
||||
// separatorRegexp defines the separators allowed to be embedded in name
|
||||
// components. This allow one period, one or two underscore and multiple
|
||||
// dashes.
|
||||
separatorRegexp = match(`(?:[._]|__|[-]*)`)
|
||||
|
||||
// nameComponentRegexp restricts registry path component names to start
|
||||
// with at least one letter or number, with following parts able to be
|
||||
// separated by one period, one or two underscore and multiple dashes.
|
||||
nameComponentRegexp = expression(
|
||||
alphaNumericRegexp,
|
||||
optional(repeated(separatorRegexp, alphaNumericRegexp)))
|
||||
|
||||
// domainComponentRegexp restricts the registry domain component of a
|
||||
// repository name to start with a component as defined by DomainRegexp
|
||||
// and followed by an optional port.
|
||||
domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
|
||||
|
||||
// DomainRegexp defines the structure of potential domain components
|
||||
// that may be part of image names. This is purposely a subset of what is
|
||||
// allowed by DNS to ensure backwards compatibility with Docker image
|
||||
// names.
|
||||
DomainRegexp = expression(
|
||||
domainComponentRegexp,
|
||||
optional(repeated(literal(`.`), domainComponentRegexp)),
|
||||
optional(literal(`:`), match(`[0-9]+`)))
|
||||
|
||||
// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
|
||||
TagRegexp = match(`[\w][\w.-]{0,127}`)
|
||||
|
||||
// anchoredTagRegexp matches valid tag names, anchored at the start and
|
||||
// end of the matched string.
|
||||
anchoredTagRegexp = anchored(TagRegexp)
|
||||
|
||||
// DigestRegexp matches valid digests.
|
||||
DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
|
||||
|
||||
// anchoredDigestRegexp matches valid digests, anchored at the start and
|
||||
// end of the matched string.
|
||||
anchoredDigestRegexp = anchored(DigestRegexp)
|
||||
|
||||
// NameRegexp is the format for the name component of references. The
|
||||
// regexp has capturing groups for the domain and name part omitting
|
||||
// the separating forward slash from either.
|
||||
NameRegexp = expression(
|
||||
optional(DomainRegexp, literal(`/`)),
|
||||
nameComponentRegexp,
|
||||
optional(repeated(literal(`/`), nameComponentRegexp)))
|
||||
|
||||
// anchoredNameRegexp is used to parse a name value, capturing the
|
||||
// domain and trailing components.
|
||||
anchoredNameRegexp = anchored(
|
||||
optional(capture(DomainRegexp), literal(`/`)),
|
||||
capture(nameComponentRegexp,
|
||||
optional(repeated(literal(`/`), nameComponentRegexp))))
|
||||
|
||||
// ReferenceRegexp is the full supported format of a reference. The regexp
|
||||
// is anchored and has capturing groups for name, tag, and digest
|
||||
// components.
|
||||
ReferenceRegexp = anchored(capture(NameRegexp),
|
||||
optional(literal(":"), capture(TagRegexp)),
|
||||
optional(literal("@"), capture(DigestRegexp)))
|
||||
|
||||
// IdentifierRegexp is the format for string identifier used as a
|
||||
// content addressable identifier using sha256. These identifiers
|
||||
// are like digests without the algorithm, since sha256 is used.
|
||||
IdentifierRegexp = match(`([a-f0-9]{64})`)
|
||||
|
||||
// ShortIdentifierRegexp is the format used to represent a prefix
|
||||
// of an identifier. A prefix may be used to match a sha256 identifier
|
||||
// within a list of trusted identifiers.
|
||||
ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)
|
||||
|
||||
// anchoredIdentifierRegexp is used to check or match an
|
||||
// identifier value, anchored at start and end of string.
|
||||
anchoredIdentifierRegexp = anchored(IdentifierRegexp)
|
||||
|
||||
// anchoredShortIdentifierRegexp is used to check if a value
|
||||
// is a possible identifier prefix, anchored at start and end
|
||||
// of string.
|
||||
anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
|
||||
)
|
||||
|
||||
// match compiles the string to a regular expression.
|
||||
var match = regexp.MustCompile
|
||||
|
||||
// literal compiles s into a literal regular expression, escaping any regexp
|
||||
// reserved characters.
|
||||
func literal(s string) *regexp.Regexp {
|
||||
re := match(regexp.QuoteMeta(s))
|
||||
|
||||
if _, complete := re.LiteralPrefix(); !complete {
|
||||
panic("must be a literal")
|
||||
}
|
||||
|
||||
return re
|
||||
}
|
||||
|
||||
// expression defines a full expression, where each regular expression must
|
||||
// follow the previous.
|
||||
func expression(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
var s string
|
||||
for _, re := range res {
|
||||
s += re.String()
|
||||
}
|
||||
|
||||
return match(s)
|
||||
}
|
||||
|
||||
// optional wraps the expression in a non-capturing group and makes the
|
||||
// production optional.
|
||||
func optional(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(group(expression(res...)).String() + `?`)
|
||||
}
|
||||
|
||||
// repeated wraps the regexp in a non-capturing group to get one or more
|
||||
// matches.
|
||||
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(group(expression(res...)).String() + `+`)
|
||||
}
|
||||
|
||||
// group wraps the regexp in a non-capturing group.
|
||||
func group(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(`(?:` + expression(res...).String() + `)`)
|
||||
}
|
||||
|
||||
// capture wraps the expression in a capturing group.
|
||||
func capture(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(`(` + expression(res...).String() + `)`)
|
||||
}
|
||||
|
||||
// anchored anchors the regular expression by adding start and end delimiters.
|
||||
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(`^` + expression(res...).String() + `$`)
|
||||
}
|
||||
2
vendor/github.com/docker/distribution/registry/api/v2/descriptors.go
generated
vendored
2
vendor/github.com/docker/distribution/registry/api/v2/descriptors.go
generated
vendored
|
|
@ -4,7 +4,7 @@ import (
|
|||
"net/http"
|
||||
"regexp"
|
||||
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/distribution/reference"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
|
|
|||
2
vendor/github.com/docker/distribution/registry/api/v2/urls.go
generated
vendored
2
vendor/github.com/docker/distribution/registry/api/v2/urls.go
generated
vendored
|
|
@ -6,7 +6,7 @@ import (
|
|||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/distribution/reference"
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
|
|
|
|||
8
vendor/github.com/docker/docker/api/types/versions/compare.go
generated
vendored
8
vendor/github.com/docker/docker/api/types/versions/compare.go
generated
vendored
|
|
@ -16,11 +16,11 @@ func compare(v1, v2 string) int {
|
|||
otherTab = strings.Split(v2, ".")
|
||||
)
|
||||
|
||||
max := len(currTab)
|
||||
if len(otherTab) > max {
|
||||
max = len(otherTab)
|
||||
maxVer := len(currTab)
|
||||
if len(otherTab) > maxVer {
|
||||
maxVer = len(otherTab)
|
||||
}
|
||||
for i := 0; i < max; i++ {
|
||||
for i := 0; i < maxVer; i++ {
|
||||
var currInt, otherInt int
|
||||
|
||||
if len(currTab) > i {
|
||||
|
|
|
|||
8
vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md
generated
vendored
Normal file
8
vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md
generated
vendored
Normal file
|
|
@ -0,0 +1,8 @@
|
|||
# v3.0.1
|
||||
|
||||
Fixed:
|
||||
- Security issue: an attacker specifying a large "p2c" value can cause
|
||||
JSONWebEncryption.Decrypt and JSONWebEncryption.DecryptMulti to consume large
|
||||
amounts of CPU, causing a DoS. Thanks to Matt Schwager (@mschwager) for the
|
||||
disclosure and to Tom Tervoort for originally publishing the category of attack.
|
||||
https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf
|
||||
5
vendor/github.com/go-jose/go-jose/v3/symmetric.go
generated
vendored
5
vendor/github.com/go-jose/go-jose/v3/symmetric.go
generated
vendored
|
|
@ -415,6 +415,11 @@ func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipien
|
|||
if p2c <= 0 {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid P2C: must be a positive integer")
|
||||
}
|
||||
if p2c > 1000000 {
|
||||
// An unauthenticated attacker can set a high P2C value. Set an upper limit to avoid
|
||||
// DoS attacks.
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid P2C: too high")
|
||||
}
|
||||
|
||||
// salt is UTF8(Alg) || 0x00 || Salt Input
|
||||
alg := headers.getAlgorithm()
|
||||
|
|
|
|||
20
vendor/github.com/klauspost/compress/.goreleaser.yml
generated
vendored
20
vendor/github.com/klauspost/compress/.goreleaser.yml
generated
vendored
|
|
@ -3,7 +3,7 @@
|
|||
before:
|
||||
hooks:
|
||||
- ./gen.sh
|
||||
- go install mvdan.cc/garble@v0.9.3
|
||||
- go install mvdan.cc/garble@v0.10.1
|
||||
|
||||
builds:
|
||||
-
|
||||
|
|
@ -92,16 +92,7 @@ builds:
|
|||
archives:
|
||||
-
|
||||
id: s2-binaries
|
||||
name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}"
|
||||
replacements:
|
||||
aix: AIX
|
||||
darwin: OSX
|
||||
linux: Linux
|
||||
windows: Windows
|
||||
386: i386
|
||||
amd64: x86_64
|
||||
freebsd: FreeBSD
|
||||
netbsd: NetBSD
|
||||
name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
|
||||
format_overrides:
|
||||
- goos: windows
|
||||
format: zip
|
||||
|
|
@ -125,7 +116,7 @@ changelog:
|
|||
|
||||
nfpms:
|
||||
-
|
||||
file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
|
||||
file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
|
||||
vendor: Klaus Post
|
||||
homepage: https://github.com/klauspost/compress
|
||||
maintainer: Klaus Post <klauspost@gmail.com>
|
||||
|
|
@ -134,8 +125,3 @@ nfpms:
|
|||
formats:
|
||||
- deb
|
||||
- rpm
|
||||
replacements:
|
||||
darwin: Darwin
|
||||
linux: Linux
|
||||
freebsd: FreeBSD
|
||||
amd64: x86_64
|
||||
|
|
|
|||
27
vendor/github.com/klauspost/compress/README.md
generated
vendored
27
vendor/github.com/klauspost/compress/README.md
generated
vendored
|
|
@ -16,6 +16,26 @@ This package provides various compression algorithms.
|
|||
|
||||
# changelog
|
||||
|
||||
* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2)
|
||||
* zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876
|
||||
|
||||
* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1)
|
||||
* s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871
|
||||
* flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869
|
||||
* s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867
|
||||
|
||||
* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0)
|
||||
* Add experimental dictionary builder https://github.com/klauspost/compress/pull/853
|
||||
* Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838
|
||||
* flate: Add limited window compression https://github.com/klauspost/compress/pull/843
|
||||
* s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839
|
||||
* flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837
|
||||
* gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860
|
||||
|
||||
* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7)
|
||||
* zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829
|
||||
* s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832
|
||||
|
||||
* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6)
|
||||
* zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806
|
||||
* zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824
|
||||
|
|
@ -50,6 +70,9 @@ This package provides various compression algorithms.
|
|||
* s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
|
||||
* s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746
|
||||
|
||||
<details>
|
||||
<summary>See changes to v1.15.x</summary>
|
||||
|
||||
* Jan 21st, 2023 (v1.15.15)
|
||||
* deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
|
||||
* zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
|
||||
|
|
@ -176,6 +199,8 @@ Stream decompression is now faster on asynchronous, since the goroutine allocati
|
|||
|
||||
While the release has been extensively tested, it is recommended to testing when upgrading.
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>See changes to v1.14.x</summary>
|
||||
|
||||
|
|
@ -636,6 +661,8 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv
|
|||
* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
|
||||
* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
|
||||
* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.
|
||||
* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index.
|
||||
* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor.
|
||||
|
||||
# license
|
||||
|
||||
|
|
|
|||
29
vendor/github.com/klauspost/compress/flate/deflate.go
generated
vendored
29
vendor/github.com/klauspost/compress/flate/deflate.go
generated
vendored
|
|
@ -7,6 +7,7 @@ package flate
|
|||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
|
|
@ -833,6 +834,12 @@ func (d *compressor) init(w io.Writer, level int) (err error) {
|
|||
d.initDeflate()
|
||||
d.fill = (*compressor).fillDeflate
|
||||
d.step = (*compressor).deflateLazy
|
||||
case -level >= MinCustomWindowSize && -level <= MaxCustomWindowSize:
|
||||
d.w.logNewTablePenalty = 7
|
||||
d.fast = &fastEncL5Window{maxOffset: int32(-level), cur: maxStoreBlockSize}
|
||||
d.window = make([]byte, maxStoreBlockSize)
|
||||
d.fill = (*compressor).fillBlock
|
||||
d.step = (*compressor).storeFast
|
||||
default:
|
||||
return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
|
||||
}
|
||||
|
|
@ -929,6 +936,28 @@ func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
|
|||
return zw, err
|
||||
}
|
||||
|
||||
// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow.
|
||||
const MinCustomWindowSize = 32
|
||||
|
||||
// MaxCustomWindowSize is the maximum custom window that can be sent to NewWriterWindow.
|
||||
const MaxCustomWindowSize = windowSize
|
||||
|
||||
// NewWriterWindow returns a new Writer compressing data with a custom window size.
|
||||
// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize.
|
||||
func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) {
|
||||
if windowSize < MinCustomWindowSize {
|
||||
return nil, errors.New("flate: requested window size less than MinWindowSize")
|
||||
}
|
||||
if windowSize > MaxCustomWindowSize {
|
||||
return nil, errors.New("flate: requested window size bigger than MaxCustomWindowSize")
|
||||
}
|
||||
var dw Writer
|
||||
if err := dw.d.init(w, -windowSize); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &dw, nil
|
||||
}
|
||||
|
||||
// A Writer takes data written to it and writes the compressed
|
||||
// form of that data to an underlying writer (see NewWriter).
|
||||
type Writer struct {
|
||||
|
|
|
|||
23
vendor/github.com/klauspost/compress/flate/fast_encoder.go
generated
vendored
23
vendor/github.com/klauspost/compress/flate/fast_encoder.go
generated
vendored
|
|
@ -8,7 +8,6 @@ package flate
|
|||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
type fastEnc interface {
|
||||
|
|
@ -192,25 +191,3 @@ func (e *fastGen) Reset() {
|
|||
}
|
||||
e.hist = e.hist[:0]
|
||||
}
|
||||
|
||||
// matchLen returns the maximum length.
|
||||
// 'a' must be the shortest of the two.
|
||||
func matchLen(a, b []byte) int {
|
||||
var checked int
|
||||
|
||||
for len(a) >= 8 {
|
||||
if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
|
||||
return checked + (bits.TrailingZeros64(diff) >> 3)
|
||||
}
|
||||
checked += 8
|
||||
a = a[8:]
|
||||
b = b[8:]
|
||||
}
|
||||
b = b[:len(a)]
|
||||
for i := range a {
|
||||
if a[i] != b[i] {
|
||||
return i + checked
|
||||
}
|
||||
}
|
||||
return len(a) + checked
|
||||
}
|
||||
|
|
|
|||
66
vendor/github.com/klauspost/compress/flate/inflate.go
generated
vendored
66
vendor/github.com/klauspost/compress/flate/inflate.go
generated
vendored
|
|
@ -120,8 +120,9 @@ func (h *huffmanDecoder) init(lengths []int) bool {
|
|||
const sanity = false
|
||||
|
||||
if h.chunks == nil {
|
||||
h.chunks = &[huffmanNumChunks]uint16{}
|
||||
h.chunks = new([huffmanNumChunks]uint16)
|
||||
}
|
||||
|
||||
if h.maxRead != 0 {
|
||||
*h = huffmanDecoder{chunks: h.chunks, links: h.links}
|
||||
}
|
||||
|
|
@ -175,6 +176,7 @@ func (h *huffmanDecoder) init(lengths []int) bool {
|
|||
}
|
||||
|
||||
h.maxRead = min
|
||||
|
||||
chunks := h.chunks[:]
|
||||
for i := range chunks {
|
||||
chunks[i] = 0
|
||||
|
|
@ -202,8 +204,7 @@ func (h *huffmanDecoder) init(lengths []int) bool {
|
|||
if cap(h.links[off]) < numLinks {
|
||||
h.links[off] = make([]uint16, numLinks)
|
||||
} else {
|
||||
links := h.links[off][:0]
|
||||
h.links[off] = links[:numLinks]
|
||||
h.links[off] = h.links[off][:numLinks]
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
|
@ -277,7 +278,7 @@ func (h *huffmanDecoder) init(lengths []int) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// The actual read interface needed by NewReader.
|
||||
// Reader is the actual read interface needed by NewReader.
|
||||
// If the passed in io.Reader does not also have ReadByte,
|
||||
// the NewReader will introduce its own buffering.
|
||||
type Reader interface {
|
||||
|
|
@ -285,6 +286,18 @@ type Reader interface {
|
|||
io.ByteReader
|
||||
}
|
||||
|
||||
type step uint8
|
||||
|
||||
const (
|
||||
copyData step = iota + 1
|
||||
nextBlock
|
||||
huffmanBytesBuffer
|
||||
huffmanBytesReader
|
||||
huffmanBufioReader
|
||||
huffmanStringsReader
|
||||
huffmanGenericReader
|
||||
)
|
||||
|
||||
// Decompress state.
|
||||
type decompressor struct {
|
||||
// Input source.
|
||||
|
|
@ -303,7 +316,7 @@ type decompressor struct {
|
|||
|
||||
// Next step in the decompression,
|
||||
// and decompression state.
|
||||
step func(*decompressor)
|
||||
step step
|
||||
stepState int
|
||||
err error
|
||||
toRead []byte
|
||||
|
|
@ -342,7 +355,7 @@ func (f *decompressor) nextBlock() {
|
|||
// compressed, fixed Huffman tables
|
||||
f.hl = &fixedHuffmanDecoder
|
||||
f.hd = nil
|
||||
f.huffmanBlockDecoder()()
|
||||
f.huffmanBlockDecoder()
|
||||
if debugDecode {
|
||||
fmt.Println("predefinied huffman block")
|
||||
}
|
||||
|
|
@ -353,7 +366,7 @@ func (f *decompressor) nextBlock() {
|
|||
}
|
||||
f.hl = &f.h1
|
||||
f.hd = &f.h2
|
||||
f.huffmanBlockDecoder()()
|
||||
f.huffmanBlockDecoder()
|
||||
if debugDecode {
|
||||
fmt.Println("dynamic huffman block")
|
||||
}
|
||||
|
|
@ -379,14 +392,16 @@ func (f *decompressor) Read(b []byte) (int, error) {
|
|||
if f.err != nil {
|
||||
return 0, f.err
|
||||
}
|
||||
f.step(f)
|
||||
|
||||
f.doStep()
|
||||
|
||||
if f.err != nil && len(f.toRead) == 0 {
|
||||
f.toRead = f.dict.readFlush() // Flush what's left in case of error
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Support the io.WriteTo interface for io.Copy and friends.
|
||||
// WriteTo implements the io.WriteTo interface for io.Copy and friends.
|
||||
func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
|
||||
total := int64(0)
|
||||
flushed := false
|
||||
|
|
@ -410,7 +425,7 @@ func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
|
|||
return total, f.err
|
||||
}
|
||||
if f.err == nil {
|
||||
f.step(f)
|
||||
f.doStep()
|
||||
}
|
||||
if len(f.toRead) == 0 && f.err != nil && !flushed {
|
||||
f.toRead = f.dict.readFlush() // Flush what's left in case of error
|
||||
|
|
@ -631,7 +646,7 @@ func (f *decompressor) copyData() {
|
|||
|
||||
if f.dict.availWrite() == 0 || f.copyLen > 0 {
|
||||
f.toRead = f.dict.readFlush()
|
||||
f.step = (*decompressor).copyData
|
||||
f.step = copyData
|
||||
return
|
||||
}
|
||||
f.finishBlock()
|
||||
|
|
@ -644,7 +659,28 @@ func (f *decompressor) finishBlock() {
|
|||
}
|
||||
f.err = io.EOF
|
||||
}
|
||||
f.step = (*decompressor).nextBlock
|
||||
f.step = nextBlock
|
||||
}
|
||||
|
||||
func (f *decompressor) doStep() {
|
||||
switch f.step {
|
||||
case copyData:
|
||||
f.copyData()
|
||||
case nextBlock:
|
||||
f.nextBlock()
|
||||
case huffmanBytesBuffer:
|
||||
f.huffmanBytesBuffer()
|
||||
case huffmanBytesReader:
|
||||
f.huffmanBytesReader()
|
||||
case huffmanBufioReader:
|
||||
f.huffmanBufioReader()
|
||||
case huffmanStringsReader:
|
||||
f.huffmanStringsReader()
|
||||
case huffmanGenericReader:
|
||||
f.huffmanGenericReader()
|
||||
default:
|
||||
panic("BUG: unexpected step state")
|
||||
}
|
||||
}
|
||||
|
||||
// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF.
|
||||
|
|
@ -747,7 +783,7 @@ func (f *decompressor) Reset(r io.Reader, dict []byte) error {
|
|||
h1: f.h1,
|
||||
h2: f.h2,
|
||||
dict: f.dict,
|
||||
step: (*decompressor).nextBlock,
|
||||
step: nextBlock,
|
||||
}
|
||||
f.dict.init(maxMatchOffset, dict)
|
||||
return nil
|
||||
|
|
@ -768,7 +804,7 @@ func NewReader(r io.Reader) io.ReadCloser {
|
|||
f.r = makeReader(r)
|
||||
f.bits = new([maxNumLit + maxNumDist]int)
|
||||
f.codebits = new([numCodes]int)
|
||||
f.step = (*decompressor).nextBlock
|
||||
f.step = nextBlock
|
||||
f.dict.init(maxMatchOffset, nil)
|
||||
return &f
|
||||
}
|
||||
|
|
@ -787,7 +823,7 @@ func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
|
|||
f.r = makeReader(r)
|
||||
f.bits = new([maxNumLit + maxNumDist]int)
|
||||
f.codebits = new([numCodes]int)
|
||||
f.step = (*decompressor).nextBlock
|
||||
f.step = nextBlock
|
||||
f.dict.init(maxMatchOffset, dict)
|
||||
return &f
|
||||
}
|
||||
|
|
|
|||
34
vendor/github.com/klauspost/compress/flate/inflate_gen.go
generated
vendored
34
vendor/github.com/klauspost/compress/flate/inflate_gen.go
generated
vendored
|
|
@ -85,7 +85,7 @@ readLiteral:
|
|||
dict.writeByte(byte(v))
|
||||
if dict.availWrite() == 0 {
|
||||
f.toRead = dict.readFlush()
|
||||
f.step = (*decompressor).huffmanBytesBuffer
|
||||
f.step = huffmanBytesBuffer
|
||||
f.stepState = stateInit
|
||||
f.b, f.nb = fb, fnb
|
||||
return
|
||||
|
|
@ -251,7 +251,7 @@ copyHistory:
|
|||
|
||||
if dict.availWrite() == 0 || f.copyLen > 0 {
|
||||
f.toRead = dict.readFlush()
|
||||
f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work
|
||||
f.step = huffmanBytesBuffer // We need to continue this work
|
||||
f.stepState = stateDict
|
||||
f.b, f.nb = fb, fnb
|
||||
return
|
||||
|
|
@ -336,7 +336,7 @@ readLiteral:
|
|||
dict.writeByte(byte(v))
|
||||
if dict.availWrite() == 0 {
|
||||
f.toRead = dict.readFlush()
|
||||
f.step = (*decompressor).huffmanBytesReader
|
||||
f.step = huffmanBytesReader
|
||||
f.stepState = stateInit
|
||||
f.b, f.nb = fb, fnb
|
||||
return
|
||||
|
|
@ -502,7 +502,7 @@ copyHistory:
|
|||
|
||||
if dict.availWrite() == 0 || f.copyLen > 0 {
|
||||
f.toRead = dict.readFlush()
|
||||
f.step = (*decompressor).huffmanBytesReader // We need to continue this work
|
||||
f.step = huffmanBytesReader // We need to continue this work
|
||||
f.stepState = stateDict
|
||||
f.b, f.nb = fb, fnb
|
||||
return
|
||||
|
|
@ -587,7 +587,7 @@ readLiteral:
|
|||
dict.writeByte(byte(v))
|
||||
if dict.availWrite() == 0 {
|
||||
f.toRead = dict.readFlush()
|
||||
f.step = (*decompressor).huffmanBufioReader
|
||||
f.step = huffmanBufioReader
|
||||
f.stepState = stateInit
|
||||
f.b, f.nb = fb, fnb
|
||||
return
|
||||
|
|
@ -753,7 +753,7 @@ copyHistory:
|
|||
|
||||
if dict.availWrite() == 0 || f.copyLen > 0 {
|
||||
f.toRead = dict.readFlush()
|
||||
f.step = (*decompressor).huffmanBufioReader // We need to continue this work
|
||||
f.step = huffmanBufioReader // We need to continue this work
|
||||
f.stepState = stateDict
|
||||
f.b, f.nb = fb, fnb
|
||||
return
|
||||
|
|
@ -838,7 +838,7 @@ readLiteral:
|
|||
dict.writeByte(byte(v))
|
||||
if dict.availWrite() == 0 {
|
||||
f.toRead = dict.readFlush()
|
||||
f.step = (*decompressor).huffmanStringsReader
|
||||
f.step = huffmanStringsReader
|
||||
f.stepState = stateInit
|
||||
f.b, f.nb = fb, fnb
|
||||
return
|
||||
|
|
@ -1004,7 +1004,7 @@ copyHistory:
|
|||
|
||||
if dict.availWrite() == 0 || f.copyLen > 0 {
|
||||
f.toRead = dict.readFlush()
|
||||
f.step = (*decompressor).huffmanStringsReader // We need to continue this work
|
||||
f.step = huffmanStringsReader // We need to continue this work
|
||||
f.stepState = stateDict
|
||||
f.b, f.nb = fb, fnb
|
||||
return
|
||||
|
|
@ -1089,7 +1089,7 @@ readLiteral:
|
|||
dict.writeByte(byte(v))
|
||||
if dict.availWrite() == 0 {
|
||||
f.toRead = dict.readFlush()
|
||||
f.step = (*decompressor).huffmanGenericReader
|
||||
f.step = huffmanGenericReader
|
||||
f.stepState = stateInit
|
||||
f.b, f.nb = fb, fnb
|
||||
return
|
||||
|
|
@ -1255,7 +1255,7 @@ copyHistory:
|
|||
|
||||
if dict.availWrite() == 0 || f.copyLen > 0 {
|
||||
f.toRead = dict.readFlush()
|
||||
f.step = (*decompressor).huffmanGenericReader // We need to continue this work
|
||||
f.step = huffmanGenericReader // We need to continue this work
|
||||
f.stepState = stateDict
|
||||
f.b, f.nb = fb, fnb
|
||||
return
|
||||
|
|
@ -1265,19 +1265,19 @@ copyHistory:
|
|||
// Not reached
|
||||
}
|
||||
|
||||
func (f *decompressor) huffmanBlockDecoder() func() {
|
||||
func (f *decompressor) huffmanBlockDecoder() {
|
||||
switch f.r.(type) {
|
||||
case *bytes.Buffer:
|
||||
return f.huffmanBytesBuffer
|
||||
f.huffmanBytesBuffer()
|
||||
case *bytes.Reader:
|
||||
return f.huffmanBytesReader
|
||||
f.huffmanBytesReader()
|
||||
case *bufio.Reader:
|
||||
return f.huffmanBufioReader
|
||||
f.huffmanBufioReader()
|
||||
case *strings.Reader:
|
||||
return f.huffmanStringsReader
|
||||
f.huffmanStringsReader()
|
||||
case Reader:
|
||||
return f.huffmanGenericReader
|
||||
f.huffmanGenericReader()
|
||||
default:
|
||||
return f.huffmanGenericReader
|
||||
f.huffmanGenericReader()
|
||||
}
|
||||
}
|
||||
|
|
|
|||
398
vendor/github.com/klauspost/compress/flate/level5.go
generated
vendored
398
vendor/github.com/klauspost/compress/flate/level5.go
generated
vendored
|
|
@ -308,3 +308,401 @@ emitRemainder:
|
|||
emitLiteral(dst, src[nextEmit:])
|
||||
}
|
||||
}
|
||||
|
||||
// fastEncL5Window is a level 5 encoder,
|
||||
// but with a custom window size.
|
||||
type fastEncL5Window struct {
|
||||
hist []byte
|
||||
cur int32
|
||||
maxOffset int32
|
||||
table [tableSize]tableEntry
|
||||
bTable [tableSize]tableEntryPrev
|
||||
}
|
||||
|
||||
func (e *fastEncL5Window) Encode(dst *tokens, src []byte) {
|
||||
const (
|
||||
inputMargin = 12 - 1
|
||||
minNonLiteralBlockSize = 1 + 1 + inputMargin
|
||||
hashShortBytes = 4
|
||||
)
|
||||
maxMatchOffset := e.maxOffset
|
||||
if debugDeflate && e.cur < 0 {
|
||||
panic(fmt.Sprint("e.cur < 0: ", e.cur))
|
||||
}
|
||||
|
||||
// Protect against e.cur wraparound.
|
||||
for e.cur >= bufferReset {
|
||||
if len(e.hist) == 0 {
|
||||
for i := range e.table[:] {
|
||||
e.table[i] = tableEntry{}
|
||||
}
|
||||
for i := range e.bTable[:] {
|
||||
e.bTable[i] = tableEntryPrev{}
|
||||
}
|
||||
e.cur = maxMatchOffset
|
||||
break
|
||||
}
|
||||
// Shift down everything in the table that isn't already too far away.
|
||||
minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
|
||||
for i := range e.table[:] {
|
||||
v := e.table[i].offset
|
||||
if v <= minOff {
|
||||
v = 0
|
||||
} else {
|
||||
v = v - e.cur + maxMatchOffset
|
||||
}
|
||||
e.table[i].offset = v
|
||||
}
|
||||
for i := range e.bTable[:] {
|
||||
v := e.bTable[i]
|
||||
if v.Cur.offset <= minOff {
|
||||
v.Cur.offset = 0
|
||||
v.Prev.offset = 0
|
||||
} else {
|
||||
v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
|
||||
if v.Prev.offset <= minOff {
|
||||
v.Prev.offset = 0
|
||||
} else {
|
||||
v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
|
||||
}
|
||||
}
|
||||
e.bTable[i] = v
|
||||
}
|
||||
e.cur = maxMatchOffset
|
||||
}
|
||||
|
||||
s := e.addBlock(src)
|
||||
|
||||
// This check isn't in the Snappy implementation, but there, the caller
|
||||
// instead of the callee handles this case.
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
// We do not fill the token table.
|
||||
// This will be picked up by caller.
|
||||
dst.n = uint16(len(src))
|
||||
return
|
||||
}
|
||||
|
||||
// Override src
|
||||
src = e.hist
|
||||
nextEmit := s
|
||||
|
||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||
// looking for copies.
|
||||
sLimit := int32(len(src) - inputMargin)
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
cv := load6432(src, s)
|
||||
for {
|
||||
const skipLog = 6
|
||||
const doEvery = 1
|
||||
|
||||
nextS := s
|
||||
var l int32
|
||||
var t int32
|
||||
for {
|
||||
nextHashS := hashLen(cv, tableBits, hashShortBytes)
|
||||
nextHashL := hash7(cv, tableBits)
|
||||
|
||||
s = nextS
|
||||
nextS = s + doEvery + (s-nextEmit)>>skipLog
|
||||
if nextS > sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
// Fetch a short+long candidate
|
||||
sCandidate := e.table[nextHashS]
|
||||
lCandidate := e.bTable[nextHashL]
|
||||
next := load6432(src, nextS)
|
||||
entry := tableEntry{offset: s + e.cur}
|
||||
e.table[nextHashS] = entry
|
||||
eLong := &e.bTable[nextHashL]
|
||||
eLong.Cur, eLong.Prev = entry, eLong.Cur
|
||||
|
||||
nextHashS = hashLen(next, tableBits, hashShortBytes)
|
||||
nextHashL = hash7(next, tableBits)
|
||||
|
||||
t = lCandidate.Cur.offset - e.cur
|
||||
if s-t < maxMatchOffset {
|
||||
if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
|
||||
// Store the next match
|
||||
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
|
||||
eLong := &e.bTable[nextHashL]
|
||||
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
|
||||
|
||||
t2 := lCandidate.Prev.offset - e.cur
|
||||
if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
|
||||
l = e.matchlen(s+4, t+4, src) + 4
|
||||
ml1 := e.matchlen(s+4, t2+4, src) + 4
|
||||
if ml1 > l {
|
||||
t = t2
|
||||
l = ml1
|
||||
break
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
t = lCandidate.Prev.offset - e.cur
|
||||
if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
|
||||
// Store the next match
|
||||
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
|
||||
eLong := &e.bTable[nextHashL]
|
||||
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
t = sCandidate.offset - e.cur
|
||||
if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
|
||||
// Found a 4 match...
|
||||
l = e.matchlen(s+4, t+4, src) + 4
|
||||
lCandidate = e.bTable[nextHashL]
|
||||
// Store the next match
|
||||
|
||||
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
|
||||
eLong := &e.bTable[nextHashL]
|
||||
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
|
||||
|
||||
// If the next long is a candidate, use that...
|
||||
t2 := lCandidate.Cur.offset - e.cur
|
||||
if nextS-t2 < maxMatchOffset {
|
||||
if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
|
||||
ml := e.matchlen(nextS+4, t2+4, src) + 4
|
||||
if ml > l {
|
||||
t = t2
|
||||
s = nextS
|
||||
l = ml
|
||||
break
|
||||
}
|
||||
}
|
||||
// If the previous long is a candidate, use that...
|
||||
t2 = lCandidate.Prev.offset - e.cur
|
||||
if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
|
||||
ml := e.matchlen(nextS+4, t2+4, src) + 4
|
||||
if ml > l {
|
||||
t = t2
|
||||
s = nextS
|
||||
l = ml
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
cv = next
|
||||
}
|
||||
|
||||
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
||||
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
||||
// them as literal bytes.
|
||||
|
||||
if l == 0 {
|
||||
// Extend the 4-byte match as long as possible.
|
||||
l = e.matchlenLong(s+4, t+4, src) + 4
|
||||
} else if l == maxMatchLength {
|
||||
l += e.matchlenLong(s+l, t+l, src)
|
||||
}
|
||||
|
||||
// Try to locate a better match by checking the end of best match...
|
||||
if sAt := s + l; l < 30 && sAt < sLimit {
|
||||
// Allow some bytes at the beginning to mismatch.
|
||||
// Sweet spot is 2/3 bytes depending on input.
|
||||
// 3 is only a little better when it is but sometimes a lot worse.
|
||||
// The skipped bytes are tested in Extend backwards,
|
||||
// and still picked up as part of the match if they do.
|
||||
const skipBeginning = 2
|
||||
eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
|
||||
t2 := eLong - e.cur - l + skipBeginning
|
||||
s2 := s + skipBeginning
|
||||
off := s2 - t2
|
||||
if t2 >= 0 && off < maxMatchOffset && off > 0 {
|
||||
if l2 := e.matchlenLong(s2, t2, src); l2 > l {
|
||||
t = t2
|
||||
l = l2
|
||||
s = s2
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Extend backwards
|
||||
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
|
||||
s--
|
||||
t--
|
||||
l++
|
||||
}
|
||||
if nextEmit < s {
|
||||
if false {
|
||||
emitLiteral(dst, src[nextEmit:s])
|
||||
} else {
|
||||
for _, v := range src[nextEmit:s] {
|
||||
dst.tokens[dst.n] = token(v)
|
||||
dst.litHist[v]++
|
||||
dst.n++
|
||||
}
|
||||
}
|
||||
}
|
||||
if debugDeflate {
|
||||
if t >= s {
|
||||
panic(fmt.Sprintln("s-t", s, t))
|
||||
}
|
||||
if (s - t) > maxMatchOffset {
|
||||
panic(fmt.Sprintln("mmo", s-t))
|
||||
}
|
||||
if l < baseMatchLength {
|
||||
panic("bml")
|
||||
}
|
||||
}
|
||||
|
||||
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
|
||||
s += l
|
||||
nextEmit = s
|
||||
if nextS >= s {
|
||||
s = nextS + 1
|
||||
}
|
||||
|
||||
if s >= sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
// Store every 3rd hash in-between.
|
||||
if true {
|
||||
const hashEvery = 3
|
||||
i := s - l + 1
|
||||
if i < s-1 {
|
||||
cv := load6432(src, i)
|
||||
t := tableEntry{offset: i + e.cur}
|
||||
e.table[hashLen(cv, tableBits, hashShortBytes)] = t
|
||||
eLong := &e.bTable[hash7(cv, tableBits)]
|
||||
eLong.Cur, eLong.Prev = t, eLong.Cur
|
||||
|
||||
// Do an long at i+1
|
||||
cv >>= 8
|
||||
t = tableEntry{offset: t.offset + 1}
|
||||
eLong = &e.bTable[hash7(cv, tableBits)]
|
||||
eLong.Cur, eLong.Prev = t, eLong.Cur
|
||||
|
||||
// We only have enough bits for a short entry at i+2
|
||||
cv >>= 8
|
||||
t = tableEntry{offset: t.offset + 1}
|
||||
e.table[hashLen(cv, tableBits, hashShortBytes)] = t
|
||||
|
||||
// Skip one - otherwise we risk hitting 's'
|
||||
i += 4
|
||||
for ; i < s-1; i += hashEvery {
|
||||
cv := load6432(src, i)
|
||||
t := tableEntry{offset: i + e.cur}
|
||||
t2 := tableEntry{offset: t.offset + 1}
|
||||
eLong := &e.bTable[hash7(cv, tableBits)]
|
||||
eLong.Cur, eLong.Prev = t, eLong.Cur
|
||||
e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We could immediately start working at s now, but to improve
|
||||
// compression we first update the hash table at s-1 and at s.
|
||||
x := load6432(src, s-1)
|
||||
o := e.cur + s - 1
|
||||
prevHashS := hashLen(x, tableBits, hashShortBytes)
|
||||
prevHashL := hash7(x, tableBits)
|
||||
e.table[prevHashS] = tableEntry{offset: o}
|
||||
eLong := &e.bTable[prevHashL]
|
||||
eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
|
||||
cv = x >> 8
|
||||
}
|
||||
|
||||
emitRemainder:
|
||||
if int(nextEmit) < len(src) {
|
||||
// If nothing was added, don't encode literals.
|
||||
if dst.n == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
emitLiteral(dst, src[nextEmit:])
|
||||
}
|
||||
}
|
||||
|
||||
// Reset the encoding table.
|
||||
func (e *fastEncL5Window) Reset() {
|
||||
// We keep the same allocs, since we are compressing the same block sizes.
|
||||
if cap(e.hist) < allocHistory {
|
||||
e.hist = make([]byte, 0, allocHistory)
|
||||
}
|
||||
|
||||
// We offset current position so everything will be out of reach.
|
||||
// If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
|
||||
if e.cur <= int32(bufferReset) {
|
||||
e.cur += e.maxOffset + int32(len(e.hist))
|
||||
}
|
||||
e.hist = e.hist[:0]
|
||||
}
|
||||
|
||||
func (e *fastEncL5Window) addBlock(src []byte) int32 {
|
||||
// check if we have space already
|
||||
maxMatchOffset := e.maxOffset
|
||||
|
||||
if len(e.hist)+len(src) > cap(e.hist) {
|
||||
if cap(e.hist) == 0 {
|
||||
e.hist = make([]byte, 0, allocHistory)
|
||||
} else {
|
||||
if cap(e.hist) < int(maxMatchOffset*2) {
|
||||
panic("unexpected buffer size")
|
||||
}
|
||||
// Move down
|
||||
offset := int32(len(e.hist)) - maxMatchOffset
|
||||
copy(e.hist[0:maxMatchOffset], e.hist[offset:])
|
||||
e.cur += offset
|
||||
e.hist = e.hist[:maxMatchOffset]
|
||||
}
|
||||
}
|
||||
s := int32(len(e.hist))
|
||||
e.hist = append(e.hist, src...)
|
||||
return s
|
||||
}
|
||||
|
||||
// matchlen will return the match length between offsets and t in src.
|
||||
// The maximum length returned is maxMatchLength - 4.
|
||||
// It is assumed that s > t, that t >=0 and s < len(src).
|
||||
func (e *fastEncL5Window) matchlen(s, t int32, src []byte) int32 {
|
||||
if debugDecode {
|
||||
if t >= s {
|
||||
panic(fmt.Sprint("t >=s:", t, s))
|
||||
}
|
||||
if int(s) >= len(src) {
|
||||
panic(fmt.Sprint("s >= len(src):", s, len(src)))
|
||||
}
|
||||
if t < 0 {
|
||||
panic(fmt.Sprint("t < 0:", t))
|
||||
}
|
||||
if s-t > e.maxOffset {
|
||||
panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
|
||||
}
|
||||
}
|
||||
s1 := int(s) + maxMatchLength - 4
|
||||
if s1 > len(src) {
|
||||
s1 = len(src)
|
||||
}
|
||||
|
||||
// Extend the match to be as long as possible.
|
||||
return int32(matchLen(src[s:s1], src[t:]))
|
||||
}
|
||||
|
||||
// matchlenLong will return the match length between offsets and t in src.
|
||||
// It is assumed that s > t, that t >=0 and s < len(src).
|
||||
func (e *fastEncL5Window) matchlenLong(s, t int32, src []byte) int32 {
|
||||
if debugDeflate {
|
||||
if t >= s {
|
||||
panic(fmt.Sprint("t >=s:", t, s))
|
||||
}
|
||||
if int(s) >= len(src) {
|
||||
panic(fmt.Sprint("s >= len(src):", s, len(src)))
|
||||
}
|
||||
if t < 0 {
|
||||
panic(fmt.Sprint("t < 0:", t))
|
||||
}
|
||||
if s-t > e.maxOffset {
|
||||
panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
|
||||
}
|
||||
}
|
||||
// Extend the match to be as long as possible.
|
||||
return int32(matchLen(src[s:], src[t:]))
|
||||
}
|
||||
|
|
|
|||
16
vendor/github.com/klauspost/compress/flate/matchlen_amd64.go
generated
vendored
Normal file
16
vendor/github.com/klauspost/compress/flate/matchlen_amd64.go
generated
vendored
Normal file
|
|
@ -0,0 +1,16 @@
|
|||
//go:build amd64 && !appengine && !noasm && gc
|
||||
// +build amd64,!appengine,!noasm,gc
|
||||
|
||||
// Copyright 2019+ Klaus Post. All rights reserved.
|
||||
// License information can be found in the LICENSE file.
|
||||
|
||||
package flate
|
||||
|
||||
// matchLen returns how many bytes match in a and b
|
||||
//
|
||||
// It assumes that:
|
||||
//
|
||||
// len(a) <= len(b) and len(a) > 0
|
||||
//
|
||||
//go:noescape
|
||||
func matchLen(a []byte, b []byte) int
|
||||
68
vendor/github.com/klauspost/compress/flate/matchlen_amd64.s
generated
vendored
Normal file
68
vendor/github.com/klauspost/compress/flate/matchlen_amd64.s
generated
vendored
Normal file
|
|
@ -0,0 +1,68 @@
|
|||
// Copied from S2 implementation.
|
||||
|
||||
//go:build !appengine && !noasm && gc && !noasm
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// func matchLen(a []byte, b []byte) int
|
||||
// Requires: BMI
|
||||
TEXT ·matchLen(SB), NOSPLIT, $0-56
|
||||
MOVQ a_base+0(FP), AX
|
||||
MOVQ b_base+24(FP), CX
|
||||
MOVQ a_len+8(FP), DX
|
||||
|
||||
// matchLen
|
||||
XORL SI, SI
|
||||
CMPL DX, $0x08
|
||||
JB matchlen_match4_standalone
|
||||
|
||||
matchlen_loopback_standalone:
|
||||
MOVQ (AX)(SI*1), BX
|
||||
XORQ (CX)(SI*1), BX
|
||||
TESTQ BX, BX
|
||||
JZ matchlen_loop_standalone
|
||||
|
||||
#ifdef GOAMD64_v3
|
||||
TZCNTQ BX, BX
|
||||
#else
|
||||
BSFQ BX, BX
|
||||
#endif
|
||||
SARQ $0x03, BX
|
||||
LEAL (SI)(BX*1), SI
|
||||
JMP gen_match_len_end
|
||||
|
||||
matchlen_loop_standalone:
|
||||
LEAL -8(DX), DX
|
||||
LEAL 8(SI), SI
|
||||
CMPL DX, $0x08
|
||||
JAE matchlen_loopback_standalone
|
||||
|
||||
matchlen_match4_standalone:
|
||||
CMPL DX, $0x04
|
||||
JB matchlen_match2_standalone
|
||||
MOVL (AX)(SI*1), BX
|
||||
CMPL (CX)(SI*1), BX
|
||||
JNE matchlen_match2_standalone
|
||||
LEAL -4(DX), DX
|
||||
LEAL 4(SI), SI
|
||||
|
||||
matchlen_match2_standalone:
|
||||
CMPL DX, $0x02
|
||||
JB matchlen_match1_standalone
|
||||
MOVW (AX)(SI*1), BX
|
||||
CMPW (CX)(SI*1), BX
|
||||
JNE matchlen_match1_standalone
|
||||
LEAL -2(DX), DX
|
||||
LEAL 2(SI), SI
|
||||
|
||||
matchlen_match1_standalone:
|
||||
CMPL DX, $0x01
|
||||
JB gen_match_len_end
|
||||
MOVB (AX)(SI*1), BL
|
||||
CMPB (CX)(SI*1), BL
|
||||
JNE gen_match_len_end
|
||||
INCL SI
|
||||
|
||||
gen_match_len_end:
|
||||
MOVQ SI, ret+48(FP)
|
||||
RET
|
||||
33
vendor/github.com/klauspost/compress/flate/matchlen_generic.go
generated
vendored
Normal file
33
vendor/github.com/klauspost/compress/flate/matchlen_generic.go
generated
vendored
Normal file
|
|
@ -0,0 +1,33 @@
|
|||
//go:build !amd64 || appengine || !gc || noasm
|
||||
// +build !amd64 appengine !gc noasm
|
||||
|
||||
// Copyright 2019+ Klaus Post. All rights reserved.
|
||||
// License information can be found in the LICENSE file.
|
||||
|
||||
package flate
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
// matchLen returns the maximum common prefix length of a and b.
|
||||
// a must be the shortest of the two.
|
||||
func matchLen(a, b []byte) (n int) {
|
||||
for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
|
||||
diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
|
||||
if diff != 0 {
|
||||
return n + bits.TrailingZeros64(diff)>>3
|
||||
}
|
||||
n += 8
|
||||
}
|
||||
|
||||
for i := range a {
|
||||
if a[i] != b[i] {
|
||||
break
|
||||
}
|
||||
n++
|
||||
}
|
||||
return n
|
||||
|
||||
}
|
||||
3
vendor/github.com/klauspost/compress/fse/bitwriter.go
generated
vendored
3
vendor/github.com/klauspost/compress/fse/bitwriter.go
generated
vendored
|
|
@ -152,12 +152,11 @@ func (b *bitWriter) flushAlign() {
|
|||
|
||||
// close will write the alignment bit and write the final byte(s)
|
||||
// to the output.
|
||||
func (b *bitWriter) close() error {
|
||||
func (b *bitWriter) close() {
|
||||
// End mark
|
||||
b.addBits16Clean(1, 1)
|
||||
// flush until next byte.
|
||||
b.flushAlign()
|
||||
return nil
|
||||
}
|
||||
|
||||
// reset and continue writing by appending to out.
|
||||
|
|
|
|||
5
vendor/github.com/klauspost/compress/fse/compress.go
generated
vendored
5
vendor/github.com/klauspost/compress/fse/compress.go
generated
vendored
|
|
@ -199,7 +199,8 @@ func (s *Scratch) compress(src []byte) error {
|
|||
c2.flush(s.actualTableLog)
|
||||
c1.flush(s.actualTableLog)
|
||||
|
||||
return s.bw.close()
|
||||
s.bw.close()
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeCount will write the normalized histogram count to header.
|
||||
|
|
@ -211,7 +212,7 @@ func (s *Scratch) writeCount() error {
|
|||
previous0 bool
|
||||
charnum uint16
|
||||
|
||||
maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
|
||||
maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3
|
||||
|
||||
// Write Table Size
|
||||
bitStream = uint32(tableLog - minTablelog)
|
||||
|
|
|
|||
3
vendor/github.com/klauspost/compress/huff0/bitwriter.go
generated
vendored
3
vendor/github.com/klauspost/compress/huff0/bitwriter.go
generated
vendored
|
|
@ -94,10 +94,9 @@ func (b *bitWriter) flushAlign() {
|
|||
|
||||
// close will write the alignment bit and write the final byte(s)
|
||||
// to the output.
|
||||
func (b *bitWriter) close() error {
|
||||
func (b *bitWriter) close() {
|
||||
// End mark
|
||||
b.addBits16Clean(1, 1)
|
||||
// flush until next byte.
|
||||
b.flushAlign()
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
20
vendor/github.com/klauspost/compress/huff0/compress.go
generated
vendored
20
vendor/github.com/klauspost/compress/huff0/compress.go
generated
vendored
|
|
@@ -227,10 +227,10 @@ func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err err
 }
 
 func (s *Scratch) compress1X(src []byte) ([]byte, error) {
-	return s.compress1xDo(s.Out, src)
+	return s.compress1xDo(s.Out, src), nil
 }
 
-func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
+func (s *Scratch) compress1xDo(dst, src []byte) []byte {
	var bw = bitWriter{out: dst}
 
	// N is length divisible by 4.

@@ -260,8 +260,8 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
		}
	}
-	err := bw.close()
-	return bw.out, err
+	bw.close()
+	return bw.out
 }
 
 var sixZeros [6]byte

@@ -283,12 +283,8 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) {
		}
		src = src[len(toDo):]
 
-		var err error
		idx := len(s.Out)
-		s.Out, err = s.compress1xDo(s.Out, toDo)
-		if err != nil {
-			return nil, err
-		}
+		s.Out = s.compress1xDo(s.Out, toDo)
		if len(s.Out)-idx > math.MaxUint16 {
			// We cannot store the size in the jump table
			return nil, ErrIncompressible

@@ -315,7 +311,6 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
 
	segmentSize := (len(src) + 3) / 4
	var wg sync.WaitGroup
-	var errs [4]error
	wg.Add(4)
	for i := 0; i < 4; i++ {
		toDo := src

@@ -326,15 +321,12 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
 
		// Separate goroutine for each block.
		go func(i int) {
-			s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
+			s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
			wg.Done()
		}(i)
	}
	wg.Wait()
	for i := 0; i < 4; i++ {
-		if errs[i] != nil {
-			return nil, errs[i]
-		}
		o := s.tmpOut[i]
		if len(o) > math.MaxUint16 {
			// We cannot store the size in the jump table
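Note: with compress1xDo now infallible, compress4Xp keeps its four-goroutine fan-out but drops the [4]error gather. A self-contained sketch of the same WaitGroup shape; encodeQuarter and the variable names are stand-ins for illustration, not the vendored Scratch.

package main

import (
	"fmt"
	"sync"
)

// encodeQuarter stands in for the now-infallible compress1xDo: it just
// copies each block; the real code Huffman-encodes it into dst.
func encodeQuarter(dst, src []byte) []byte {
	return append(dst[:0], src...)
}

func main() {
	src := []byte("0123456789abcdef")
	segmentSize := (len(src) + 3) / 4

	var wg sync.WaitGroup
	var tmpOut [4][]byte // no [4]error needed anymore
	wg.Add(4)
	for i := 0; i < 4; i++ {
		toDo := src
		if len(toDo) > segmentSize {
			toDo = toDo[:segmentSize]
		}
		src = src[len(toDo):]
		// Separate goroutine for each block, as in compress4Xp.
		go func(i int, toDo []byte) {
			tmpOut[i] = encodeQuarter(tmpOut[i][:0], toDo)
			wg.Done()
		}(i, toDo)
	}
	wg.Wait()
	for i, o := range tmpOut {
		fmt.Printf("stream %d: %q\n", i, o)
	}
}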
vendor/github.com/klauspost/compress/zstd/bitreader.go (34 changes, generated, vendored)
@@ -17,7 +17,6 @@ import (
 // for aligning the input.
 type bitReader struct {
	in       []byte
-	off      uint // next byte to read is at in[off - 1]
	value    uint64 // Maybe use [16]byte, but shifting is awkward.
	bitsRead uint8
 }

@@ -28,7 +27,6 @@ func (b *bitReader) init(in []byte) error {
		return errors.New("corrupt stream: too short")
	}
	b.in = in
-	b.off = uint(len(in))
	// The highest bit of the last byte indicates where to start
	v := in[len(in)-1]
	if v == 0 {

@@ -69,21 +67,19 @@ func (b *bitReader) fillFast() {
	if b.bitsRead < 32 {
		return
	}
-	// 2 bounds checks.
-	v := b.in[b.off-4:]
-	v = v[:4]
+	v := b.in[len(b.in)-4:]
+	b.in = b.in[:len(b.in)-4]
	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
	b.value = (b.value << 32) | uint64(low)
	b.bitsRead -= 32
-	b.off -= 4
 }
 
 // fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
 func (b *bitReader) fillFastStart() {
-	// Do single re-slice to avoid bounds checks.
-	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	v := b.in[len(b.in)-8:]
+	b.in = b.in[:len(b.in)-8]
+	b.value = binary.LittleEndian.Uint64(v)
	b.bitsRead = 0
-	b.off -= 8
 }
 
 // fill() will make sure at least 32 bits are available.

@@ -91,25 +87,25 @@ func (b *bitReader) fill() {
	if b.bitsRead < 32 {
		return
	}
-	if b.off >= 4 {
-		v := b.in[b.off-4:]
-		v = v[:4]
+	if len(b.in) >= 4 {
+		v := b.in[len(b.in)-4:]
+		b.in = b.in[:len(b.in)-4]
		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
		b.value = (b.value << 32) | uint64(low)
		b.bitsRead -= 32
-		b.off -= 4
		return
	}
-	for b.off > 0 {
-		b.value = (b.value << 8) | uint64(b.in[b.off-1])
-		b.bitsRead -= 8
-		b.off--
+
+	b.bitsRead -= uint8(8 * len(b.in))
+	for len(b.in) > 0 {
+		b.value = (b.value << 8) | uint64(b.in[len(b.in)-1])
+		b.in = b.in[:len(b.in)-1]
	}
 }
 
 // finished returns true if all bits have been read from the bit stream.
 func (b *bitReader) finished() bool {
-	return b.off == 0 && b.bitsRead >= 64
+	return len(b.in) == 0 && b.bitsRead >= 64
 }
 
 // overread returns true if more bits have been requested than is on the stream.

@@ -119,7 +115,7 @@ func (b *bitReader) overread() bool {
 
 // remain returns the number of bits remaining.
 func (b *bitReader) remain() uint {
-	return b.off*8 + 64 - uint(b.bitsRead)
+	return 8*uint(len(b.in)) + 64 - uint(b.bitsRead)
 }
 
 // close the bitstream and returns an error if out-of-buffer reads occurred.
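Note: the bitReader now consumes its input by re-slicing b.in from the tail instead of maintaining a separate off index, so one field disappears and every helper keys its bounds checks off len(b.in). A minimal standalone sketch of that tail-consuming read; the helper name is invented, and the vendored code assembles the 32-bit value by hand rather than via encoding/binary.

package main

import (
	"encoding/binary"
	"fmt"
)

// readTail32 pops the last 4 bytes of in, the same re-slicing pattern the
// updated fill()/fillFast() use.
func readTail32(in []byte) (uint32, []byte) {
	v := in[len(in)-4:] // single bounds check keyed on len(in)
	in = in[:len(in)-4] // consume from the tail
	return binary.LittleEndian.Uint32(v), in
}

func main() {
	in := []byte{1, 2, 3, 4, 5, 6, 7, 8}
	for len(in) >= 4 {
		var v uint32
		v, in = readTail32(in)
		fmt.Printf("%#08x, %d bytes left\n", v, len(in))
	}
}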
vendor/github.com/klauspost/compress/zstd/bitwriter.go (3 changes, generated, vendored)
@@ -97,12 +97,11 @@ func (b *bitWriter) flushAlign() {
 
 // close will write the alignment bit and write the final byte(s)
 // to the output.
-func (b *bitWriter) close() error {
+func (b *bitWriter) close() {
	// End mark
	b.addBits16Clean(1, 1)
	// flush until next byte.
	b.flushAlign()
-	return nil
 }
 
 // reset and continue writing by appending to out.
vendor/github.com/klauspost/compress/zstd/blockenc.go (29 changes, generated, vendored)
@@ -361,14 +361,21 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
	if len(lits) >= 1024 {
		// Use 4 Streams.
		out, reUsed, err = huff0.Compress4X(lits, b.litEnc)
-	} else if len(lits) > 32 {
+	} else if len(lits) > 16 {
		// Use 1 stream
		single = true
		out, reUsed, err = huff0.Compress1X(lits, b.litEnc)
	} else {
		err = huff0.ErrIncompressible
	}
 
+	if err == nil && len(out)+5 > len(lits) {
+		// If we are close, we may still be worse or equal to raw.
+		var lh literalsHeader
+		lh.setSizes(len(out), len(lits), single)
+		if len(out)+lh.size() >= len(lits) {
+			err = huff0.ErrIncompressible
+		}
+	}
	switch err {
	case huff0.ErrIncompressible:
		if debugEncoder {

@@ -503,7 +510,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
	if len(b.literals) >= 1024 && !raw {
		// Use 4 Streams.
		out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
-	} else if len(b.literals) > 32 && !raw {
+	} else if len(b.literals) > 16 && !raw {
		// Use 1 stream
		single = true
		out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)

@@ -511,6 +518,17 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
		err = huff0.ErrIncompressible
	}
 
+	if err == nil && len(out)+5 > len(b.literals) {
+		// If we are close, we may still be worse or equal to raw.
+		var lh literalsHeader
+		lh.setSize(len(b.literals))
+		szRaw := lh.size()
+		lh.setSizes(len(out), len(b.literals), single)
+		szComp := lh.size()
+		if len(out)+szComp >= len(b.literals)+szRaw {
+			err = huff0.ErrIncompressible
+		}
+	}
	switch err {
	case huff0.ErrIncompressible:
		lh.setType(literalsBlockRaw)
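Note: the new guard falls back to a raw literals block whenever the compressed bytes plus their (larger) header would not beat the raw bytes plus the raw header. The header sizes below are assumptions taken from the zstd format (RFC 8878), standing in for the vendored literalsHeader.size(); the arithmetic is the point, not the exact thresholds.

package main

import "fmt"

// Approximate literals-section header sizes per RFC 8878; illustrative only.
func rawHeaderSize(n int) int {
	switch {
	case n < 32:
		return 1
	case n < 1<<12:
		return 2
	default:
		return 3
	}
}

func compressedHeaderSize(n int) int {
	switch {
	case n < 1<<10:
		return 3
	case n < 1<<14:
		return 4
	default:
		return 5
	}
}

func main() {
	lits, out := 100, 99 // 100 literals that only compressed to 99 bytes
	szRaw := rawHeaderSize(lits)
	szComp := compressedHeaderSize(out)
	// Same comparison the new code performs: 99+3 >= 100+2, so the
	// "compressed" form is no smaller overall and raw wins.
	fmt.Println("keep raw:", out+szComp >= lits+szRaw)
}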
@@ -773,10 +791,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
	ml.flush(mlEnc.actualTableLog)
	of.flush(ofEnc.actualTableLog)
	ll.flush(llEnc.actualTableLog)
-	err = wr.close()
-	if err != nil {
-		return err
-	}
+	wr.close()
	b.output = wr.out
 
	// Maybe even add a bigger margin.
vendor/github.com/klauspost/compress/zstd/dict.go (375 changes, generated, vendored)
@@ -1,10 +1,13 @@
 package zstd
 
 import (
+	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
+	"math"
+	"sort"
 
	"github.com/klauspost/compress/huff0"
 )

@@ -14,7 +17,6 @@ type dict struct {
 
	litEnc              *huff0.Scratch
	llDec, ofDec, mlDec sequenceDec
-	//llEnc, ofEnc, mlEnc []*fseEncoder
	offsets             [3]int
	content             []byte
 }

@@ -159,3 +161,374 @@ func InspectDictionary(b []byte) (interface {
	d, err := loadDict(b)
	return d, err
 }
+
+type BuildDictOptions struct {
+	// Dictionary ID.
+	ID uint32
+
+	// Content to use to create dictionary tables.
+	Contents [][]byte
+
+	// History to use for all blocks.
+	History []byte
+
+	// Offsets to use.
+	Offsets [3]int
+
+	// CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier.
+	// See https://github.com/facebook/zstd/issues/3724
+	CompatV155 bool
+
+	// Use the specified encoder level.
+	// The dictionary will be built using the specified encoder level,
+	// which will reflect speed and make the dictionary tailored for that level.
+	// If not set SpeedBestCompression will be used.
+	Level EncoderLevel
+
+	// DebugOut will write stats and other details here if set.
+	DebugOut io.Writer
+}
+
+func BuildDict(o BuildDictOptions) ([]byte, error) {
+	initPredefined()
+	hist := o.History
+	contents := o.Contents
+	debug := o.DebugOut != nil
+	println := func(args ...interface{}) {
+		if o.DebugOut != nil {
+			fmt.Fprintln(o.DebugOut, args...)
+		}
+	}
+	printf := func(s string, args ...interface{}) {
+		if o.DebugOut != nil {
+			fmt.Fprintf(o.DebugOut, s, args...)
+		}
+	}
+	print := func(args ...interface{}) {
+		if o.DebugOut != nil {
+			fmt.Fprint(o.DebugOut, args...)
+		}
+	}
+
+	if int64(len(hist)) > dictMaxLength {
+		return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength))
+	}
+	if len(hist) < 8 {
+		return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8)
+	}
+	if len(contents) == 0 {
+		return nil, errors.New("no content provided")
+	}
+	d := dict{
+		id:      o.ID,
+		litEnc:  nil,
+		llDec:   sequenceDec{},
+		ofDec:   sequenceDec{},
+		mlDec:   sequenceDec{},
+		offsets: o.Offsets,
+		content: hist,
+	}
+	block := blockEnc{lowMem: false}
+	block.init()
+	enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}})
+	if o.Level != 0 {
+		eOpts := encoderOptions{
+			level:      o.Level,
+			blockSize:  maxMatchLen,
+			windowSize: maxMatchLen,
+			dict:       &d,
+			lowMem:     false,
+		}
+		enc = eOpts.encoder()
+	} else {
+		o.Level = SpeedBestCompression
+	}
+	var (
+		remain [256]int
+		ll     [256]int
+		ml     [256]int
+		of     [256]int
+	)
+	addValues := func(dst *[256]int, src []byte) {
+		for _, v := range src {
+			dst[v]++
+		}
+	}
+	addHist := func(dst *[256]int, src *[256]uint32) {
+		for i, v := range src {
+			dst[i] += int(v)
+		}
+	}
+	seqs := 0
+	nUsed := 0
+	litTotal := 0
+	newOffsets := make(map[uint32]int, 1000)
+	for _, b := range contents {
+		block.reset(nil)
+		if len(b) < 8 {
+			continue
+		}
+		nUsed++
+		enc.Reset(&d, true)
+		enc.Encode(&block, b)
+		addValues(&remain, block.literals)
+		litTotal += len(block.literals)
+		seqs += len(block.sequences)
+		block.genCodes()
+		addHist(&ll, block.coders.llEnc.Histogram())
+		addHist(&ml, block.coders.mlEnc.Histogram())
+		addHist(&of, block.coders.ofEnc.Histogram())
+		for i, seq := range block.sequences {
+			if i > 3 {
+				break
+			}
+			offset := seq.offset
+			if offset == 0 {
+				continue
+			}
+			if offset > 3 {
+				newOffsets[offset-3]++
+			} else {
+				newOffsets[uint32(o.Offsets[offset-1])]++
+			}
+		}
+	}
+	// Find most used offsets.
+	var sortedOffsets []uint32
+	for k := range newOffsets {
+		sortedOffsets = append(sortedOffsets, k)
+	}
+	sort.Slice(sortedOffsets, func(i, j int) bool {
+		a, b := sortedOffsets[i], sortedOffsets[j]
+		if a == b {
+			// Prefer the longer offset
+			return sortedOffsets[i] > sortedOffsets[j]
+		}
+		return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]]
+	})
+	if len(sortedOffsets) > 3 {
+		if debug {
+			print("Offsets:")
+			for i, v := range sortedOffsets {
+				if i > 20 {
+					break
+				}
+				printf("[%d: %d],", v, newOffsets[v])
+			}
+			println("")
+		}
+
+		sortedOffsets = sortedOffsets[:3]
+	}
+	for i, v := range sortedOffsets {
+		o.Offsets[i] = int(v)
+	}
+	if debug {
+		println("New repeat offsets", o.Offsets)
+	}
+
+	if nUsed == 0 || seqs == 0 {
+		return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs)
+	}
+	if debug {
+		println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal)
+	}
+	if seqs/nUsed < 512 {
+		// Use 512 as minimum.
+		nUsed = seqs / 512
+	}
+	copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) {
+		hist := dst.Histogram()
+		var maxSym uint8
+		var maxCount int
+		var fakeLength int
+		for i, v := range src {
+			if v > 0 {
+				v = v / nUsed
+				if v == 0 {
+					v = 1
+				}
+			}
+			if v > maxCount {
+				maxCount = v
+			}
+			if v != 0 {
+				maxSym = uint8(i)
+			}
+			fakeLength += v
+			hist[i] = uint32(v)
+		}
+		dst.HistogramFinished(maxSym, maxCount)
+		dst.reUsed = false
+		dst.useRLE = false
+		err := dst.normalizeCount(fakeLength)
+		if err != nil {
+			return nil, err
+		}
+		if debug {
+			println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength)
+		}
+		return dst.writeCount(nil)
+	}
+	if debug {
+		print("Literal lengths: ")
+	}
+	llTable, err := copyHist(block.coders.llEnc, &ll)
+	if err != nil {
+		return nil, err
+	}
+	if debug {
+		print("Match lengths: ")
+	}
+	mlTable, err := copyHist(block.coders.mlEnc, &ml)
+	if err != nil {
+		return nil, err
+	}
+	if debug {
+		print("Offsets: ")
+	}
+	ofTable, err := copyHist(block.coders.ofEnc, &of)
+	if err != nil {
+		return nil, err
+	}
+
+	// Literal table
+	avgSize := litTotal
+	if avgSize > huff0.BlockSizeMax/2 {
+		avgSize = huff0.BlockSizeMax / 2
+	}
+	huffBuff := make([]byte, 0, avgSize)
+	// Target size
+	div := litTotal / avgSize
+	if div < 1 {
+		div = 1
+	}
+	if debug {
+		println("Huffman weights:")
+	}
+	for i, n := range remain[:] {
+		if n > 0 {
+			n = n / div
+			// Allow all entries to be represented.
+			if n == 0 {
+				n = 1
+			}
+			huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
+			if debug {
+				printf("[%d: %d], ", i, n)
+			}
+		}
+	}
+	if o.CompatV155 && remain[255]/div == 0 {
+		huffBuff = append(huffBuff, 255)
+	}
+	scratch := &huff0.Scratch{TableLog: 11}
+	for tries := 0; tries < 255; tries++ {
+		scratch = &huff0.Scratch{TableLog: 11}
+		_, _, err = huff0.Compress1X(huffBuff, scratch)
+		if err == nil {
+			break
+		}
+		if debug {
+			printf("Try %d: Huffman error: %v\n", tries+1, err)
+		}
+		huffBuff = huffBuff[:0]
+		if tries == 250 {
+			if debug {
+				println("Huffman: Bailing out with predefined table")
+			}
+
+			// Bail out.... Just generate something
+			huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...)
+			for i := 0; i < 128; i++ {
+				huffBuff = append(huffBuff, byte(i))
+			}
+			continue
+		}
+		if errors.Is(err, huff0.ErrIncompressible) {
+			// Try truncating least common.
+			for i, n := range remain[:] {
+				if n > 0 {
+					n = n / (div * (i + 1))
+					if n > 0 {
+						huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
+					}
+				}
+			}
+			if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 {
+				huffBuff = append(huffBuff, 255)
+			}
+			if len(huffBuff) == 0 {
+				huffBuff = append(huffBuff, 0, 255)
+			}
+		}
+		if errors.Is(err, huff0.ErrUseRLE) {
+			for i, n := range remain[:] {
+				n = n / (div * (i + 1))
+				// Allow all entries to be represented.
+				if n == 0 {
+					n = 1
+				}
+				huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
+			}
+		}
+	}
+
+	var out bytes.Buffer
+	out.Write([]byte(dictMagic))
+	out.Write(binary.LittleEndian.AppendUint32(nil, o.ID))
+	out.Write(scratch.OutTable)
+	if debug {
+		println("huff table:", len(scratch.OutTable), "bytes")
+		println("of table:", len(ofTable), "bytes")
+		println("ml table:", len(mlTable), "bytes")
+		println("ll table:", len(llTable), "bytes")
+	}
+	out.Write(ofTable)
+	out.Write(mlTable)
+	out.Write(llTable)
+	out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0])))
+	out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1])))
+	out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2])))
+	out.Write(hist)
+	if debug {
+		_, err := loadDict(out.Bytes())
+		if err != nil {
+			panic(err)
+		}
+		i, err := InspectDictionary(out.Bytes())
+		if err != nil {
+			panic(err)
+		}
+		println("ID:", i.ID())
+		println("Content size:", i.ContentSize())
+		println("Encoder:", i.LitEncoder() != nil)
+		println("Offsets:", i.Offsets())
+		var totalSize int
+		for _, b := range contents {
+			totalSize += len(b)
+		}
+
+		encWith := func(opts ...EOption) int {
+			enc, err := NewWriter(nil, opts...)
+			if err != nil {
+				panic(err)
+			}
+			defer enc.Close()
+			var dst []byte
+			var totalSize int
+			for _, b := range contents {
+				dst = enc.EncodeAll(b, dst[:0])
+				totalSize += len(dst)
+			}
+			return totalSize
+		}
+		plain := encWith(WithEncoderLevel(o.Level))
+		withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes()))
+		println("Input size:", totalSize)
+		println("Plain Compressed:", plain)
+		println("Dict Compressed:", withDict)
+		println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)")
+	}
+	return out.Bytes(), nil
+}
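Note: the bulk of this file is the new exported BuildDict API. A hedged usage sketch follows; the sample corpus, dictionary ID and variable names are invented for illustration ({1, 4, 8} are zstd's conventional starting repeat offsets), and only the option surface comes from the code above. The builder wants a corpus with plenty of shared substrings; very small or unmatchable inputs are rejected.

package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Toy training corpus: many log-like lines with shared field names so
	// the builder finds enough sequences and literals to model.
	var samples [][]byte
	for i := 0; i < 16; i++ {
		var b bytes.Buffer
		for j := 0; j < 200; j++ {
			fmt.Fprintf(&b, "user=%d op=read object=bucket/%d status=ok\n", i*1000+j, j%17)
		}
		samples = append(samples, b.Bytes())
	}

	dict, err := zstd.BuildDict(zstd.BuildDictOptions{
		ID:       0x1234,           // arbitrary dictionary ID
		Contents: samples,          // blocks the tables are tuned for
		History:  samples[0],       // content prepended to every block
		Offsets:  [3]int{1, 4, 8},  // default starting repeat offsets
		// Level left zero defaults to SpeedBestCompression.
		DebugOut: os.Stderr, // optional stats; drop for quiet builds
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, "BuildDict:", err)
		return
	}
	fmt.Println("dictionary size:", len(dict), "bytes")

	// The result can be handed straight to an encoder.
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dict))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer enc.Close()
	fmt.Println("compressed:", len(enc.EncodeAll(samples[0], nil)), "bytes")
}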
vendor/github.com/klauspost/compress/zstd/enc_best.go (55 changes, generated, vendored)
@@ -43,7 +43,7 @@ func (m *match) estBits(bitsPerByte int32) {
	if m.rep < 0 {
		ofc = ofCode(uint32(m.s-m.offset) + 3)
	} else {
-		ofc = ofCode(uint32(m.rep))
+		ofc = ofCode(uint32(m.rep) & 3)
	}
	// Cost, excluding
	ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc]

@@ -197,12 +197,13 @@ encodeLoop:
 
		// Set m to a match at offset if it looks like that will improve compression.
		improve := func(m *match, offset int32, s int32, first uint32, rep int32) {
-			if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
+			delta := s - offset
+			if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first {
				return
			}
			if debugAsserts {
-				if offset <= 0 {
-					panic(offset)
+				if offset >= s {
+					panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff))
				}
				if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
					panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))

@@ -226,7 +227,7 @@ encodeLoop:
				}
			}
			l := 4 + e.matchlen(s+4, offset+4, src)
-			if rep < 0 {
+			if true {
				// Extend candidate match backwards as far as possible.
				tMin := s - e.maxMatchOff
				if tMin < 0 {

@@ -281,6 +282,7 @@ encodeLoop:
		// Load next and check...
		e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset}
		e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset}
+		index0 := s + 1
 
		// Look far ahead, unless we have a really long match already...
		if best.length < goodEnough {

@@ -343,8 +345,8 @@ encodeLoop:
		if best.rep > 0 {
			var seq seq
			seq.matchLen = uint32(best.length - zstdMinMatch)
-			if debugAsserts && s <= nextEmit {
-				panic("s <= nextEmit")
+			if debugAsserts && s < nextEmit {
+				panic("s < nextEmit")
			}
			addLiterals(&seq, best.s)
 

@@ -356,19 +358,16 @@ encodeLoop:
			blk.sequences = append(blk.sequences, seq)
 
			// Index old s + 1 -> s - 1
-			index0 := s + 1
			s = best.s + best.length
-
			nextEmit = s
-			if s >= sLimit {
-				if debugEncoder {
-					println("repeat ended", s, best.length)
-				}
-				break encodeLoop
-			}
+
			// Index skipped...
+			end := s
+			if s > sLimit+4 {
+				end = sLimit + 4
+			}
			off := index0 + e.cur
-			for index0 < s {
+			for index0 < end {
				cv0 := load6432(src, index0)
				h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
				h1 := hashLen(cv0, bestShortTableBits, bestShortLen)

@@ -377,6 +376,7 @@ encodeLoop:
				off++
				index0++
			}
+
			switch best.rep {
			case 2, 4 | 1:
				offset1, offset2 = offset2, offset1

@@ -385,12 +385,17 @@ encodeLoop:
			case 4 | 3:
				offset1, offset2, offset3 = offset1-1, offset1, offset2
			}
+			if s >= sLimit {
+				if debugEncoder {
+					println("repeat ended", s, best.length)
+				}
+				break encodeLoop
+			}
			continue
		}
 
		// A 4-byte match has been found. Update recent offsets.
		// We'll later see if more than 4 bytes.
+		index0 := s + 1
		s = best.s
		t := best.offset
		offset1, offset2, offset3 = s-t, offset1, offset2

@@ -418,19 +423,25 @@ encodeLoop:
		}
		blk.sequences = append(blk.sequences, seq)
		nextEmit = s
-		if s >= sLimit {
-			break encodeLoop
-		}
 
-		// Index old s + 1 -> s - 1
-		for index0 < s {
+		// Index old s + 1 -> s - 1 or sLimit
+		end := s
+		if s > sLimit-4 {
+			end = sLimit - 4
+		}
+
+		off := index0 + e.cur
+		for index0 < end {
			cv0 := load6432(src, index0)
			h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
			h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
-			off := index0 + e.cur
			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
			e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
			index0++
+			off++
		}
+		if s >= sLimit {
+			break encodeLoop
+		}
	}
vendor/github.com/klauspost/compress/zstd/enc_better.go (17 changes, generated, vendored)
@@ -145,7 +145,7 @@ encodeLoop:
		var t int32
		// We allow the encoder to optionally turn off repeat offsets across blocks
		canRepeat := len(blk.sequences) > 2
-		var matched int32
+		var matched, index0 int32
 
		for {
			if debugAsserts && canRepeat && offset1 == 0 {

@@ -162,6 +162,7 @@ encodeLoop:
			off := s + e.cur
			e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
			e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
+			index0 = s + 1
 
			if canRepeat {
				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {

@@ -258,7 +259,6 @@ encodeLoop:
					}
					blk.sequences = append(blk.sequences, seq)
 
-					index0 := s + repOff2
					s += lenght + repOff2
					nextEmit = s
					if s >= sLimit {

@@ -498,15 +498,15 @@ encodeLoop:
			}
 
			// Index match start+1 (long) -> s - 1
-			index0 := s - l + 1
+			off := index0 + e.cur
			for index0 < s-1 {
				cv0 := load6432(src, index0)
				cv1 := cv0 >> 8
				h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
-				off := index0 + e.cur
				e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
				e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
				index0 += 2
+				off += 2
			}
 
			cv = load6432(src, s)

@@ -672,7 +672,7 @@ encodeLoop:
		var t int32
		// We allow the encoder to optionally turn off repeat offsets across blocks
		canRepeat := len(blk.sequences) > 2
-		var matched int32
+		var matched, index0 int32
 
		for {
			if debugAsserts && canRepeat && offset1 == 0 {

@@ -691,6 +691,7 @@ encodeLoop:
			e.markLongShardDirty(nextHashL)
			e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
			e.markShortShardDirty(nextHashS)
+			index0 = s + 1
 
			if canRepeat {
				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {

@@ -726,7 +727,6 @@ encodeLoop:
					blk.sequences = append(blk.sequences, seq)
 
					// Index match start+1 (long) -> s - 1
-					index0 := s + repOff
					s += lenght + repOff
 
					nextEmit = s

@@ -790,7 +790,6 @@ encodeLoop:
					}
					blk.sequences = append(blk.sequences, seq)
 
-					index0 := s + repOff2
					s += lenght + repOff2
					nextEmit = s
					if s >= sLimit {

@@ -1024,18 +1023,18 @@ encodeLoop:
			}
 
			// Index match start+1 (long) -> s - 1
-			index0 := s - l + 1
+			off := index0 + e.cur
			for index0 < s-1 {
				cv0 := load6432(src, index0)
				cv1 := cv0 >> 8
				h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
-				off := index0 + e.cur
				e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
				e.markLongShardDirty(h0)
				h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
				e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
				e.markShortShardDirty(h1)
				index0 += 2
+				off += 2
			}
 
			cv = load6432(src, s)
vendor/github.com/klauspost/compress/zstd/encoder.go (13 changes, generated, vendored)
@@ -227,10 +227,7 @@ func (e *Encoder) nextBlock(final bool) error {
			DictID: e.o.dict.ID(),
		}
 
-		dst, err := fh.appendTo(tmp[:0])
-		if err != nil {
-			return err
-		}
+		dst := fh.appendTo(tmp[:0])
		s.headerWritten = true
		s.wWg.Wait()
		var n2 int

@@ -483,7 +480,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
			Checksum: false,
			DictID:   0,
		}
-		dst, _ = fh.appendTo(dst)
+		dst = fh.appendTo(dst)
 
		// Write raw block as last one only.
		var blk blockHeader

@@ -518,10 +515,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
	if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem {
		dst = make([]byte, 0, len(src))
	}
-	dst, err := fh.appendTo(dst)
-	if err != nil {
-		panic(err)
-	}
+	dst = fh.appendTo(dst)
 
	// If we can do everything in one block, prefer that.
	if len(src) <= e.o.blockSize {

@@ -581,6 +575,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
	// Add padding with content from crypto/rand.Reader
	if e.o.pad > 0 {
		add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad))
+		var err error
		dst, err = skippableFrame(dst, add, rand.Reader)
		if err != nil {
			panic(err)
vendor/github.com/klauspost/compress/zstd/frameenc.go (4 changes, generated, vendored)
@@ -22,7 +22,7 @@ type frameHeader struct {
 
 const maxHeaderSize = 14
 
-func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
+func (f frameHeader) appendTo(dst []byte) []byte {
	dst = append(dst, frameMagic...)
	var fhd uint8
	if f.Checksum {

@@ -88,7 +88,7 @@ func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
	default:
		panic("invalid fcs")
	}
-	return dst, nil
+	return dst
 }
 
 const skippableFrameHeader = 4 + 4
vendor/github.com/klauspost/compress/zstd/seqdec.go (11 changes, generated, vendored)
@@ -245,7 +245,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
			return io.ErrUnexpectedEOF
		}
		var ll, mo, ml int
-		if br.off > 4+((maxOffsetBits+16+16)>>3) {
+		if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
			// inlined function:
			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
 

@@ -452,18 +452,13 @@ func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol)
 
	// extra bits are stored in reverse order.
	br.fill()
-	if s.maxBits <= 32 {
-		mo += br.getBits(moB)
-		ml += br.getBits(mlB)
-		ll += br.getBits(llB)
-	} else {
-		mo += br.getBits(moB)
+	mo += br.getBits(moB)
+	if s.maxBits > 32 {
		br.fill()
-		// matchlength+literal length, max 32 bits
-		ml += br.getBits(mlB)
-		ll += br.getBits(llB)
	}
+	// matchlength+literal length, max 32 bits
+	ml += br.getBits(mlB)
+	ll += br.getBits(llB)
	mo = s.adjustOffset(mo, ll, moB)
	return
 }
Some files were not shown because too many files have changed in this diff.