vendor: Update osbuild/images to commit dd48a38be218
This is needed for the test_distro.NewTestDistro change.
parent eab16830aa
commit 1b65f15449
345 changed files with 276130 additions and 14546 deletions
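For reference, a vendor bump like this is normally produced with the standard Go module tooling; the exact invocation below is an assumption (only the module path and commit hash come from this change):

    go get github.com/osbuild/images@dd48a38be218
    go mod tidy
    go mod vendor

The final step regenerates the vendor/ tree, which is what accounts for most of the 345 changed files; only the go.mod and go.sum portions of the diff are shown below.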
83	go.mod
@@ -6,13 +6,13 @@ exclude github.com/mattn/go-sqlite3 v2.0.3+incompatible
require (
cloud.google.com/go/compute v1.23.0
cloud.google.com/go/storage v1.32.0
cloud.google.com/go/storage v1.33.0
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0
github.com/Azure/go-autorest/autorest v0.11.29
github.com/Azure/go-autorest/autorest/azure/auth v0.5.12
github.com/BurntSushi/toml v1.3.2
github.com/aws/aws-sdk-go v1.44.332
github.com/aws/aws-sdk-go v1.45.10
github.com/coreos/go-semver v0.3.1
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
github.com/deepmap/oapi-codegen v1.8.2
@@ -21,7 +21,7 @@ require (
github.com/golang-jwt/jwt/v4 v4.5.0
github.com/google/go-cmp v0.5.9
github.com/google/uuid v1.3.1
github.com/gophercloud/gophercloud v1.5.0
github.com/gophercloud/gophercloud v1.6.0
github.com/hashicorp/go-retryablehttp v0.7.4
github.com/jackc/pgtype v1.14.0
github.com/jackc/pgx/v4 v4.18.1
@@ -31,7 +31,7 @@ require (
github.com/labstack/gommon v0.4.0
github.com/openshift-online/ocm-sdk-go v0.1.364
github.com/oracle/oci-go-sdk/v54 v54.0.0
github.com/osbuild/images v0.3.0
github.com/osbuild/images v0.5.1-0.20230915095808-dd48a38be218
github.com/prometheus/client_golang v1.16.0
github.com/segmentio/ksuid v1.0.4
github.com/sirupsen/logrus v1.9.3
@@ -39,18 +39,18 @@ require (
github.com/stretchr/testify v1.8.4
github.com/ubccr/kerby v0.0.0-20170626144437-201a958fc453
github.com/vmware/govmomi v0.30.7
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
golang.org/x/oauth2 v0.11.0
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63
golang.org/x/oauth2 v0.12.0
golang.org/x/sync v0.3.0
golang.org/x/sys v0.11.0
google.golang.org/api v0.138.0
golang.org/x/sys v0.12.0
google.golang.org/api v0.141.0
)

require (
cloud.google.com/go v0.110.6 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/iam v1.1.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.22 // indirect
@@ -67,25 +67,26 @@ require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/containers/common v0.55.3 // indirect
github.com/containers/image/v5 v5.27.0 // indirect
github.com/containers/common v0.56.0 // indirect
github.com/containers/image/v5 v5.28.0 // indirect
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
github.com/containers/ocicrypt v1.1.7 // indirect
github.com/containers/storage v1.48.0 // indirect
github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1 // indirect
github.com/containers/ocicrypt v1.1.8 // indirect
github.com/containers/storage v1.50.2 // indirect
github.com/cyberphone/json-canonicalization v0.0.0-20230710064741-aa7fe85c7dbd // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dimchansky/utfbom v1.1.1 // indirect
github.com/docker/distribution v2.8.2+incompatible // indirect
github.com/docker/docker v24.0.2+incompatible // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/docker v24.0.6+incompatible // indirect
github.com/docker/docker-credential-helpers v0.8.0 // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dougm/pretty v0.0.0-20171025230240-2ee9d7453c02 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-jose/go-jose/v3 v3.0.0 // indirect
github.com/go-openapi/analysis v0.21.4 // indirect
github.com/go-openapi/errors v0.20.3 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.20.0 // indirect
github.com/go-openapi/errors v0.20.4 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/loads v0.21.2 // indirect
github.com/go-openapi/runtime v0.26.0 // indirect
github.com/go-openapi/spec v0.20.9 // indirect
@@ -96,8 +97,8 @@ require (
github.com/golang/glog v1.1.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/go-containerregistry v0.15.2 // indirect
github.com/google/s2a-go v0.1.5 // indirect
github.com/google/go-containerregistry v0.16.1 // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect
github.com/googleapis/gax-go/v2 v2.12.0 // indirect
github.com/gorilla/css v1.0.0 // indirect
@@ -116,14 +117,15 @@ require (
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.16.6 // indirect
github.com/klauspost/compress v1.16.7 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mattn/go-sqlite3 v1.14.17 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/microcosm-cc/bluemonday v1.0.18 // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
@@ -134,19 +136,20 @@ require (
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0-rc4 // indirect
github.com/opencontainers/runc v1.1.7 // indirect
github.com/opencontainers/runtime-spec v1.1.0-rc.3 // indirect
github.com/opencontainers/image-spec v1.1.0-rc5 // indirect
github.com/opencontainers/runc v1.1.9 // indirect
github.com/opencontainers/runtime-spec v1.1.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/proglottis/gpgme v0.1.3 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/sigstore/fulcio v1.3.1 // indirect
github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12 // indirect
github.com/sigstore/sigstore v1.7.1 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect
github.com/sigstore/fulcio v1.4.0 // indirect
github.com/sigstore/rekor v1.2.2 // indirect
github.com/sigstore/sigstore v1.7.3 // indirect
github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
@@ -156,29 +159,27 @@ require (
github.com/ulikunitz/xz v0.5.11 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasttemplate v1.2.2 // indirect
github.com/vbatts/tar-split v0.11.3 // indirect
github.com/vbauerster/mpb/v8 v8.4.0 // indirect
go.etcd.io/bbolt v1.3.7 // indirect
github.com/vbatts/tar-split v0.11.5 // indirect
github.com/vbauerster/mpb/v8 v8.6.1 // indirect
go.mongodb.org/mongo-driver v1.11.3 // indirect
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
go.opencensus.io v0.24.0 // indirect
golang.org/x/crypto v0.12.0 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/net v0.14.0 // indirect
golang.org/x/term v0.11.0 // indirect
golang.org/x/text v0.12.0 // indirect
golang.org/x/crypto v0.13.0 // indirect
golang.org/x/mod v0.12.0 // indirect
golang.org/x/net v0.15.0 // indirect
golang.org/x/term v0.12.0 // indirect
golang.org/x/text v0.13.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.9.3 // indirect
golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230911183012-2d3300fd4832 // indirect
google.golang.org/grpc v1.57.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
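The new pin is a pseudo-version: v0.5.1-0.20230915095808-dd48a38be218 encodes the commit timestamp and the dd48a38be218 commit named in the title. A quick way to confirm what the module graph now resolves to (standard Go tooling, nothing specific to this repo) is:

    go list -m github.com/osbuild/images
    # github.com/osbuild/images v0.5.1-0.20230915095808-dd48a38be218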
200	go.sum
go.sum: corresponding h1: and /go.mod checksum entries added and removed for the module versions changed in go.mod above.
|
||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||
|
|
@ -1103,8 +1090,8 @@ google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWof
|
|||
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 h1:wukfNtZmZUurLN/atp2hiIeTKn7QJWIQdHzqmsOnAOk=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230911183012-2d3300fd4832 h1:o4LtQxebKIJ4vkzyhtD2rfUNZ20Zf0ik5YVP5E7G7VE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230911183012-2d3300fd4832/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
|
|
@ -1117,10 +1104,7 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa
|
|||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
|
||||
google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
|
||||
google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
|
|
@ -1144,6 +1128,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
|
|||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/go-jose/go-jose.v2 v2.6.1 h1:qEzJlIDmG9q5VO0M/o8tGS65QMHMS1w01TQJB1VPJ4U=
|
||||
|
|
@ -1151,13 +1136,9 @@ gopkg.in/go-jose/go-jose.v2 v2.6.1/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEI
|
|||
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
|
||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
|
||||
gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
|
|
@ -1168,7 +1149,6 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
|
|||
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
|
|
|||
7   vendor/cloud.google.com/go/storage/CHANGES.md (generated, vendored)
@@ -1,6 +1,13 @@
# Changes

## [1.33.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.32.0...storage/v1.33.0) (2023-09-07)

### Features

* **storage:** Export gRPC client constructor ([#8509](https://github.com/googleapis/google-cloud-go/issues/8509)) ([1a928ae](https://github.com/googleapis/google-cloud-go/commit/1a928ae205f2325cb5206304af4d609dc3c1447a))

## [1.32.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.31.0...storage/v1.32.0) (2023-08-15)
27   vendor/cloud.google.com/go/storage/doc.go (generated, vendored)
@@ -331,6 +331,33 @@ to add a [custom audit logging] header:
// Use client as usual with the context and the additional headers will be sent.
client.Bucket("my-bucket").Attrs(ctx)

# Experimental gRPC API

This package includes support for the Cloud Storage gRPC API, which is currently
in preview. This implementation uses gRPC rather than the current JSON & XML
APIs to make requests to Cloud Storage. If you would like to try the API,
please contact your GCP account rep for more information. The gRPC API is not
yet generally available, so it may be subject to breaking changes.

To create a client which will use gRPC, use the alternate constructor:

ctx := context.Background()
client, err := storage.NewGRPCClient(ctx)
if err != nil {
// TODO: Handle error.
}
// Use client as usual.

If the application is running within GCP, users may get better performance by
enabling DirectPath (enabling requests to skip some proxy steps). To enable,
set the environment variable `GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS=true` and add
the following side-effect imports to your application:

import (
_ "google.golang.org/grpc/balancer/rls"
_ "google.golang.org/grpc/xds/googledirectpath"
)

[Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam
[XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object
[Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy
4   vendor/cloud.google.com/go/storage/http_client.go (generated, vendored)
@@ -45,8 +45,6 @@ import (

// httpStorageClient is the HTTP-JSON API implementation of the transport-agnostic
// storageClient interface.
//
// This is an experimental API and not intended for public use.
type httpStorageClient struct {
creds *google.Credentials
hc *http.Client
@@ -59,8 +57,6 @@ type httpStorageClient struct {

// newHTTPStorageClient initializes a new storageClient that uses the HTTP-JSON
// Storage API.
//
// This is an experimental API and not intended for public use.
func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) {
s := initSettings(opts...)
o := s.clientOption
2   vendor/cloud.google.com/go/storage/internal/version.go (generated, vendored)
@@ -15,4 +15,4 @@
package internal

// Version is the current tagged release of the library.
const Version = "1.32.0"
const Version = "1.33.0"
29   vendor/cloud.google.com/go/storage/storage.go (generated, vendored)
@@ -123,7 +123,7 @@ type Client struct {
useGRPC bool
}

// NewClient creates a new Google Cloud Storage client.
// NewClient creates a new Google Cloud Storage client using the HTTP transport.
// The default scope is ScopeFullControl. To use a different scope, like
// ScopeReadOnly, use option.WithScopes.
//
@@ -133,12 +133,6 @@ type Client struct {
// You may configure the client by passing in options from the [google.golang.org/api/option]
// package. You may also use options defined in this package, such as [WithJSONReads].
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
// Use the experimental gRPC client if the env var is set.
// This is an experimental API and not intended for public use.
if withGRPC := os.Getenv("STORAGE_USE_GRPC"); withGRPC != "" {
return newGRPCClient(ctx, opts...)
}

var creds *google.Credentials

// In general, it is recommended to use raw.NewService instead of htransport.NewClient
@@ -220,11 +214,20 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
}, nil
}

// newGRPCClient creates a new Storage client that initializes a gRPC-based
// client. Calls that have not been implemented in gRPC will panic.
// NewGRPCClient creates a new Storage client using the gRPC transport and API.
// Client methods which have not been implemented in gRPC will return an error.
// In particular, methods for Cloud Pub/Sub notifications are not supported.
//
// This is an experimental API and not intended for public use.
func newGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
// The storage gRPC API is still in preview and not yet publicly available.
// If you would like to use the API, please first contact your GCP account rep to
// request access. The API may be subject to breaking changes.
//
// Clients should be reused instead of created as needed. The methods of Client
// are safe for concurrent use by multiple goroutines.
//
// You may configure the client by passing in options from the [google.golang.org/api/option]
// package.
func NewGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
opts = append(defaultGRPCOptions(), opts...)
tc, err := newGRPCStorageClient(ctx, withClientOptions(opts...))
if err != nil {
@@ -2187,8 +2190,6 @@ func toProjectResource(project string) string {

// setConditionProtoField uses protobuf reflection to set named condition field
// to the given condition value if supported on the protobuf message.
//
// This is an experimental API and not intended for public use.
func setConditionProtoField(m protoreflect.Message, f string, v int64) bool {
fields := m.Descriptor().Fields()
if rf := fields.ByName(protoreflect.Name(f)); rf != nil {
@@ -2201,8 +2202,6 @@ func setConditionProtoField(m protoreflect.Message, f string, v int64) bool {

// applyCondsProto validates and attempts to set the conditions on a protobuf
// message using protobuf reflection.
//
// This is an experimental API and not intended for public use.
func applyCondsProto(method string, gen int64, conds *Conditions, msg proto.Message) error {
rmsg := msg.ProtoReflect()
7   vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md (generated, vendored)
@@ -1,5 +1,12 @@
# Release History

## 1.6.1 (2023-06-06)

### Bugs Fixed
* Retry policy always clones the underlying `*http.Request` before invoking the next policy.
* Added some non-standard error codes to the list of error codes for unregistered resource providers.
* Fixed an issue in `azcore.NewClient()` and `arm.NewClient()` that could cause an incorrect module name to be used in telemetry.

## 1.6.0 (2023-05-04)

### Features Added
9   vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go (generated, vendored)
@@ -76,12 +76,13 @@ type Client struct {
}

// NewClient creates a new Client instance with the provided values.
// - clientName - the fully qualified name of the client ("package.Client"); this is used by the tracing provider when creating spans
// - clientName - the fully qualified name of the client ("module/package.Client"); this is used by the telemetry policy and tracing provider.
// if module and package are the same value, the "module/" prefix can be omitted.
// - moduleVersion - the semantic version of the containing module; used by the telemetry policy
// - plOpts - pipeline configuration options; can be the zero-value
// - options - optional client configurations; pass nil to accept the default values
func NewClient(clientName, moduleVersion string, plOpts runtime.PipelineOptions, options *ClientOptions) (*Client, error) {
pkg, err := shared.ExtractPackageName(clientName)
mod, client, err := shared.ExtractModuleName(clientName)
if err != nil {
return nil, err
}
@@ -96,9 +97,9 @@ func NewClient(clientName, moduleVersion string, plOpts runtime.PipelineOptions,
}
}

pl := runtime.NewPipeline(pkg, moduleVersion, plOpts, options)
pl := runtime.NewPipeline(mod, moduleVersion, plOpts, options)

tr := options.TracingProvider.NewTracer(clientName, moduleVersion)
tr := options.TracingProvider.NewTracer(client, moduleVersion)
return &Client{pl: pl, tr: tr}, nil
}
2   vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go (generated, vendored)
@@ -32,5 +32,5 @@ const (
Module = "azcore"

// Version is the semantic version (see http://semver.org) of this module.
Version = "v1.6.0"
Version = "v1.6.1"
)
29   vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go (generated, vendored)
@@ -13,7 +13,6 @@ import (
"reflect"
"regexp"
"strconv"
"strings"
"time"
)

@@ -79,14 +78,26 @@ func ValidateModVer(moduleVersion string) error {
return nil
}

// ExtractPackageName returns "package" from "package.Client".
// ExtractModuleName returns "module", "package.Client" from "module/package.Client" or
// "package", "package.Client" from "package.Client" when there's no "module/" prefix.
// If clientName is malformed, an error is returned.
func ExtractPackageName(clientName string) (string, error) {
pkg, client, ok := strings.Cut(clientName, ".")
if !ok {
return "", fmt.Errorf("missing . in clientName %s", clientName)
} else if pkg == "" || client == "" {
return "", fmt.Errorf("malformed clientName %s", clientName)
func ExtractModuleName(clientName string) (string, string, error) {
// uses unnamed capturing for "module", "package.Client", and "package"
regex, err := regexp.Compile(`^(?:([a-z0-9]+)/)?(([a-z0-9]+)\.(?:[A-Za-z0-9]+))$`)
if err != nil {
return "", "", err
}
return pkg, nil

matches := regex.FindStringSubmatch(clientName)
if len(matches) < 4 {
return "", "", fmt.Errorf("malformed clientName %s", clientName)
}

// the first match is the entire string, the second is "module", the third is
// "package.Client" and the fourth is "package".
// if there was no "module/" prefix, the second match will be the empty string
if matches[1] != "" {
return matches[1], matches[2], nil
}
return matches[3], matches[2], nil
}
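For readers skimming this vendor bump, the ExtractModuleName change above alters how azcore derives telemetry and tracing names from a client name. The following standalone sketch is not part of the diff; it simply reuses the regular expression shown above to illustrate the parsing behaviour, and the example client names ("azblob/container.Client", "armcompute.Client") are hypothetical inputs chosen for illustration:

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as shared.ExtractModuleName above: an optional "module/" prefix,
// then "package.Client". Capture groups: module, package.Client, package.
var clientNameRE = regexp.MustCompile(`^(?:([a-z0-9]+)/)?(([a-z0-9]+)\.(?:[A-Za-z0-9]+))$`)

func extractModuleName(clientName string) (module, client string, ok bool) {
	m := clientNameRE.FindStringSubmatch(clientName)
	if len(m) < 4 {
		return "", "", false
	}
	if m[1] != "" {
		return m[1], m[2], true // "module/package.Client"
	}
	return m[3], m[2], true // no prefix: the module falls back to the package name
}

func main() {
	fmt.Println(extractModuleName("azblob/container.Client")) // azblob container.Client true
	fmt.Println(extractModuleName("armcompute.Client"))       // armcompute armcompute.Client true
}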
3   vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go (generated, vendored)
@@ -125,7 +125,8 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
}

if options.TryTimeout == 0 {
resp, err = req.Next()
clone := req.Clone(req.Raw().Context())
resp, err = clone.Next()
} else {
// Set the per-try time for this particular retry operation and then Do the operation.
tryCtx, tryCancel := context.WithTimeout(req.Raw().Context(), options.TryTimeout)
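The retry-policy fix above follows a general pattern: clone the request once per attempt so that one try cannot mutate the request object the next try will see. A minimal standalone sketch of that pattern with the standard library (not azcore's actual pipeline; the retry condition and attempt count here are arbitrary illustrations, and bodyless GET requests are assumed since requests with bodies need GetBody handling as well):

package main

import (
	"fmt"
	"net/http"
)

// doWithRetry sends a fresh clone of req on every attempt, retrying on
// transport errors and 5xx responses.
func doWithRetry(client *http.Client, req *http.Request, attempts int) (*http.Response, error) {
	var resp *http.Response
	var err error
	for i := 0; i < attempts; i++ {
		clone := req.Clone(req.Context()) // fresh copy for this attempt
		resp, err = client.Do(clone)
		if err == nil && resp.StatusCode < http.StatusInternalServerError {
			return resp, nil
		}
		if resp != nil {
			resp.Body.Close() // drop the failed attempt's body before retrying
		}
	}
	return resp, err
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://example.com", nil)
	resp, err := doWithRetry(http.DefaultClient, req, 3)
	fmt.Println(resp, err)
}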
4   vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go (generated, vendored, new file)
@@ -0,0 +1,4 @@
// DO NOT EDIT
package corehandlers

const isAwsInternal = ""
10   vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go (generated, vendored)
@@ -35,3 +35,13 @@ var AddHostExecEnvUserAgentHander = request.NamedHandler{
request.AddToUserAgent(r, execEnvUAKey+"/"+v)
},
}

var AddAwsInternal = request.NamedHandler{
Name: "core.AddAwsInternal",
Fn: func(r *request.Request) {
if len(isAwsInternal) == 0 {
return
}
request.AddToUserAgent(r, isAwsInternal)
},
}
1   vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go (generated, vendored)
@@ -74,6 +74,7 @@ func Handlers() request.Handlers {
handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
handlers.Validate.AfterEachFn = request.HandlerListStopOnError
handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
handlers.Build.PushBackNamed(corehandlers.AddAwsInternal)
handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
handlers.Build.AfterEachFn = request.HandlerListStopOnError
handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
366   vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go (generated, vendored)
@ -1058,6 +1058,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "eu-west-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "il-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "me-south-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -2928,6 +2931,15 @@ var awsPartition = partition{
|
|||
}: endpoint{
|
||||
Hostname: "appmesh.eu-west-3.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "il-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "il-central-1",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "appmesh.il-central-1.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "me-south-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -5699,6 +5711,9 @@ var awsPartition = partition{
|
|||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "il-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "me-central-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -6270,6 +6285,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "ap-northeast-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-northeast-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-south-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -6336,6 +6354,9 @@ var awsPartition = partition{
|
|||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "il-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "me-south-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -6388,6 +6409,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "ap-northeast-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-northeast-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-south-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -6454,6 +6478,9 @@ var awsPartition = partition{
|
|||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "il-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "me-south-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -7087,6 +7114,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "ca-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-west-2",
|
||||
}: endpoint{},
|
||||
|
|
@ -7227,6 +7257,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "eu-west-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "il-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "me-south-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -8755,6 +8788,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "eu-west-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "il-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "me-central-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -11358,63 +11394,183 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "af-south-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "af-south-1",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.af-south-1.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "ap-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-east-1",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.ap-east-1.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "ap-northeast-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-northeast-1",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.ap-northeast-1.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "ap-northeast-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-northeast-2",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.ap-northeast-2.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "ap-northeast-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-northeast-3",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.ap-northeast-3.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "ap-south-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-south-1",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.ap-south-1.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "ap-south-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-south-2",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.ap-south-2.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-1",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.ap-southeast-1.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-2",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.ap-southeast-2.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-3",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.ap-southeast-3.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-4",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-4",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.ap-southeast-4.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "ca-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ca-central-1",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.ca-central-1.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "eu-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-central-1",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.eu-central-1.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "eu-central-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-central-2",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.eu-central-2.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "eu-north-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-north-1",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.eu-north-1.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "eu-south-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-south-1",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.eu-south-1.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "eu-south-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-south-2",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.eu-south-2.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "eu-west-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-west-1",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.eu-west-1.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "eu-west-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-west-2",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.eu-west-2.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "eu-west-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-west-3",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.eu-west-3.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "fips",
|
||||
}: endpoint{
|
||||
|
|
@ -11427,18 +11583,48 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "il-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "il-central-1",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.il-central-1.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "me-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "me-central-1",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.me-central-1.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "me-south-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "me-south-1",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.me-south-1.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "sa-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "sa-east-1",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.sa-east-1.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-east-1",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.us-east-1.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-east-1",
|
||||
Variant: fipsVariant,
|
||||
|
|
@ -11457,6 +11643,12 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "us-east-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-east-2",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.us-east-2.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-east-2",
|
||||
Variant: fipsVariant,
|
||||
|
|
@ -11475,6 +11667,12 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "us-west-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-west-1",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.us-west-1.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-west-1",
|
||||
Variant: fipsVariant,
|
||||
|
|
@ -11493,6 +11691,12 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "us-west-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-west-2",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.us-west-2.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-west-2",
|
||||
Variant: fipsVariant,
|
||||
|
|
@ -13790,6 +13994,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "eu-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-central-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-north-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -13805,6 +14012,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "eu-west-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "il-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "me-south-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -14245,7 +14455,7 @@ var awsPartition = partition{
|
|||
Region: "ca-central-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "internetmonitor-fips.ca-central-1.api.aws",
|
||||
Hostname: "internetmonitor-fips.ca-central-1.amazonaws.com",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "eu-central-1",
|
||||
|
|
@ -14316,7 +14526,7 @@ var awsPartition = partition{
|
|||
Region: "us-east-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "internetmonitor-fips.us-east-1.api.aws",
|
||||
Hostname: "internetmonitor-fips.us-east-1.amazonaws.com",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-east-2",
|
||||
|
|
@ -14327,7 +14537,7 @@ var awsPartition = partition{
|
|||
Region: "us-east-2",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "internetmonitor-fips.us-east-2.api.aws",
|
||||
Hostname: "internetmonitor-fips.us-east-2.amazonaws.com",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-west-1",
|
||||
|
|
@ -14338,7 +14548,7 @@ var awsPartition = partition{
|
|||
Region: "us-west-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "internetmonitor-fips.us-west-1.api.aws",
|
||||
Hostname: "internetmonitor-fips.us-west-1.amazonaws.com",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-west-2",
|
||||
|
|
@ -14349,7 +14559,7 @@ var awsPartition = partition{
|
|||
Region: "us-west-2",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "internetmonitor-fips.us-west-2.api.aws",
|
||||
Hostname: "internetmonitor-fips.us-west-2.amazonaws.com",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -15478,6 +15688,9 @@ var awsPartition = partition{
|
|||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "il-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "me-central-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -17401,6 +17614,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "ap-southeast-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ca-central-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -17458,6 +17674,9 @@ var awsPartition = partition{
|
|||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "il-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "me-south-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -17997,6 +18216,9 @@ var awsPartition = partition{
|
|||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "il-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "me-south-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -18063,6 +18285,13 @@ var awsPartition = partition{
|
|||
}: endpoint{},
|
||||
},
|
||||
},
|
||||
"managedblockchain-query": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "us-east-1",
|
||||
}: endpoint{},
|
||||
},
|
||||
},
|
||||
"marketplacecommerceanalytics": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
|
|
@ -18599,6 +18828,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "eu-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "il-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-east-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -20157,6 +20389,14 @@ var awsPartition = partition{
|
|||
Region: "eu-west-3",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "il-central-1",
|
||||
}: endpoint{
|
||||
Hostname: "oidc.il-central-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "il-central-1",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "me-south-1",
|
||||
}: endpoint{
|
||||
|
|
@ -21247,6 +21487,14 @@ var awsPartition = partition{
|
|||
Region: "eu-west-3",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "il-central-1",
|
||||
}: endpoint{
|
||||
Hostname: "portal.sso.il-central-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "il-central-1",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "me-south-1",
|
||||
}: endpoint{
|
||||
|
|
@ -27745,6 +27993,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "eu-west-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "il-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "me-south-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -29299,6 +29550,9 @@ var awsPartition = partition{
|
|||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "il-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "me-central-1",
|
||||
}: endpoint{},
|
||||
|
|
@ -32466,9 +32720,21 @@ var awscnPartition = partition{
|
|||
endpointKey{
|
||||
Region: "cn-north-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "cn-north-1",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.cn-north-1.api.amazonwebservices.com.cn",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "cn-northwest-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "cn-northwest-1",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.cn-northwest-1.api.amazonwebservices.com.cn",
|
||||
},
|
||||
},
|
||||
},
|
||||
"events": service{
|
||||
|
|
@ -32626,6 +32892,16 @@ var awscnPartition = partition{
|
|||
},
|
||||
},
|
||||
},
|
||||
"identitystore": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "cn-north-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "cn-northwest-1",
|
||||
}: endpoint{},
|
||||
},
|
||||
},
|
||||
"internetmonitor": service{
|
||||
Defaults: endpointDefaults{
|
||||
defaultKey{}: endpoint{
|
||||
|
|
@ -35924,6 +36200,12 @@ var awsusgovPartition = partition{
|
|||
endpointKey{
|
||||
Region: "us-gov-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-gov-east-1",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.us-gov-east-1.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-east-1",
|
||||
Variant: fipsVariant,
|
||||
|
|
@ -35942,6 +36224,12 @@ var awsusgovPartition = partition{
|
|||
endpointKey{
|
||||
Region: "us-gov-west-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1",
|
||||
Variant: dualStackVariant,
|
||||
}: endpoint{
|
||||
Hostname: "aos.us-gov-west-1.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1",
|
||||
Variant: fipsVariant,
|
||||
|
|
@ -36178,6 +36466,28 @@ var awsusgovPartition = partition{
|
|||
},
|
||||
},
|
||||
},
|
||||
"geo": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "fips-us-gov-west-1",
|
||||
}: endpoint{
|
||||
Hostname: "geo-fips.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "geo-fips.us-gov-west-1.amazonaws.com",
|
||||
},
|
||||
},
|
||||
},
|
||||
"glacier": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
|
|
@ -40198,14 +40508,45 @@ var awsisoPartition = partition{
|
|||
},
|
||||
"elasticmapreduce": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "fips-us-iso-east-1",
|
||||
}: endpoint{
|
||||
Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-iso-east-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "fips-us-iso-west-1",
|
||||
}: endpoint{
|
||||
Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-iso-west-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-iso-east-1",
|
||||
}: endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-iso-east-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov",
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-iso-west-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-iso-west-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov",
|
||||
},
|
||||
},
|
||||
},
|
||||
"es": service{
|
||||
|
|
@ -40991,9 +41332,24 @@ var awsisobPartition = partition{
|
|||
},
|
||||
"elasticmapreduce": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "fips-us-isob-east-1",
|
||||
}: endpoint{
|
||||
Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-isob-east-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-isob-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-isob-east-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov",
|
||||
},
|
||||
},
|
||||
},
|
||||
"es": service{
|
||||
|
|
|
|||
2   vendor/github.com/aws/aws-sdk-go/aws/version.go (generated, vendored)
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
const SDKVersion = "1.44.332"
const SDKVersion = "1.45.10"
598   vendor/github.com/aws/aws-sdk-go/service/ec2/api.go (generated, vendored)
@ -35494,6 +35494,90 @@ func (c *EC2) DisableFastSnapshotRestoresWithContext(ctx aws.Context, input *Dis
|
|||
return out, req.Send()
|
||||
}
|
||||
|
||||
const opDisableImageBlockPublicAccess = "DisableImageBlockPublicAccess"
|
||||
|
||||
// DisableImageBlockPublicAccessRequest generates a "aws/request.Request" representing the
|
||||
// client's request for the DisableImageBlockPublicAccess operation. The "output" return
|
||||
// value will be populated with the request's response once the request completes
|
||||
// successfully.
|
||||
//
|
||||
// Use "Send" method on the returned Request to send the API call to the service.
|
||||
// the "output" return value is not valid until after Send returns without error.
|
||||
//
|
||||
// See DisableImageBlockPublicAccess for more information on using the DisableImageBlockPublicAccess
|
||||
// API call, and error handling.
|
||||
//
|
||||
// This method is useful when you want to inject custom logic or configuration
|
||||
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
||||
//
|
||||
// // Example sending a request using the DisableImageBlockPublicAccessRequest method.
|
||||
// req, resp := client.DisableImageBlockPublicAccessRequest(params)
|
||||
//
|
||||
// err := req.Send()
|
||||
// if err == nil { // resp is now filled
|
||||
// fmt.Println(resp)
|
||||
// }
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableImageBlockPublicAccess
|
||||
func (c *EC2) DisableImageBlockPublicAccessRequest(input *DisableImageBlockPublicAccessInput) (req *request.Request, output *DisableImageBlockPublicAccessOutput) {
|
||||
op := &request.Operation{
|
||||
Name: opDisableImageBlockPublicAccess,
|
||||
HTTPMethod: "POST",
|
||||
HTTPPath: "/",
|
||||
}
|
||||
|
||||
if input == nil {
|
||||
input = &DisableImageBlockPublicAccessInput{}
|
||||
}
|
||||
|
||||
output = &DisableImageBlockPublicAccessOutput{}
|
||||
req = c.newRequest(op, input, output)
|
||||
return
|
||||
}
|
||||
|
||||
// DisableImageBlockPublicAccess API operation for Amazon Elastic Compute Cloud.
|
||||
//
|
||||
// Disables block public access for AMIs at the account level in the specified
|
||||
// Amazon Web Services Region. This removes the block public access restriction
|
||||
// from your account. With the restriction removed, you can publicly share your
|
||||
// AMIs in the specified Amazon Web Services Region.
|
||||
//
|
||||
// The API can take up to 10 minutes to configure this setting. During this
|
||||
// time, if you run GetImageBlockPublicAccessState (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetImageBlockPublicAccessState.html),
|
||||
// the response will be block-new-sharing. When the API has completed the configuration,
|
||||
// the response will be unblocked.
|
||||
//
|
||||
// For more information, see Block public access to your AMIs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/sharingamis-intro.html#block-public-access-to-amis)
|
||||
// in the Amazon EC2 User Guide.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
// with awserr.Error's Code and Message methods to get detailed information about
|
||||
// the error.
|
||||
//
|
||||
// See the AWS API reference guide for Amazon Elastic Compute Cloud's
|
||||
// API operation DisableImageBlockPublicAccess for usage and error information.
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableImageBlockPublicAccess
|
||||
func (c *EC2) DisableImageBlockPublicAccess(input *DisableImageBlockPublicAccessInput) (*DisableImageBlockPublicAccessOutput, error) {
|
||||
req, out := c.DisableImageBlockPublicAccessRequest(input)
|
||||
return out, req.Send()
|
||||
}
|
||||
|
||||
// DisableImageBlockPublicAccessWithContext is the same as DisableImageBlockPublicAccess with the addition of
|
||||
// the ability to pass a context and additional request options.
|
||||
//
|
||||
// See DisableImageBlockPublicAccess for details on how to use this API operation.
|
||||
//
|
||||
// The context must be non-nil and will be used for request cancellation. If
|
||||
// the context is nil a panic will occur. In the future the SDK may create
|
||||
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
||||
// for more information on using Contexts.
|
||||
func (c *EC2) DisableImageBlockPublicAccessWithContext(ctx aws.Context, input *DisableImageBlockPublicAccessInput, opts ...request.Option) (*DisableImageBlockPublicAccessOutput, error) {
|
||||
req, out := c.DisableImageBlockPublicAccessRequest(input)
|
||||
req.SetContext(ctx)
|
||||
req.ApplyOptions(opts...)
|
||||
return out, req.Send()
|
||||
}
|
||||
|
||||
const opDisableImageDeprecation = "DisableImageDeprecation"
|
||||
|
||||
// DisableImageDeprecationRequest generates a "aws/request.Request" representing the
|
||||
|
|
@ -37504,6 +37588,89 @@ func (c *EC2) EnableFastSnapshotRestoresWithContext(ctx aws.Context, input *Enab
|
|||
return out, req.Send()
|
||||
}
|
||||
|
||||
const opEnableImageBlockPublicAccess = "EnableImageBlockPublicAccess"
|
||||
|
||||
// EnableImageBlockPublicAccessRequest generates a "aws/request.Request" representing the
|
||||
// client's request for the EnableImageBlockPublicAccess operation. The "output" return
|
||||
// value will be populated with the request's response once the request completes
|
||||
// successfully.
|
||||
//
|
||||
// Use "Send" method on the returned Request to send the API call to the service.
|
||||
// the "output" return value is not valid until after Send returns without error.
|
||||
//
|
||||
// See EnableImageBlockPublicAccess for more information on using the EnableImageBlockPublicAccess
|
||||
// API call, and error handling.
|
||||
//
|
||||
// This method is useful when you want to inject custom logic or configuration
|
||||
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
||||
//
|
||||
// // Example sending a request using the EnableImageBlockPublicAccessRequest method.
|
||||
// req, resp := client.EnableImageBlockPublicAccessRequest(params)
|
||||
//
|
||||
// err := req.Send()
|
||||
// if err == nil { // resp is now filled
|
||||
// fmt.Println(resp)
|
||||
// }
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableImageBlockPublicAccess
|
||||
func (c *EC2) EnableImageBlockPublicAccessRequest(input *EnableImageBlockPublicAccessInput) (req *request.Request, output *EnableImageBlockPublicAccessOutput) {
|
||||
op := &request.Operation{
|
||||
Name: opEnableImageBlockPublicAccess,
|
||||
HTTPMethod: "POST",
|
||||
HTTPPath: "/",
|
||||
}
|
||||
|
||||
if input == nil {
|
||||
input = &EnableImageBlockPublicAccessInput{}
|
||||
}
|
||||
|
||||
output = &EnableImageBlockPublicAccessOutput{}
|
||||
req = c.newRequest(op, input, output)
|
||||
return
|
||||
}
|
||||
|
||||
// EnableImageBlockPublicAccess API operation for Amazon Elastic Compute Cloud.
|
||||
//
|
||||
// Enables block public access for AMIs at the account level in the specified
|
||||
// Amazon Web Services Region. This prevents the public sharing of your AMIs.
|
||||
// However, if you already have public AMIs, they will remain publicly available.
|
||||
//
|
||||
// The API can take up to 10 minutes to configure this setting. During this
|
||||
// time, if you run GetImageBlockPublicAccessState (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetImageBlockPublicAccessState.html),
|
||||
// the response will be unblocked. When the API has completed the configuration,
|
||||
// the response will be block-new-sharing.
|
||||
//
|
||||
// For more information, see Block public access to your AMIs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/sharingamis-intro.html#block-public-access-to-amis)
|
||||
// in the Amazon EC2 User Guide.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
// with awserr.Error's Code and Message methods to get detailed information about
|
||||
// the error.
|
||||
//
|
||||
// See the AWS API reference guide for Amazon Elastic Compute Cloud's
|
||||
// API operation EnableImageBlockPublicAccess for usage and error information.
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableImageBlockPublicAccess
|
||||
func (c *EC2) EnableImageBlockPublicAccess(input *EnableImageBlockPublicAccessInput) (*EnableImageBlockPublicAccessOutput, error) {
|
||||
req, out := c.EnableImageBlockPublicAccessRequest(input)
|
||||
return out, req.Send()
|
||||
}
|
||||
|
||||
// EnableImageBlockPublicAccessWithContext is the same as EnableImageBlockPublicAccess with the addition of
// the ability to pass a context and additional request options.
//
// See EnableImageBlockPublicAccess for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *EC2) EnableImageBlockPublicAccessWithContext(ctx aws.Context, input *EnableImageBlockPublicAccessInput, opts ...request.Option) (*EnableImageBlockPublicAccessOutput, error) {
	req, out := c.EnableImageBlockPublicAccessRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opEnableImageDeprecation = "EnableImageDeprecation"

// EnableImageDeprecationRequest generates a "aws/request.Request" representing the

@@ -39681,6 +39848,83 @@ func (c *EC2) GetHostReservationPurchasePreviewWithContext(ctx aws.Context, inpu
	return out, req.Send()
}

const opGetImageBlockPublicAccessState = "GetImageBlockPublicAccessState"

// GetImageBlockPublicAccessStateRequest generates a "aws/request.Request" representing the
// client's request for the GetImageBlockPublicAccessState operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetImageBlockPublicAccessState for more information on using the GetImageBlockPublicAccessState
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//	// Example sending a request using the GetImageBlockPublicAccessStateRequest method.
//	req, resp := client.GetImageBlockPublicAccessStateRequest(params)
//
//	err := req.Send()
//	if err == nil { // resp is now filled
//	    fmt.Println(resp)
//	}
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetImageBlockPublicAccessState
func (c *EC2) GetImageBlockPublicAccessStateRequest(input *GetImageBlockPublicAccessStateInput) (req *request.Request, output *GetImageBlockPublicAccessStateOutput) {
	op := &request.Operation{
		Name:       opGetImageBlockPublicAccessState,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &GetImageBlockPublicAccessStateInput{}
	}

	output = &GetImageBlockPublicAccessStateOutput{}
	req = c.newRequest(op, input, output)
	return
}

// GetImageBlockPublicAccessState API operation for Amazon Elastic Compute Cloud.
//
// Gets the current state of block public access for AMIs at the account level
// in the specified Amazon Web Services Region.
//
// For more information, see Block public access to your AMIs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/sharingamis-intro.html#block-public-access-to-amis)
// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Elastic Compute Cloud's
// API operation GetImageBlockPublicAccessState for usage and error information.
// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetImageBlockPublicAccessState
func (c *EC2) GetImageBlockPublicAccessState(input *GetImageBlockPublicAccessStateInput) (*GetImageBlockPublicAccessStateOutput, error) {
	req, out := c.GetImageBlockPublicAccessStateRequest(input)
	return out, req.Send()
}

// GetImageBlockPublicAccessStateWithContext is the same as GetImageBlockPublicAccessState with the addition of
// the ability to pass a context and additional request options.
//
// See GetImageBlockPublicAccessState for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *EC2) GetImageBlockPublicAccessStateWithContext(ctx aws.Context, input *GetImageBlockPublicAccessStateInput, opts ...request.Option) (*GetImageBlockPublicAccessStateOutput, error) {
	req, out := c.GetImageBlockPublicAccessStateRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opGetInstanceTypesFromInstanceRequirements = "GetInstanceTypesFromInstanceRequirements"

// GetInstanceTypesFromInstanceRequirementsRequest generates a "aws/request.Request" representing the

@@ -110469,6 +110713,71 @@ func (s *DisableFastSnapshotRestoresOutput) SetUnsuccessful(v []*DisableFastSnap
	return s
}

type DisableImageBlockPublicAccessInput struct {
	_ struct{} `type:"structure"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `type:"boolean"`
}

// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DisableImageBlockPublicAccessInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DisableImageBlockPublicAccessInput) GoString() string {
	return s.String()
}

// SetDryRun sets the DryRun field's value.
func (s *DisableImageBlockPublicAccessInput) SetDryRun(v bool) *DisableImageBlockPublicAccessInput {
	s.DryRun = &v
	return s
}

type DisableImageBlockPublicAccessOutput struct {
	_ struct{} `type:"structure"`

	// Returns unblocked if the request succeeds; otherwise, it returns an error.
	ImageBlockPublicAccessState *string `locationName:"imageBlockPublicAccessState" type:"string" enum:"ImageBlockPublicAccessDisabledState"`
}

// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DisableImageBlockPublicAccessOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DisableImageBlockPublicAccessOutput) GoString() string {
	return s.String()
}

// SetImageBlockPublicAccessState sets the ImageBlockPublicAccessState field's value.
func (s *DisableImageBlockPublicAccessOutput) SetImageBlockPublicAccessState(v string) *DisableImageBlockPublicAccessOutput {
	s.ImageBlockPublicAccessState = &v
	return s
}

type DisableImageDeprecationInput struct {
	_ struct{} `type:"structure"`

@@ -114949,6 +115258,98 @@ func (s *EnableFastSnapshotRestoresOutput) SetUnsuccessful(v []*EnableFastSnapsh
	return s
}

type EnableImageBlockPublicAccessInput struct {
	_ struct{} `type:"structure"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `type:"boolean"`

	// Specify block-new-sharing to enable block public access for AMIs at the account
	// level in the specified Region. This will block any attempt to publicly share
	// your AMIs in the specified Region.
	//
	// ImageBlockPublicAccessState is a required field
	ImageBlockPublicAccessState *string `type:"string" required:"true" enum:"ImageBlockPublicAccessEnabledState"`
}

// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s EnableImageBlockPublicAccessInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s EnableImageBlockPublicAccessInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *EnableImageBlockPublicAccessInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "EnableImageBlockPublicAccessInput"}
	if s.ImageBlockPublicAccessState == nil {
		invalidParams.Add(request.NewErrParamRequired("ImageBlockPublicAccessState"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDryRun sets the DryRun field's value.
func (s *EnableImageBlockPublicAccessInput) SetDryRun(v bool) *EnableImageBlockPublicAccessInput {
	s.DryRun = &v
	return s
}

// SetImageBlockPublicAccessState sets the ImageBlockPublicAccessState field's value.
func (s *EnableImageBlockPublicAccessInput) SetImageBlockPublicAccessState(v string) *EnableImageBlockPublicAccessInput {
	s.ImageBlockPublicAccessState = &v
	return s
}

type EnableImageBlockPublicAccessOutput struct {
	_ struct{} `type:"structure"`

	// Returns block-new-sharing if the request succeeds; otherwise, it returns
	// an error.
	ImageBlockPublicAccessState *string `locationName:"imageBlockPublicAccessState" type:"string" enum:"ImageBlockPublicAccessEnabledState"`
}

// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s EnableImageBlockPublicAccessOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s EnableImageBlockPublicAccessOutput) GoString() string {
	return s.String()
}

// SetImageBlockPublicAccessState sets the ImageBlockPublicAccessState field's value.
func (s *EnableImageBlockPublicAccessOutput) SetImageBlockPublicAccessState(v string) *EnableImageBlockPublicAccessOutput {
	s.ImageBlockPublicAccessState = &v
	return s
}

type EnableImageDeprecationInput struct {
	_ struct{} `type:"structure"`

@@ -121252,6 +121653,79 @@ func (s *GetHostReservationPurchasePreviewOutput) SetTotalUpfrontPrice(v string)
	return s
}

type GetImageBlockPublicAccessStateInput struct {
	_ struct{} `type:"structure"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `type:"boolean"`
}

// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetImageBlockPublicAccessStateInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetImageBlockPublicAccessStateInput) GoString() string {
	return s.String()
}

// SetDryRun sets the DryRun field's value.
func (s *GetImageBlockPublicAccessStateInput) SetDryRun(v bool) *GetImageBlockPublicAccessStateInput {
	s.DryRun = &v
	return s
}

type GetImageBlockPublicAccessStateOutput struct {
	_ struct{} `type:"structure"`

	// The current state of block public access for AMIs at the account level in
	// the specified Amazon Web Services Region.
	//
	// Possible values:
	//
	//    * block-new-sharing - Any attempt to publicly share your AMIs in the specified
	//      Region is blocked.
	//
	//    * unblocked - Your AMIs in the specified Region can be publicly shared.
	ImageBlockPublicAccessState *string `locationName:"imageBlockPublicAccessState" type:"string"`
}

// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetImageBlockPublicAccessStateOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetImageBlockPublicAccessStateOutput) GoString() string {
	return s.String()
}

// SetImageBlockPublicAccessState sets the ImageBlockPublicAccessState field's value.
func (s *GetImageBlockPublicAccessStateOutput) SetImageBlockPublicAccessState(v string) *GetImageBlockPublicAccessStateOutput {
	s.ImageBlockPublicAccessState = &v
	return s
}

type GetInstanceTypesFromInstanceRequirementsInput struct {
	_ struct{} `type:"structure"`

@@ -184799,6 +185273,30 @@ func ImageAttributeName_Values() []string {
	}
}

const (
	// ImageBlockPublicAccessDisabledStateUnblocked is a ImageBlockPublicAccessDisabledState enum value
	ImageBlockPublicAccessDisabledStateUnblocked = "unblocked"
)

// ImageBlockPublicAccessDisabledState_Values returns all elements of the ImageBlockPublicAccessDisabledState enum
func ImageBlockPublicAccessDisabledState_Values() []string {
	return []string{
		ImageBlockPublicAccessDisabledStateUnblocked,
	}
}

const (
	// ImageBlockPublicAccessEnabledStateBlockNewSharing is a ImageBlockPublicAccessEnabledState enum value
	ImageBlockPublicAccessEnabledStateBlockNewSharing = "block-new-sharing"
)

// ImageBlockPublicAccessEnabledState_Values returns all elements of the ImageBlockPublicAccessEnabledState enum
func ImageBlockPublicAccessEnabledState_Values() []string {
	return []string{
		ImageBlockPublicAccessEnabledStateBlockNewSharing,
	}
}

const (
	// ImageStatePending is a ImageState enum value
	ImageStatePending = "pending"

@@ -187295,6 +187793,78 @@ const (

	// InstanceTypeHpc7a96xlarge is a InstanceType enum value
	InstanceTypeHpc7a96xlarge = "hpc7a.96xlarge"

	// InstanceTypeC7gdMedium is a InstanceType enum value
	InstanceTypeC7gdMedium = "c7gd.medium"

	// InstanceTypeC7gdLarge is a InstanceType enum value
	InstanceTypeC7gdLarge = "c7gd.large"

	// InstanceTypeC7gdXlarge is a InstanceType enum value
	InstanceTypeC7gdXlarge = "c7gd.xlarge"

	// InstanceTypeC7gd2xlarge is a InstanceType enum value
	InstanceTypeC7gd2xlarge = "c7gd.2xlarge"

	// InstanceTypeC7gd4xlarge is a InstanceType enum value
	InstanceTypeC7gd4xlarge = "c7gd.4xlarge"

	// InstanceTypeC7gd8xlarge is a InstanceType enum value
	InstanceTypeC7gd8xlarge = "c7gd.8xlarge"

	// InstanceTypeC7gd12xlarge is a InstanceType enum value
	InstanceTypeC7gd12xlarge = "c7gd.12xlarge"

	// InstanceTypeC7gd16xlarge is a InstanceType enum value
	InstanceTypeC7gd16xlarge = "c7gd.16xlarge"

	// InstanceTypeM7gdMedium is a InstanceType enum value
	InstanceTypeM7gdMedium = "m7gd.medium"

	// InstanceTypeM7gdLarge is a InstanceType enum value
	InstanceTypeM7gdLarge = "m7gd.large"

	// InstanceTypeM7gdXlarge is a InstanceType enum value
	InstanceTypeM7gdXlarge = "m7gd.xlarge"

	// InstanceTypeM7gd2xlarge is a InstanceType enum value
	InstanceTypeM7gd2xlarge = "m7gd.2xlarge"

	// InstanceTypeM7gd4xlarge is a InstanceType enum value
	InstanceTypeM7gd4xlarge = "m7gd.4xlarge"

	// InstanceTypeM7gd8xlarge is a InstanceType enum value
	InstanceTypeM7gd8xlarge = "m7gd.8xlarge"

	// InstanceTypeM7gd12xlarge is a InstanceType enum value
	InstanceTypeM7gd12xlarge = "m7gd.12xlarge"

	// InstanceTypeM7gd16xlarge is a InstanceType enum value
	InstanceTypeM7gd16xlarge = "m7gd.16xlarge"

	// InstanceTypeR7gdMedium is a InstanceType enum value
	InstanceTypeR7gdMedium = "r7gd.medium"

	// InstanceTypeR7gdLarge is a InstanceType enum value
	InstanceTypeR7gdLarge = "r7gd.large"

	// InstanceTypeR7gdXlarge is a InstanceType enum value
	InstanceTypeR7gdXlarge = "r7gd.xlarge"

	// InstanceTypeR7gd2xlarge is a InstanceType enum value
	InstanceTypeR7gd2xlarge = "r7gd.2xlarge"

	// InstanceTypeR7gd4xlarge is a InstanceType enum value
	InstanceTypeR7gd4xlarge = "r7gd.4xlarge"

	// InstanceTypeR7gd8xlarge is a InstanceType enum value
	InstanceTypeR7gd8xlarge = "r7gd.8xlarge"

	// InstanceTypeR7gd12xlarge is a InstanceType enum value
	InstanceTypeR7gd12xlarge = "r7gd.12xlarge"

	// InstanceTypeR7gd16xlarge is a InstanceType enum value
	InstanceTypeR7gd16xlarge = "r7gd.16xlarge"
)

// InstanceType_Values returns all elements of the InstanceType enum

@@ -187996,6 +188566,30 @@ func InstanceType_Values() []string {
		InstanceTypeHpc7a24xlarge,
		InstanceTypeHpc7a48xlarge,
		InstanceTypeHpc7a96xlarge,
		InstanceTypeC7gdMedium,
		InstanceTypeC7gdLarge,
		InstanceTypeC7gdXlarge,
		InstanceTypeC7gd2xlarge,
		InstanceTypeC7gd4xlarge,
		InstanceTypeC7gd8xlarge,
		InstanceTypeC7gd12xlarge,
		InstanceTypeC7gd16xlarge,
		InstanceTypeM7gdMedium,
		InstanceTypeM7gdLarge,
		InstanceTypeM7gdXlarge,
		InstanceTypeM7gd2xlarge,
		InstanceTypeM7gd4xlarge,
		InstanceTypeM7gd8xlarge,
		InstanceTypeM7gd12xlarge,
		InstanceTypeM7gd16xlarge,
		InstanceTypeR7gdMedium,
		InstanceTypeR7gdLarge,
		InstanceTypeR7gdXlarge,
		InstanceTypeR7gd2xlarge,
		InstanceTypeR7gd4xlarge,
		InstanceTypeR7gd8xlarge,
		InstanceTypeR7gd12xlarge,
		InstanceTypeR7gd16xlarge,
	}
}

@@ -188944,6 +189538,9 @@ const (

	// LocationTypeAvailabilityZoneId is a LocationType enum value
	LocationTypeAvailabilityZoneId = "availability-zone-id"

	// LocationTypeOutpost is a LocationType enum value
	LocationTypeOutpost = "outpost"
)

// LocationType_Values returns all elements of the LocationType enum

@@ -188952,6 +189549,7 @@ func LocationType_Values() []string {
		LocationTypeRegion,
		LocationTypeAvailabilityZone,
		LocationTypeAvailabilityZoneId,
		LocationTypeOutpost,
	}
}
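The new EC2 operations above are additive and follow the usual aws-sdk-go v1 call pattern. As an illustrative aside (not part of the vendored diff), a caller could exercise them roughly like this, assuming default credentials and region; the method and constant names come from the generated code above, the rest is a sketch:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// EC2 client from the default credential/region chain.
	sess := session.Must(session.NewSession())
	client := ec2.New(sess)

	// Block any new public sharing of AMIs at the account level in this Region.
	_, err := client.EnableImageBlockPublicAccess(&ec2.EnableImageBlockPublicAccessInput{
		ImageBlockPublicAccessState: aws.String(ec2.ImageBlockPublicAccessEnabledStateBlockNewSharing),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read the current state back; expected to be "block-new-sharing" or "unblocked".
	out, err := client.GetImageBlockPublicAccessState(&ec2.GetImageBlockPublicAccessStateInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.ImageBlockPublicAccessState))
}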
1 vendor/github.com/containers/common/pkg/retry/retry.go generated vendored

@@ -74,7 +74,6 @@ func IsErrorRetryable(err error) bool {
	}

	switch e := err.(type) {

	case errcode.Error:
		switch e.Code {
		case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeDenied,
3 vendor/github.com/containers/image/v5/copy/compression.go generated vendored

@@ -286,7 +286,8 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
	if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
		c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName)
	}
	if srcInfo.Digest != "" && d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression {
	if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest &&
		d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression {
		c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName)
	}
	return nil
30 vendor/github.com/containers/image/v5/copy/copy.go generated vendored

@@ -133,6 +133,10 @@ type Options struct {
	// Invalid when copying a non-multi-architecture image. That will probably
	// change in the future.
	EnsureCompressionVariantsExist []OptionCompressionVariant
	// ForceCompressionFormat ensures that the compression algorithm set in
	// DestinationCtx.CompressionFormat is used exclusively, and blobs of other
	// compression algorithms are not reused.
	ForceCompressionFormat bool
}

// OptionCompressionVariant allows to supply information about

@@ -163,6 +167,14 @@ type copier struct {
	signersToClose []*signer.Signer // Signers that should be closed when this copier is destroyed.
}

// Internal function to validate `requireCompressionFormatMatch` for copySingleImageOptions
func shouldRequireCompressionFormatMatch(options *Options) (bool, error) {
	if options.ForceCompressionFormat && (options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil) {
		return false, fmt.Errorf("cannot use ForceCompressionFormat with undefined default compression format")
	}
	return options.ForceCompressionFormat, nil
}

// Image copies image from srcRef to destRef, using policyContext to validate
// source image admissibility. It returns the manifest which was written to
// the new copy of the image.

@@ -230,11 +242,13 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,

		unparsedToplevel: image.UnparsedInstance(rawSource, nil),
		// FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
		// For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
		// we might want to add a separate CommonCtx — or would that be too confusing?
		// For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more).
		// Conceptually the cache settings should be in copy.Options instead.
		blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)),
	}
	defer c.close()
	c.blobInfoCache.Open()
	defer c.blobInfoCache.Close()

	// Set the concurrentBlobCopiesSemaphore if we can copy layers in parallel.
	if dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() {

@@ -269,8 +283,12 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
		if len(options.EnsureCompressionVariantsExist) > 0 {
			return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image")
		}
		requireCompressionFormatMatch, err := shouldRequireCompressionFormatMatch(options)
		if err != nil {
			return nil, err
		}
		// The simple case: just copy a single image.
		single, err := c.copySingleImage(ctx, c.unparsedToplevel, nil, copySingleImageOptions{requireCompressionFormatMatch: false})
		single, err := c.copySingleImage(ctx, c.unparsedToplevel, nil, copySingleImageOptions{requireCompressionFormatMatch: requireCompressionFormatMatch})
		if err != nil {
			return nil, err
		}

@@ -279,6 +297,10 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
		if len(options.EnsureCompressionVariantsExist) > 0 {
			return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image")
		}
		requireCompressionFormatMatch, err := shouldRequireCompressionFormatMatch(options)
		if err != nil {
			return nil, err
		}
		// This is a manifest list, and we weren't asked to copy multiple images. Choose a single image that
		// matches the current system to copy, and copy it.
		mfest, manifestType, err := c.unparsedToplevel.Manifest(ctx)

@@ -295,7 +317,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
		}
		logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest)
		unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest)
		single, err := c.copySingleImage(ctx, unparsedInstance, nil, copySingleImageOptions{requireCompressionFormatMatch: false})
		single, err := c.copySingleImage(ctx, unparsedInstance, nil, copySingleImageOptions{requireCompressionFormatMatch: requireCompressionFormatMatch})
		if err != nil {
			return nil, fmt.Errorf("copying system image from manifest list: %w", err)
		}
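The copy.go hunks above thread the new ForceCompressionFormat option through shouldRequireCompressionFormatMatch. As a hedged sketch (not part of the diff), a caller opting in might look roughly like this; copy.Image, signature.PolicyContext and SystemContext.CompressionFormat are existing c/image APIs, and the zstd choice is only an example:

// copyForcingZstd is a minimal sketch, assuming srcRef and destRef were built elsewhere
// (e.g. via the docker transport). ForceCompressionFormat requires DestinationCtx.CompressionFormat
// to be set; otherwise copy.Image returns an error, per the validation above.
func copyForcingZstd(ctx context.Context, srcRef, destRef types.ImageReference) error {
	policyCtx, err := signature.NewPolicyContext(&signature.Policy{
		Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()},
	})
	if err != nil {
		return err
	}
	defer policyCtx.Destroy()

	zstd := compression.Zstd // assumed exported algorithm from pkg/compression
	_, err = copy.Image(ctx, policyCtx, destRef, srcRef, &copy.Options{
		DestinationCtx:         &types.SystemContext{CompressionFormat: &zstd},
		ForceCompressionFormat: true,
	})
	return err
}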
15 vendor/github.com/containers/image/v5/copy/multiple.go generated vendored

@@ -32,6 +32,10 @@ type instanceCopy struct {
	op           instanceCopyKind
	sourceDigest digest.Digest

	// Fields which can be used by callers when operation
	// is `instanceCopyCopy`
	copyForceCompressionFormat bool

	// Fields which can be used by callers when operation
	// is `instanceCopyClone`
	cloneCompressionVariant OptionCompressionVariant

@@ -122,9 +126,14 @@ func prepareInstanceCopies(list internalManifest.List, instanceDigests []digest.
			if err != nil {
				return res, fmt.Errorf("getting details for instance %s: %w", instanceDigest, err)
			}
			forceCompressionFormat, err := shouldRequireCompressionFormatMatch(options)
			if err != nil {
				return nil, err
			}
			res = append(res, instanceCopy{
				op:           instanceCopyCopy,
				sourceDigest: instanceDigest,
				op:                         instanceCopyCopy,
				sourceDigest:               instanceDigest,
				copyForceCompressionFormat: forceCompressionFormat,
			})
			platform := platformV1ToPlatformComparable(instanceDetails.ReadOnly.Platform)
			compressionList := compressionsByPlatform[platform]

@@ -230,7 +239,7 @@ func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte,
		logrus.Debugf("Copying instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList))
		c.Printf("Copying image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList))
		unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest)
		updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{requireCompressionFormatMatch: false})
		updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{requireCompressionFormatMatch: instance.copyForceCompressionFormat})
		if err != nil {
			return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err)
		}
5 vendor/github.com/containers/image/v5/copy/progress_bars.go generated vendored

@@ -84,6 +84,8 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.
			),
			mpb.AppendDecorators(
				decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
				decor.Name(" | "),
				decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""),
			),
		)
	}

@@ -94,6 +96,9 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.
			mpb.PrependDecorators(
				decor.OnComplete(decor.Name(prefix), onComplete),
			),
			mpb.AppendDecorators(
				decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""),
			),
		)
	}
	return &progressBar{
35 vendor/github.com/containers/image/v5/copy/single.go generated vendored

@@ -161,7 +161,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
		return copySingleImageResult{}, err
	}

	destRequiresOciEncryption := (isEncrypted(src) && ic.c.options.OciDecryptConfig != nil) || c.options.OciEncryptLayers != nil
	destRequiresOciEncryption := (isEncrypted(src) && ic.c.options.OciDecryptConfig == nil) || c.options.OciEncryptLayers != nil

	manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{
		srcMIMEType: ic.src.ManifestMIMEType,

@@ -305,18 +305,18 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
		options := newOrderedSet()
		match := false
		for _, wantedPlatform := range wantedPlatforms {
			// Waiting for https://github.com/opencontainers/image-spec/pull/777 :
			// This currently can’t use image.MatchesPlatform because we don’t know what to use
			// for image.Variant.
			if wantedPlatform.OS == c.OS && wantedPlatform.Architecture == c.Architecture {
			// For a transitional period, this might trigger warnings because the Variant
			// field was added to OCI config only recently. If this turns out to be too noisy,
			// revert this check to only look for (OS, Architecture).
			if platform.MatchesPlatform(c.Platform, wantedPlatform) {
				match = true
				break
			}
			options.append(fmt.Sprintf("%s+%s", wantedPlatform.OS, wantedPlatform.Architecture))
			options.append(fmt.Sprintf("%s+%s+%q", wantedPlatform.OS, wantedPlatform.Architecture, wantedPlatform.Variant))
		}
		if !match {
			logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q, expecting one of %q",
				c.OS, c.Architecture, strings.Join(options.list, ", "))
			logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q+%q, expecting one of %q",
				c.OS, c.Architecture, c.Variant, strings.Join(options.list, ", "))
		}
	}
	return nil

@@ -360,6 +360,7 @@ func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context,
		logrus.Debugf("Unable to create destination image %s source: %v", ic.c.dest.Reference(), err)
		return nil, nil
	}
	defer destImageSource.Close()

	destManifest, destManifestType, err := destImageSource.GetManifest(ctx, targetInstance)
	if err != nil {

@@ -459,8 +460,14 @@ func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algor
		encryptAll = len(*ic.c.options.OciEncryptLayers) == 0
		totalLayers := len(srcInfos)
		for _, l := range *ic.c.options.OciEncryptLayers {
			// if layer is negative, it is reverse indexed.
			layersToEncrypt.Add((totalLayers + l) % totalLayers)
			switch {
			case l >= 0 && l < totalLayers:
				layersToEncrypt.Add(l)
			case l < 0 && l+totalLayers >= 0: // Implies (l + totalLayers) < totalLayers
				layersToEncrypt.Add(l + totalLayers) // If l is negative, it is reverse indexed.
			default:
				return nil, fmt.Errorf("when choosing layers to encrypt, layer index %d out of range (%d layers exist)", l, totalLayers)
			}
		}

		if encryptAll {

@@ -655,8 +662,12 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to

	ic.c.printCopyInfo("blob", srcInfo)

	cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
	diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
	diffIDIsNeeded := false
	var cachedDiffID digest.Digest = ""
	if ic.diffIDsAreNeeded {
		cachedDiffID = ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
		diffIDIsNeeded = cachedDiffID == ""
	}
	// When encrypting to decrypting, only use the simple code path. We might be able to optimize more
	// (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again),
	// but it’s not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let’s not.
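The copyLayers hunk above replaces the old modulo arithmetic for OciEncryptLayers with an explicit range check that rejects out-of-range indices. A minimal sketch of the same normalization, to make the negative-index semantics concrete (illustrative only, not part of the diff):

package main

import "fmt"

// normalizeLayerIndex mirrors the switch in the diff: non-negative indices must be
// in range, negative indices count back from the end (-1 is the last layer), and
// anything else is an error instead of silently wrapping around.
func normalizeLayerIndex(l, totalLayers int) (int, error) {
	switch {
	case l >= 0 && l < totalLayers:
		return l, nil
	case l < 0 && l+totalLayers >= 0:
		return l + totalLayers, nil // e.g. l = -1 with 3 layers selects index 2
	default:
		return 0, fmt.Errorf("layer index %d out of range (%d layers exist)", l, totalLayers)
	}
}

func main() {
	fmt.Println(normalizeLayerIndex(-1, 3)) // 2 <nil>
	fmt.Println(normalizeLayerIndex(5, 3))  // 0 error
}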
86 vendor/github.com/containers/image/v5/docker/docker_client.go generated vendored

@@ -1,7 +1,6 @@
package docker

import (
	"bytes"
	"context"
	"crypto/tls"
	"encoding/json"

@@ -19,6 +18,7 @@ import (

	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/internal/iolimits"
	"github.com/containers/image/v5/internal/set"
	"github.com/containers/image/v5/internal/useragent"
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/pkg/docker/config"

@@ -121,6 +121,9 @@ type dockerClient struct {
	// Private state for detectProperties:
	detectPropertiesOnce  sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once.
	detectPropertiesError error     // detectPropertiesError caches the initial error.
	// Private state for logResponseWarnings
	reportedWarningsLock sync.Mutex
	reportedWarnings     *set.Set[string]
}

type authScope struct {

@@ -281,10 +284,11 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
	}

	return &dockerClient{
		sys:             sys,
		registry:        registry,
		userAgent:       userAgent,
		tlsClientConfig: tlsClientConfig,
		sys:              sys,
		registry:         registry,
		userAgent:        userAgent,
		tlsClientConfig:  tlsClientConfig,
		reportedWarnings: set.New[string](),
	}, nil
}

@@ -624,9 +628,76 @@ func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method
	if err != nil {
		return nil, err
	}
	if warnings := res.Header.Values("Warning"); len(warnings) != 0 {
		c.logResponseWarnings(res, warnings)
	}
	return res, nil
}

// logResponseWarnings logs warningHeaders from res, if any.
func (c *dockerClient) logResponseWarnings(res *http.Response, warningHeaders []string) {
	c.reportedWarningsLock.Lock()
	defer c.reportedWarningsLock.Unlock()

	for _, header := range warningHeaders {
		warningString := parseRegistryWarningHeader(header)
		if warningString == "" {
			logrus.Debugf("Ignored Warning: header from registry: %q", header)
		} else {
			if !c.reportedWarnings.Contains(warningString) {
				c.reportedWarnings.Add(warningString)
				// Note that reportedWarnings is based only on warningString, so that we don’t
				// repeat the same warning for every request - but the warning includes the URL;
				// so it may not be specific to that URL.
				logrus.Warnf("Warning from registry (first encountered at %q): %q", res.Request.URL.Redacted(), warningString)
			} else {
				logrus.Debugf("Repeated warning from registry at %q: %q", res.Request.URL.Redacted(), warningString)
			}
		}
	}
}

// parseRegistryWarningHeader parses a Warning: header per RFC 7234, limited to the warning
// values allowed by opencontainers/distribution-spec.
// It returns the warning string if the header has the expected format, or "" otherwise.
func parseRegistryWarningHeader(header string) string {
	const expectedPrefix = `299 - "`
	const expectedSuffix = `"`

	// warning-value = warn-code SP warn-agent SP warn-text [ SP warn-date ]
	// distribution-spec requires warn-code=299, warn-agent="-", warn-date missing
	if !strings.HasPrefix(header, expectedPrefix) || !strings.HasSuffix(header, expectedSuffix) {
		return ""
	}
	header = header[len(expectedPrefix) : len(header)-len(expectedSuffix)]

	// ”Recipients that process the value of a quoted-string MUST handle a quoted-pair
	// as if it were replaced by the octet following the backslash.”, so let’s do that…
	res := strings.Builder{}
	afterBackslash := false
	for _, c := range []byte(header) { // []byte because escaping is defined in terms of bytes, not Unicode code points
		switch {
		case c == 0x7F || (c < ' ' && c != '\t'):
			return "" // Control characters are forbidden
		case afterBackslash:
			res.WriteByte(c)
			afterBackslash = false
		case c == '"':
			// This terminates the warn-text and warn-date, forbidden by distribution-spec, follows,
			// or completely invalid input.
			return ""
		case c == '\\':
			afterBackslash = true
		default:
			res.WriteByte(c)
		}
	}
	if afterBackslash {
		return ""
	}
	return res.String()
}

// we're using the challenges from the /v2/ ping response and not the one from the destination
// URL in this request because:
//

@@ -1008,9 +1079,10 @@ func isManifestUnknownError(err error) bool {
	if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && e.Message == "Not Found" {
		return true
	}
	// ALSO registry.redhat.io as of October 2022
	// opencontainers/distribution-spec does not require the errcode.Error payloads to be used,
	// but specifies that the HTTP status must be 404.
	var unexpected *unexpectedHTTPResponseError
	if errors.As(err, &unexpected) && unexpected.StatusCode == http.StatusNotFound && bytes.Contains(unexpected.Response, []byte("Not found")) {
	if errors.As(err, &unexpected) && unexpected.StatusCode == http.StatusNotFound {
		return true
	}
	return false
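parseRegistryWarningHeader above accepts only warn-code 299 with agent "-" and a quoted warn-text, unescaping quoted-pairs and rejecting everything else. A test-style sketch of the expected behavior (illustrative only; the function is unexported, so this would have to live in the same docker package):

func TestParseRegistryWarningHeaderSketch(t *testing.T) {
	// Accepted: warn-code 299, warn-agent "-", quoted warn-text, no warn-date.
	if got := parseRegistryWarningHeader(`299 - "repository is scheduled for deletion"`); got != "repository is scheduled for deletion" {
		t.Errorf("unexpected %q", got)
	}
	// Quoted-pairs are unescaped: \" becomes ".
	if got := parseRegistryWarningHeader(`299 - "say \"hi\""`); got != `say "hi"` {
		t.Errorf("unexpected %q", got)
	}
	// Anything else (other warn-codes, control bytes, embedded warn-date) is rejected as "".
	if got := parseRegistryWarningHeader(`199 - "miscellaneous"`); got != "" {
		t.Errorf("unexpected %q", got)
	}
}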
5 vendor/github.com/containers/image/v5/docker/docker_image_dest.go generated vendored

@@ -367,6 +367,11 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,

			// Sanity checks:
			if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
				// OCI distribution spec 1.1 allows mounting blobs without specifying the source repo
				// (the "from" parameter); in that case we might try to use these candidates as well.
				//
				// OTOH that would mean we can’t do the “blobExists” check, and if there is no match
				// we could get an upload request that we would have to cancel.
				logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
				continue
			}
7 vendor/github.com/containers/image/v5/docker/errors.go generated vendored

@@ -47,7 +47,12 @@ func httpResponseToError(res *http.Response, context string) error {
}

// registryHTTPResponseToError creates a Go error from an HTTP error response of a docker/distribution
// registry
// registry.
//
// WARNING: The OCI distribution spec says
// “A `4XX` response code from the registry MAY return a body in any format.”; but if it is
// JSON, it MUST use the errcode.Error structure.
// So, callers should primarily decide based on HTTP StatusCode, not based on error type here.
func registryHTTPResponseToError(res *http.Response) error {
	err := handleErrorResponse(res)
	// len(errs) == 0 should never be returned by handleErrorResponse; if it does, we don't modify it and let the caller report it as is.
2 vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go generated vendored

@@ -57,7 +57,7 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
// The caller should call .Close() on the returned archive when done.
func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) {
	// Save inputStream to a temporary file
	tarCopyFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
	tarCopyFile, err := tmpdir.CreateBigFileTemp(sys, "docker-tar")
	if err != nil {
		return nil, fmt.Errorf("creating temporary file: %w", err)
	}
2 vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go generated vendored

@@ -40,7 +40,7 @@ func DockerReferenceNamespaces(ref reference.Named) []string {
	// then in its parent "docker.io/library"; in none of "busybox",
	// un-namespaced "library" nor in "" supposedly implicitly representing "library/".
	//
	// ref.FullName() == ref.Hostname() + "/" + ref.RemoteName(), so the last
	// ref.Name() == ref.Domain() + "/" + ref.Path(), so the last
	// iteration matches the host name (for any namespace).
	res := []string{}
	name := ref.Name()
6 vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go generated vendored

@@ -23,6 +23,12 @@ type v1OnlyBlobInfoCache struct {
	types.BlobInfoCache
}

func (bic *v1OnlyBlobInfoCache) Open() {
}

func (bic *v1OnlyBlobInfoCache) Close() {
}

func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
}

7 vendor/github.com/containers/image/v5/internal/blobinfocache/types.go generated vendored

@@ -18,6 +18,13 @@ const (
// of compression was applied to the blobs it keeps information about.
type BlobInfoCache2 interface {
	types.BlobInfoCache

	// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close().
	// Note that public callers may call the types.BlobInfoCache operations without Open()/Close().
	Open()
	// Close destroys state created by Open().
	Close()

	// RecordDigestCompressorName records a compressor for the blob with the specified digest,
	// or Uncompressed or UnknownCompression.
	// WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a
66 vendor/github.com/containers/image/v5/internal/image/oci.go generated vendored

@@ -12,8 +12,10 @@ import (
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/pkg/blobinfocache/none"
	"github.com/containers/image/v5/types"
	ociencspec "github.com/containers/ocicrypt/spec"
	"github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
	"golang.org/x/exp/slices"
)

type manifestOCI1 struct {

@@ -86,7 +88,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
// old image manifests work (docker v2s1 especially).
func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
	if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
		return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType)
		return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest)
	}

	cb, err := m.ConfigBlob(ctx)

@@ -194,26 +196,72 @@ func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, opti
	return m.convertToManifestSchema2(ctx, options)
}

// prepareLayerDecryptEditsIfNecessary checks if options requires layer decryptions.
// If not, it returns (nil, nil).
// If decryption is required, it returns a set of edits to provide to OCI1.UpdateLayerInfos,
// and edits *options to not try decryption again.
func (m *manifestOCI1) prepareLayerDecryptEditsIfNecessary(options *types.ManifestUpdateOptions) ([]types.BlobInfo, error) {
	if options == nil || !slices.ContainsFunc(options.LayerInfos, func(info types.BlobInfo) bool {
		return info.CryptoOperation == types.Decrypt
	}) {
		return nil, nil
	}

	originalInfos := m.LayerInfos()
	if len(originalInfos) != len(options.LayerInfos) {
		return nil, fmt.Errorf("preparing to decrypt before conversion: %d layers vs. %d layer edits", len(originalInfos), len(options.LayerInfos))
	}

	res := slices.Clone(originalInfos) // Start with a full copy so that we don't forget to copy anything: use the current data in full unless we intentionaly deviate.
	updatedEdits := slices.Clone(options.LayerInfos)
	for i, info := range options.LayerInfos {
		if info.CryptoOperation == types.Decrypt {
			res[i].CryptoOperation = types.Decrypt
			updatedEdits[i].CryptoOperation = types.PreserveOriginalCrypto // Don't try to decrypt in a schema[12] manifest later, that would fail.
		}
		// Don't do any compression-related MIME type conversions. m.LayerInfos() should not set these edit instructions, but be explicit.
		res[i].CompressionOperation = types.PreserveOriginal
		res[i].CompressionAlgorithm = nil
	}
	options.LayerInfos = updatedEdits
	return res, nil
}

// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
// value.
// This does not change the state of the original manifestOCI1 object.
func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.ManifestUpdateOptions) (*manifestSchema2, error) {
func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, options *types.ManifestUpdateOptions) (*manifestSchema2, error) {
	if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
		return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType)
		return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest)
	}

	// Mostly we first make a format conversion, and _afterwards_ do layer edits. But first we need to do the layer edits
	// which remove OCI-specific features, because trying to convert those layers would fail.
	// So, do the layer updates for decryption.
	ociManifest := m.m
	layerDecryptEdits, err := m.prepareLayerDecryptEditsIfNecessary(options)
	if err != nil {
		return nil, err
	}
	if layerDecryptEdits != nil {
		ociManifest = manifest.OCI1Clone(ociManifest)
		if err := ociManifest.UpdateLayerInfos(layerDecryptEdits); err != nil {
			return nil, err
		}
	}

	// Create a copy of the descriptor.
	config := schema2DescriptorFromOCI1Descriptor(m.m.Config)
	config := schema2DescriptorFromOCI1Descriptor(ociManifest.Config)

	// Above, we have already checked that this manifest refers to an image, not an OCI artifact,
	// so the only difference between OCI and DockerSchema2 is the mediatypes. The
	// media type of the manifest is handled by manifestSchema2FromComponents.
	config.MediaType = manifest.DockerV2Schema2ConfigMediaType

	layers := make([]manifest.Schema2Descriptor, len(m.m.Layers))
	layers := make([]manifest.Schema2Descriptor, len(ociManifest.Layers))
	for idx := range layers {
		layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx])
		layers[idx] = schema2DescriptorFromOCI1Descriptor(ociManifest.Layers[idx])
		switch layers[idx].MediaType {
		case imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
			layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType

@@ -227,6 +275,10 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.Mani
			layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType
		case imgspecv1.MediaTypeImageLayerZstd:
			return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
		// FIXME: s/Zsdt/Zstd/ after ocicrypt with https://github.com/containers/ocicrypt/pull/91 is released
		case ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc, ociencspec.MediaTypeLayerZstdEnc,
			ociencspec.MediaTypeLayerNonDistributableEnc, ociencspec.MediaTypeLayerNonDistributableGzipEnc, ociencspec.MediaTypeLayerNonDistributableZsdtEnc:
			return nil, fmt.Errorf("during manifest conversion: encrypted layers (%q) are not supported in docker images", layers[idx].MediaType)
		default:
			return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType)
		}

@@ -244,7 +296,7 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.Mani
// This does not change the state of the original manifestOCI1 object.
func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
	if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
		return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType)
		return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest)
	}

	// We can't directly convert images to V1, but we can transitively convert via a V2 image
66 vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go generated vendored

@@ -64,13 +64,8 @@ func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdat
				MediaType: manifest.MediaType,
			}
			ret.ReadOnly.CompressionAlgorithmNames = []string{compression.GzipAlgorithmName}
			ret.ReadOnly.Platform = &imgspecv1.Platform{
				OS:           manifest.Platform.OS,
				Architecture: manifest.Platform.Architecture,
				OSVersion:    manifest.Platform.OSVersion,
				OSFeatures:   manifest.Platform.OSFeatures,
				Variant:      manifest.Platform.Variant,
			}
			platform := ociPlatformFromSchema2PlatformSpec(manifest.Platform)
			ret.ReadOnly.Platform = &platform
			return ret, nil
		}
	}

@@ -119,17 +114,20 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
			}
			index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
		case ListOpAdd:
			addInstance := Schema2ManifestDescriptor{
				Schema2Descriptor{Digest: editInstance.AddDigest, Size: editInstance.AddSize, MediaType: editInstance.AddMediaType},
				Schema2PlatformSpec{
					OS:           editInstance.AddPlatform.OS,
					Architecture: editInstance.AddPlatform.Architecture,
					OSVersion:    editInstance.AddPlatform.OSVersion,
					OSFeatures:   editInstance.AddPlatform.OSFeatures,
					Variant:      editInstance.AddPlatform.Variant,
				},
			if editInstance.AddPlatform == nil {
				// Should we create a struct with empty fields instead?
				// Right now ListOpAdd is only called when an instance with the same platform value
				// already exists in the manifest, so this should not be reached in practice.
				return fmt.Errorf("adding a schema2 list instance with no platform specified is not supported")
			}
			addedEntries = append(addedEntries, addInstance)
			addedEntries = append(addedEntries, Schema2ManifestDescriptor{
				Schema2Descriptor{
					Digest:    editInstance.AddDigest,
					Size:      editInstance.AddSize,
					MediaType: editInstance.AddMediaType,
				},
				schema2PlatformSpecFromOCIPlatform(*editInstance.AddPlatform),
			})
		default:
			return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation)
		}

@@ -158,13 +156,7 @@ func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.
	}
	for _, wantedPlatform := range wantedPlatforms {
		for _, d := range list.Manifests {
			imagePlatform := imgspecv1.Platform{
				Architecture: d.Platform.Architecture,
				OS:           d.Platform.OS,
				OSVersion:    d.Platform.OSVersion,
				OSFeatures:   slices.Clone(d.Platform.OSFeatures),
				Variant:      d.Platform.Variant,
			}
			imagePlatform := ociPlatformFromSchema2PlatformSpec(d.Platform)
			if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
				return d.Digest, nil
			}

@@ -224,20 +216,14 @@ func Schema2ListPublicClone(list *Schema2ListPublic) *Schema2ListPublic {
func (list *Schema2ListPublic) ToOCI1Index() (*OCI1IndexPublic, error) {
	components := make([]imgspecv1.Descriptor, 0, len(list.Manifests))
	for _, manifest := range list.Manifests {
		converted := imgspecv1.Descriptor{
		platform := ociPlatformFromSchema2PlatformSpec(manifest.Platform)
		components = append(components, imgspecv1.Descriptor{
			MediaType: manifest.MediaType,
			Size:      manifest.Size,
			Digest:    manifest.Digest,
			URLs:      slices.Clone(manifest.URLs),
			Platform: &imgspecv1.Platform{
				OS:           manifest.Platform.OS,
				Architecture: manifest.Platform.Architecture,
				OSFeatures:   slices.Clone(manifest.Platform.OSFeatures),
				OSVersion:    manifest.Platform.OSVersion,
				Variant:      manifest.Platform.Variant,
			},
		}
		components = append(components, converted)
			Platform: &platform,
		})
	}
	oci := OCI1IndexPublicFromComponents(components, nil)
	return oci, nil

@@ -312,3 +298,15 @@ func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) {
	}
	return schema2ListFromPublic(public), nil
}

// ociPlatformFromSchema2PlatformSpec converts a schema2 platform p to the OCI struccture.
func ociPlatformFromSchema2PlatformSpec(p Schema2PlatformSpec) imgspecv1.Platform {
	return imgspecv1.Platform{
		Architecture: p.Architecture,
		OS:           p.OS,
		OSVersion:    p.OSVersion,
		OSFeatures:   slices.Clone(p.OSFeatures),
		Variant:      p.Variant,
		// Features is not supported in OCI, and discarded.
	}
}
22
vendor/github.com/containers/image/v5/internal/manifest/errors.go
generated
vendored
22
vendor/github.com/containers/image/v5/internal/manifest/errors.go
generated
vendored
|
|
@ -1,6 +1,10 @@
|
|||
package manifest
|
||||
|
||||
import "fmt"
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// FIXME: This is a duplicate of c/image/manifestDockerV2Schema2ConfigMediaType.
|
||||
// Deduplicate that, depending on outcome of https://github.com/containers/image/pull/1791 .
|
||||
|
|
@ -26,8 +30,20 @@ type NonImageArtifactError struct {
|
|||
mimeType string
|
||||
}
|
||||
|
||||
// NewNonImageArtifactError returns a NonImageArtifactError about an artifact with mimeType.
|
||||
func NewNonImageArtifactError(mimeType string) error {
|
||||
// NewNonImageArtifactError returns a NonImageArtifactError about an artifact manifest.
|
||||
//
|
||||
// This is typically called if manifest.Config.MediaType != imgspecv1.MediaTypeImageConfig .
|
||||
func NewNonImageArtifactError(manifest *imgspecv1.Manifest) error {
|
||||
// Callers decide based on manifest.Config.MediaType that this is not an image;
|
||||
// in that case manifest.ArtifactType can be optionally defined, and if it is, it is typically
|
||||
// more relevant because config may be ~absent with imgspecv1.MediaTypeEmptyJSON.
|
||||
//
|
||||
// If ArtifactType and Config.MediaType are both defined and non-trivial, presumably
|
||||
// ArtifactType is the “top-level” one, although that’s not defined by the spec.
|
||||
mimeType := manifest.ArtifactType
|
||||
if mimeType == "" {
|
||||
mimeType = manifest.Config.MediaType
|
||||
}
|
||||
return NonImageArtifactError{mimeType: mimeType}
|
||||
}
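The constructor now takes the whole manifest instead of a bare MIME type, so it can prefer ArtifactType over Config.MediaType. Below is a minimal sketch of the call pattern, mirroring the oci.go call sites later in this commit; the manifest package is internal to c/image, so this is illustrative only, and rejectIfArtifact is a hypothetical caller.

```go
package example

import (
	"github.com/containers/image/v5/internal/manifest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// rejectIfArtifact refuses to treat an OCI artifact manifest as an image,
// passing the whole manifest (new signature) rather than m.Config.MediaType (old signature).
func rejectIfArtifact(m *imgspecv1.Manifest) error {
	if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
		return manifest.NewNonImageArtifactError(m)
	}
	return nil
}
```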
|
||||
|
||||
|
|
|
74 vendor/github.com/containers/image/v5/internal/manifest/oci_index.go (generated, vendored)
@@ -170,8 +170,19 @@ func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error {
|
|||
index.Manifests = append(index.Manifests, addedEntries...)
|
||||
}
|
||||
if len(addedEntries) != 0 || updatedAnnotations {
|
||||
slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) bool {
|
||||
return !instanceIsZstd(a) && instanceIsZstd(b)
|
||||
slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) int {
|
||||
// FIXME? With Go 1.21 and cmp.Compare available, turn instanceIsZstd into an integer score that can be compared, and generalizes
|
||||
// into more algorithms?
|
||||
aZstd := instanceIsZstd(a)
|
||||
bZstd := instanceIsZstd(b)
|
||||
switch {
|
||||
case aZstd == bZstd:
|
||||
return 0
|
||||
case !aZstd: // Implies bZstd
|
||||
return -1
|
||||
default: // aZstd && !bZstd
|
||||
return 1
|
||||
}
|
||||
})
|
||||
}
|
||||
return nil
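The FIXME above suggests turning instanceIsZstd into an integer score once cmp.Compare is available. A sketch of that refactor, assuming Go 1.21's cmp and slices packages; the isZstd parameter stands in for this file's instanceIsZstd helper and sortNonZstdFirst is hypothetical.

```go
package example

import (
	"cmp"
	"slices"

	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// sortNonZstdFirst stably orders non-zstd instances before zstd ones,
// generalizing the hand-written switch above into an integer score.
func sortNonZstdFirst(manifests []imgspecv1.Descriptor, isZstd func(imgspecv1.Descriptor) bool) {
	score := func(d imgspecv1.Descriptor) int {
		if isZstd(d) {
			return 1 // zstd sorts after everything else
		}
		return 0
	}
	slices.SortStableFunc(manifests, func(a, b imgspecv1.Descriptor) int {
		return cmp.Compare(score(a), score(b))
	})
}
```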
|
||||
|
|
@@ -228,13 +239,7 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi
|
|||
for manifestIndex, d := range index.Manifests {
|
||||
candidate := instanceCandidate{platformIndex: math.MaxInt, manifestPosition: manifestIndex, isZstd: instanceIsZstd(d), digest: d.Digest}
|
||||
if d.Platform != nil {
|
||||
imagePlatform := imgspecv1.Platform{
|
||||
Architecture: d.Platform.Architecture,
|
||||
OS: d.Platform.OS,
|
||||
OSVersion: d.Platform.OSVersion,
|
||||
OSFeatures: slices.Clone(d.Platform.OSFeatures),
|
||||
Variant: d.Platform.Variant,
|
||||
}
|
||||
imagePlatform := ociPlatformClone(*d.Platform)
|
||||
platformIndex := slices.IndexFunc(wantedPlatforms, func(wantedPlatform imgspecv1.Platform) bool {
|
||||
return platform.MatchesPlatform(imagePlatform, wantedPlatform)
|
||||
})
|
||||
|
|
@@ -288,13 +293,8 @@ func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotation
|
|||
for i, component := range components {
|
||||
var platform *imgspecv1.Platform
|
||||
if component.Platform != nil {
|
||||
platform = &imgspecv1.Platform{
|
||||
Architecture: component.Platform.Architecture,
|
||||
OS: component.Platform.OS,
|
||||
OSVersion: component.Platform.OSVersion,
|
||||
OSFeatures: slices.Clone(component.Platform.OSFeatures),
|
||||
Variant: component.Platform.Variant,
|
||||
}
|
||||
platformCopy := ociPlatformClone(*component.Platform)
|
||||
platform = &platformCopy
|
||||
}
|
||||
m := imgspecv1.Descriptor{
|
||||
MediaType: component.MediaType,
|
||||
|
|
@@ -331,22 +331,15 @@ func (index *OCI1IndexPublic) ToSchema2List() (*Schema2ListPublic, error) {
|
|||
Architecture: runtime.GOARCH,
|
||||
}
|
||||
}
|
||||
converted := Schema2ManifestDescriptor{
|
||||
components = append(components, Schema2ManifestDescriptor{
|
||||
Schema2Descriptor{
|
||||
MediaType: manifest.MediaType,
|
||||
Size: manifest.Size,
|
||||
Digest: manifest.Digest,
|
||||
URLs: slices.Clone(manifest.URLs),
|
||||
},
|
||||
Schema2PlatformSpec{
|
||||
OS: platform.OS,
|
||||
Architecture: platform.Architecture,
|
||||
OSFeatures: slices.Clone(platform.OSFeatures),
|
||||
OSVersion: platform.OSVersion,
|
||||
Variant: platform.Variant,
|
||||
},
|
||||
}
|
||||
components = append(components, converted)
|
||||
schema2PlatformSpecFromOCIPlatform(*platform),
|
||||
})
|
||||
}
|
||||
s2 := Schema2ListPublicFromComponents(components)
|
||||
return s2, nil
|
||||
|
|
@@ -420,3 +413,32 @@ func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) {
|
|||
}
|
||||
return oci1IndexFromPublic(public), nil
|
||||
}
|
||||
|
||||
// ociPlatformClone returns an independent copy of p.
|
||||
func ociPlatformClone(p imgspecv1.Platform) imgspecv1.Platform {
|
||||
// The only practical way in Go to give read-only access to an array is to copy it.
|
||||
// The only practical way in Go to copy a deep structure is to either do it manually field by field,
|
||||
// or to use reflection (incl. a round-trip through JSON, which uses reflection).
|
||||
//
|
||||
// The combination of the two is just sad, and leads to code like this, which will
|
||||
// need to be updated with every new Platform field.
|
||||
return imgspecv1.Platform{
|
||||
Architecture: p.Architecture,
|
||||
OS: p.OS,
|
||||
OSVersion: p.OSVersion,
|
||||
OSFeatures: slices.Clone(p.OSFeatures),
|
||||
Variant: p.Variant,
|
||||
}
|
||||
}
|
||||
|
||||
// schema2PlatformSpecFromOCIPlatform converts an OCI platform p to the schema2 structure.
|
||||
func schema2PlatformSpecFromOCIPlatform(p imgspecv1.Platform) Schema2PlatformSpec {
|
||||
return Schema2PlatformSpec{
|
||||
Architecture: p.Architecture,
|
||||
OS: p.OS,
|
||||
OSVersion: p.OSVersion,
|
||||
OSFeatures: slices.Clone(p.OSFeatures),
|
||||
Variant: p.Variant,
|
||||
Features: nil,
|
||||
}
|
||||
}
|
||||
|
|
|
4 vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go (generated, vendored)
@@ -128,6 +128,10 @@ var compatibility = map[string][]string{
|
|||
// the most compatible platform is first.
|
||||
// If some option (arch, os, variant) is not present, a value from current platform is detected.
|
||||
func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
|
||||
// Note that this does not use Platform.OSFeatures and Platform.OSVersion at all.
|
||||
// The fields are not specified by the OCI specification, as of version 1.1, usefully enough
|
||||
// to be interoperable, anyway.
|
||||
|
||||
wantedArch := runtime.GOARCH
|
||||
wantedVariant := ""
|
||||
if ctx != nil && ctx.ArchitectureChoice != "" {
|
||||
|
|
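WantedPlatforms returns an ordered list (most compatible first) and MatchesPlatform checks a single candidate against one entry; chooseInstance in oci_index.go above combines them roughly as sketched here. This is a simplified illustration; the platform package is internal to c/image and firstMatch is hypothetical.

```go
package example

import (
	"github.com/containers/image/v5/internal/pkg/platform"
	"github.com/containers/image/v5/types"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// firstMatch returns the first candidate compatible with the current (or ctx-overridden) platform.
func firstMatch(ctx *types.SystemContext, candidates []imgspecv1.Platform) (imgspecv1.Platform, bool, error) {
	wanted, err := platform.WantedPlatforms(ctx) // ordered most-compatible first
	if err != nil {
		return imgspecv1.Platform{}, false, err
	}
	for _, w := range wanted {
		for _, c := range candidates {
			if platform.MatchesPlatform(c, w) {
				return c, true, nil
			}
		}
	}
	return imgspecv1.Platform{}, false, nil
}
```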
|
2 vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go (generated, vendored)
@@ -15,7 +15,7 @@ import (
|
|||
// It is the caller's responsibility to call the cleanup function, which closes and removes the temporary file.
|
||||
// If an error occurs, inputInfo is not modified.
|
||||
func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *types.BlobInfo) (io.Reader, func(), error) {
|
||||
diskBlob, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob")
|
||||
diskBlob, err := tmpdir.CreateBigFileTemp(sys, "stream-blob")
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("creating temporary on-disk layer: %w", err)
|
||||
}
|
||||
|
|
|
12 vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go (generated, vendored)
@@ -17,10 +17,12 @@ var unixTempDirForBigFiles = builtinUnixTempDirForBigFiles
|
|||
// DO NOT change this, instead see unixTempDirForBigFiles above.
|
||||
const builtinUnixTempDirForBigFiles = "/var/tmp"
|
||||
|
||||
const prefix = "container_images_"
|
||||
|
||||
// TemporaryDirectoryForBigFiles returns a directory for temporary (big) files.
|
||||
// On non Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp
|
||||
// which on systemd based systems could be the unsuitable tmpfs filesystem.
|
||||
func TemporaryDirectoryForBigFiles(sys *types.SystemContext) string {
|
||||
func temporaryDirectoryForBigFiles(sys *types.SystemContext) string {
|
||||
if sys != nil && sys.BigFilesTemporaryDir != "" {
|
||||
return sys.BigFilesTemporaryDir
|
||||
}
|
||||
|
|
@@ -32,3 +34,11 @@ func TemporaryDirectoryForBigFiles(sys *types.SystemContext) string {
|
|||
}
|
||||
return temporaryDirectoryForBigFiles
|
||||
}
|
||||
|
||||
func CreateBigFileTemp(sys *types.SystemContext, name string) (*os.File, error) {
|
||||
return os.CreateTemp(temporaryDirectoryForBigFiles(sys), prefix+name)
|
||||
}
|
||||
|
||||
func MkDirBigFileTemp(sys *types.SystemContext, name string) (string, error) {
|
||||
return os.MkdirTemp(temporaryDirectoryForBigFiles(sys), prefix+name)
|
||||
}
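The two new helpers bundle the big-files directory lookup with the container_images_ prefix. A minimal sketch of the resulting call pattern, matching the updates to stream_digest.go and oci_transport.go elsewhere in this commit; the tmpdir package is internal to c/image, and writeScratchBlob is a hypothetical caller.

```go
package example

import (
	"os"

	"github.com/containers/image/v5/internal/tmpdir"
	"github.com/containers/image/v5/types"
)

// writeScratchBlob spills data to a temporary file in the big-files directory.
// Before this commit: os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob").
func writeScratchBlob(sys *types.SystemContext, data []byte) (string, error) {
	f, err := tmpdir.CreateBigFileTemp(sys, "stream-blob")
	if err != nil {
		return "", err
	}
	defer f.Close()
	if _, err := f.Write(data); err != nil {
		os.Remove(f.Name())
		return "", err
	}
	return f.Name(), nil
}
```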
|
||||
|
|
|
3 vendor/github.com/containers/image/v5/manifest/docker_schema1.go (generated, vendored)
@@ -154,6 +154,9 @@ func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
|
|||
// but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
|
||||
// So, we don't bother recomputing the IDs in m.History.V1Compatibility.
|
||||
m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest
|
||||
if info.CryptoOperation != types.PreserveOriginalCrypto {
|
||||
return fmt.Errorf("encryption change (for layer %q) is not supported in schema1 manifests", info.Digest)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
3 vendor/github.com/containers/image/v5/manifest/docker_schema2.go (generated, vendored)
@@ -247,6 +247,9 @@ func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
|
|||
m.LayersDescriptors[i].Digest = info.Digest
|
||||
m.LayersDescriptors[i].Size = info.Size
|
||||
m.LayersDescriptors[i].URLs = info.URLs
|
||||
if info.CryptoOperation != types.PreserveOriginalCrypto {
|
||||
return fmt.Errorf("encryption change (for layer %q) is not supported in schema2 manifests", info.Digest)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
4 vendor/github.com/containers/image/v5/manifest/oci.go (generated, vendored)
@@ -202,7 +202,7 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
|
|||
// Most software calling this without human intervention is going to expect the values to be realistic and relevant,
|
||||
// and is probably better served by failing; we can always re-visit that later if we fail now, but
|
||||
// if we started returning some data for OCI artifacts now, we couldn’t start failing in this function later.
|
||||
return nil, manifest.NewNonImageArtifactError(m.Config.MediaType)
|
||||
return nil, manifest.NewNonImageArtifactError(&m.Manifest)
|
||||
}
|
||||
|
||||
config, err := configGetter(m.ConfigInfo())
|
||||
|
|
@@ -253,7 +253,7 @@ func (m *OCI1) ImageID([]digest.Digest) (string, error) {
|
|||
// (The only known caller of ImageID is storage/storageImageDestination.computeID,
|
||||
// which can’t work with non-image artifacts.)
|
||||
if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
|
||||
return "", manifest.NewNonImageArtifactError(m.Config.MediaType)
|
||||
return "", manifest.NewNonImageArtifactError(&m.Manifest)
|
||||
}
|
||||
|
||||
if err := m.Config.Digest.Validate(); err != nil {
|
||||
|
|
|
2 vendor/github.com/containers/image/v5/oci/archive/oci_transport.go (generated, vendored)
@@ -156,7 +156,7 @@ func (t *tempDirOCIRef) deleteTempDir() error {
|
|||
// createOCIRef creates the oci reference of the image
|
||||
// If SystemContext.BigFilesTemporaryDir not "", overrides the temporary directory to use for storing big files
|
||||
func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) {
|
||||
dir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci")
|
||||
dir, err := tmpdir.MkDirBigFileTemp(sys, "oci")
|
||||
if err != nil {
|
||||
return tempDirOCIRef{}, fmt.Errorf("creating temp directory: %w", err)
|
||||
}
|
||||
|
|
|
393 vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go (generated, vendored)
@@ -1,393 +0,0 @@
|
|||
// Package boltdb implements a BlobInfoCache backed by BoltDB.
|
||||
package boltdb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/v5/internal/blobinfocache"
|
||||
"github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/sirupsen/logrus"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
var (
|
||||
// NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
|
||||
// we can simply start over with a different filename; update blobInfoCacheFilename.
|
||||
|
||||
// FIXME: For CRI-O, does this need to hide information between different users?
|
||||
|
||||
// uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest.
|
||||
uncompressedDigestBucket = []byte("uncompressedDigest")
|
||||
// digestCompressorBucket stores a mapping from any digest to a compressor, or blobinfocache.Uncompressed
|
||||
// It may not exist in caches created by older versions, even if uncompressedDigestBucket is present.
|
||||
digestCompressorBucket = []byte("digestCompressor")
|
||||
// digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest
|
||||
// (as a set of key=digest, value="" pairs)
|
||||
digestByUncompressedBucket = []byte("digestByUncompressed")
|
||||
// knownLocationsBucket stores a nested structure of buckets, keyed by (transport name, scope string, blob digest), ultimately containing
|
||||
// a bucket of (opaque location reference, BinaryMarshaller-encoded time.Time value).
|
||||
knownLocationsBucket = []byte("knownLocations")
|
||||
)
|
||||
|
||||
// Concurrency:
|
||||
// See https://www.sqlite.org/src/artifact/c230a7a24?ln=994-1081 for all the issues with locks, which make it extremely
|
||||
// difficult to use a single BoltDB file from multiple threads/goroutines inside a process. So, we punt and only allow one at a time.
|
||||
|
||||
// pathLock contains a lock for a specific BoltDB database path.
|
||||
type pathLock struct {
|
||||
refCount int64 // Number of threads/goroutines owning or waiting on this lock. Protected by global pathLocksMutex, NOT by the mutex field below!
|
||||
mutex sync.Mutex // Owned by the thread/goroutine allowed to access the BoltDB database.
|
||||
}
|
||||
|
||||
var (
|
||||
// pathLocks contains a lock for each currently open file.
|
||||
// This must be global so that independently created instances of boltDBCache exclude each other.
|
||||
// The map is protected by pathLocksMutex.
|
||||
// FIXME? Should this be based on device:inode numbers instead of paths instead?
|
||||
pathLocks = map[string]*pathLock{}
|
||||
pathLocksMutex = sync.Mutex{}
|
||||
)
|
||||
|
||||
// lockPath obtains the pathLock for path.
|
||||
// The caller must call unlockPath eventually.
|
||||
func lockPath(path string) {
|
||||
pl := func() *pathLock { // A scope for defer
|
||||
pathLocksMutex.Lock()
|
||||
defer pathLocksMutex.Unlock()
|
||||
pl, ok := pathLocks[path]
|
||||
if ok {
|
||||
pl.refCount++
|
||||
} else {
|
||||
pl = &pathLock{refCount: 1, mutex: sync.Mutex{}}
|
||||
pathLocks[path] = pl
|
||||
}
|
||||
return pl
|
||||
}()
|
||||
pl.mutex.Lock()
|
||||
}
|
||||
|
||||
// unlockPath releases the pathLock for path.
|
||||
func unlockPath(path string) {
|
||||
pathLocksMutex.Lock()
|
||||
defer pathLocksMutex.Unlock()
|
||||
pl, ok := pathLocks[path]
|
||||
if !ok {
|
||||
// Should this return an error instead? BlobInfoCache ultimately ignores errors…
|
||||
panic(fmt.Sprintf("Internal error: unlocking nonexistent lock for path %s", path))
|
||||
}
|
||||
pl.mutex.Unlock()
|
||||
pl.refCount--
|
||||
if pl.refCount == 0 {
|
||||
delete(pathLocks, path)
|
||||
}
|
||||
}
|
||||
|
||||
// cache is a BlobInfoCache implementation which uses a BoltDB file at the specified path.
|
||||
//
|
||||
// Note that we don’t keep the database open across operations, because that would lock the file and block any other
|
||||
// users; instead, we need to open/close it for every single write or lookup.
|
||||
type cache struct {
|
||||
path string
|
||||
}
|
||||
|
||||
// New returns a BlobInfoCache implementation which uses a BoltDB file at path.
|
||||
//
|
||||
// Most users should call blobinfocache.DefaultCache instead.
|
||||
func New(path string) types.BlobInfoCache {
|
||||
return new2(path)
|
||||
}
|
||||
func new2(path string) *cache {
|
||||
return &cache{path: path}
|
||||
}
|
||||
|
||||
// view returns runs the specified fn within a read-only transaction on the database.
|
||||
func (bdc *cache) view(fn func(tx *bolt.Tx) error) (retErr error) {
|
||||
// bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist,
|
||||
// nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding
|
||||
// a read lock, blocking any future writes.
|
||||
// Hence this preliminary check, which is RACY: Another process could remove the file
|
||||
// between the Lstat call and opening the database.
|
||||
if _, err := os.Lstat(bdc.path); err != nil && os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
lockPath(bdc.path)
|
||||
defer unlockPath(bdc.path)
|
||||
db, err := bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err := db.Close(); retErr == nil && err != nil {
|
||||
retErr = err
|
||||
}
|
||||
}()
|
||||
|
||||
return db.View(fn)
|
||||
}
|
||||
|
||||
// update returns runs the specified fn within a read-write transaction on the database.
|
||||
func (bdc *cache) update(fn func(tx *bolt.Tx) error) (retErr error) {
|
||||
lockPath(bdc.path)
|
||||
defer unlockPath(bdc.path)
|
||||
db, err := bolt.Open(bdc.path, 0600, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err := db.Close(); retErr == nil && err != nil {
|
||||
retErr = err
|
||||
}
|
||||
}()
|
||||
|
||||
return db.Update(fn)
|
||||
}
|
||||
|
||||
// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction.
|
||||
func (bdc *cache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
|
||||
if b := tx.Bucket(uncompressedDigestBucket); b != nil {
|
||||
if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil {
|
||||
d, err := digest.Parse(string(uncompressedBytes))
|
||||
if err == nil {
|
||||
return d
|
||||
}
|
||||
// FIXME? Log err (but throttle the log volume on repeated accesses)?
|
||||
}
|
||||
}
|
||||
// Presence in digestsByUncompressedBucket implies that anyDigest must already refer to an uncompressed digest.
|
||||
// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
|
||||
// when we already record a (compressed, uncompressed) pair.
|
||||
if b := tx.Bucket(digestByUncompressedBucket); b != nil {
|
||||
if b = b.Bucket([]byte(anyDigest.String())); b != nil {
|
||||
c := b.Cursor()
|
||||
if k, _ := c.First(); k != nil { // The bucket is non-empty
|
||||
return anyDigest
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
|
||||
// May return anyDigest if it is known to be uncompressed.
|
||||
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
|
||||
func (bdc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
|
||||
var res digest.Digest
|
||||
if err := bdc.view(func(tx *bolt.Tx) error {
|
||||
res = bdc.uncompressedDigest(tx, anyDigest)
|
||||
return nil
|
||||
}); err != nil { // Including os.IsNotExist(err)
|
||||
return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
|
||||
// It’s allowed for anyDigest == uncompressed.
|
||||
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
|
||||
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
|
||||
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
|
||||
func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
|
||||
_ = bdc.update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key := []byte(anyDigest.String())
|
||||
if previousBytes := b.Get(key); previousBytes != nil {
|
||||
previous, err := digest.Parse(string(previousBytes))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if previous != uncompressed {
|
||||
logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
|
||||
}
|
||||
}
|
||||
if err := b.Put(key, []byte(uncompressed.String())); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b, err = tx.CreateBucketIfNotExists(digestByUncompressedBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b, err = b.CreateBucketIfNotExists([]byte(uncompressed.String()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := b.Put([]byte(anyDigest.String()), []byte{}); err != nil { // Possibly writing the same []byte{} presence marker again.
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
|
||||
}
|
||||
|
||||
// RecordDigestCompressorName records that the blob with digest anyDigest was compressed with the specified
|
||||
// compressor, or is blobinfocache.Uncompressed.
|
||||
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
|
||||
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
|
||||
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
|
||||
func (bdc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
|
||||
_ = bdc.update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucketIfNotExists(digestCompressorBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key := []byte(anyDigest.String())
|
||||
if previousBytes := b.Get(key); previousBytes != nil {
|
||||
if string(previousBytes) != compressorName {
|
||||
logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, string(previousBytes), compressorName)
|
||||
}
|
||||
}
|
||||
if compressorName == blobinfocache.UnknownCompression {
|
||||
return b.Delete(key)
|
||||
}
|
||||
return b.Put(key, []byte(compressorName))
|
||||
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
|
||||
}
|
||||
|
||||
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
|
||||
// and can be reused given the opaque location data.
|
||||
func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
|
||||
_ = bdc.update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucketIfNotExists(knownLocationsBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b, err = b.CreateBucketIfNotExists([]byte(transport.Name()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b, err = b.CreateBucketIfNotExists([]byte(scope.Opaque))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b, err = b.CreateBucketIfNotExists([]byte(blobDigest.String()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
value, err := time.Now().MarshalBinary()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := b.Put([]byte(location.Opaque), value); err != nil { // Possibly overwriting an older entry.
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
|
||||
}
|
||||
|
||||
// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket with corresponding compression info from compressionBucket (if compressionBucket is not nil), and returns the result of appending them to candidates.
|
||||
func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket, compressionBucket *bolt.Bucket, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime {
|
||||
digestKey := []byte(digest.String())
|
||||
b := scopeBucket.Bucket(digestKey)
|
||||
if b == nil {
|
||||
return candidates
|
||||
}
|
||||
compressorName := blobinfocache.UnknownCompression
|
||||
if compressionBucket != nil {
|
||||
// the bucket won't exist if the cache was created by a v1 implementation and
|
||||
// hasn't yet been updated by a v2 implementation
|
||||
if compressorNameValue := compressionBucket.Get(digestKey); len(compressorNameValue) > 0 {
|
||||
compressorName = string(compressorNameValue)
|
||||
}
|
||||
}
|
||||
if compressorName == blobinfocache.UnknownCompression && requireCompressionInfo {
|
||||
return candidates
|
||||
}
|
||||
_ = b.ForEach(func(k, v []byte) error {
|
||||
t := time.Time{}
|
||||
if err := t.UnmarshalBinary(v); err != nil {
|
||||
return err
|
||||
}
|
||||
candidates = append(candidates, prioritize.CandidateWithTime{
|
||||
Candidate: blobinfocache.BICReplacementCandidate2{
|
||||
Digest: digest,
|
||||
CompressorName: compressorName,
|
||||
Location: types.BICLocationReference{Opaque: string(k)},
|
||||
},
|
||||
LastSeen: t,
|
||||
})
|
||||
return nil
|
||||
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
|
||||
return candidates
|
||||
}
|
||||
|
||||
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused
|
||||
// within the specified (transport scope) (if they still exist, which is not guaranteed).
|
||||
//
|
||||
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
|
||||
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
|
||||
// uncompressed digest.
|
||||
func (bdc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
|
||||
return bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, true)
|
||||
}
|
||||
|
||||
func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 {
|
||||
res := []prioritize.CandidateWithTime{}
|
||||
var uncompressedDigestValue digest.Digest // = ""
|
||||
if err := bdc.view(func(tx *bolt.Tx) error {
|
||||
scopeBucket := tx.Bucket(knownLocationsBucket)
|
||||
if scopeBucket == nil {
|
||||
return nil
|
||||
}
|
||||
scopeBucket = scopeBucket.Bucket([]byte(transport.Name()))
|
||||
if scopeBucket == nil {
|
||||
return nil
|
||||
}
|
||||
scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque))
|
||||
if scopeBucket == nil {
|
||||
return nil
|
||||
}
|
||||
// compressionBucket won't have been created if previous writers never recorded info about compression,
|
||||
// and we don't want to fail just because of that
|
||||
compressionBucket := tx.Bucket(digestCompressorBucket)
|
||||
|
||||
res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, primaryDigest, requireCompressionInfo)
|
||||
if canSubstitute {
|
||||
if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" {
|
||||
b := tx.Bucket(digestByUncompressedBucket)
|
||||
if b != nil {
|
||||
b = b.Bucket([]byte(uncompressedDigestValue.String()))
|
||||
if b != nil {
|
||||
if err := b.ForEach(func(k, _ []byte) error {
|
||||
d, err := digest.Parse(string(k))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if d != primaryDigest && d != uncompressedDigestValue {
|
||||
res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, d, requireCompressionInfo)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if uncompressedDigestValue != primaryDigest {
|
||||
res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, uncompressedDigestValue, requireCompressionInfo)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}); err != nil { // Including os.IsNotExist(err)
|
||||
return []blobinfocache.BICReplacementCandidate2{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
|
||||
}
|
||||
|
||||
return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
|
||||
}
|
||||
|
||||
// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
|
||||
// within the specified (transport scope) (if they still exist, which is not guaranteed).
|
||||
//
|
||||
// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute,
|
||||
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
|
||||
// uncompressed digest.
|
||||
func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
|
||||
return blobinfocache.CandidateLocationsFromV2(bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, false))
|
||||
}
|
||||
20 vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go (generated, vendored)
@@ -6,8 +6,8 @@ import (
|
|||
"path/filepath"
|
||||
|
||||
"github.com/containers/image/v5/internal/rootless"
|
||||
"github.com/containers/image/v5/pkg/blobinfocache/boltdb"
|
||||
"github.com/containers/image/v5/pkg/blobinfocache/memory"
|
||||
"github.com/containers/image/v5/pkg/blobinfocache/sqlite"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
|
@@ -15,7 +15,7 @@ import (
|
|||
const (
|
||||
// blobInfoCacheFilename is the file name used for blob info caches.
|
||||
// If the format changes in an incompatible way, increase the version number.
|
||||
blobInfoCacheFilename = "blob-info-cache-v1.boltdb"
|
||||
blobInfoCacheFilename = "blob-info-cache-v1.sqlite"
|
||||
// systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfocacheFilename) for root-running processes.
|
||||
systemBlobInfoCacheDir = "/var/lib/containers/cache"
|
||||
)
|
||||
|
|
@@ -57,10 +57,20 @@ func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
|
|||
}
|
||||
path := filepath.Join(dir, blobInfoCacheFilename)
|
||||
if err := os.MkdirAll(dir, 0700); err != nil {
|
||||
logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err)
|
||||
logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", path, err)
|
||||
return memory.New()
|
||||
}
|
||||
|
||||
logrus.Debugf("Using blob info cache at %s", path)
|
||||
return boltdb.New(path)
|
||||
// It might make sense to keep a single sqlite cache object, and a single initialized sqlite connection, open
|
||||
// as a global singleton, for the vast majority of callers who don’t override the cache location.
|
||||
// OTOH that would keep a file descriptor open forever, even for long-term callers who copy images rarely,
|
||||
// and the performance benefit to this over using an Open()/Close() pair for a single image copy is < 10%.
|
||||
|
||||
cache, err := sqlite.New(path)
|
||||
if err != nil {
|
||||
logrus.Debugf("Error creating a SQLite blob info cache at %s, using a memory-only cache: %v", path, err)
|
||||
return memory.New()
|
||||
}
|
||||
logrus.Debugf("Using SQLite blob info cache at %s", path)
|
||||
return cache
|
||||
}
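Unlike boltdb.New, sqlite.New can fail (it initializes the schema up front), so direct callers have to handle an error; DefaultCache above falls back to the in-memory cache. A minimal sketch of that pattern for a caller using a custom path; openCache and its path handling are hypothetical.

```go
package example

import (
	"github.com/containers/image/v5/pkg/blobinfocache/memory"
	"github.com/containers/image/v5/pkg/blobinfocache/sqlite"
	"github.com/containers/image/v5/types"
)

// openCache returns a SQLite-backed blob info cache at path, or an in-memory
// cache if the SQLite file cannot be initialized (mirroring DefaultCache).
func openCache(path string) types.BlobInfoCache {
	cache, err := sqlite.New(path) // boltdb.New never returned an error; sqlite.New does
	if err != nil {
		return memory.New()
	}
	return cache
}
```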
|
||||
|
|
|
|||
|
|
@@ -82,6 +82,7 @@ func (css *candidateSortState) Swap(i, j int) {
|
|||
func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []blobinfocache.BICReplacementCandidate2 {
|
||||
// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
|
||||
// compare equal.
|
||||
// FIXME: Use slices.SortFunc after we update to Go 1.20 (Go 1.21?) and Time.Compare and cmp.Compare are available.
|
||||
sort.Sort(&candidateSortState{
|
||||
cs: cs,
|
||||
primaryDigest: primaryDigest,
|
||||
|
|
|
14 vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go (generated, vendored)
@@ -27,7 +27,7 @@ type cache struct {
|
|||
uncompressedDigests map[digest.Digest]digest.Digest
|
||||
digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest
|
||||
knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
|
||||
compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Unknown, for each digest
|
||||
compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Unknown (not blobinfocache.UnknownCompression), for each digest
|
||||
}
|
||||
|
||||
// New returns a BlobInfoCache implementation which is in-memory only.
|
||||
|
|
@@ -51,6 +51,15 @@ func new2() *cache {
|
|||
}
|
||||
}
|
||||
|
||||
// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close().
|
||||
// Note that public callers may call the types.BlobInfoCache operations without Open()/Close().
|
||||
func (mem *cache) Open() {
|
||||
}
|
||||
|
||||
// Close destroys state created by Open().
|
||||
func (mem *cache) Close() {
|
||||
}
|
||||
|
||||
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
|
||||
// May return anyDigest if it is known to be uncompressed.
|
||||
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
|
||||
|
|
@@ -114,6 +123,9 @@ func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope type
|
|||
func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compressorName string) {
|
||||
mem.mutex.Lock()
|
||||
defer mem.mutex.Unlock()
|
||||
if previous, ok := mem.compressors[blobDigest]; ok && previous != compressorName {
|
||||
logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", blobDigest, previous, compressorName)
|
||||
}
|
||||
if compressorName == blobinfocache.UnknownCompression {
|
||||
delete(mem.compressors, blobDigest)
|
||||
return
|
||||
|
|
|
553 vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go (generated, vendored, new file)
@@ -0,0 +1,553 @@
|
|||
// Package sqlite implements a BlobInfoCache backed by SQLite.
|
||||
package sqlite
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/v5/internal/blobinfocache"
|
||||
"github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
|
||||
"github.com/containers/image/v5/types"
|
||||
_ "github.com/mattn/go-sqlite3" // Registers the "sqlite3" backend backend for database/sql
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
// NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
|
||||
// we can simply start over with a different filename; update blobInfoCacheFilename.
|
||||
// That also means we don’t have to worry about co-existing readers/writers which know different versions of the schema
|
||||
// (which would require compatibility in both directions).
|
||||
|
||||
// Assembled sqlite options used when opening the database.
|
||||
sqliteOptions = "?" +
|
||||
// Deal with timezone automatically.
|
||||
// go-sqlite3 always _records_ timestamps as a text: time in local time + a time zone offset.
|
||||
// _loc affects how the values are _parsed_: (which timezone is assumed for numeric timestamps or for text which does not specify an offset, or)
|
||||
// if the time zone offset matches the specified time zone, the timestamp is assumed to be in that time zone / location;
|
||||
// (otherwise an unnamed time zone carrying just a hard-coded offset, but no location / DST rules is used).
|
||||
"_loc=auto" +
|
||||
// Force an fsync after each transaction (https://www.sqlite.org/pragma.html#pragma_synchronous).
|
||||
"&_sync=FULL" +
|
||||
// Allow foreign keys (https://www.sqlite.org/pragma.html#pragma_foreign_keys).
|
||||
// We don’t currently use any foreign keys, but this is a good choice long-term (not default in SQLite only for historical reasons).
|
||||
"&_foreign_keys=1" +
|
||||
// Use BEGIN EXCLUSIVE (https://www.sqlite.org/lang_transaction.html);
|
||||
// i.e. obtain a write lock for _all_ transactions at the transaction start (never use a read lock,
|
||||
// never upgrade from a read to a write lock - that can fail if multiple read lock owners try to do that simultaneously).
|
||||
//
|
||||
// This, together with go-sqlite3’s default for _busy_timeout=5000, means that we should never see a “database is locked” error,
|
||||
// the database should block on the exclusive lock when starting a transaction, and the problematic case of two simultaneous
|
||||
// holders of a read lock trying to upgrade to a write lock (and one necessarily failing) is prevented.
|
||||
// Compare https://github.com/mattn/go-sqlite3/issues/274 .
|
||||
//
|
||||
// Ideally the BEGIN / BEGIN EXCLUSIVE decision could be made per-transaction, compare https://github.com/mattn/go-sqlite3/pull/1167
|
||||
// or https://github.com/mattn/go-sqlite3/issues/400 .
|
||||
// The currently-proposed workaround is to create two different SQL “databases” (= connection pools) with different _txlock settings,
|
||||
// which seems rather wasteful.
|
||||
"&_txlock=exclusive"
|
||||
)
|
||||
|
||||
// cache is a BlobInfoCache implementation which uses a SQLite file at the specified path.
|
||||
type cache struct {
|
||||
path string
|
||||
|
||||
// The database/sql package says “It is rarely necessary to close a DB.”, and steers towards a long-term *sql.DB connection pool.
|
||||
// That’s probably very applicable for database-backed services, where the database is the primary data store. That’s not necessarily
|
||||
// the case for callers of c/image, where image operations might be a small proportion of the total runtime, and the cache is fairly
|
||||
// incidental even to the image operations. It’s also hard for us to use that model, because the public BlobInfoCache object doesn’t have
|
||||
// a Close method, so creating a lot of single-use caches could leak data.
|
||||
//
|
||||
// Instead, the private BlobInfoCache2 interface provides Open/Close methods, and they are called by c/image/copy.Image.
|
||||
// This amortizes the cost of opening/closing the SQLite state over a single image copy, while keeping no long-term resources open.
|
||||
// Some rough benchmarks in https://github.com/containers/image/pull/2092 suggest relative costs on the order of "25" for a single
|
||||
// *sql.DB left open long-term, "27" for a *sql.DB open for a single image copy, and "40" for opening/closing a *sql.DB for every
|
||||
// single transaction; so the Open/Close per image copy seems a reasonable compromise (especially compared to the previous implementation,
|
||||
// somewhere around "700").
|
||||
|
||||
lock sync.Mutex
|
||||
// The following fields can only be accessed with lock held.
|
||||
refCount int // number of outstanding Open() calls
|
||||
db *sql.DB // nil if not set (may happen even if refCount > 0 on errors)
|
||||
}
|
||||
|
||||
// New returns a BlobInfoCache implementation which uses a SQLite file at path.
|
||||
//
|
||||
// Most users should call blobinfocache.DefaultCache instead.
|
||||
func New(path string) (types.BlobInfoCache, error) {
|
||||
return new2(path)
|
||||
}
|
||||
|
||||
func new2(path string) (*cache, error) {
|
||||
db, err := rawOpen(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("initializing blob info cache at %q: %w", path, err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// We don’t check the schema before every operation, because that would be costly
|
||||
// and because we assume schema changes will be handled by using a different path.
|
||||
if err := ensureDBHasCurrentSchema(db); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &cache{
|
||||
path: path,
|
||||
refCount: 0,
|
||||
db: nil,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// rawOpen returns a new *sql.DB for path.
|
||||
// The caller should arrange for it to be .Close()d.
|
||||
func rawOpen(path string) (*sql.DB, error) {
|
||||
// This exists to centralize the use of sqliteOptions.
|
||||
return sql.Open("sqlite3", path+sqliteOptions)
|
||||
}
|
||||
|
||||
// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close().
|
||||
// Note that public callers may call the types.BlobInfoCache operations without Open()/Close().
|
||||
func (sqc *cache) Open() {
|
||||
sqc.lock.Lock()
|
||||
defer sqc.lock.Unlock()
|
||||
|
||||
if sqc.refCount == 0 {
|
||||
db, err := rawOpen(sqc.path)
|
||||
if err != nil {
|
||||
logrus.Warnf("Error opening (previously-succesfully-opened) blob info cache at %q: %v", sqc.path, err)
|
||||
db = nil // But still increase sqc.refCount, because a .Close() will happen
|
||||
}
|
||||
sqc.db = db
|
||||
}
|
||||
sqc.refCount++
|
||||
}
|
||||
|
||||
// Close destroys state created by Open().
|
||||
func (sqc *cache) Close() {
|
||||
sqc.lock.Lock()
|
||||
defer sqc.lock.Unlock()
|
||||
|
||||
switch sqc.refCount {
|
||||
case 0:
|
||||
logrus.Errorf("internal error using pkg/blobinfocache/sqlite.cache: Close() without a matching Open()")
|
||||
return
|
||||
case 1:
|
||||
if sqc.db != nil {
|
||||
sqc.db.Close()
|
||||
sqc.db = nil
|
||||
}
|
||||
}
|
||||
sqc.refCount--
|
||||
}
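Open and Close are reference-counted and belong to the internal BlobInfoCache2 interface, which c/image/copy uses to amortize one sql.DB open over a whole image copy. A sketch of that bracketing from a caller's perspective; the openCloser interface literal and lookupWithAmortizedOpen are illustrative, not the real internal types.

```go
package example

import (
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
)

type openCloser interface {
	Open()
	Close()
}

// lookupWithAmortizedOpen keeps the cache open for the duration of the lookup;
// public callers may also use the cache without Open()/Close(), as noted above.
func lookupWithAmortizedOpen(cache types.BlobInfoCache, d digest.Digest) digest.Digest {
	if oc, ok := cache.(openCloser); ok {
		oc.Open()
		defer oc.Close()
	}
	return cache.UncompressedDigest(d)
}
```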
|
||||
|
||||
type void struct{} // So that we don’t have to write struct{}{} all over the place
|
||||
|
||||
// transaction calls fn within a read-write transaction in sqc.
|
||||
func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (T, error) {
|
||||
db, closeDB, err := func() (*sql.DB, func(), error) { // A scope for defer
|
||||
sqc.lock.Lock()
|
||||
defer sqc.lock.Unlock()
|
||||
|
||||
if sqc.db != nil {
|
||||
return sqc.db, func() {}, nil
|
||||
}
|
||||
db, err := rawOpen(sqc.path)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("opening blob info cache at %q: %w", sqc.path, err)
|
||||
}
|
||||
return db, func() { db.Close() }, nil
|
||||
}()
|
||||
if err != nil {
|
||||
var zeroRes T // A zero value of T
|
||||
return zeroRes, err
|
||||
}
|
||||
defer closeDB()
|
||||
|
||||
return dbTransaction(db, fn)
|
||||
}
|
||||
|
||||
// dbTransaction calls fn within a read-write transaction in db.
|
||||
func dbTransaction[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (T, error) {
|
||||
// Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive discussion.
|
||||
|
||||
var zeroRes T // A zero value of T
|
||||
|
||||
tx, err := db.Begin()
|
||||
if err != nil {
|
||||
return zeroRes, fmt.Errorf("beginning transaction: %w", err)
|
||||
}
|
||||
succeeded := false
|
||||
defer func() {
|
||||
if !succeeded {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
logrus.Errorf("Rolling back transaction: %v", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
res, err := fn(tx)
|
||||
if err != nil {
|
||||
return zeroRes, err
|
||||
}
|
||||
if err := tx.Commit(); err != nil {
|
||||
return zeroRes, fmt.Errorf("committing transaction: %w", err)
|
||||
}
|
||||
|
||||
succeeded = true
|
||||
return res, nil
|
||||
}
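The generics-based transaction helper is what every cache method below is built on. A hypothetical method showing the call shape, assuming this file's cache type, transaction helper, and the KnownLocations table created in ensureDBHasCurrentSchema; the COUNT query itself is not part of the real file.

```go
// countKnownLocations is a hypothetical example of the transaction() call shape.
func (sqc *cache) countKnownLocations() (int64, error) {
	return transaction(sqc, func(tx *sql.Tx) (int64, error) {
		var n int64
		if err := tx.QueryRow("SELECT COUNT(*) FROM KnownLocations").Scan(&n); err != nil {
			return 0, err
		}
		return n, nil
	})
}
```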
|
||||
|
||||
// querySingleValue executes a SELECT which is expected to return at most one row with a single column.
|
||||
// It returns (value, true, nil) on success, or (value, false, nil) if no row was returned.
|
||||
func querySingleValue[T any](tx *sql.Tx, query string, params ...any) (T, bool, error) {
|
||||
var value T
|
||||
if err := tx.QueryRow(query, params...).Scan(&value); err != nil {
|
||||
var zeroValue T // A zero value of T
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return zeroValue, false, nil
|
||||
}
|
||||
return zeroValue, false, err
|
||||
}
|
||||
return value, true, nil
|
||||
}
|
||||
|
||||
// ensureDBHasCurrentSchema adds the necessary tables and indices to a database.
|
||||
// This is typically used when creating a previously-nonexistent database.
|
||||
// We don’t really anticipate schema migrations; with c/image usually vendored, not using
|
||||
// shared libraries, migrating a schema on an existing database would affect old-version users.
|
||||
// Instead, schema changes are likely to be implemented by using a different cache file name,
|
||||
// and leaving existing caches around for old users.
|
||||
func ensureDBHasCurrentSchema(db *sql.DB) error {
|
||||
// Considered schema design alternatives:
|
||||
//
|
||||
// (Overall, considering the overall network latency and disk I/O costs of many-megabyte layer pulls which are happening while referring
|
||||
// to the blob info cache, it seems reasonable to prioritize readability over microoptimization of this database.)
|
||||
//
|
||||
// * This schema uses the text representation of digests.
|
||||
//
|
||||
// We use the fairly wasteful text with hexadecimal digits because digest.Digest does not define a binary representation;
|
||||
// and the way digest.Digest.Hex() is deprecated in favor of digest.Digest.Encoded(), and the way digest.Algorithm
|
||||
// is documented to “define the string encoding” suggests that assuming a hexadecimal representation and turning that
|
||||
// into binary ourselves is not a good idea in general; we would have to special-case the currently-known algorithm
|
||||
// — and that would require us to implement two code paths, one of them basically never exercised / never tested.
|
||||
//
|
||||
// * There are two separate items for recording the uncompressed digest and digest compressors.
|
||||
// Alternatively, we could have a single "digest facts" table with NULLable columns.
|
||||
//
|
||||
// The way the BlobInfoCache API works, we are only going to write one value at a time, so
|
||||
// sharing a table would not be any more efficient for writes (same number of lookups, larger row tuples).
|
||||
// Reads in candidateLocations would not be more efficient either, the searches in DigestCompressors and DigestUncompressedPairs
|
||||
// do not coincide (we want a compressor for every candidate, but the uncompressed digest only for the primary digest; and then
|
||||
// we search in DigestUncompressedPairs by uncompressed digest, not by the primary key).
|
||||
//
|
||||
// Also, using separate items allows the single-item writes to be done using a simple INSERT OR REPLACE, instead of having to
|
||||
// do a more verbose ON CONFLICT(…) DO UPDATE SET … = ….
|
||||
//
|
||||
// * Joins (the two that exist in appendReplacementCandidates) are based on the text representation of digests.
|
||||
//
|
||||
// Using integer primary keys might make the joins themselves a bit more efficient, but then we would need to involve an extra
|
||||
// join to translate from/to the user-provided digests anyway. If anything, that extra join (potentially more btree lookups)
|
||||
// is probably costlier than comparing a few more bytes of data.
|
||||
//
|
||||
// Perhaps more importantly, storing digest texts directly makes the database dumps much easier to read for humans without
|
||||
// having to do extra steps to decode the integers into digest values (either by running sqlite commands with joins, or mentally).
|
||||
//
|
||||
items := []struct{ itemName, command string }{
|
||||
{
|
||||
"DigestUncompressedPairs",
|
||||
`CREATE TABLE IF NOT EXISTS DigestUncompressedPairs(` +
|
||||
// index implied by PRIMARY KEY
|
||||
`anyDigest TEXT PRIMARY KEY NOT NULL,` +
|
||||
// DigestUncompressedPairs_index_uncompressedDigest
|
||||
`uncompressedDigest TEXT NOT NULL
|
||||
)`,
|
||||
},
|
||||
{
|
||||
"DigestUncompressedPairs_index_uncompressedDigest",
|
||||
`CREATE INDEX IF NOT EXISTS DigestUncompressedPairs_index_uncompressedDigest ON DigestUncompressedPairs(uncompressedDigest)`,
|
||||
},
|
||||
{
|
||||
"DigestCompressors",
|
||||
`CREATE TABLE IF NOT EXISTS DigestCompressors(` +
|
||||
// index implied by PRIMARY KEY
|
||||
`digest TEXT PRIMARY KEY NOT NULL,` +
|
||||
// May include blobinfocache.Uncompressed (not blobinfocache.UnknownCompression).
|
||||
`compressor TEXT NOT NULL
|
||||
)`,
|
||||
},
|
||||
{
|
||||
"KnownLocations",
|
||||
`CREATE TABLE IF NOT EXISTS KnownLocations(
|
||||
transport TEXT NOT NULL,
|
||||
scope TEXT NOT NULL,
|
||||
digest TEXT NOT NULL,
|
||||
location TEXT NOT NULL,` +
|
||||
// TIMESTAMP is parsed by SQLITE as a NUMERIC affinity, but go-sqlite3 stores text in the (Go formatting semantics)
|
||||
// format "2006-01-02 15:04:05.999999999-07:00".
|
||||
// See also the _loc option in the sql.Open data source name.
|
||||
`time TIMESTAMP NOT NULL,` +
|
||||
// Implies an index.
|
||||
// We also search by (transport, scope, digest), that doesn’t need an extra index
|
||||
// because it is a prefix of the implied primary-key index.
|
||||
`PRIMARY KEY (transport, scope, digest, location)
|
||||
)`,
|
||||
},
|
||||
}
|
||||
|
||||
_, err := dbTransaction(db, func(tx *sql.Tx) (void, error) {
|
||||
// If the last-created item exists, assume nothing needs to be done.
|
||||
lastItemName := items[len(items)-1].itemName
|
||||
_, found, err := querySingleValue[int](tx, "SELECT 1 FROM sqlite_schema WHERE name=?", lastItemName)
|
||||
if err != nil {
|
||||
return void{}, fmt.Errorf("checking if SQLite schema item %q exists: %w", lastItemName, err)
|
||||
}
|
||||
if !found {
|
||||
// Item does not exist, assuming a fresh database.
|
||||
for _, i := range items {
|
||||
if _, err := tx.Exec(i.command); err != nil {
|
||||
return void{}, fmt.Errorf("creating item %s: %w", i.itemName, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return void{}, nil
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// uncompressedDigest implements types.BlobInfoCache.UncompressedDigest within a transaction.
|
||||
func (sqc *cache) uncompressedDigest(tx *sql.Tx, anyDigest digest.Digest) (digest.Digest, error) {
|
||||
uncompressedString, found, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestUncompressedPairs WHERE anyDigest = ?", anyDigest.String())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if found {
|
||||
d, err := digest.Parse(uncompressedString)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return d, nil
|
||||
|
||||
}
|
||||
// A record as uncompressedDigest implies that anyDigest must already refer to an uncompressed digest.
|
||||
// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
|
||||
// when we already record a (compressed, uncompressed) pair.
|
||||
_, found, err = querySingleValue[int](tx, "SELECT 1 FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", anyDigest.String())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if found {
|
||||
return anyDigest, nil
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
|
||||
// May return anyDigest if it is known to be uncompressed.
|
||||
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
|
||||
func (sqc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
|
||||
res, err := transaction(sqc, func(tx *sql.Tx) (digest.Digest, error) {
|
||||
return sqc.uncompressedDigest(tx, anyDigest)
|
||||
})
|
||||
if err != nil {
|
||||
return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
|
||||
// It’s allowed for anyDigest == uncompressed.
|
||||
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
|
||||
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
|
||||
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
|
||||
func (sqc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
|
||||
_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
|
||||
previousString, gotPrevious, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestUncompressedPairs WHERE anyDigest = ?", anyDigest.String())
|
||||
if err != nil {
|
||||
return void{}, fmt.Errorf("looking for uncompressed digest for %q", anyDigest)
|
||||
}
|
||||
if gotPrevious {
|
||||
previous, err := digest.Parse(previousString)
|
||||
if err != nil {
|
||||
return void{}, err
|
||||
}
|
||||
if previous != uncompressed {
|
||||
logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
|
||||
}
|
||||
}
|
||||
if _, err := tx.Exec("INSERT OR REPLACE INTO DigestUncompressedPairs(anyDigest, uncompressedDigest) VALUES (?, ?)",
|
||||
anyDigest.String(), uncompressed.String()); err != nil {
|
||||
return void{}, fmt.Errorf("recording uncompressed digest %q for %q: %w", uncompressed, anyDigest, err)
|
||||
}
|
||||
return void{}, nil
|
||||
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
|
||||
}
|
||||
|
||||
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
|
||||
// and can be reused given the opaque location data.
|
||||
func (sqc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, location types.BICLocationReference) {
|
||||
_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
|
||||
if _, err := tx.Exec("INSERT OR REPLACE INTO KnownLocations(transport, scope, digest, location, time) VALUES (?, ?, ?, ?, ?)",
|
||||
transport.Name(), scope.Opaque, digest.String(), location.Opaque, time.Now()); err != nil { // Possibly overwriting an older entry.
|
||||
return void{}, fmt.Errorf("recording known location %q for (%q, %q, %q): %w",
|
||||
location.Opaque, transport.Name(), scope.Opaque, digest.String(), err)
|
||||
}
|
||||
return void{}, nil
|
||||
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
|
||||
}
|
||||
|
||||
// RecordDigestCompressorName records a compressor for the blob with the specified digest,
|
||||
// or Uncompressed or UnknownCompression.
|
||||
// WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a
|
||||
// digest just because some remote author claims so (e.g. because a manifest says so);
|
||||
// otherwise the cache could be poisoned and cause us to make incorrect edits to type
|
||||
// information in a manifest.
|
||||
func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
|
||||
_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
|
||||
previous, gotPrevious, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", anyDigest.String())
|
||||
if err != nil {
|
||||
return void{}, fmt.Errorf("looking for compressor of for %q", anyDigest)
|
||||
}
|
||||
if gotPrevious && previous != compressorName {
|
||||
logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, compressorName)
|
||||
}
|
||||
if compressorName == blobinfocache.UnknownCompression {
|
||||
if _, err := tx.Exec("DELETE FROM DigestCompressors WHERE digest = ?", anyDigest.String()); err != nil {
|
||||
return void{}, fmt.Errorf("deleting compressor for digest %q: %w", anyDigest, err)
|
||||
}
|
||||
} else {
|
||||
if _, err := tx.Exec("INSERT OR REPLACE INTO DigestCompressors(digest, compressor) VALUES (?, ?)",
|
||||
anyDigest.String(), compressorName); err != nil {
|
||||
return void{}, fmt.Errorf("recording compressor %q for %q: %w", compressorName, anyDigest, err)
|
||||
}
|
||||
}
|
||||
return void{}, nil
|
||||
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
|
||||
}
|
||||
|
||||
// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
|
||||
func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) ([]prioritize.CandidateWithTime, error) {
|
||||
var rows *sql.Rows
|
||||
var err error
|
||||
if requireCompressionInfo {
|
||||
rows, err = tx.Query("SELECT location, time, compressor FROM KnownLocations JOIN DigestCompressors "+
|
||||
"ON KnownLocations.digest = DigestCompressors.digest "+
|
||||
"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
|
||||
transport.Name(), scope.Opaque, digest.String())
|
||||
} else {
|
||||
rows, err = tx.Query("SELECT location, time, IFNULL(compressor, ?) FROM KnownLocations "+
|
||||
"LEFT JOIN DigestCompressors ON KnownLocations.digest = DigestCompressors.digest "+
|
||||
"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
|
||||
blobinfocache.UnknownCompression,
|
||||
transport.Name(), scope.Opaque, digest.String())
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("looking up candidate locations: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var location string
|
||||
var time time.Time
|
||||
var compressorName string
|
||||
if err := rows.Scan(&location, &time, &compressorName); err != nil {
|
||||
return nil, fmt.Errorf("scanning candidate: %w", err)
|
||||
}
|
||||
candidates = append(candidates, prioritize.CandidateWithTime{
|
||||
Candidate: blobinfocache.BICReplacementCandidate2{
|
||||
Digest: digest,
|
||||
CompressorName: compressorName,
|
||||
Location: types.BICLocationReference{Opaque: location},
|
||||
},
|
||||
LastSeen: time,
|
||||
})
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, fmt.Errorf("iterating through locations: %w", err)
|
||||
}
|
||||
return candidates, nil
|
||||
}
|
||||
|
||||
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations
|
||||
// that could possibly be reused within the specified (transport scope) (if they still
|
||||
// exist, which is not guaranteed).
|
||||
//
|
||||
// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if
|
||||
// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
|
||||
// up variants of the blob which have the same uncompressed digest.
|
||||
//
|
||||
// The CompressorName fields in returned data must never be UnknownCompression.
|
||||
func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
|
||||
return sqc.candidateLocations(transport, scope, digest, canSubstitute, true)
|
||||
}
|
||||
|
||||
func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 {
|
||||
var uncompressedDigest digest.Digest // = ""
|
||||
res, err := transaction(sqc, func(tx *sql.Tx) ([]prioritize.CandidateWithTime, error) {
|
||||
res := []prioritize.CandidateWithTime{}
|
||||
res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, requireCompressionInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if canSubstitute {
|
||||
uncompressedDigest, err = sqc.uncompressedDigest(tx, primaryDigest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries.
|
||||
// (In the extreme, we could turn _everything_ this function does into a single query.
|
||||
// And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.)
|
||||
// For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations.
|
||||
rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("querying for other digests: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
for rows.Next() {
|
||||
var otherDigestString string
|
||||
if err := rows.Scan(&otherDigestString); err != nil {
|
||||
return nil, fmt.Errorf("scanning other digest: %w", err)
|
||||
}
|
||||
otherDigest, err := digest.Parse(otherDigestString)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if otherDigest != primaryDigest && otherDigest != uncompressedDigest {
|
||||
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, requireCompressionInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, fmt.Errorf("iterating through other digests: %w", err)
|
||||
}
|
||||
|
||||
if uncompressedDigest != primaryDigest {
|
||||
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, requireCompressionInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return res, nil
|
||||
})
|
||||
if err != nil {
|
||||
return []blobinfocache.BICReplacementCandidate2{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
|
||||
}
|
||||
return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
|
||||
|
||||
}
|
||||
|
||||
// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
|
||||
// within the specified (transport scope) (if they still exist, which is not guaranteed).
|
||||
//
|
||||
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
|
||||
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
|
||||
// uncompressed digest.
|
||||
func (sqc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
|
||||
return blobinfocache.CandidateLocationsFromV2(sqc.candidateLocations(transport, scope, digest, canSubstitute, false))
|
||||
}
|
||||
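For orientation, here is a small illustrative sketch (not part of the vendored commit) of how a copy pipeline might drive the public v1 types.BlobInfoCache interface that the SQLite cache above implements; the transport, scope, digests and location values are assumed inputs, and only methods that appear in the code above are used.

// Sketch only; transport, scope, digests and location are assumed inputs.
package main

import (
	"fmt"

	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
)

func reuseCandidates(cache types.BlobInfoCache, transport types.ImageTransport,
	scope types.BICTransportScope, compressed, uncompressed digest.Digest,
	location types.BICLocationReference) {
	// Record only locally verified facts; see the WARNING comments above.
	cache.RecordDigestUncompressedPair(compressed, uncompressed)
	cache.RecordKnownLocation(transport, scope, compressed, location)

	// canSubstitute=true also returns variants that share the same uncompressed digest.
	for _, c := range cache.CandidateLocations(transport, scope, uncompressed, true) {
		fmt.Printf("candidate %s at %q\n", c.Digest, c.Location.Opaque)
	}
}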
 vendor/github.com/containers/image/v5/pkg/docker/config/config.go | 5 (generated, vendored)

@@ -519,11 +519,12 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (authPath, bool,
 		if sys.LegacyFormatAuthFilePath != "" {
 			return authPath{path: sys.LegacyFormatAuthFilePath, legacyFormat: true}, true, nil
 		}
-		if sys.RootForImplicitAbsolutePaths != "" {
+		// Note: RootForImplicitAbsolutePaths should not affect paths starting with $HOME
+		if sys.RootForImplicitAbsolutePaths != "" && goOS == "linux" {
 			return newAuthPathDefault(filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()))), false, nil
 		}
 	}
-	if goOS == "windows" || goOS == "darwin" {
+	if goOS != "linux" {
 		return newAuthPathDefault(filepath.Join(homedir.Get(), nonLinuxAuthFilePath)), false, nil
 	}
 
 vendor/github.com/containers/image/v5/signature/sigstore/copied.go | 2 (generated, vendored)

@@ -10,9 +10,9 @@ import (
 	"errors"
 	"fmt"
 
+	"github.com/secure-systems-lab/go-securesystemslib/encrypted"
 	"github.com/sigstore/sigstore/pkg/cryptoutils"
 	"github.com/sigstore/sigstore/pkg/signature"
-	"github.com/theupdateframework/go-tuf/encrypted"
 )
 
 // The following code was copied from github.com/sigstore.
 vendor/github.com/containers/image/v5/version/version.go | 2 (generated, vendored)

@@ -6,7 +6,7 @@ const (
 	// VersionMajor is for an API incompatible changes
 	VersionMajor = 5
 	// VersionMinor is for functionality in a backwards-compatible manner
-	VersionMinor = 27
+	VersionMinor = 28
 	// VersionPatch is for backwards-compatible bug fixes
 	VersionPatch = 0
 
 vendor/github.com/containers/ocicrypt/.golangci.yml | 16 (generated, vendored)

@@ -13,12 +13,12 @@ linters:
 
 linters-settings:
   depguard:
-    list-type: denylist
-    include-go-root: true
-    packages:
-      # use "io" or "os" instead
-      # https://go.dev/doc/go1.16#ioutil
-      - io/ioutil
+    rules:
+      main:
+        files:
+          - $all
+        deny:
+          - pkg: "io/ioutil"
 
   revive:
     severity: error
@@ -29,3 +29,7 @@ linters-settings:
 
       - name: error-strings
         disabled: false
+
+  staticcheck:
+    # Suppress reports of deprecated packages
+    checks: ["-SA1019"]
 vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md | 2 (generated, vendored)

@@ -1,3 +1,3 @@
 ## The OCIcrypt Library Project Community Code of Conduct
 
-The OCIcrypt Library project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/master/CODE-OF-CONDUCT.md).
+The OCIcrypt Library project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).
 vendor/github.com/containers/ocicrypt/SECURITY.md | 2 (generated, vendored)

@@ -1,3 +1,3 @@
 ## Security and Disclosure Information Policy for the OCIcrypt Library Project
 
-The OCIcrypt Library Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects.
+The OCIcrypt Library Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects.
 vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go | 2 (generated, vendored)

@@ -102,7 +102,7 @@ func GetDefaultModuleDirectories() []string {
 		"/usr/lib/softhsm/", // Debian,Ubuntu
 	}
 
-	// Debian directory: /usr/lib/(x86_64|aarch64|arm|powerpc64le|s390x)-linux-gnu/
+	// Debian directory: /usr/lib/(x86_64|aarch64|arm|powerpc64le|riscv64|s390x)-linux-gnu/
 	hosttype, ostype, q := getHostAndOsType()
 	if len(hosttype) > 0 {
 		dir := fmt.Sprintf("/usr/lib/%s-%s-%s/", hosttype, ostype, q)
 vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go | 2 (generated, vendored)

@@ -105,6 +105,8 @@ func getHostAndOsType() (string, string, string) {
 		ht = "x86_64"
 	case "ppc64le":
 		ht = "powerpc64le"
+	case "riscv64":
+		ht = "riscv64"
 	case "s390x":
 		ht = "s390x"
 	}
 vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go | 2 (generated, vendored)

@@ -24,7 +24,7 @@ import (
 	"github.com/containers/ocicrypt/config"
 	"github.com/containers/ocicrypt/keywrap"
 	"github.com/containers/ocicrypt/utils"
-	jose "gopkg.in/square/go-jose.v2"
+	"github.com/go-jose/go-jose/v3"
 )
 
 type jweKeyWrapper struct {
 vendor/github.com/containers/ocicrypt/utils/utils.go | 7 (generated, vendored)

@@ -26,14 +26,13 @@ import (
 	"strings"
 
 	"github.com/containers/ocicrypt/crypto/pkcs11"
-
+	"github.com/go-jose/go-jose/v3"
 	"golang.org/x/crypto/openpgp"
-	json "gopkg.in/square/go-jose.v2"
 )
 
 // parseJWKPrivateKey parses the input byte array as a JWK and makes sure it's a private key
 func parseJWKPrivateKey(privKey []byte, prefix string) (interface{}, error) {
-	jwk := json.JSONWebKey{}
+	jwk := jose.JSONWebKey{}
 	err := jwk.UnmarshalJSON(privKey)
 	if err != nil {
 		return nil, fmt.Errorf("%s: Could not parse input as JWK: %w", prefix, err)
@@ -46,7 +45,7 @@ func parseJWKPrivateKey(privKey []byte, prefix string) (interface{}, error) {
 
 // parseJWKPublicKey parses the input byte array as a JWK
 func parseJWKPublicKey(privKey []byte, prefix string) (interface{}, error) {
-	jwk := json.JSONWebKey{}
+	jwk := jose.JSONWebKey{}
 	err := jwk.UnmarshalJSON(privKey)
 	if err != nil {
 		return nil, fmt.Errorf("%s: Could not parse input as JWK: %w", prefix, err)
 vendor/github.com/containers/storage/pkg/archive/changes_other.go | 5 (generated, vendored)

@@ -92,7 +92,10 @@ func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInf
 			return err
 		}
 
-		if s.Dev() != sourceStat.Dev() {
+		// Don't cross mount points. This ignores file mounts to avoid
+		// generating a diff which deletes all files following the
+		// mount.
+		if s.Dev() != sourceStat.Dev() && s.IsDir() {
 			return filepath.SkipDir
 		}
 
 vendor/github.com/containers/storage/pkg/chunked/internal/compression.go | 101 (generated, vendored)

@@ -8,6 +8,7 @@ import (
 	"archive/tar"
 	"bytes"
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"io"
 	"time"
@@ -99,7 +100,7 @@ const (
 	// FooterSizeSupported is the footer size supported by this implementation.
 	// Newer versions of the image format might increase this value, so reject
 	// any version that is not supported.
-	FooterSizeSupported = 56
+	FooterSizeSupported = 64
 )
 
 var (
@@ -108,7 +109,7 @@ var (
 	// https://tools.ietf.org/html/rfc8478#section-3.1.2
 	skippableFrameMagic = []byte{0x50, 0x2a, 0x4d, 0x18}
 
-	ZstdChunkedFrameMagic = []byte{0x47, 0x6e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78}
+	ZstdChunkedFrameMagic = []byte{0x47, 0x4e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78}
 )
 
 func appendZstdSkippableFrame(dest io.Writer, data []byte) error {
@@ -183,13 +184,19 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
 		return err
 	}
 
-	// Store the offset to the manifest and its size in LE order
-	manifestDataLE := make([]byte, FooterSizeSupported)
-	binary.LittleEndian.PutUint64(manifestDataLE, manifestOffset)
-	binary.LittleEndian.PutUint64(manifestDataLE[8*1:], uint64(len(compressedManifest)))
-	binary.LittleEndian.PutUint64(manifestDataLE[8*2:], uint64(len(manifest)))
-	binary.LittleEndian.PutUint64(manifestDataLE[8*3:], uint64(ManifestTypeCRFS))
-	copy(manifestDataLE[8*4:], ZstdChunkedFrameMagic)
+	footer := ZstdChunkedFooterData{
+		ManifestType:               uint64(ManifestTypeCRFS),
+		Offset:                     manifestOffset,
+		LengthCompressed:           uint64(len(compressedManifest)),
+		LengthUncompressed:         uint64(len(manifest)),
+		ChecksumAnnotation:         "", // unused
+		OffsetTarSplit:             uint64(tarSplitOffset),
+		LengthCompressedTarSplit:   uint64(len(tarSplitData.Data)),
+		LengthUncompressedTarSplit: uint64(tarSplitData.UncompressedSize),
+		ChecksumAnnotationTarSplit: "", // unused
+	}
+
+	manifestDataLE := footerDataToBlob(footer)
 
 	return appendZstdSkippableFrame(dest, manifestDataLE)
 }
@@ -198,3 +205,79 @@ func ZstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) {
 	el := zstd.EncoderLevelFromZstd(level)
 	return zstd.NewWriter(dest, zstd.WithEncoderLevel(el))
 }
+
+// ZstdChunkedFooterData contains all the data stored in the zstd:chunked footer.
+type ZstdChunkedFooterData struct {
+	ManifestType uint64
+
+	Offset             uint64
+	LengthCompressed   uint64
+	LengthUncompressed uint64
+	ChecksumAnnotation string // Only used when reading a layer, not when creating it
+
+	OffsetTarSplit             uint64
+	LengthCompressedTarSplit   uint64
+	LengthUncompressedTarSplit uint64
+	ChecksumAnnotationTarSplit string // Only used when reading a layer, not when creating it
+}
+
+func footerDataToBlob(footer ZstdChunkedFooterData) []byte {
+	// Store the offset to the manifest and its size in LE order
+	manifestDataLE := make([]byte, FooterSizeSupported)
+	binary.LittleEndian.PutUint64(manifestDataLE[8*0:], footer.Offset)
+	binary.LittleEndian.PutUint64(manifestDataLE[8*1:], footer.LengthCompressed)
+	binary.LittleEndian.PutUint64(manifestDataLE[8*2:], footer.LengthUncompressed)
+	binary.LittleEndian.PutUint64(manifestDataLE[8*3:], footer.ManifestType)
+	binary.LittleEndian.PutUint64(manifestDataLE[8*4:], footer.OffsetTarSplit)
+	binary.LittleEndian.PutUint64(manifestDataLE[8*5:], footer.LengthCompressedTarSplit)
+	binary.LittleEndian.PutUint64(manifestDataLE[8*6:], footer.LengthUncompressedTarSplit)
+	copy(manifestDataLE[8*7:], ZstdChunkedFrameMagic)
+
+	return manifestDataLE
+}
+
+// ReadFooterDataFromAnnotations reads the zstd:chunked footer data from the given annotations.
+func ReadFooterDataFromAnnotations(annotations map[string]string) (ZstdChunkedFooterData, error) {
+	var footerData ZstdChunkedFooterData
+
+	footerData.ChecksumAnnotation = annotations[ManifestChecksumKey]
+	if footerData.ChecksumAnnotation == "" {
+		return footerData, fmt.Errorf("manifest checksum annotation %q not found", ManifestChecksumKey)
+	}
+
+	offsetMetadata := annotations[ManifestInfoKey]
+
+	if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &footerData.Offset, &footerData.LengthCompressed, &footerData.LengthUncompressed, &footerData.ManifestType); err != nil {
+		return footerData, err
+	}
+
+	if tarSplitInfoKeyAnnotation, found := annotations[TarSplitInfoKey]; found {
+		if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &footerData.OffsetTarSplit, &footerData.LengthCompressedTarSplit, &footerData.LengthUncompressedTarSplit); err != nil {
+			return footerData, err
+		}
+		footerData.ChecksumAnnotationTarSplit = annotations[TarSplitChecksumKey]
+	}
+	return footerData, nil
+}
+
+// ReadFooterDataFromBlob reads the zstd:chunked footer from the binary buffer.
+func ReadFooterDataFromBlob(footer []byte) (ZstdChunkedFooterData, error) {
+	var footerData ZstdChunkedFooterData
+
+	if len(footer) < FooterSizeSupported {
+		return footerData, errors.New("blob too small")
+	}
+	footerData.Offset = binary.LittleEndian.Uint64(footer[0:8])
+	footerData.LengthCompressed = binary.LittleEndian.Uint64(footer[8:16])
+	footerData.LengthUncompressed = binary.LittleEndian.Uint64(footer[16:24])
+	footerData.ManifestType = binary.LittleEndian.Uint64(footer[24:32])
+	footerData.OffsetTarSplit = binary.LittleEndian.Uint64(footer[32:40])
+	footerData.LengthCompressedTarSplit = binary.LittleEndian.Uint64(footer[40:48])
+	footerData.LengthUncompressedTarSplit = binary.LittleEndian.Uint64(footer[48:56])
+
+	// the magic number is stored in the last 8 bytes
+	if !bytes.Equal(ZstdChunkedFrameMagic, footer[len(footer)-len(ZstdChunkedFrameMagic):]) {
+		return footerData, errors.New("invalid magic number")
+	}
+	return footerData, nil
+}
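The new footer grows from 56 to 64 bytes: eight little-endian 8-byte fields, the last of which is the "GNUlInUx" frame magic. The following standalone sketch (not part of the vendored code) builds and checks such a blob with only the standard library, mirroring the layout that footerDataToBlob writes; the numeric values are made-up placeholders.

// Standalone sketch of the 64-byte zstd:chunked footer layout; values are placeholders.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

var magic = []byte{0x47, 0x4e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78} // "GNUlInUx"

func main() {
	footer := make([]byte, 64)
	binary.LittleEndian.PutUint64(footer[0:], 12345) // Offset of the manifest
	binary.LittleEndian.PutUint64(footer[8:], 200)   // LengthCompressed
	binary.LittleEndian.PutUint64(footer[16:], 900)  // LengthUncompressed
	binary.LittleEndian.PutUint64(footer[24:], 1)    // ManifestType (CRFS)
	binary.LittleEndian.PutUint64(footer[32:], 0)    // OffsetTarSplit
	binary.LittleEndian.PutUint64(footer[40:], 0)    // LengthCompressedTarSplit
	binary.LittleEndian.PutUint64(footer[48:], 0)    // LengthUncompressedTarSplit
	copy(footer[56:], magic)

	// Parsing reverses the same layout and verifies the trailing magic,
	// as ReadFooterDataFromBlob does in the hunk above.
	if !bytes.Equal(footer[56:], magic) {
		panic("invalid magic number")
	}
	fmt.Println("manifest offset:", binary.LittleEndian.Uint64(footer[0:8]))
}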
 vendor/github.com/containers/storage/pkg/lockfile/lastwrite.go | 82 (generated, vendored, new file)

@@ -0,0 +1,82 @@
package lockfile

import (
	"bytes"
	cryptorand "crypto/rand"
	"encoding/binary"
	"os"
	"sync/atomic"
	"time"
)

// LastWrite is an opaque identifier of the last write to some *LockFile.
// It can be used by users of a *LockFile to determine if the lock indicates changes
// since the last check.
//
// Never construct a LastWrite manually; only accept it from *LockFile methods, and pass it back.
type LastWrite struct {
	// Never modify fields of a LastWrite object; it has value semantics.
	state []byte // Contents of the lock file.
}

var lastWriterIDCounter uint64 // Private state for newLastWriterID

const lastWriterIDSize = 64 // This must be the same as len(stringid.GenerateRandomID)
// newLastWrite returns a new "last write" ID.
// The value must be different on every call, and also differ from values
// generated by other processes.
func newLastWrite() LastWrite {
	// The ID is (PID, time, per-process counter, random)
	// PID + time represents both a unique process across reboots,
	// and a specific time within the process; the per-process counter
	// is an extra safeguard for in-process concurrency.
	// The random part disambiguates across process namespaces
	// (where PID values might collide), serves as a general-purpose
	// extra safety, _and_ is used to pad the output to lastWriterIDSize,
	// because other versions of this code exist and they don't work
	// efficiently if the size of the value changes.
	pid := os.Getpid()
	tm := time.Now().UnixNano()
	counter := atomic.AddUint64(&lastWriterIDCounter, 1)

	res := make([]byte, lastWriterIDSize)
	binary.LittleEndian.PutUint64(res[0:8], uint64(tm))
	binary.LittleEndian.PutUint64(res[8:16], counter)
	binary.LittleEndian.PutUint32(res[16:20], uint32(pid))
	if n, err := cryptorand.Read(res[20:lastWriterIDSize]); err != nil || n != lastWriterIDSize-20 {
		panic(err) // This shouldn't happen
	}

	return LastWrite{
		state: res,
	}
}

// serialize returns bytes to write to the lock file to represent the specified write.
func (lw LastWrite) serialize() []byte {
	if lw.state == nil {
		panic("LastWrite.serialize on an uninitialized object")
	}
	return lw.state
}

// Equals returns true if lw matches other
func (lw LastWrite) equals(other LastWrite) bool {
	if lw.state == nil {
		panic("LastWrite.equals on an uninitialized object")
	}
	if other.state == nil {
		panic("LastWrite.equals with an uninitialized counterparty")
	}
	return bytes.Equal(lw.state, other.state)
}

// newLastWriteFromData returns a LastWrite corresponding to data that came from a previous LastWrite.serialize
func newLastWriteFromData(serialized []byte) LastWrite {
	if serialized == nil {
		panic("newLastWriteFromData with nil data")
	}
	return LastWrite{
		state: serialized,
	}
}
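As a usage note (not part of the vendored code), the LastWrite value above is meant to be carried by consumers of a lock-protected store and compared via ModifiedSince, as the lockfile documentation later in this diff describes. A minimal sketch of that pattern, assuming the exported containers/storage lockfile API (GetLockFile, RLock/Unlock, GetLastWrite, ModifiedSince) and an assumed lock path:

// Sketch of the LastWrite / ModifiedSince consumer pattern; the path is an assumed placeholder.
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/lockfile"
)

func main() {
	lock, err := lockfile.GetLockFile("/tmp/example.lock") // assumed path
	if err != nil {
		panic(err)
	}

	// Initial load: take the lock and remember the current LastWrite.
	lock.RLock()
	lastWrite, err := lock.GetLastWrite()
	lock.Unlock()
	if err != nil {
		panic(err)
	}

	// Later, on each access: reload only if someone recorded a write since.
	lock.RLock()
	defer lock.Unlock()
	lw2, modified, err := lock.ModifiedSince(lastWrite)
	if err != nil {
		panic(err)
	}
	if modified {
		fmt.Println("state changed on disk; reload it here")
	}
	lastWrite = lw2 // record the new value only after processing the update
}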
279
vendor/github.com/containers/storage/pkg/lockfile/lockfile.go
generated
vendored
279
vendor/github.com/containers/storage/pkg/lockfile/lockfile.go
generated
vendored
|
|
@ -2,6 +2,7 @@ package lockfile
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
|
@ -54,6 +55,38 @@ type Locker interface {
|
|||
AssertLockedForWriting()
|
||||
}
|
||||
|
||||
type lockType byte
|
||||
|
||||
const (
|
||||
readLock lockType = iota
|
||||
writeLock
|
||||
)
|
||||
|
||||
// LockFile represents a file lock where the file is used to cache an
|
||||
// identifier of the last party that made changes to whatever's being protected
|
||||
// by the lock.
|
||||
//
|
||||
// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead.
|
||||
type LockFile struct {
|
||||
// The following fields are only set when constructing *LockFile, and must never be modified afterwards.
|
||||
// They are safe to access without any other locking.
|
||||
file string
|
||||
ro bool
|
||||
|
||||
// rwMutex serializes concurrent reader-writer acquisitions in the same process space
|
||||
rwMutex *sync.RWMutex
|
||||
// stateMutex is used to synchronize concurrent accesses to the state below
|
||||
stateMutex *sync.Mutex
|
||||
counter int64
|
||||
lw LastWrite // A global value valid as of the last .Touch() or .Modified()
|
||||
lockType lockType
|
||||
locked bool
|
||||
// The following fields are only modified on transitions between counter == 0 / counter != 0.
|
||||
// Thus, they can be safely accessed by users _that currently hold the LockFile_ without locking.
|
||||
// In other cases, they need to be protected using stateMutex.
|
||||
fd fileHandle
|
||||
}
|
||||
|
||||
var (
|
||||
lockFiles map[string]*LockFile
|
||||
lockFilesLock sync.Mutex
|
||||
|
|
@ -91,6 +124,156 @@ func GetROLockfile(path string) (Locker, error) {
|
|||
return GetROLockFile(path)
|
||||
}
|
||||
|
||||
// Lock locks the lockfile as a writer. Panic if the lock is a read-only one.
|
||||
func (l *LockFile) Lock() {
|
||||
if l.ro {
|
||||
panic("can't take write lock on read-only lock file")
|
||||
} else {
|
||||
l.lock(writeLock)
|
||||
}
|
||||
}
|
||||
|
||||
// LockRead locks the lockfile as a reader.
|
||||
func (l *LockFile) RLock() {
|
||||
l.lock(readLock)
|
||||
}
|
||||
|
||||
// Unlock unlocks the lockfile.
|
||||
func (l *LockFile) Unlock() {
|
||||
l.stateMutex.Lock()
|
||||
if !l.locked {
|
||||
// Panic when unlocking an unlocked lock. That's a violation
|
||||
// of the lock semantics and will reveal such.
|
||||
panic("calling Unlock on unlocked lock")
|
||||
}
|
||||
l.counter--
|
||||
if l.counter < 0 {
|
||||
// Panic when the counter is negative. There is no way we can
|
||||
// recover from a corrupted lock and we need to protect the
|
||||
// storage from corruption.
|
||||
panic(fmt.Sprintf("lock %q has been unlocked too often", l.file))
|
||||
}
|
||||
if l.counter == 0 {
|
||||
// We should only release the lock when the counter is 0 to
|
||||
// avoid releasing read-locks too early; a given process may
|
||||
// acquire a read lock multiple times.
|
||||
l.locked = false
|
||||
// Close the file descriptor on the last unlock, releasing the
|
||||
// file lock.
|
||||
unlockAndCloseHandle(l.fd)
|
||||
}
|
||||
if l.lockType == readLock {
|
||||
l.rwMutex.RUnlock()
|
||||
} else {
|
||||
l.rwMutex.Unlock()
|
||||
}
|
||||
l.stateMutex.Unlock()
|
||||
}
|
||||
|
||||
func (l *LockFile) AssertLocked() {
|
||||
// DO NOT provide a variant that returns the value of l.locked.
|
||||
//
|
||||
// If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and
|
||||
// we can’t tell the difference.
|
||||
//
|
||||
// Hence, this “AssertLocked” method, which exists only for sanity checks.
|
||||
|
||||
// Don’t even bother with l.stateMutex: The caller is expected to hold the lock, and in that case l.locked is constant true
|
||||
// with no possible writers.
|
||||
// If the caller does not hold the lock, we are violating the locking/memory model anyway, and accessing the data
|
||||
// without the lock is more efficient for callers, and potentially more visible to lock analysers for incorrect callers.
|
||||
if !l.locked {
|
||||
panic("internal error: lock is not held by the expected owner")
|
||||
}
|
||||
}
|
||||
|
||||
func (l *LockFile) AssertLockedForWriting() {
|
||||
// DO NOT provide a variant that returns the current lock state.
|
||||
//
|
||||
// The same caveats as for AssertLocked apply equally.
|
||||
|
||||
l.AssertLocked()
|
||||
// Like AssertLocked, don’t even bother with l.stateMutex.
|
||||
if l.lockType == readLock {
|
||||
panic("internal error: lock is not held for writing")
|
||||
}
|
||||
}
|
||||
|
||||
// ModifiedSince checks if the lock has been changed since a provided LastWrite value,
|
||||
// and returns the one to record instead.
|
||||
//
|
||||
// If ModifiedSince reports no modification, the previous LastWrite value
|
||||
// is still valid and can continue to be used.
|
||||
//
|
||||
// If this function fails, the LastWriter value of the lock is indeterminate;
|
||||
// the caller should fail and keep using the previously-recorded LastWrite value,
|
||||
// so that it continues failing until the situation is resolved. Similarly,
|
||||
// it should only update the recorded LastWrite value after processing the update:
|
||||
//
|
||||
// lw2, modified, err := state.lock.ModifiedSince(state.lastWrite)
|
||||
// if err != nil { /* fail */ }
|
||||
// state.lastWrite = lw2
|
||||
// if modified {
|
||||
// if err := reload(); err != nil { /* fail */ }
|
||||
// state.lastWrite = lw2
|
||||
// }
|
||||
//
|
||||
// The caller must hold the lock (for reading or writing).
|
||||
func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) {
|
||||
l.AssertLocked()
|
||||
currentLW, err := l.GetLastWrite()
|
||||
if err != nil {
|
||||
return LastWrite{}, false, err
|
||||
}
|
||||
modified := !previous.equals(currentLW)
|
||||
return currentLW, modified, nil
|
||||
}
|
||||
|
||||
// Modified indicates if the lockfile has been updated since the last time it
|
||||
// was loaded.
|
||||
// NOTE: Unlike ModifiedSince, this returns true the first time it is called on a *LockFile.
|
||||
// Callers cannot, in general, rely on this, because that might have happened for some other
|
||||
// owner of the same *LockFile who created it previously.
|
||||
//
|
||||
// Deprecated: Use *LockFile.ModifiedSince.
|
||||
func (l *LockFile) Modified() (bool, error) {
|
||||
l.stateMutex.Lock()
|
||||
if !l.locked {
|
||||
panic("attempted to check last-writer in lockfile without locking it first")
|
||||
}
|
||||
defer l.stateMutex.Unlock()
|
||||
oldLW := l.lw
|
||||
// Note that this is called with stateMutex held; that’s fine because ModifiedSince doesn’t need to lock it.
|
||||
currentLW, modified, err := l.ModifiedSince(oldLW)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
l.lw = currentLW
|
||||
return modified, nil
|
||||
}
|
||||
|
||||
// Touch updates the lock file with to record that the current lock holder has modified the lock-protected data.
|
||||
//
|
||||
// Deprecated: Use *LockFile.RecordWrite.
|
||||
func (l *LockFile) Touch() error {
|
||||
lw, err := l.RecordWrite()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.stateMutex.Lock()
|
||||
if !l.locked || (l.lockType == readLock) {
|
||||
panic("attempted to update last-writer in lockfile without the write lock")
|
||||
}
|
||||
defer l.stateMutex.Unlock()
|
||||
l.lw = lw
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsReadWrite indicates if the lock file is a read-write lock.
|
||||
func (l *LockFile) IsReadWrite() bool {
|
||||
return !l.ro
|
||||
}
|
||||
|
||||
// getLockFile returns a *LockFile object, possibly (depending on the platform)
|
||||
// working inter-process, and associated with the specified path.
|
||||
//
|
||||
|
|
@ -128,3 +311,99 @@ func getLockfile(path string, ro bool) (*LockFile, error) {
|
|||
lockFiles[cleanPath] = lockFile
|
||||
return lockFile, nil
|
||||
}
|
||||
|
||||
// createLockFileForPath returns new *LockFile object, possibly (depending on the platform)
|
||||
// working inter-process and associated with the specified path.
|
||||
//
|
||||
// This function will be called at most once for each path value within a single process.
|
||||
//
|
||||
// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the
|
||||
// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
|
||||
// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation.
|
||||
//
|
||||
// WARNING:
|
||||
// - The lock may or MAY NOT be inter-process.
|
||||
// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
|
||||
// - Even if ro, the lock MAY be exclusive.
|
||||
func createLockFileForPath(path string, ro bool) (*LockFile, error) {
|
||||
// Check if we can open the lock.
|
||||
fd, err := openLock(path, ro)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
unlockAndCloseHandle(fd)
|
||||
|
||||
lType := writeLock
|
||||
if ro {
|
||||
lType = readLock
|
||||
}
|
||||
|
||||
return &LockFile{
|
||||
file: path,
|
||||
ro: ro,
|
||||
|
||||
rwMutex: &sync.RWMutex{},
|
||||
stateMutex: &sync.Mutex{},
|
||||
lw: newLastWrite(), // For compatibility, the first call of .Modified() will always report a change.
|
||||
lockType: lType,
|
||||
locked: false,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// openLock opens the file at path and returns the corresponding file
|
||||
// descriptor. The path is opened either read-only or read-write,
|
||||
// depending on the value of ro argument.
|
||||
//
|
||||
// openLock will create the file and its parent directories,
|
||||
// if necessary.
|
||||
func openLock(path string, ro bool) (fd fileHandle, err error) {
|
||||
flags := os.O_CREATE
|
||||
if ro {
|
||||
flags |= os.O_RDONLY
|
||||
} else {
|
||||
flags |= os.O_RDWR
|
||||
}
|
||||
fd, err = openHandle(path, flags)
|
||||
if err == nil {
|
||||
return fd, nil
|
||||
}
|
||||
|
||||
// the directory of the lockfile seems to be removed, try to create it
|
||||
if os.IsNotExist(err) {
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
|
||||
return fd, fmt.Errorf("creating lock file directory: %w", err)
|
||||
}
|
||||
|
||||
return openLock(path, ro)
|
||||
}
|
||||
|
||||
return fd, &os.PathError{Op: "open", Path: path, Err: err}
|
||||
}
|
||||
|
||||
// lock locks the lockfile via syscall based on the specified type and
|
||||
// command.
|
||||
func (l *LockFile) lock(lType lockType) {
|
||||
if lType == readLock {
|
||||
l.rwMutex.RLock()
|
||||
} else {
|
||||
l.rwMutex.Lock()
|
||||
}
|
||||
l.stateMutex.Lock()
|
||||
defer l.stateMutex.Unlock()
|
||||
if l.counter == 0 {
|
||||
// If we're the first reference on the lock, we need to open the file again.
|
||||
fd, err := openLock(l.file, l.ro)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
l.fd = fd
|
||||
|
||||
// Optimization: only use the (expensive) syscall when
|
||||
// the counter is 0. In this case, we're either the first
|
||||
// reader lock or a writer lock.
|
||||
lockHandle(l.fd, lType)
|
||||
}
|
||||
l.lockType = lType
|
||||
l.locked = true
|
||||
l.counter++
|
||||
}
|
||||
|
|
|
|||
387
vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
generated
vendored
387
vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
generated
vendored
|
|
@ -4,297 +4,13 @@
|
|||
package lockfile
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
cryptorand "crypto/rand"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/containers/storage/pkg/system"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// *LockFile represents a file lock where the file is used to cache an
|
||||
// identifier of the last party that made changes to whatever's being protected
|
||||
// by the lock.
|
||||
//
|
||||
// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead.
|
||||
type LockFile struct {
|
||||
// The following fields are only set when constructing *LockFile, and must never be modified afterwards.
|
||||
// They are safe to access without any other locking.
|
||||
file string
|
||||
ro bool
|
||||
|
||||
// rwMutex serializes concurrent reader-writer acquisitions in the same process space
|
||||
rwMutex *sync.RWMutex
|
||||
// stateMutex is used to synchronize concurrent accesses to the state below
|
||||
stateMutex *sync.Mutex
|
||||
counter int64
|
||||
lw LastWrite // A global value valid as of the last .Touch() or .Modified()
|
||||
locktype int16
|
||||
locked bool
|
||||
// The following fields are only modified on transitions between counter == 0 / counter != 0.
|
||||
// Thus, they can be safely accessed by users _that currently hold the LockFile_ without locking.
|
||||
// In other cases, they need to be protected using stateMutex.
|
||||
fd uintptr
|
||||
}
|
||||
|
||||
// LastWrite is an opaque identifier of the last write to some *LockFile.
|
||||
// It can be used by users of a *LockFile to determine if the lock indicates changes
|
||||
// since the last check.
|
||||
//
|
||||
// Never construct a LastWrite manually; only accept it from *LockFile methods, and pass it back.
|
||||
type LastWrite struct {
|
||||
// Never modify fields of a LastWrite object; it has value semantics.
|
||||
state []byte // Contents of the lock file.
|
||||
}
|
||||
|
||||
const lastWriterIDSize = 64 // This must be the same as len(stringid.GenerateRandomID)
|
||||
var lastWriterIDCounter uint64 // Private state for newLastWriterID
|
||||
|
||||
// newLastWrite returns a new "last write" ID.
|
||||
// The value must be different on every call, and also differ from values
|
||||
// generated by other processes.
|
||||
func newLastWrite() LastWrite {
|
||||
// The ID is (PID, time, per-process counter, random)
|
||||
// PID + time represents both a unique process across reboots,
|
||||
// and a specific time within the process; the per-process counter
|
||||
// is an extra safeguard for in-process concurrency.
|
||||
// The random part disambiguates across process namespaces
|
||||
// (where PID values might collide), serves as a general-purpose
|
||||
// extra safety, _and_ is used to pad the output to lastWriterIDSize,
|
||||
// because other versions of this code exist and they don't work
|
||||
// efficiently if the size of the value changes.
|
||||
pid := os.Getpid()
|
||||
tm := time.Now().UnixNano()
|
||||
counter := atomic.AddUint64(&lastWriterIDCounter, 1)
|
||||
|
||||
res := make([]byte, lastWriterIDSize)
|
||||
binary.LittleEndian.PutUint64(res[0:8], uint64(tm))
|
||||
binary.LittleEndian.PutUint64(res[8:16], counter)
|
||||
binary.LittleEndian.PutUint32(res[16:20], uint32(pid))
|
||||
if n, err := cryptorand.Read(res[20:lastWriterIDSize]); err != nil || n != lastWriterIDSize-20 {
|
||||
panic(err) // This shouldn't happen
|
||||
}
|
||||
|
||||
return LastWrite{
|
||||
state: res,
|
||||
}
|
||||
}
|
||||
|
||||
// newLastWriteFromData returns a LastWrite corresponding to data that came from a previous LastWrite.serialize
|
||||
func newLastWriteFromData(serialized []byte) LastWrite {
|
||||
if serialized == nil {
|
||||
panic("newLastWriteFromData with nil data")
|
||||
}
|
||||
return LastWrite{
|
||||
state: serialized,
|
||||
}
|
||||
}
|
||||
|
||||
// serialize returns bytes to write to the lock file to represent the specified write.
|
||||
func (lw LastWrite) serialize() []byte {
|
||||
if lw.state == nil {
|
||||
panic("LastWrite.serialize on an uninitialized object")
|
||||
}
|
||||
return lw.state
|
||||
}
|
||||
|
||||
// Equals returns true if lw matches other
|
||||
func (lw LastWrite) equals(other LastWrite) bool {
|
||||
if lw.state == nil {
|
||||
panic("LastWrite.equals on an uninitialized object")
|
||||
}
|
||||
if other.state == nil {
|
||||
panic("LastWrite.equals with an uninitialized counterparty")
|
||||
}
|
||||
return bytes.Equal(lw.state, other.state)
|
||||
}
|
||||
|
||||
// openLock opens the file at path and returns the corresponding file
|
||||
// descriptor. The path is opened either read-only or read-write,
|
||||
// depending on the value of ro argument.
|
||||
//
|
||||
// openLock will create the file and its parent directories,
|
||||
// if necessary.
|
||||
func openLock(path string, ro bool) (fd int, err error) {
|
||||
flags := unix.O_CLOEXEC | os.O_CREATE
|
||||
if ro {
|
||||
flags |= os.O_RDONLY
|
||||
} else {
|
||||
flags |= os.O_RDWR
|
||||
}
|
||||
fd, err = unix.Open(path, flags, 0o644)
|
||||
if err == nil {
|
||||
return fd, nil
|
||||
}
|
||||
|
||||
// the directory of the lockfile seems to be removed, try to create it
|
||||
if os.IsNotExist(err) {
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
|
||||
return fd, fmt.Errorf("creating lock file directory: %w", err)
|
||||
}
|
||||
|
||||
return openLock(path, ro)
|
||||
}
|
||||
|
||||
return fd, &os.PathError{Op: "open", Path: path, Err: err}
|
||||
}
|
||||
|
||||
// createLockFileForPath returns new *LockFile object, possibly (depending on the platform)
|
||||
// working inter-process and associated with the specified path.
|
||||
//
|
||||
// This function will be called at most once for each path value within a single process.
|
||||
//
|
||||
// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the
|
||||
// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
|
||||
// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation.
|
||||
//
|
||||
// WARNING:
|
||||
// - The lock may or MAY NOT be inter-process.
|
||||
// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
|
||||
// - Even if ro, the lock MAY be exclusive.
|
||||
func createLockFileForPath(path string, ro bool) (*LockFile, error) {
|
||||
// Check if we can open the lock.
|
||||
fd, err := openLock(path, ro)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
unix.Close(fd)
|
||||
|
||||
locktype := unix.F_WRLCK
|
||||
if ro {
|
||||
locktype = unix.F_RDLCK
|
||||
}
|
||||
return &LockFile{
|
||||
file: path,
|
||||
ro: ro,
|
||||
|
||||
rwMutex: &sync.RWMutex{},
|
||||
stateMutex: &sync.Mutex{},
|
||||
lw: newLastWrite(), // For compatibility, the first call of .Modified() will always report a change.
|
||||
locktype: int16(locktype),
|
||||
locked: false,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// lock locks the lockfile via FCTNL(2) based on the specified type and
|
||||
// command.
|
||||
func (l *LockFile) lock(lType int16) {
|
||||
lk := unix.Flock_t{
|
||||
Type: lType,
|
||||
Whence: int16(unix.SEEK_SET),
|
||||
Start: 0,
|
||||
Len: 0,
|
||||
}
|
||||
switch lType {
|
||||
case unix.F_RDLCK:
|
||||
l.rwMutex.RLock()
|
||||
case unix.F_WRLCK:
|
||||
l.rwMutex.Lock()
|
||||
default:
|
||||
panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", lType))
|
||||
}
|
||||
l.stateMutex.Lock()
|
||||
defer l.stateMutex.Unlock()
|
||||
if l.counter == 0 {
|
||||
// If we're the first reference on the lock, we need to open the file again.
|
||||
fd, err := openLock(l.file, l.ro)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
l.fd = uintptr(fd)
|
||||
|
||||
// Optimization: only use the (expensive) fcntl syscall when
|
||||
// the counter is 0. In this case, we're either the first
|
||||
// reader lock or a writer lock.
|
||||
for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
l.locktype = lType
|
||||
l.locked = true
|
||||
l.counter++
|
||||
}
|
||||
|
||||
// Lock locks the lockfile as a writer. Panic if the lock is a read-only one.
|
||||
func (l *LockFile) Lock() {
|
||||
if l.ro {
|
||||
panic("can't take write lock on read-only lock file")
|
||||
} else {
|
||||
l.lock(unix.F_WRLCK)
|
||||
}
|
||||
}
|
||||
|
||||
// LockRead locks the lockfile as a reader.
|
||||
func (l *LockFile) RLock() {
|
||||
l.lock(unix.F_RDLCK)
|
||||
}
|
||||
|
||||
// Unlock unlocks the lockfile.
|
||||
func (l *LockFile) Unlock() {
|
||||
l.stateMutex.Lock()
|
||||
if !l.locked {
|
||||
// Panic when unlocking an unlocked lock. That's a violation
|
||||
// of the lock semantics and will reveal such.
|
||||
panic("calling Unlock on unlocked lock")
|
||||
}
|
||||
l.counter--
|
||||
if l.counter < 0 {
|
||||
// Panic when the counter is negative. There is no way we can
|
||||
// recover from a corrupted lock and we need to protect the
|
||||
// storage from corruption.
|
||||
panic(fmt.Sprintf("lock %q has been unlocked too often", l.file))
|
||||
}
|
||||
if l.counter == 0 {
|
||||
// We should only release the lock when the counter is 0 to
|
||||
// avoid releasing read-locks too early; a given process may
|
||||
// acquire a read lock multiple times.
|
||||
l.locked = false
|
||||
// Close the file descriptor on the last unlock, releasing the
|
||||
// file lock.
|
||||
unix.Close(int(l.fd))
|
||||
}
|
||||
if l.locktype == unix.F_RDLCK {
|
||||
l.rwMutex.RUnlock()
|
||||
} else {
|
||||
l.rwMutex.Unlock()
|
||||
}
|
||||
l.stateMutex.Unlock()
|
||||
}
|
||||
|
||||
func (l *LockFile) AssertLocked() {
|
||||
// DO NOT provide a variant that returns the value of l.locked.
|
||||
//
|
||||
// If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and
|
||||
// we can’t tell the difference.
|
||||
//
|
||||
// Hence, this “AssertLocked” method, which exists only for sanity checks.
|
||||
|
||||
// Don’t even bother with l.stateMutex: The caller is expected to hold the lock, and in that case l.locked is constant true
|
||||
// with no possible writers.
|
||||
// If the caller does not hold the lock, we are violating the locking/memory model anyway, and accessing the data
|
||||
// without the lock is more efficient for callers, and potentially more visible to lock analysers for incorrect callers.
|
||||
if !l.locked {
|
||||
panic("internal error: lock is not held by the expected owner")
|
||||
}
|
||||
}
|
||||
|
||||
func (l *LockFile) AssertLockedForWriting() {
|
||||
// DO NOT provide a variant that returns the current lock state.
|
||||
//
|
||||
// The same caveats as for AssertLocked apply equally.
|
||||
|
||||
l.AssertLocked()
|
||||
// Like AssertLocked, don’t even bother with l.stateMutex.
|
||||
if l.locktype != unix.F_WRLCK {
|
||||
panic("internal error: lock is not held for writing")
|
||||
}
|
||||
}
|
||||
type fileHandle uintptr
|
||||
|
||||
// GetLastWrite returns a LastWrite value corresponding to current state of the lock.
|
||||
// This is typically called before (_not after_) loading the state when initializing a consumer
|
||||
|
|
@ -341,81 +57,6 @@ func (l *LockFile) RecordWrite() (LastWrite, error) {
|
|||
return lw, nil
|
||||
}
|
||||
|
||||
// ModifiedSince checks if the lock has been changed since a provided LastWrite value,
|
||||
// and returns the one to record instead.
|
||||
//
|
||||
// If ModifiedSince reports no modification, the previous LastWrite value
|
||||
// is still valid and can continue to be used.
|
||||
//
|
||||
// If this function fails, the LastWriter value of the lock is indeterminate;
|
||||
// the caller should fail and keep using the previously-recorded LastWrite value,
|
||||
// so that it continues failing until the situation is resolved. Similarly,
|
||||
// it should only update the recorded LastWrite value after processing the update:
|
||||
//
|
||||
// lw2, modified, err := state.lock.ModifiedSince(state.lastWrite)
|
||||
// if err != nil { /* fail */ }
|
||||
// state.lastWrite = lw2
|
||||
// if modified {
|
||||
// if err := reload(); err != nil { /* fail */ }
|
||||
// state.lastWrite = lw2
|
||||
// }
|
||||
//
|
||||
// The caller must hold the lock (for reading or writing).
|
||||
func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) {
|
||||
l.AssertLocked()
|
||||
currentLW, err := l.GetLastWrite()
|
||||
if err != nil {
|
||||
return LastWrite{}, false, err
|
||||
}
|
||||
modified := !previous.equals(currentLW)
|
||||
return currentLW, modified, nil
|
||||
}
|
||||
|
||||
// Touch updates the lock file with to record that the current lock holder has modified the lock-protected data.
|
||||
//
|
||||
// Deprecated: Use *LockFile.RecordWrite.
|
||||
func (l *LockFile) Touch() error {
|
||||
lw, err := l.RecordWrite()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.stateMutex.Lock()
|
||||
if !l.locked || (l.locktype != unix.F_WRLCK) {
|
||||
panic("attempted to update last-writer in lockfile without the write lock")
|
||||
}
|
||||
defer l.stateMutex.Unlock()
|
||||
l.lw = lw
|
||||
return nil
|
||||
}
|
||||
|
||||
// Modified indicates if the lockfile has been updated since the last time it
|
||||
// was loaded.
|
||||
// NOTE: Unlike ModifiedSince, this returns true the first time it is called on a *LockFile.
|
||||
// Callers cannot, in general, rely on this, because that might have happened for some other
|
||||
// owner of the same *LockFile who created it previously.
|
||||
//
|
||||
// Deprecated: Use *LockFile.ModifiedSince.
|
||||
func (l *LockFile) Modified() (bool, error) {
|
||||
l.stateMutex.Lock()
|
||||
if !l.locked {
|
||||
panic("attempted to check last-writer in lockfile without locking it first")
|
||||
}
|
||||
defer l.stateMutex.Unlock()
|
||||
oldLW := l.lw
|
||||
// Note that this is called with stateMutex held; that’s fine because ModifiedSince doesn’t need to lock it.
|
||||
currentLW, modified, err := l.ModifiedSince(oldLW)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
l.lw = currentLW
|
||||
return modified, nil
|
||||
}
|
||||
|
||||
// IsReadWriteLock indicates if the lock file is a read-write lock.
|
||||
func (l *LockFile) IsReadWrite() bool {
|
||||
return !l.ro
|
||||
}
|
||||
|
||||
// TouchedSince indicates if the lock file has been touched since the specified time
|
||||
func (l *LockFile) TouchedSince(when time.Time) bool {
|
||||
st, err := system.Fstat(int(l.fd))
|
||||
|
|
@ -426,3 +67,29 @@ func (l *LockFile) TouchedSince(when time.Time) bool {
|
|||
touched := time.Unix(mtim.Unix())
|
||||
return when.Before(touched)
|
||||
}
|
||||
|
||||
func openHandle(path string, mode int) (fileHandle, error) {
|
||||
mode |= unix.O_CLOEXEC
|
||||
fd, err := unix.Open(path, mode, 0o644)
|
||||
return fileHandle(fd), err
|
||||
}
|
||||
|
||||
func lockHandle(fd fileHandle, lType lockType) {
|
||||
fType := unix.F_RDLCK
|
||||
if lType != readLock {
|
||||
fType = unix.F_WRLCK
|
||||
}
|
||||
lk := unix.Flock_t{
|
||||
Type: int16(fType),
|
||||
Whence: int16(unix.SEEK_SET),
|
||||
Start: 0,
|
||||
Len: 0,
|
||||
}
|
||||
for unix.FcntlFlock(uintptr(fd), unix.F_SETLKW, &lk) != nil {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
func unlockAndCloseHandle(fd fileHandle) {
|
||||
unix.Close(int(fd))
|
||||
}
|
||||
|
|
|
|||
167
vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go
generated
vendored
167
vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go
generated
vendored
|
|
@ -5,81 +5,19 @@ package lockfile
|
|||
|
||||
import (
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// createLockFileForPath returns a *LockFile object, possibly (depending on the platform)
|
||||
// working inter-process and associated with the specified path.
|
||||
//
|
||||
// This function will be called at most once for each path value within a single process.
|
||||
//
|
||||
// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the
|
||||
// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation.
//
// WARNING:
// - The lock may or MAY NOT be inter-process.
// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
// - Even if ro, the lock MAY be exclusive.
func createLockFileForPath(path string, ro bool) (*LockFile, error) {
	return &LockFile{locked: false}, nil
}
const (
	reserved = 0
	allBytes = ^uint32(0)
)

// *LockFile represents a file lock where the file is used to cache an
// identifier of the last party that made changes to whatever's being protected
// by the lock.
//
// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead.
type LockFile struct {
	mu     sync.Mutex
	file   string
	locked bool
}
type fileHandle windows.Handle

// LastWrite is an opaque identifier of the last write to some *LockFile.
// It can be used by users of a *LockFile to determine if the lock indicates changes
// since the last check.
// A default-initialized LastWrite never matches any last write, i.e. it always indicates changes.
type LastWrite struct {
	// Nothing: The Windows “implementation” does not actually track writes.
}

func (l *LockFile) Lock() {
	l.mu.Lock()
	l.locked = true
}

func (l *LockFile) RLock() {
	l.mu.Lock()
	l.locked = true
}

func (l *LockFile) Unlock() {
	l.locked = false
	l.mu.Unlock()
}

func (l *LockFile) AssertLocked() {
	// DO NOT provide a variant that returns the value of l.locked.
	//
	// If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and
	// we can’t tell the difference.
	//
	// Hence, this “AssertLocked” method, which exists only for sanity checks.
	if !l.locked {
		panic("internal error: lock is not held by the expected owner")
	}
}

func (l *LockFile) AssertLockedForWriting() {
	// DO NOT provide a variant that returns the current lock state.
	//
	// The same caveats as for AssertLocked apply equally.
	l.AssertLocked() // The current implementation does not distinguish between read and write locks.
}

// GetLastWrite() returns a LastWrite value corresponding to current state of the lock.
// GetLastWrite returns a LastWrite value corresponding to current state of the lock.
// This is typically called before (_not after_) loading the state when initializing a consumer
// of the data protected by the lock.
// During the lifetime of the consumer, the consumer should usually call ModifiedSince instead.

@@ -87,7 +25,18 @@ func (l *LockFile) AssertLockedForWriting() {
// The caller must hold the lock (for reading or writing) before this function is called.
func (l *LockFile) GetLastWrite() (LastWrite, error) {
	l.AssertLocked()
	return LastWrite{}, nil
	contents := make([]byte, lastWriterIDSize)
	ol := new(windows.Overlapped)
	var n uint32
	err := windows.ReadFile(windows.Handle(l.fd), contents, &n, ol)
	if err != nil && err != windows.ERROR_HANDLE_EOF {
		return LastWrite{}, err
	}
	// It is important to handle the partial read case, because
	// the initial size of the lock file is zero, which is a valid
	// state (no writes yet)
	contents = contents[:n]
	return newLastWriteFromData(contents), nil
}

// RecordWrite updates the lock with a new LastWrite value, and returns the new value.

@@ -102,47 +51,22 @@ func (l *LockFile) GetLastWrite() (LastWrite, error) {
//
// The caller must hold the lock for writing.
func (l *LockFile) RecordWrite() (LastWrite, error) {
	return LastWrite{}, nil
}

// ModifiedSince checks if the lock has been changed since a provided LastWrite value,
// and returns the one to record instead.
//
// If ModifiedSince reports no modification, the previous LastWrite value
// is still valid and can continue to be used.
//
// If this function fails, the LastWriter value of the lock is indeterminate;
// the caller should fail and keep using the previously-recorded LastWrite value,
// so that it continues failing until the situation is resolved. Similarly,
// it should only update the recorded LastWrite value after processing the update:
//
//	lw2, modified, err := state.lock.ModifiedSince(state.lastWrite)
//	if err != nil { /* fail */ }
//	state.lastWrite = lw2
//	if modified {
//		if err := reload(); err != nil { /* fail */ }
//		state.lastWrite = lw2
//	}
//
// The caller must hold the lock (for reading or writing).
func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) {
	return LastWrite{}, false, nil
}

// Deprecated: Use *LockFile.ModifiedSince.
func (l *LockFile) Modified() (bool, error) {
	return false, nil
}

// Deprecated: Use *LockFile.RecordWrite.
func (l *LockFile) Touch() error {
	return nil
}

func (l *LockFile) IsReadWrite() bool {
	return false
	l.AssertLockedForWriting()
	lw := newLastWrite()
	lockContents := lw.serialize()
	ol := new(windows.Overlapped)
	var n uint32
	err := windows.WriteFile(windows.Handle(l.fd), lockContents, &n, ol)
	if err != nil {
		return LastWrite{}, err
	}
	if int(n) != len(lockContents) {
		return LastWrite{}, windows.ERROR_DISK_FULL
	}
	return lw, nil
}

// TouchedSince indicates if the lock file has been touched since the specified time
func (l *LockFile) TouchedSince(when time.Time) bool {
	stat, err := os.Stat(l.file)
	if err != nil {

@@ -150,3 +74,26 @@ func (l *LockFile) TouchedSince(when time.Time) bool {
	}
	return when.Before(stat.ModTime())
}

func openHandle(path string, mode int) (fileHandle, error) {
	mode |= windows.O_CLOEXEC
	fd, err := windows.Open(path, mode, windows.S_IWRITE)
	return fileHandle(fd), err
}

func lockHandle(fd fileHandle, lType lockType) {
	flags := 0
	if lType != readLock {
		flags = windows.LOCKFILE_EXCLUSIVE_LOCK
	}
	ol := new(windows.Overlapped)
	if err := windows.LockFileEx(windows.Handle(fd), uint32(flags), reserved, allBytes, allBytes, ol); err != nil {
		panic(err)
	}
}

func unlockAndCloseHandle(fd fileHandle) {
	ol := new(windows.Overlapped)
	windows.UnlockFileEx(windows.Handle(fd), reserved, allBytes, allBytes, ol)
	windows.Close(windows.Handle(fd))
}
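The ModifiedSince contract documented above is easiest to see in use. A minimal consumer sketch, not part of the diff, assuming the package is imported as github.com/containers/storage/pkg/lockfile and that appState and reload() are hypothetical stand-ins for the caller's own data:

package main

import (
	"github.com/containers/storage/pkg/lockfile"
)

// appState is a hypothetical consumer of data protected by a *LockFile,
// obtained earlier via GetLockFile or GetROLockFile as the comments instruct.
type appState struct {
	lock      *lockfile.LockFile
	lastWrite lockfile.LastWrite
}

func refresh(state *appState, reload func() error) error {
	state.lock.RLock() // shared lock; use Lock() before writing
	defer state.lock.Unlock()

	lw, modified, err := state.lock.ModifiedSince(state.lastWrite)
	if err != nil {
		// Keep the previously recorded LastWrite so the next call fails too,
		// until the situation is resolved (see the ModifiedSince comment).
		return err
	}
	if modified {
		if err := reload(); err != nil {
			return err
		}
		state.lastWrite = lw // record only after processing the update
	}
	return nil
}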
6  vendor/github.com/containers/storage/pkg/system/stat_unix.go  (generated, vendored)
@@ -7,6 +7,8 @@ import (
	"os"
	"strconv"
	"syscall"

	"golang.org/x/sys/unix"
)

// StatT type contains status of a file. It contains metadata

@@ -57,6 +59,10 @@ func (s StatT) Dev() uint64 {
	return s.dev
}

func (s StatT) IsDir() bool {
	return (s.mode & unix.S_IFDIR) != 0
}

// Stat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//
4  vendor/github.com/containers/storage/pkg/system/stat_windows.go  (generated, vendored)
@@ -48,6 +48,10 @@ func (s StatT) Dev() uint64 {
	return 0
}

func (s StatT) IsDir() bool {
	return s.Mode().IsDir()
}

// Stat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//
8  vendor/github.com/docker/docker-credential-helpers/client/client.go  (generated, vendored)
@@ -26,7 +26,7 @@ func isValidCredsMessage(msg string) error {

// Store uses an external program to save credentials.
func Store(program ProgramFunc, creds *credentials.Credentials) error {
	cmd := program("store")
	cmd := program(credentials.ActionStore)

	buffer := new(bytes.Buffer)
	if err := json.NewEncoder(buffer).Encode(creds); err != nil {

@@ -50,7 +50,7 @@ func Store(program ProgramFunc, creds *credentials.Credentials) error {

// Get executes an external program to get the credentials from a native store.
func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) {
	cmd := program("get")
	cmd := program(credentials.ActionGet)
	cmd.Input(strings.NewReader(serverURL))

	out, err := cmd.Output()

@@ -81,7 +81,7 @@ func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error

// Erase executes a program to remove the server credentials from the native store.
func Erase(program ProgramFunc, serverURL string) error {
	cmd := program("erase")
	cmd := program(credentials.ActionErase)
	cmd.Input(strings.NewReader(serverURL))
	out, err := cmd.Output()
	if err != nil {

@@ -99,7 +99,7 @@ func Erase(program ProgramFunc, serverURL string) error {

// List executes a program to list server credentials in the native store.
func List(program ProgramFunc) (map[string]string, error) {
	cmd := program("list")
	cmd := program(credentials.ActionList)
	cmd.Input(strings.NewReader("unused"))
	out, err := cmd.Output()
	if err != nil {
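The four wrappers above share one shape: build a command for a single credential-helper action, feed it input, decode the output. A hedged sketch of calling Get against an external helper binary; the helper name and registry URL here are placeholders, not anything this package mandates:

package main

import (
	"fmt"

	"github.com/docker/docker-credential-helpers/client"
)

func main() {
	// "docker-credential-pass" is only an example helper binary name.
	p := client.NewShellProgramFuncWithEnv("docker-credential-pass", nil)

	creds, err := client.Get(p, "https://index.docker.io/v1/")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("username:", creds.Username)
}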
13  vendor/github.com/docker/docker-credential-helpers/client/command.go  (generated, vendored)
@@ -1,11 +1,9 @@
package client

import (
	"fmt"
	"io"
	"os"

	exec "golang.org/x/sys/execabs"
	"os/exec"
)

// Program is an interface to execute external programs.

@@ -31,27 +29,26 @@ func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc

func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd {
	programCmd := exec.Command(commandName, args...)
	programCmd.Env = os.Environ()
	if env != nil {
		for k, v := range *env {
			programCmd.Env = append(programCmd.Env, fmt.Sprintf("%s=%s", k, v))
			programCmd.Env = append(programCmd.Environ(), k+"="+v)
		}
	}
	programCmd.Stderr = os.Stderr
	return programCmd
}

// Shell invokes shell commands to talk with a remote credentials helper.
// Shell invokes shell commands to talk with a remote credentials-helper.
type Shell struct {
	cmd *exec.Cmd
}

// Output returns responses from the remote credentials helper.
// Output returns responses from the remote credentials-helper.
func (s *Shell) Output() ([]byte, error) {
	return s.cmd.Output()
}

// Input sets the input to send to a remote credentials helper.
// Input sets the input to send to a remote credentials-helper.
func (s *Shell) Input(in io.Reader) {
	s.cmd.Stdin = in
}
69  vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go  (generated, vendored)
@@ -10,6 +10,20 @@ import (
	"strings"
)

// Action defines the name of an action (sub-command) supported by a
// credential-helper binary. It is an alias for "string", and mostly
// for convenience.
type Action = string

// List of actions (sub-commands) supported by credential-helper binaries.
const (
	ActionStore   Action = "store"
	ActionGet     Action = "get"
	ActionErase   Action = "erase"
	ActionList    Action = "list"
	ActionVersion Action = "version"
)

// Credentials holds the information shared between docker and the credentials store.
type Credentials struct {
	ServerURL string

@@ -43,42 +57,52 @@ func SetCredsLabel(label string) {
	CredsLabel = label
}

// Serve initializes the credentials helper and parses the action argument.
// Serve initializes the credentials-helper and parses the action argument.
// This function is designed to be called from a command line interface.
// It uses os.Args[1] as the key for the action.
// It uses os.Stdin as input and os.Stdout as output.
// This function terminates the program with os.Exit(1) if there is an error.
func Serve(helper Helper) {
	var err error
	if len(os.Args) != 2 {
		err = fmt.Errorf("Usage: %s <store|get|erase|list|version>", os.Args[0])
		_, _ = fmt.Fprintln(os.Stdout, usage())
		os.Exit(1)
	}

	if err == nil {
		err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout)
	switch os.Args[1] {
	case "--version", "-v":
		_ = PrintVersion(os.Stdout)
		os.Exit(0)
	case "--help", "-h":
		_, _ = fmt.Fprintln(os.Stdout, usage())
		os.Exit(0)
	}

	if err != nil {
		fmt.Fprintf(os.Stdout, "%v\n", err)
	if err := HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout); err != nil {
		_, _ = fmt.Fprintln(os.Stdout, err)
		os.Exit(1)
	}
}

// HandleCommand uses a helper and a key to run a credential action.
func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error {
	switch key {
	case "store":
func usage() string {
	return fmt.Sprintf("Usage: %s <store|get|erase|list|version>", Name)
}

// HandleCommand runs a helper to execute a credential action.
func HandleCommand(helper Helper, action Action, in io.Reader, out io.Writer) error {
	switch action {
	case ActionStore:
		return Store(helper, in)
	case "get":
	case ActionGet:
		return Get(helper, in, out)
	case "erase":
	case ActionErase:
		return Erase(helper, in)
	case "list":
	case ActionList:
		return List(helper, out)
	case "version":
	case ActionVersion:
		return PrintVersion(out)
	default:
		return fmt.Errorf("%s: unknown action: %s", Name, action)
	}
	return fmt.Errorf("Unknown credential action `%s`", key)
}

// Store uses a helper and an input reader to save credentials.

@@ -132,18 +156,17 @@ func Get(helper Helper, reader io.Reader, writer io.Writer) error {
	return err
}

	resp := Credentials{
	buffer.Reset()
	err = json.NewEncoder(buffer).Encode(Credentials{
		ServerURL: serverURL,
		Username:  username,
		Secret:    secret,
	}

	buffer.Reset()
	if err := json.NewEncoder(buffer).Encode(resp); err != nil {
	})
	if err != nil {
		return err
	}

	fmt.Fprint(writer, buffer.String())
	_, _ = fmt.Fprint(writer, buffer.String())
	return nil
}

@@ -181,6 +204,6 @@ func List(helper Helper, writer io.Writer) error {

// PrintVersion outputs the current version.
func PrintVersion(writer io.Writer) error {
	fmt.Fprintf(writer, "%s (%s) %s\n", Name, Package, Version)
	_, _ = fmt.Fprintf(writer, "%s (%s) %s\n", Name, Package, Version)
	return nil
}
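The rewritten Serve/HandleCommand pair above is what a helper binary's entry point calls into. A sketch of a trivial helper wired through Serve; the in-memory Helper implementation and its method set are assumptions for illustration (the Helper interface itself is defined elsewhere in this package, not shown in the diff):

package main

import (
	"github.com/docker/docker-credential-helpers/credentials"
)

// memoryHelper is a toy credential store kept in a map (illustrative only).
type memoryHelper struct{ creds map[string]*credentials.Credentials }

func (m *memoryHelper) Add(c *credentials.Credentials) error {
	m.creds[c.ServerURL] = c
	return nil
}

func (m *memoryHelper) Delete(serverURL string) error {
	delete(m.creds, serverURL)
	return nil
}

func (m *memoryHelper) Get(serverURL string) (string, string, error) {
	if c, ok := m.creds[serverURL]; ok {
		return c.Username, c.Secret, nil
	}
	return "", "", credentials.NewErrCredentialsNotFound()
}

func (m *memoryHelper) List() (map[string]string, error) {
	out := map[string]string{}
	for url, c := range m.creds {
		out[url] = c.Username
	}
	return out, nil
}

func main() {
	// Serve reads the action from os.Args[1] and dispatches via HandleCommand.
	credentials.Serve(&memoryHelper{creds: map[string]*credentials.Credentials{}})
}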
31  vendor/github.com/docker/docker-credential-helpers/credentials/error.go  (generated, vendored)
@@ -1,5 +1,7 @@
package credentials

import "errors"

const (
	// ErrCredentialsNotFound standardizes the not found error, so every helper returns
	// the same message and docker can handle it properly.

@@ -21,6 +23,11 @@ func (errCredentialsNotFound) Error() string {
	return errCredentialsNotFoundMessage
}

// NotFound implements the [ErrNotFound][errdefs.ErrNotFound] interface.
//
// [errdefs.ErrNotFound]: https://pkg.go.dev/github.com/docker/docker@v24.0.1+incompatible/errdefs#ErrNotFound
func (errCredentialsNotFound) NotFound() {}

// NewErrCredentialsNotFound creates a new error
// for when the credentials are not in the store.
func NewErrCredentialsNotFound() error {

@@ -30,8 +37,8 @@ func NewErrCredentialsNotFound() error {
// IsErrCredentialsNotFound returns true if the error
// was caused by not having a set of credentials in a store.
func IsErrCredentialsNotFound(err error) bool {
	_, ok := err.(errCredentialsNotFound)
	return ok
	var target errCredentialsNotFound
	return errors.As(err, &target)
}

// IsErrCredentialsNotFoundMessage returns true if the error

@@ -53,6 +60,12 @@ func (errCredentialsMissingServerURL) Error() string {
	return errCredentialsMissingServerURLMessage
}

// InvalidParameter implements the [ErrInvalidParameter][errdefs.ErrInvalidParameter]
// interface.
//
// [errdefs.ErrInvalidParameter]: https://pkg.go.dev/github.com/docker/docker@v24.0.1+incompatible/errdefs#ErrInvalidParameter
func (errCredentialsMissingServerURL) InvalidParameter() {}

// errCredentialsMissingUsername represents an error raised
// when the credentials object has no username or when no
// username is provided to a credentials operation requiring

@@ -63,6 +76,12 @@ func (errCredentialsMissingUsername) Error() string {
	return errCredentialsMissingUsernameMessage
}

// InvalidParameter implements the [ErrInvalidParameter][errdefs.ErrInvalidParameter]
// interface.
//
// [errdefs.ErrInvalidParameter]: https://pkg.go.dev/github.com/docker/docker@v24.0.1+incompatible/errdefs#ErrInvalidParameter
func (errCredentialsMissingUsername) InvalidParameter() {}

// NewErrCredentialsMissingServerURL creates a new error for
// errCredentialsMissingServerURL.
func NewErrCredentialsMissingServerURL() error {

@@ -78,8 +97,8 @@ func NewErrCredentialsMissingUsername() error {
// IsCredentialsMissingServerURL returns true if the error
// was an errCredentialsMissingServerURL.
func IsCredentialsMissingServerURL(err error) bool {
	_, ok := err.(errCredentialsMissingServerURL)
	return ok
	var target errCredentialsMissingServerURL
	return errors.As(err, &target)
}

// IsCredentialsMissingServerURLMessage checks for an

@@ -91,8 +110,8 @@ func IsCredentialsMissingServerURLMessage(err string) bool {
// IsCredentialsMissingUsername returns true if the error
// was an errCredentialsMissingUsername.
func IsCredentialsMissingUsername(err error) bool {
	_, ok := err.(errCredentialsMissingUsername)
	return ok
	var target errCredentialsMissingUsername
	return errors.As(err, &target)
}

// IsCredentialsMissingUsernameMessage checks for an
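Switching these predicates from a type assertion to errors.As means they now also match wrapped errors. A small sketch of the behavioral difference; the wrapping call site is hypothetical:

package main

import (
	"fmt"

	"github.com/docker/docker-credential-helpers/credentials"
)

func main() {
	// A caller somewhere wraps the helper's error with extra context.
	err := fmt.Errorf("looking up registry credentials: %w",
		credentials.NewErrCredentialsNotFound())

	// With errors.As (new code) this prints true; the old type assertion
	// against the unwrapped value would have reported false here.
	fmt.Println(credentials.IsErrCredentialsNotFound(err))
}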
@@ -1,8 +1,2 @@
*~
.*.swp
*.out
*.test
*.pem
*.cov
jose-util/jose-util
jose-util.t.err
53  vendor/github.com/go-jose/go-jose/v3/.golangci.yml  (generated, vendored, normal file)
@ -0,0 +1,53 @@
|
|||
# https://github.com/golangci/golangci-lint
|
||||
|
||||
run:
|
||||
skip-files:
|
||||
- doc_test.go
|
||||
modules-download-mode: readonly
|
||||
|
||||
linters:
|
||||
enable-all: true
|
||||
disable:
|
||||
- gochecknoglobals
|
||||
- goconst
|
||||
- lll
|
||||
- maligned
|
||||
- nakedret
|
||||
- scopelint
|
||||
- unparam
|
||||
- funlen # added in 1.18 (requires go-jose changes before it can be enabled)
|
||||
|
||||
linters-settings:
|
||||
gocyclo:
|
||||
min-complexity: 35
|
||||
|
||||
issues:
|
||||
exclude-rules:
|
||||
- text: "don't use ALL_CAPS in Go names"
|
||||
linters:
|
||||
- golint
|
||||
- text: "hardcoded credentials"
|
||||
linters:
|
||||
- gosec
|
||||
- text: "weak cryptographic primitive"
|
||||
linters:
|
||||
- gosec
|
||||
- path: json/
|
||||
linters:
|
||||
- dupl
|
||||
- errcheck
|
||||
- gocritic
|
||||
- gocyclo
|
||||
- golint
|
||||
- govet
|
||||
- ineffassign
|
||||
- staticcheck
|
||||
- structcheck
|
||||
- stylecheck
|
||||
- unused
|
||||
- path: _test\.go
|
||||
linters:
|
||||
- scopelint
|
||||
- path: jwk.go
|
||||
linters:
|
||||
- gocyclo
33  vendor/github.com/go-jose/go-jose/v3/.travis.yml  (generated, vendored, normal file)
@ -0,0 +1,33 @@
|
|||
language: go
|
||||
|
||||
matrix:
|
||||
fast_finish: true
|
||||
allow_failures:
|
||||
- go: tip
|
||||
|
||||
go:
|
||||
- "1.13.x"
|
||||
- "1.14.x"
|
||||
- tip
|
||||
|
||||
before_script:
|
||||
- export PATH=$HOME/.local/bin:$PATH
|
||||
|
||||
before_install:
|
||||
- go get -u github.com/mattn/goveralls github.com/wadey/gocovmerge
|
||||
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.18.0
|
||||
- pip install cram --user
|
||||
|
||||
script:
|
||||
- go test -v -covermode=count -coverprofile=profile.cov .
|
||||
- go test -v -covermode=count -coverprofile=cryptosigner/profile.cov ./cryptosigner
|
||||
- go test -v -covermode=count -coverprofile=cipher/profile.cov ./cipher
|
||||
- go test -v -covermode=count -coverprofile=jwt/profile.cov ./jwt
|
||||
- go test -v ./json # no coverage for forked encoding/json package
|
||||
- golangci-lint run
|
||||
- cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util
|
||||
- cd ..
|
||||
|
||||
after_success:
|
||||
- gocovmerge *.cov */*.cov > merged.coverprofile
|
||||
- goveralls -coverprofile merged.coverprofile -service=travis-ci
|
||||
|
|
@ -9,6 +9,7 @@ sure all tests pass by running `go test`, and format your code with `go fmt`.
|
|||
We also recommend using `golint` and `errcheck`.
|
||||
|
||||
Before your code can be accepted into the project you must also sign the
|
||||
[Individual Contributor License Agreement][1].
|
||||
Individual Contributor License Agreement. We use [cla-assistant.io][1] and you
|
||||
will be prompted to sign once a pull request is opened.
|
||||
|
||||
[1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1
|
||||
[1]: https://cla-assistant.io/
|
||||
|
|
@ -1,10 +1,10 @@
|
|||
# Go JOSE
|
||||
# Go JOSE
|
||||
|
||||
[](https://godoc.org/gopkg.in/square/go-jose.v1)
|
||||
[](https://godoc.org/gopkg.in/square/go-jose.v2)
|
||||
[](https://raw.githubusercontent.com/square/go-jose/master/LICENSE)
|
||||
[](https://travis-ci.org/square/go-jose)
|
||||
[](https://coveralls.io/r/square/go-jose)
|
||||
[](https://godoc.org/gopkg.in/go-jose/go-jose.v2)
|
||||
[](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt)
|
||||
[](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE)
|
||||
[](https://travis-ci.org/go-jose/go-jose)
|
||||
[](https://coveralls.io/r/go-jose/go-jose)
|
||||
|
||||
Package jose aims to provide an implementation of the Javascript Object Signing
|
||||
and Encryption set of standards. This includes support for JSON Web Encryption,
|
||||
|
|
@ -23,11 +23,11 @@ US maintained blocked list.
|
|||
The implementation follows the
|
||||
[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516),
|
||||
[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and
|
||||
[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519).
|
||||
[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications.
|
||||
Tables of supported algorithms are shown below. The library supports both
|
||||
the compact and full serialization formats, and has optional support for
|
||||
the compact and JWS/JWE JSON Serialization formats, and has optional support for
|
||||
multiple recipients. It also comes with a small command-line utility
|
||||
([`jose-util`](https://github.com/square/go-jose/tree/v2/jose-util))
|
||||
([`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util))
|
||||
for dealing with JOSE messages in a shell.
|
||||
|
||||
**Note**: We use a forked version of the `encoding/json` package from the Go
|
||||
|
|
@ -38,20 +38,24 @@ libraries in other languages.
|
|||
|
||||
### Versions
|
||||
|
||||
We use [gopkg.in](https://gopkg.in) for versioning.
|
||||
[Version 2](https://gopkg.in/go-jose/go-jose.v2)
|
||||
([branch](https://github.com/go-jose/go-jose/tree/v2),
|
||||
[doc](https://godoc.org/gopkg.in/go-jose/go-jose.v2)) is the current stable version:
|
||||
|
||||
[Version 2](https://gopkg.in/square/go-jose.v2)
|
||||
([branch](https://github.com/square/go-jose/tree/v2),
|
||||
[doc](https://godoc.org/gopkg.in/square/go-jose.v2)) is the current version:
|
||||
import "gopkg.in/go-jose/go-jose.v2"
|
||||
|
||||
import "gopkg.in/square/go-jose.v2"
|
||||
[Version 3](https://github.com/go-jose/go-jose)
|
||||
([branch](https://github.com/go-jose/go-jose/tree/master),
|
||||
[doc](https://godoc.org/github.com/go-jose/go-jose)) is the under development/unstable version (not released yet):
|
||||
|
||||
The old `v1` branch ([go-jose.v1](https://gopkg.in/square/go-jose.v1)) will
|
||||
still receive backported bug fixes and security fixes, but otherwise
|
||||
development is frozen. All new feature development takes place on the `v2`
|
||||
branch. Version 2 also contains additional sub-packages such as the
|
||||
[jwt](https://godoc.org/gopkg.in/square/go-jose.v2/jwt) implementation
|
||||
contributed by [@shaxbee](https://github.com/shaxbee).
|
||||
import "github.com/go-jose/go-jose/v3"
|
||||
|
||||
All new feature development takes place on the `master` branch, which we are
|
||||
preparing to release as version 3 soon. Version 2 will continue to receive
|
||||
critical bug and security fixes. Note that starting with version 3 we are
|
||||
using Go modules for versioning instead of `gopkg.in` as before. Version 3 also will require Go version 1.13 or higher.
|
||||
|
||||
Version 1 (on the `v1` branch) is frozen and not supported anymore.
|
||||
|
||||
### Supported algorithms
|
||||
|
||||
|
|
@ -84,7 +88,7 @@ standard where possible. The Godoc reference has a list of constants.
|
|||
Content encryption | Algorithm identifier(s)
|
||||
:------------------------- | :------------------------------
|
||||
AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512
|
||||
AES-GCM | A128GCM, A192GCM, A256GCM
|
||||
AES-GCM | A128GCM, A192GCM, A256GCM
|
||||
|
||||
Compression | Algorithm identifiers(s)
|
||||
:------------------------- | -------------------------------
|
||||
|
|
@ -101,18 +105,18 @@ allows attaching a key id.
|
|||
:------------------------- | -------------------------------
|
||||
RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey)
|
||||
ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey)
|
||||
EdDSA<sup>1</sup> | [ed25519.PublicKey](https://godoc.org/golang.org/x/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/golang.org/x/crypto/ed25519#PrivateKey)
|
||||
EdDSA<sup>1</sup> | [ed25519.PublicKey](https://godoc.org/pkg/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/pkg/crypto/ed25519#PrivateKey)
|
||||
AES, HMAC | []byte
|
||||
|
||||
<sup>1. Only available in version 2 of the package</sup>
|
||||
<sup>1. Only available in version 2 or later of the package</sup>
|
||||
|
||||
## Examples
|
||||
|
||||
[](https://godoc.org/gopkg.in/square/go-jose.v1)
|
||||
[](https://godoc.org/gopkg.in/square/go-jose.v2)
|
||||
[](https://godoc.org/gopkg.in/go-jose/go-jose.v2)
|
||||
[](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt)
|
||||
|
||||
Examples can be found in the Godoc
|
||||
reference for this package. The
|
||||
[`jose-util`](https://github.com/square/go-jose/tree/v2/jose-util)
|
||||
[`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util)
|
||||
subdirectory also contains a small command-line utility which might be useful
|
||||
as an example.
|
||||
as an example as well.
|
||||
|
|
@ -20,6 +20,7 @@ import (
|
|||
"crypto"
|
||||
"crypto/aes"
|
||||
"crypto/ecdsa"
|
||||
"crypto/ed25519"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha1"
|
||||
|
|
@ -28,9 +29,8 @@ import (
|
|||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"golang.org/x/crypto/ed25519"
|
||||
josecipher "gopkg.in/square/go-jose.v2/cipher"
|
||||
"gopkg.in/square/go-jose.v2/json"
|
||||
josecipher "github.com/go-jose/go-jose/v3/cipher"
|
||||
"github.com/go-jose/go-jose/v3/json"
|
||||
)
|
||||
|
||||
// A generic RSA-based encrypter/verifier
|
||||
|
|
@ -413,28 +413,28 @@ func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) {
|
|||
func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
|
||||
epk, err := headers.getEPK()
|
||||
if err != nil {
|
||||
return nil, errors.New("square/go-jose: invalid epk header")
|
||||
return nil, errors.New("go-jose/go-jose: invalid epk header")
|
||||
}
|
||||
if epk == nil {
|
||||
return nil, errors.New("square/go-jose: missing epk header")
|
||||
return nil, errors.New("go-jose/go-jose: missing epk header")
|
||||
}
|
||||
|
||||
publicKey, ok := epk.Key.(*ecdsa.PublicKey)
|
||||
if publicKey == nil || !ok {
|
||||
return nil, errors.New("square/go-jose: invalid epk header")
|
||||
return nil, errors.New("go-jose/go-jose: invalid epk header")
|
||||
}
|
||||
|
||||
if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
|
||||
return nil, errors.New("square/go-jose: invalid public key in epk header")
|
||||
return nil, errors.New("go-jose/go-jose: invalid public key in epk header")
|
||||
}
|
||||
|
||||
apuData, err := headers.getAPU()
|
||||
if err != nil {
|
||||
return nil, errors.New("square/go-jose: invalid apu header")
|
||||
return nil, errors.New("go-jose/go-jose: invalid apu header")
|
||||
}
|
||||
apvData, err := headers.getAPV()
|
||||
if err != nil {
|
||||
return nil, errors.New("square/go-jose: invalid apv header")
|
||||
return nil, errors.New("go-jose/go-jose: invalid apv header")
|
||||
}
|
||||
|
||||
deriveKey := func(algID string, size int) []byte {
|
||||
|
|
@ -489,7 +489,7 @@ func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, a
|
|||
}
|
||||
ok := ed25519.Verify(ctx.publicKey, payload, signature)
|
||||
if !ok {
|
||||
return errors.New("square/go-jose: ed25519 signature failed to verify")
|
||||
return errors.New("go-jose/go-jose: ed25519 signature failed to verify")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -513,7 +513,7 @@ func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm)
|
|||
|
||||
curveBits := ctx.privateKey.Curve.Params().BitSize
|
||||
if expectedBitSize != curveBits {
|
||||
return Signature{}, fmt.Errorf("square/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits)
|
||||
return Signature{}, fmt.Errorf("go-jose/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits)
|
||||
}
|
||||
|
||||
hasher := hash.New()
|
||||
|
|
@ -571,7 +571,7 @@ func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, a
|
|||
}
|
||||
|
||||
if len(signature) != 2*keySize {
|
||||
return fmt.Errorf("square/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize)
|
||||
return fmt.Errorf("go-jose/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize)
|
||||
}
|
||||
|
||||
hasher := hash.New()
|
||||
|
|
@ -585,7 +585,7 @@ func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, a
|
|||
|
||||
match := ecdsa.Verify(ctx.publicKey, hashed, r, s)
|
||||
if !match {
|
||||
return errors.New("square/go-jose: ecdsa signature failed to verify")
|
||||
return errors.New("go-jose/go-jose: ecdsa signature failed to verify")
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -101,23 +101,23 @@ func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte {
|
|||
// Open decrypts and authenticates the ciphertext.
|
||||
func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
|
||||
if len(ciphertext) < ctx.authtagBytes {
|
||||
return nil, errors.New("square/go-jose: invalid ciphertext (too short)")
|
||||
return nil, errors.New("go-jose/go-jose: invalid ciphertext (too short)")
|
||||
}
|
||||
|
||||
offset := len(ciphertext) - ctx.authtagBytes
|
||||
expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset])
|
||||
match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:])
|
||||
if match != 1 {
|
||||
return nil, errors.New("square/go-jose: invalid ciphertext (auth tag mismatch)")
|
||||
return nil, errors.New("go-jose/go-jose: invalid ciphertext (auth tag mismatch)")
|
||||
}
|
||||
|
||||
cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce)
|
||||
|
||||
// Make copy of ciphertext buffer, don't want to modify in place
|
||||
buffer := append([]byte{}, []byte(ciphertext[:offset])...)
|
||||
buffer := append([]byte{}, ciphertext[:offset]...)
|
||||
|
||||
if len(buffer)%ctx.blockCipher.BlockSize() > 0 {
|
||||
return nil, errors.New("square/go-jose: invalid ciphertext (invalid length)")
|
||||
return nil, errors.New("go-jose/go-jose: invalid ciphertext (invalid length)")
|
||||
}
|
||||
|
||||
cbc.CryptBlocks(buffer, buffer)
|
||||
|
|
@ -177,19 +177,19 @@ func padBuffer(buffer []byte, blockSize int) []byte {
|
|||
// Remove padding
|
||||
func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) {
|
||||
if len(buffer)%blockSize != 0 {
|
||||
return nil, errors.New("square/go-jose: invalid padding")
|
||||
return nil, errors.New("go-jose/go-jose: invalid padding")
|
||||
}
|
||||
|
||||
last := buffer[len(buffer)-1]
|
||||
count := int(last)
|
||||
|
||||
if count == 0 || count > blockSize || count > len(buffer) {
|
||||
return nil, errors.New("square/go-jose: invalid padding")
|
||||
return nil, errors.New("go-jose/go-jose: invalid padding")
|
||||
}
|
||||
|
||||
padding := bytes.Repeat([]byte{last}, count)
|
||||
if !bytes.HasSuffix(buffer, padding) {
|
||||
return nil, errors.New("square/go-jose: invalid padding")
|
||||
return nil, errors.New("go-jose/go-jose: invalid padding")
|
||||
}
|
||||
|
||||
return buffer[:len(buffer)-count], nil
|
||||
|
|
@ -28,7 +28,7 @@ var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6}
|
|||
// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher.
|
||||
func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) {
|
||||
if len(cek)%8 != 0 {
|
||||
return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks")
|
||||
return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks")
|
||||
}
|
||||
|
||||
n := len(cek) / 8
|
||||
|
|
@ -51,7 +51,7 @@ func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) {
|
|||
binary.BigEndian.PutUint64(tBytes, uint64(t+1))
|
||||
|
||||
for i := 0; i < 8; i++ {
|
||||
buffer[i] = buffer[i] ^ tBytes[i]
|
||||
buffer[i] ^= tBytes[i]
|
||||
}
|
||||
copy(r[t%n], buffer[8:])
|
||||
}
|
||||
|
|
@ -68,7 +68,7 @@ func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) {
|
|||
// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher.
|
||||
func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) {
|
||||
if len(ciphertext)%8 != 0 {
|
||||
return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks")
|
||||
return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks")
|
||||
}
|
||||
|
||||
n := (len(ciphertext) / 8) - 1
|
||||
|
|
@ -87,7 +87,7 @@ func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) {
|
|||
binary.BigEndian.PutUint64(tBytes, uint64(t+1))
|
||||
|
||||
for i := 0; i < 8; i++ {
|
||||
buffer[i] = buffer[i] ^ tBytes[i]
|
||||
buffer[i] ^= tBytes[i]
|
||||
}
|
||||
copy(buffer[8:], r[t%n])
|
||||
|
||||
|
|
@ -97,7 +97,7 @@ func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) {
|
|||
}
|
||||
|
||||
if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 {
|
||||
return nil, errors.New("square/go-jose: failed to unwrap key")
|
||||
return nil, errors.New("go-jose/go-jose: failed to unwrap key")
|
||||
}
|
||||
|
||||
out := make([]byte, n*8)
|
||||
|
|
@ -23,7 +23,7 @@ import (
|
|||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"gopkg.in/square/go-jose.v2/json"
|
||||
"github.com/go-jose/go-jose/v3/json"
|
||||
)
|
||||
|
||||
// Encrypter represents an encrypter which produces an encrypted JWE object.
|
||||
|
|
@ -201,8 +201,8 @@ func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *Encrypter
|
|||
if cipher == nil {
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
if rcpts == nil || len(rcpts) == 0 {
|
||||
return nil, fmt.Errorf("square/go-jose: recipients is nil or empty")
|
||||
if len(rcpts) == 0 {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: recipients is nil or empty")
|
||||
}
|
||||
|
||||
encrypter := &genericEncrypter{
|
||||
|
|
@ -234,7 +234,7 @@ func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) {
|
|||
|
||||
switch recipient.Algorithm {
|
||||
case DIRECT, ECDH_ES:
|
||||
return fmt.Errorf("square/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm)
|
||||
return fmt.Errorf("go-jose/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm)
|
||||
}
|
||||
|
||||
recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key)
|
||||
|
|
@ -326,7 +326,7 @@ func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWe
|
|||
obj.recipients = make([]recipientInfo, len(ctx.recipients))
|
||||
|
||||
if len(ctx.recipients) == 0 {
|
||||
return nil, fmt.Errorf("square/go-jose: no recipients to encrypt to")
|
||||
return nil, fmt.Errorf("go-jose/go-jose: no recipients to encrypt to")
|
||||
}
|
||||
|
||||
cek, headers, err := ctx.keyGenerator.genKey()
|
||||
|
|
@ -410,26 +410,27 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error)
|
|||
headers := obj.mergedHeaders(nil)
|
||||
|
||||
if len(obj.recipients) > 1 {
|
||||
return nil, errors.New("square/go-jose: too many recipients in payload; expecting only one")
|
||||
return nil, errors.New("go-jose/go-jose: too many recipients in payload; expecting only one")
|
||||
}
|
||||
|
||||
critical, err := headers.getCritical()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("square/go-jose: invalid crit header")
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid crit header")
|
||||
}
|
||||
|
||||
if len(critical) > 0 {
|
||||
return nil, fmt.Errorf("square/go-jose: unsupported crit header")
|
||||
return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
|
||||
}
|
||||
|
||||
decrypter, err := newDecrypter(decryptionKey)
|
||||
key := tryJWKS(decryptionKey, obj.Header)
|
||||
decrypter, err := newDecrypter(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cipher := getContentCipher(headers.getEncryption())
|
||||
if cipher == nil {
|
||||
return nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(headers.getEncryption()))
|
||||
return nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(headers.getEncryption()))
|
||||
}
|
||||
|
||||
generator := randomKeyGenerator{
|
||||
|
|
@ -475,14 +476,15 @@ func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Heade
|
|||
|
||||
critical, err := globalHeaders.getCritical()
|
||||
if err != nil {
|
||||
return -1, Header{}, nil, fmt.Errorf("square/go-jose: invalid crit header")
|
||||
return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: invalid crit header")
|
||||
}
|
||||
|
||||
if len(critical) > 0 {
|
||||
return -1, Header{}, nil, fmt.Errorf("square/go-jose: unsupported crit header")
|
||||
return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
|
||||
}
|
||||
|
||||
decrypter, err := newDecrypter(decryptionKey)
|
||||
key := tryJWKS(decryptionKey, obj.Header)
|
||||
decrypter, err := newDecrypter(key)
|
||||
if err != nil {
|
||||
return -1, Header{}, nil, err
|
||||
}
|
||||
|
|
@ -490,7 +492,7 @@ func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Heade
|
|||
encryption := globalHeaders.getEncryption()
|
||||
cipher := getContentCipher(encryption)
|
||||
if cipher == nil {
|
||||
return -1, Header{}, nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(encryption))
|
||||
return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(encryption))
|
||||
}
|
||||
|
||||
generator := randomKeyGenerator{
|
||||
|
|
@ -524,18 +526,18 @@ func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Heade
|
|||
}
|
||||
}
|
||||
|
||||
if plaintext == nil || err != nil {
|
||||
if plaintext == nil {
|
||||
return -1, Header{}, nil, ErrCryptoFailure
|
||||
}
|
||||
|
||||
// The "zip" header parameter may only be present in the protected header.
|
||||
if comp := obj.protected.getCompression(); comp != "" {
|
||||
plaintext, err = decompress(comp, plaintext)
|
||||
plaintext, _ = decompress(comp, plaintext)
|
||||
}
|
||||
|
||||
sanitized, err := headers.sanitized()
|
||||
if err != nil {
|
||||
return -1, Header{}, nil, fmt.Errorf("square/go-jose: failed to sanitize header: %v", err)
|
||||
return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to sanitize header: %v", err)
|
||||
}
|
||||
|
||||
return index, sanitized, plaintext, err
|
||||
|
|
@ -18,9 +18,9 @@
|
|||
|
||||
Package jose aims to provide an implementation of the Javascript Object Signing
|
||||
and Encryption set of standards. It implements encryption and signing based on
|
||||
the JSON Web Encryption and JSON Web Signature standards, with optional JSON
|
||||
Web Token support available in a sub-package. The library supports both the
|
||||
compact and full serialization formats, and has optional support for multiple
|
||||
the JSON Web Encryption and JSON Web Signature standards, with optional JSON Web
|
||||
Token support available in a sub-package. The library supports both the compact
|
||||
and JWS/JWE JSON Serialization formats, and has optional support for multiple
|
||||
recipients.
|
||||
|
||||
*/
|
||||
|
|
@ -26,7 +26,7 @@ import (
|
|||
"strings"
|
||||
"unicode"
|
||||
|
||||
"gopkg.in/square/go-jose.v2/json"
|
||||
"github.com/go-jose/go-jose/v3/json"
|
||||
)
|
||||
|
||||
// Helper function to serialize known-good objects.
|
||||
|
|
@ -41,7 +41,7 @@ func mustSerializeJSON(value interface{}) []byte {
|
|||
// MarshalJSON will happily serialize it as the top-level value "null". If
|
||||
// that value is then embedded in another operation, for instance by being
|
||||
// base64-encoded and fed as input to a signing algorithm
|
||||
// (https://github.com/square/go-jose/issues/22), the result will be
|
||||
// (https://github.com/go-jose/go-jose/issues/22), the result will be
|
||||
// incorrect. Because this method is intended for known-good objects, and a nil
|
||||
// pointer is not a known-good object, we are free to panic in this case.
|
||||
// Note: It's not possible to directly check whether the data pointed at by an
|
||||
|
|
@ -127,7 +127,7 @@ func newBuffer(data []byte) *byteBuffer {
|
|||
|
||||
func newFixedSizeBuffer(data []byte, length int) *byteBuffer {
|
||||
if len(data) > length {
|
||||
panic("square/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)")
|
||||
panic("go-jose/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)")
|
||||
}
|
||||
pad := make([]byte, length-len(data))
|
||||
return newBuffer(append(pad, data...))
|
||||
|
|
@ -154,7 +154,7 @@ func (b *byteBuffer) UnmarshalJSON(data []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
decoded, err := base64.RawURLEncoding.DecodeString(encoded)
|
||||
decoded, err := base64URLDecode(encoded)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -183,3 +183,9 @@ func (b byteBuffer) bigInt() *big.Int {
|
|||
func (b byteBuffer) toInt() int {
|
||||
return int(b.bigInt().Int64())
|
||||
}
|
||||
|
||||
// base64URLDecode is implemented as defined in https://www.rfc-editor.org/rfc/rfc7515.html#appendix-C
|
||||
func base64URLDecode(value string) ([]byte, error) {
|
||||
value = strings.TrimRight(value, "=")
|
||||
return base64.RawURLEncoding.DecodeString(value)
|
||||
}
|
||||
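base64URLDecode above simply strips trailing '=' padding before handing the value to RawURLEncoding, so both padded and unpadded base64url survive parsing (per RFC 7515 Appendix C). A standalone sketch of the same logic, not the library's own API:

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// Same approach as base64URLDecode in the diff above: tolerate optional padding.
func decodeB64URL(value string) ([]byte, error) {
	value = strings.TrimRight(value, "=")
	return base64.RawURLEncoding.DecodeString(value)
}

func main() {
	// 0x01 0x02 encoded without and with padding; both decode to [1 2].
	for _, s := range []string{"AQI", "AQI="} {
		b, err := decodeB64URL(s)
		fmt.Println(s, b, err)
	}
}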
|
|
@ -648,7 +648,7 @@ func encodeByteSlice(e *encodeState, v reflect.Value, _ bool) {
|
|||
// for large buffers, avoid unnecessary extra temporary
|
||||
// buffer space.
|
||||
enc := base64.NewEncoder(base64.StdEncoding, e)
|
||||
enc.Write(s)
|
||||
_, _ = enc.Write(s)
|
||||
enc.Close()
|
||||
}
|
||||
e.WriteByte('"')
|
||||
31  vendor/gopkg.in/square/go-jose.v2/jwe.go → vendor/github.com/go-jose/go-jose/v3/jwe.go  (generated, vendored)
@ -21,7 +21,7 @@ import (
|
|||
"fmt"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/square/go-jose.v2/json"
|
||||
"github.com/go-jose/go-jose/v3/json"
|
||||
)
|
||||
|
||||
// rawJSONWebEncryption represents a raw JWE JSON object. Used for parsing/serializing.
|
||||
|
|
@ -86,11 +86,12 @@ func (obj JSONWebEncryption) mergedHeaders(recipient *recipientInfo) rawHeader {
|
|||
func (obj JSONWebEncryption) computeAuthData() []byte {
|
||||
var protected string
|
||||
|
||||
if obj.original != nil && obj.original.Protected != nil {
|
||||
switch {
|
||||
case obj.original != nil && obj.original.Protected != nil:
|
||||
protected = obj.original.Protected.base64()
|
||||
} else if obj.protected != nil {
|
||||
case obj.protected != nil:
|
||||
protected = base64.RawURLEncoding.EncodeToString(mustSerializeJSON((obj.protected)))
|
||||
} else {
|
||||
default:
|
||||
protected = ""
|
||||
}
|
||||
|
||||
|
|
@ -103,7 +104,7 @@ func (obj JSONWebEncryption) computeAuthData() []byte {
|
|||
return output
|
||||
}
|
||||
|
||||
// ParseEncrypted parses an encrypted message in compact or full serialization format.
|
||||
// ParseEncrypted parses an encrypted message in compact or JWE JSON Serialization format.
|
||||
func ParseEncrypted(input string) (*JSONWebEncryption, error) {
|
||||
input = stripWhitespace(input)
|
||||
if strings.HasPrefix(input, "{") {
|
||||
|
|
@ -146,7 +147,7 @@ func (parsed *rawJSONWebEncryption) sanitized() (*JSONWebEncryption, error) {
|
|||
if parsed.Protected != nil && len(parsed.Protected.bytes()) > 0 {
|
||||
err := json.Unmarshal(parsed.Protected.bytes(), &obj.protected)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("square/go-jose: invalid protected header: %s, %s", err, parsed.Protected.base64())
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid protected header: %s, %s", err, parsed.Protected.base64())
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -156,7 +157,7 @@ func (parsed *rawJSONWebEncryption) sanitized() (*JSONWebEncryption, error) {
|
|||
mergedHeaders := obj.mergedHeaders(nil)
|
||||
obj.Header, err = mergedHeaders.sanitized()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("square/go-jose: cannot sanitize merged headers: %v (%v)", err, mergedHeaders)
|
||||
return nil, fmt.Errorf("go-jose/go-jose: cannot sanitize merged headers: %v (%v)", err, mergedHeaders)
|
||||
}
|
||||
|
||||
if len(parsed.Recipients) == 0 {
|
||||
|
|
@ -169,7 +170,7 @@ func (parsed *rawJSONWebEncryption) sanitized() (*JSONWebEncryption, error) {
|
|||
} else {
|
||||
obj.recipients = make([]recipientInfo, len(parsed.Recipients))
|
||||
for r := range parsed.Recipients {
|
||||
encryptedKey, err := base64.RawURLEncoding.DecodeString(parsed.Recipients[r].EncryptedKey)
|
||||
encryptedKey, err := base64URLDecode(parsed.Recipients[r].EncryptedKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -187,7 +188,7 @@ func (parsed *rawJSONWebEncryption) sanitized() (*JSONWebEncryption, error) {
|
|||
for _, recipient := range obj.recipients {
|
||||
headers := obj.mergedHeaders(&recipient)
|
||||
if headers.getAlgorithm() == "" || headers.getEncryption() == "" {
|
||||
return nil, fmt.Errorf("square/go-jose: message is missing alg/enc headers")
|
||||
return nil, fmt.Errorf("go-jose/go-jose: message is missing alg/enc headers")
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -203,30 +204,30 @@ func (parsed *rawJSONWebEncryption) sanitized() (*JSONWebEncryption, error) {
|
|||
func parseEncryptedCompact(input string) (*JSONWebEncryption, error) {
|
||||
parts := strings.Split(input, ".")
|
||||
if len(parts) != 5 {
|
||||
return nil, fmt.Errorf("square/go-jose: compact JWE format must have five parts")
|
||||
return nil, fmt.Errorf("go-jose/go-jose: compact JWE format must have five parts")
|
||||
}
|
||||
|
||||
rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0])
|
||||
rawProtected, err := base64URLDecode(parts[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
encryptedKey, err := base64.RawURLEncoding.DecodeString(parts[1])
|
||||
encryptedKey, err := base64URLDecode(parts[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
iv, err := base64.RawURLEncoding.DecodeString(parts[2])
|
||||
iv, err := base64URLDecode(parts[2])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ciphertext, err := base64.RawURLEncoding.DecodeString(parts[3])
|
||||
ciphertext, err := base64URLDecode(parts[3])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tag, err := base64.RawURLEncoding.DecodeString(parts[4])
|
||||
tag, err := base64URLDecode(parts[4])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
138  vendor/gopkg.in/square/go-jose.v2/jwk.go → vendor/github.com/go-jose/go-jose/v3/jwk.go  (generated, vendored)
@ -20,6 +20,7 @@ import (
|
|||
"bytes"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/ed25519"
|
||||
"crypto/elliptic"
|
||||
"crypto/rsa"
|
||||
"crypto/sha1"
|
||||
|
|
@ -34,9 +35,7 @@ import (
|
|||
"reflect"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/crypto/ed25519"
|
||||
|
||||
"gopkg.in/square/go-jose.v2/json"
|
||||
"github.com/go-jose/go-jose/v3/json"
|
||||
)
|
||||
|
||||
// rawJSONWebKey represents a public or private key in JWK format, used for parsing/serializing.
|
||||
|
|
@ -63,7 +62,7 @@ type rawJSONWebKey struct {
|
|||
Qi *byteBuffer `json:"qi,omitempty"`
|
||||
// Certificates
|
||||
X5c []string `json:"x5c,omitempty"`
|
||||
X5u *url.URL `json:"x5u,omitempty"`
|
||||
X5u string `json:"x5u,omitempty"`
|
||||
X5tSHA1 string `json:"x5t,omitempty"`
|
||||
X5tSHA256 string `json:"x5t#S256,omitempty"`
|
||||
}
|
||||
|
|
@ -110,7 +109,7 @@ func (k JSONWebKey) MarshalJSON() ([]byte, error) {
|
|||
case []byte:
|
||||
raw, err = fromSymmetricKey(key)
|
||||
default:
|
||||
return nil, fmt.Errorf("square/go-jose: unknown key type '%s'", reflect.TypeOf(key))
|
||||
return nil, fmt.Errorf("go-jose/go-jose: unknown key type '%s'", reflect.TypeOf(key))
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
|
|
@ -129,13 +128,13 @@ func (k JSONWebKey) MarshalJSON() ([]byte, error) {
|
|||
x5tSHA256Len := len(k.CertificateThumbprintSHA256)
|
||||
if x5tSHA1Len > 0 {
|
||||
if x5tSHA1Len != sha1.Size {
|
||||
return nil, fmt.Errorf("square/go-jose: invalid SHA-1 thumbprint (must be %d bytes, not %d)", sha1.Size, x5tSHA1Len)
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid SHA-1 thumbprint (must be %d bytes, not %d)", sha1.Size, x5tSHA1Len)
|
||||
}
|
||||
raw.X5tSHA1 = base64.RawURLEncoding.EncodeToString(k.CertificateThumbprintSHA1)
|
||||
}
|
||||
if x5tSHA256Len > 0 {
|
||||
if x5tSHA256Len != sha256.Size {
|
||||
return nil, fmt.Errorf("square/go-jose: invalid SHA-256 thumbprint (must be %d bytes, not %d)", sha256.Size, x5tSHA256Len)
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid SHA-256 thumbprint (must be %d bytes, not %d)", sha256.Size, x5tSHA256Len)
|
||||
}
|
||||
raw.X5tSHA256 = base64.RawURLEncoding.EncodeToString(k.CertificateThumbprintSHA256)
|
||||
}
|
||||
|
|
@ -149,14 +148,16 @@ func (k JSONWebKey) MarshalJSON() ([]byte, error) {
|
|||
expectedSHA256 := sha256.Sum256(k.Certificates[0].Raw)
|
||||
|
||||
if len(k.CertificateThumbprintSHA1) > 0 && !bytes.Equal(k.CertificateThumbprintSHA1, expectedSHA1[:]) {
|
||||
return nil, errors.New("square/go-jose: invalid SHA-1 thumbprint, does not match cert chain")
|
||||
return nil, errors.New("go-jose/go-jose: invalid SHA-1 thumbprint, does not match cert chain")
|
||||
}
|
||||
if len(k.CertificateThumbprintSHA256) > 0 && !bytes.Equal(k.CertificateThumbprintSHA256, expectedSHA256[:]) {
|
||||
return nil, errors.New("square/go-jose: invalid or SHA-256 thumbprint, does not match cert chain")
|
||||
return nil, errors.New("go-jose/go-jose: invalid or SHA-256 thumbprint, does not match cert chain")
|
||||
}
|
||||
}
|
||||
|
||||
raw.X5u = k.CertificatesURL
|
||||
if k.CertificatesURL != nil {
|
||||
raw.X5u = k.CertificatesURL.String()
|
||||
}
|
||||
|
||||
return json.Marshal(raw)
|
||||
}
|
||||
|
|
@ -171,7 +172,7 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
|
|||
|
||||
certs, err := parseCertificateChain(raw.X5c)
|
||||
if err != nil {
|
||||
return fmt.Errorf("square/go-jose: failed to unmarshal x5c field: %s", err)
|
||||
return fmt.Errorf("go-jose/go-jose: failed to unmarshal x5c field: %s", err)
|
||||
}
|
||||
|
||||
var key interface{}
|
||||
|
|
@ -211,7 +212,7 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
|
|||
}
|
||||
case "oct":
|
||||
if certPub != nil {
|
||||
return errors.New("square/go-jose: invalid JWK, found 'oct' (symmetric) key with cert chain")
|
||||
return errors.New("go-jose/go-jose: invalid JWK, found 'oct' (symmetric) key with cert chain")
|
||||
}
|
||||
key, err = raw.symmetricKey()
|
||||
case "OKP":
|
||||
|
|
@ -226,10 +227,10 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
|
|||
keyPub = key
|
||||
}
|
||||
} else {
|
||||
err = fmt.Errorf("square/go-jose: unknown curve %s'", raw.Crv)
|
||||
err = fmt.Errorf("go-jose/go-jose: unknown curve %s'", raw.Crv)
|
||||
}
|
||||
default:
|
||||
err = fmt.Errorf("square/go-jose: unknown json web key type '%s'", raw.Kty)
|
||||
err = fmt.Errorf("go-jose/go-jose: unknown json web key type '%s'", raw.Kty)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
|
|
@ -238,19 +239,24 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
|
|||
|
||||
if certPub != nil && keyPub != nil {
|
||||
if !reflect.DeepEqual(certPub, keyPub) {
|
||||
return errors.New("square/go-jose: invalid JWK, public keys in key and x5c fields do not match")
|
||||
return errors.New("go-jose/go-jose: invalid JWK, public keys in key and x5c fields do not match")
|
||||
}
|
||||
}
|
||||
|
||||
*k = JSONWebKey{Key: key, KeyID: raw.Kid, Algorithm: raw.Alg, Use: raw.Use, Certificates: certs}
|
||||
|
||||
k.CertificatesURL = raw.X5u
|
||||
if raw.X5u != "" {
|
||||
k.CertificatesURL, err = url.Parse(raw.X5u)
|
||||
if err != nil {
|
||||
return fmt.Errorf("go-jose/go-jose: invalid JWK, x5u header is invalid URL: %w", err)
|
 }
 }
 
 // x5t parameters are base64url-encoded SHA thumbprints
 // See RFC 7517, Section 4.8, https://tools.ietf.org/html/rfc7517#section-4.8
-x5tSHA1bytes, err := base64.RawURLEncoding.DecodeString(raw.X5tSHA1)
+x5tSHA1bytes, err := base64URLDecode(raw.X5tSHA1)
 if err != nil {
-return errors.New("square/go-jose: invalid JWK, x5t header has invalid encoding")
+return errors.New("go-jose/go-jose: invalid JWK, x5t header has invalid encoding")
 }
 
 // RFC 7517, Section 4.8 is ambiguous as to whether the digest output should be byte or hex,

@@ -260,7 +266,7 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
 if len(x5tSHA1bytes) == 2*sha1.Size {
 hx, err := hex.DecodeString(string(x5tSHA1bytes))
 if err != nil {
-return fmt.Errorf("square/go-jose: invalid JWK, unable to hex decode x5t: %v", err)
+return fmt.Errorf("go-jose/go-jose: invalid JWK, unable to hex decode x5t: %v", err)
 }
 x5tSHA1bytes = hx

@@ -268,15 +274,15 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
 k.CertificateThumbprintSHA1 = x5tSHA1bytes
 
-x5tSHA256bytes, err := base64.RawURLEncoding.DecodeString(raw.X5tSHA256)
+x5tSHA256bytes, err := base64URLDecode(raw.X5tSHA256)
 if err != nil {
-return errors.New("square/go-jose: invalid JWK, x5t#S256 header has invalid encoding")
+return errors.New("go-jose/go-jose: invalid JWK, x5t#S256 header has invalid encoding")
 }
 
 if len(x5tSHA256bytes) == 2*sha256.Size {
 hx256, err := hex.DecodeString(string(x5tSHA256bytes))
 if err != nil {
-return fmt.Errorf("square/go-jose: invalid JWK, unable to hex decode x5t#S256: %v", err)
+return fmt.Errorf("go-jose/go-jose: invalid JWK, unable to hex decode x5t#S256: %v", err)
 }
 x5tSHA256bytes = hx256
 }

@@ -286,10 +292,10 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
 x5tSHA1Len := len(k.CertificateThumbprintSHA1)
 x5tSHA256Len := len(k.CertificateThumbprintSHA256)
 if x5tSHA1Len > 0 && x5tSHA1Len != sha1.Size {
-return errors.New("square/go-jose: invalid JWK, x5t header is of incorrect size")
+return errors.New("go-jose/go-jose: invalid JWK, x5t header is of incorrect size")
 }
 if x5tSHA256Len > 0 && x5tSHA256Len != sha256.Size {
-return errors.New("square/go-jose: invalid JWK, x5t#S256 header is of incorrect size")
+return errors.New("go-jose/go-jose: invalid JWK, x5t#S256 header is of incorrect size")
 }
 
 // If certificate chain *and* thumbprints are set, verify correctness.

@@ -299,11 +305,11 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
 sha256sum := sha256.Sum256(leaf.Raw)
 
 if len(k.CertificateThumbprintSHA1) > 0 && !bytes.Equal(sha1sum[:], k.CertificateThumbprintSHA1) {
-return errors.New("square/go-jose: invalid JWK, x5c thumbprint does not match x5t value")
+return errors.New("go-jose/go-jose: invalid JWK, x5c thumbprint does not match x5t value")
 }
 
 if len(k.CertificateThumbprintSHA256) > 0 && !bytes.Equal(sha256sum[:], k.CertificateThumbprintSHA256) {
-return errors.New("square/go-jose: invalid JWK, x5c thumbprint does not match x5t#S256 value")
+return errors.New("go-jose/go-jose: invalid JWK, x5c thumbprint does not match x5t#S256 value")
 }
 }

@@ -342,7 +348,7 @@ func ecThumbprintInput(curve elliptic.Curve, x, y *big.Int) (string, error) {
 }
 
 if len(x.Bytes()) > coordLength || len(y.Bytes()) > coordLength {
-return "", errors.New("square/go-jose: invalid elliptic key (too large)")
+return "", errors.New("go-jose/go-jose: invalid elliptic key (too large)")
 }
 
 return fmt.Sprintf(ecThumbprintTemplate, crv,

@@ -359,7 +365,7 @@ func rsaThumbprintInput(n *big.Int, e int) (string, error) {
 func edThumbprintInput(ed ed25519.PublicKey) (string, error) {
 crv := "Ed25519"
 if len(ed) > 32 {
-return "", errors.New("square/go-jose: invalid elliptic key (too large)")
+return "", errors.New("go-jose/go-jose: invalid elliptic key (too large)")
 }
 return fmt.Sprintf(edThumbprintTemplate, crv,
 newFixedSizeBuffer(ed, 32).base64()), nil

@@ -384,7 +390,7 @@ func (k *JSONWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
 case ed25519.PrivateKey:
 input, err = edThumbprintInput(ed25519.PublicKey(key[32:]))
 default:
-return nil, fmt.Errorf("square/go-jose: unknown key type '%s'", reflect.TypeOf(key))
+return nil, fmt.Errorf("go-jose/go-jose: unknown key type '%s'", reflect.TypeOf(key))
 }
 
 if err != nil {

@@ -392,7 +398,7 @@ func (k *JSONWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
 }
 
 h := hash.New()
-h.Write([]byte(input))
+_, _ = h.Write([]byte(input))
 return h.Sum(nil), nil
 }
 

@@ -463,7 +469,7 @@ func (k *JSONWebKey) Valid() bool {
 
 func (key rawJSONWebKey) rsaPublicKey() (*rsa.PublicKey, error) {
 if key.N == nil || key.E == nil {
-return nil, fmt.Errorf("square/go-jose: invalid RSA key, missing n/e values")
+return nil, fmt.Errorf("go-jose/go-jose: invalid RSA key, missing n/e values")
 }
 
 return &rsa.PublicKey{

@@ -498,29 +504,29 @@ func (key rawJSONWebKey) ecPublicKey() (*ecdsa.PublicKey, error) {
 case "P-521":
 curve = elliptic.P521()
 default:
-return nil, fmt.Errorf("square/go-jose: unsupported elliptic curve '%s'", key.Crv)
+return nil, fmt.Errorf("go-jose/go-jose: unsupported elliptic curve '%s'", key.Crv)
 }
 
 if key.X == nil || key.Y == nil {
-return nil, errors.New("square/go-jose: invalid EC key, missing x/y values")
+return nil, errors.New("go-jose/go-jose: invalid EC key, missing x/y values")
 }
 
 // The length of this octet string MUST be the full size of a coordinate for
 // the curve specified in the "crv" parameter.
 // https://tools.ietf.org/html/rfc7518#section-6.2.1.2
 if curveSize(curve) != len(key.X.data) {
-return nil, fmt.Errorf("square/go-jose: invalid EC public key, wrong length for x")
+return nil, fmt.Errorf("go-jose/go-jose: invalid EC public key, wrong length for x")
 }
 
 if curveSize(curve) != len(key.Y.data) {
-return nil, fmt.Errorf("square/go-jose: invalid EC public key, wrong length for y")
+return nil, fmt.Errorf("go-jose/go-jose: invalid EC public key, wrong length for y")
 }
 
 x := key.X.bigInt()
 y := key.Y.bigInt()
 
 if !curve.IsOnCurve(x, y) {
-return nil, errors.New("square/go-jose: invalid EC key, X/Y are not on declared curve")
+return nil, errors.New("go-jose/go-jose: invalid EC key, X/Y are not on declared curve")
 }
 
 return &ecdsa.PublicKey{

@@ -532,7 +538,7 @@ func (key rawJSONWebKey) ecPublicKey() (*ecdsa.PublicKey, error) {
 
 func fromEcPublicKey(pub *ecdsa.PublicKey) (*rawJSONWebKey, error) {
 if pub == nil || pub.X == nil || pub.Y == nil {
-return nil, fmt.Errorf("square/go-jose: invalid EC key (nil, or X/Y missing)")
+return nil, fmt.Errorf("go-jose/go-jose: invalid EC key (nil, or X/Y missing)")
 }
 
 name, err := curveName(pub.Curve)

@@ -546,7 +552,7 @@ func fromEcPublicKey(pub *ecdsa.PublicKey) (*rawJSONWebKey, error) {
 yBytes := pub.Y.Bytes()
 
 if len(xBytes) > size || len(yBytes) > size {
-return nil, fmt.Errorf("square/go-jose: invalid EC key (X/Y too large)")
+return nil, fmt.Errorf("go-jose/go-jose: invalid EC key (X/Y too large)")
 }
 
 key := &rawJSONWebKey{

@@ -569,7 +575,7 @@ func (key rawJSONWebKey) edPrivateKey() (ed25519.PrivateKey, error) {
 }
 
 if len(missing) > 0 {
-return nil, fmt.Errorf("square/go-jose: invalid Ed25519 private key, missing %s value(s)", strings.Join(missing, ", "))
+return nil, fmt.Errorf("go-jose/go-jose: invalid Ed25519 private key, missing %s value(s)", strings.Join(missing, ", "))
 }
 
 privateKey := make([]byte, ed25519.PrivateKeySize)

@@ -581,7 +587,7 @@ func (key rawJSONWebKey) edPrivateKey() (ed25519.PrivateKey, error) {
 
 func (key rawJSONWebKey) edPublicKey() (ed25519.PublicKey, error) {
 if key.X == nil {
-return nil, fmt.Errorf("square/go-jose: invalid Ed key, missing x value")
+return nil, fmt.Errorf("go-jose/go-jose: invalid Ed key, missing x value")
 }
 publicKey := make([]byte, ed25519.PublicKeySize)
 copy(publicKey[0:32], key.X.bytes())

@@ -605,7 +611,7 @@ func (key rawJSONWebKey) rsaPrivateKey() (*rsa.PrivateKey, error) {
 }
 
 if len(missing) > 0 {
-return nil, fmt.Errorf("square/go-jose: invalid RSA private key, missing %s value(s)", strings.Join(missing, ", "))
+return nil, fmt.Errorf("go-jose/go-jose: invalid RSA private key, missing %s value(s)", strings.Join(missing, ", "))
 }
 
 rv := &rsa.PrivateKey{

@@ -675,34 +681,34 @@ func (key rawJSONWebKey) ecPrivateKey() (*ecdsa.PrivateKey, error) {
 case "P-521":
 curve = elliptic.P521()
 default:
-return nil, fmt.Errorf("square/go-jose: unsupported elliptic curve '%s'", key.Crv)
+return nil, fmt.Errorf("go-jose/go-jose: unsupported elliptic curve '%s'", key.Crv)
 }
 
 if key.X == nil || key.Y == nil || key.D == nil {
-return nil, fmt.Errorf("square/go-jose: invalid EC private key, missing x/y/d values")
+return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, missing x/y/d values")
 }
 
 // The length of this octet string MUST be the full size of a coordinate for
 // the curve specified in the "crv" parameter.
 // https://tools.ietf.org/html/rfc7518#section-6.2.1.2
 if curveSize(curve) != len(key.X.data) {
-return nil, fmt.Errorf("square/go-jose: invalid EC private key, wrong length for x")
+return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, wrong length for x")
 }
 
 if curveSize(curve) != len(key.Y.data) {
-return nil, fmt.Errorf("square/go-jose: invalid EC private key, wrong length for y")
+return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, wrong length for y")
 }
 
 // https://tools.ietf.org/html/rfc7518#section-6.2.2.1
 if dSize(curve) != len(key.D.data) {
-return nil, fmt.Errorf("square/go-jose: invalid EC private key, wrong length for d")
+return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, wrong length for d")
 }
 
 x := key.X.bigInt()
 y := key.Y.bigInt()
 
 if !curve.IsOnCurve(x, y) {
-return nil, errors.New("square/go-jose: invalid EC key, X/Y are not on declared curve")
+return nil, errors.New("go-jose/go-jose: invalid EC key, X/Y are not on declared curve")
 }
 
 return &ecdsa.PrivateKey{

@@ -722,7 +728,7 @@ func fromEcPrivateKey(ec *ecdsa.PrivateKey) (*rawJSONWebKey, error) {
 }
 
 if ec.D == nil {
-return nil, fmt.Errorf("square/go-jose: invalid EC private key")
+return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key")
 }
 
 raw.D = newFixedSizeBuffer(ec.D.Bytes(), dSize(ec.PublicKey.Curve))

@@ -740,7 +746,7 @@ func dSize(curve elliptic.Curve) int {
 bitLen := order.BitLen()
 size := bitLen / 8
 if bitLen%8 != 0 {
-size = size + 1
+size++
 }
 return size
 }
 

@@ -754,7 +760,39 @@ func fromSymmetricKey(key []byte) (*rawJSONWebKey, error) {
 
 func (key rawJSONWebKey) symmetricKey() ([]byte, error) {
 if key.K == nil {
-return nil, fmt.Errorf("square/go-jose: invalid OCT (symmetric) key, missing k value")
+return nil, fmt.Errorf("go-jose/go-jose: invalid OCT (symmetric) key, missing k value")
 }
 return key.K.bytes(), nil
 }
+
+func tryJWKS(key interface{}, headers ...Header) interface{} {
+var jwks JSONWebKeySet
+
+switch jwksType := key.(type) {
+case *JSONWebKeySet:
+jwks = *jwksType
+case JSONWebKeySet:
+jwks = jwksType
+default:
+return key
+}
+
+var kid string
+for _, header := range headers {
+if header.KeyID != "" {
+kid = header.KeyID
+break
+}
+}
+
+if kid == "" {
+return key
+}
+
+keys := jwks.Key(kid)
+if len(keys) == 0 {
+return key
+}
+
+return keys[0].Key
+}
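Note: the jwk.go hunks above only rename the error prefix (square/go-jose → go-jose/go-jose) and route thumbprint decoding through the package's internal base64URLDecode helper; the exported JWK API is untouched by this vendor bump. A minimal, illustrative sketch of that unchanged API under the new import path follows; the key material, key ID and variable names are made up for the example and are not taken from this repository:

package main

import (
    "crypto"
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    _ "crypto/sha256" // registers SHA-256 so crypto.SHA256.New() works
    "encoding/base64"
    "fmt"

    jose "github.com/go-jose/go-jose/v3"
)

func main() {
    // Throwaway EC key wrapped as a JWK; names here are purely illustrative.
    priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    if err != nil {
        panic(err)
    }
    jwk := jose.JSONWebKey{Key: priv.Public(), KeyID: "example-key", Algorithm: "ES256", Use: "sig"}

    // RFC 7638 thumbprint; only the error message prefix changed in this diff.
    tp, err := jwk.Thumbprint(crypto.SHA256)
    if err != nil {
        panic(err)
    }
    fmt.Println(base64.RawURLEncoding.EncodeToString(tp))
}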
22 vendor/gopkg.in/square/go-jose.v2/jws.go → vendor/github.com/go-jose/go-jose/v3/jws.go (generated, vendored)
@@ -23,7 +23,7 @@ import (
 "fmt"
 "strings"
 
-"gopkg.in/square/go-jose.v2/json"
+"github.com/go-jose/go-jose/v3/json"
 )
 
 // rawJSONWebSignature represents a raw JWS JSON object. Used for parsing/serializing.

@@ -75,7 +75,7 @@ type Signature struct {
 original *rawSignatureInfo
 }
 
-// ParseSigned parses a signed message in compact or full serialization format.
+// ParseSigned parses a signed message in compact or JWS JSON Serialization format.
 func ParseSigned(signature string) (*JSONWebSignature, error) {
 signature = stripWhitespace(signature)
 if strings.HasPrefix(signature, "{") {

@@ -88,7 +88,7 @@ func ParseSigned(signature string) (*JSONWebSignature, error) {
 // ParseDetached parses a signed message in compact serialization format with detached payload.
 func ParseDetached(signature string, payload []byte) (*JSONWebSignature, error) {
 if payload == nil {
-return nil, errors.New("square/go-jose: nil payload")
+return nil, errors.New("go-jose/go-jose: nil payload")
 }
 return parseSignedCompact(stripWhitespace(signature), payload)
 }

@@ -151,7 +151,7 @@ func parseSignedFull(input string) (*JSONWebSignature, error) {
 // sanitized produces a cleaned-up JWS object from the raw JSON.
 func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) {
 if parsed.Payload == nil {
-return nil, fmt.Errorf("square/go-jose: missing payload in JWS message")
+return nil, fmt.Errorf("go-jose/go-jose: missing payload in JWS message")
 }
 
 obj := &JSONWebSignature{

@@ -215,7 +215,7 @@ func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) {
 // As per RFC 7515 Section 4.1.3, only public keys are allowed to be embedded.
 jwk := signature.Header.JSONWebKey
 if jwk != nil && (!jwk.Valid() || !jwk.IsPublic()) {
-return nil, errors.New("square/go-jose: invalid embedded jwk, must be public key")
+return nil, errors.New("go-jose/go-jose: invalid embedded jwk, must be public key")
 }
 
 obj.Signatures = append(obj.Signatures, signature)

@@ -260,7 +260,7 @@ func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) {
 // As per RFC 7515 Section 4.1.3, only public keys are allowed to be embedded.
 jwk := obj.Signatures[i].Header.JSONWebKey
 if jwk != nil && (!jwk.Valid() || !jwk.IsPublic()) {
-return nil, errors.New("square/go-jose: invalid embedded jwk, must be public key")
+return nil, errors.New("go-jose/go-jose: invalid embedded jwk, must be public key")
 }
 
 // Copy value of sig

@@ -277,26 +277,26 @@ func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) {
 func parseSignedCompact(input string, payload []byte) (*JSONWebSignature, error) {
 parts := strings.Split(input, ".")
 if len(parts) != 3 {
-return nil, fmt.Errorf("square/go-jose: compact JWS format must have three parts")
+return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts")
 }
 
 if parts[1] != "" && payload != nil {
-return nil, fmt.Errorf("square/go-jose: payload is not detached")
+return nil, fmt.Errorf("go-jose/go-jose: payload is not detached")
 }
 
-rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0])
+rawProtected, err := base64URLDecode(parts[0])
 if err != nil {
 return nil, err
 }
 
 if payload == nil {
-payload, err = base64.RawURLEncoding.DecodeString(parts[1])
+payload, err = base64URLDecode(parts[1])
 if err != nil {
 return nil, err
 }
 }
 
-signature, err := base64.RawURLEncoding.DecodeString(parts[2])
+signature, err := base64URLDecode(parts[2])
 if err != nil {
 return nil, err
 }
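The jws.go changes are likewise mechanical: the vendored JSON shim moves to github.com/go-jose/go-jose/v3/json and compact parsing goes through base64URLDecode. A small sketch of the sign/parse/verify round trip handled by the code above, assuming the standard go-jose v3 public API as vendored here; payload and key are illustrative:

package main

import (
    "crypto/ed25519"
    "crypto/rand"
    "fmt"

    jose "github.com/go-jose/go-jose/v3"
)

func main() {
    pub, priv, err := ed25519.GenerateKey(rand.Reader)
    if err != nil {
        panic(err)
    }

    signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.EdDSA, Key: priv}, nil)
    if err != nil {
        panic(err)
    }

    obj, _ := signer.Sign([]byte(`{"hello":"world"}`))
    compact, _ := obj.CompactSerialize()

    // ParseSigned accepts both compact and JWS JSON Serialization input.
    parsed, err := jose.ParseSigned(compact)
    if err != nil {
        panic(err)
    }
    payload, err := parsed.Verify(pub)
    fmt.Println(string(payload), err)
}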
@@ -23,7 +23,7 @@ import (
 "errors"
 "fmt"
 
-"gopkg.in/square/go-jose.v2/json"
+"github.com/go-jose/go-jose/v3/json"
 )
 
 // KeyAlgorithm represents a key management algorithm.

@@ -45,32 +45,32 @@ var (
 // ErrCryptoFailure represents an error in cryptographic primitive. This
 // occurs when, for example, a message had an invalid authentication tag or
 // could not be decrypted.
-ErrCryptoFailure = errors.New("square/go-jose: error in cryptographic primitive")
+ErrCryptoFailure = errors.New("go-jose/go-jose: error in cryptographic primitive")
 
 // ErrUnsupportedAlgorithm indicates that a selected algorithm is not
 // supported. This occurs when trying to instantiate an encrypter for an
 // algorithm that is not yet implemented.
-ErrUnsupportedAlgorithm = errors.New("square/go-jose: unknown/unsupported algorithm")
+ErrUnsupportedAlgorithm = errors.New("go-jose/go-jose: unknown/unsupported algorithm")
 
 // ErrUnsupportedKeyType indicates that the given key type/format is not
 // supported. This occurs when trying to instantiate an encrypter and passing
 // it a key of an unrecognized type or with unsupported parameters, such as
 // an RSA private key with more than two primes.
-ErrUnsupportedKeyType = errors.New("square/go-jose: unsupported key type/format")
+ErrUnsupportedKeyType = errors.New("go-jose/go-jose: unsupported key type/format")
 
 // ErrInvalidKeySize indicates that the given key is not the correct size
 // for the selected algorithm. This can occur, for example, when trying to
 // encrypt with AES-256 but passing only a 128-bit key as input.
-ErrInvalidKeySize = errors.New("square/go-jose: invalid key size for algorithm")
+ErrInvalidKeySize = errors.New("go-jose/go-jose: invalid key size for algorithm")
 
 // ErrNotSupported serialization of object is not supported. This occurs when
 // trying to compact-serialize an object which can't be represented in
 // compact form.
-ErrNotSupported = errors.New("square/go-jose: compact serialization not supported for object")
+ErrNotSupported = errors.New("go-jose/go-jose: compact serialization not supported for object")
 
 // ErrUnprotectedNonce indicates that while parsing a JWS or JWE object, a
 // nonce header parameter was included in an unprotected header object.
-ErrUnprotectedNonce = errors.New("square/go-jose: Nonce parameter included in unprotected header")
+ErrUnprotectedNonce = errors.New("go-jose/go-jose: Nonce parameter included in unprotected header")
 )
 
 // Key management algorithms

@@ -133,8 +133,8 @@ const (
 type HeaderKey string
 
 const (
-HeaderType HeaderKey = "typ" // string
-HeaderContentType = "cty" // string
+HeaderType = "typ" // string
+HeaderContentType = "cty" // string
 
 // These are set by go-jose and shouldn't need to be set by consumers of the
 // library.

@@ -194,7 +194,7 @@ type Header struct {
 // not be validated with the given verify options.
 func (h Header) Certificates(opts x509.VerifyOptions) ([][]*x509.Certificate, error) {
 if len(h.certificates) == 0 {
-return nil, errors.New("square/go-jose: no x5c header present in message")
+return nil, errors.New("go-jose/go-jose: no x5c header present in message")
 }
 
 leaf := h.certificates[0]

@@ -452,8 +452,8 @@ func parseCertificateChain(chain []string) ([]*x509.Certificate, error) {
 return out, nil
 }
 
-func (dst rawHeader) isSet(k HeaderKey) bool {
-dvr := dst[k]
+func (parsed rawHeader) isSet(k HeaderKey) bool {
+dvr := parsed[k]
 if dvr == nil {
 return false
 }

@@ -472,17 +472,17 @@ func (dst rawHeader) isSet(k HeaderKey) bool {
 }
 
 // Merge headers from src into dst, giving precedence to headers from l.
-func (dst rawHeader) merge(src *rawHeader) {
+func (parsed rawHeader) merge(src *rawHeader) {
 if src == nil {
 return
 }
 
 for k, v := range *src {
-if dst.isSet(k) {
+if parsed.isSet(k) {
 continue
 }
 
-dst[k] = v
+parsed[k] = v
 }
 }
 

@@ -496,7 +496,7 @@ func curveName(crv elliptic.Curve) (string, error) {
 case elliptic.P521():
 return "P-521", nil
 default:
-return "", fmt.Errorf("square/go-jose: unsupported/unknown elliptic curve")
+return "", fmt.Errorf("go-jose/go-jose: unsupported/unknown elliptic curve")
 }
 }
 
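The hunks above rename the rawHeader receiver (dst → parsed) and drop the explicit HeaderKey type from the exported HeaderType/HeaderContentType constants; behaviour is unchanged. Consumers normally set those "typ"/"cty" headers through SignerOptions rather than touching rawHeader, as in this hedged sketch (the shared key and claim values are invented for illustration):

package main

import (
    "crypto/rand"
    "fmt"

    jose "github.com/go-jose/go-jose/v3"
)

func main() {
    // 32-byte shared secret for HS256, generated only for this example.
    sharedKey := make([]byte, 32)
    if _, err := rand.Read(sharedKey); err != nil {
        panic(err)
    }

    // "typ" and "cty" (HeaderType / HeaderContentType above) are set via SignerOptions.
    opts := (&jose.SignerOptions{}).WithType("JWT").WithContentType("JWT")
    signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.HS256, Key: sharedKey}, opts)
    if err != nil {
        panic(err)
    }

    obj, _ := signer.Sign([]byte(`{"sub":"example"}`))
    token, _ := obj.CompactSerialize()
    fmt.Println(token)
}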
@@ -19,14 +19,13 @@ package jose
 import (
 "bytes"
 "crypto/ecdsa"
+"crypto/ed25519"
 "crypto/rsa"
 "encoding/base64"
 "errors"
 "fmt"
 
-"golang.org/x/crypto/ed25519"
-
-"gopkg.in/square/go-jose.v2/json"
+"github.com/go-jose/go-jose/v3/json"
 )
 
 // NonceSource represents a source of random nonces to go into JWS objects

@@ -227,7 +226,7 @@ func newJWKSigner(alg SignatureAlgorithm, signingKey JSONWebKey) (recipientSigIn
 
 // This should be impossible, but let's check anyway.
 if !recipient.publicKey().IsPublic() {
-return recipientSigInfo{}, errors.New("square/go-jose: public key was unexpectedly not public")
+return recipientSigInfo{}, errors.New("go-jose/go-jose: public key was unexpectedly not public")
 }
 }
 return recipient, nil

@@ -251,7 +250,7 @@ func (ctx *genericSigner) Sign(payload []byte) (*JSONWebSignature, error) {
 // result of the JOSE spec. We've decided that this library will only include one or
 // the other to avoid this confusion.
 //
-// See https://github.com/square/go-jose/issues/157 for more context.
+// See https://github.com/go-jose/go-jose/issues/157 for more context.
 if ctx.embedJWK {
 protected[headerJWK] = recipient.publicKey()
 } else {

@@ -265,7 +264,7 @@ func (ctx *genericSigner) Sign(payload []byte) (*JSONWebSignature, error) {
 if ctx.nonceSource != nil {
 nonce, err := ctx.nonceSource.Nonce()
 if err != nil {
-return nil, fmt.Errorf("square/go-jose: Error generating nonce: %v", err)
+return nil, fmt.Errorf("go-jose/go-jose: Error generating nonce: %v", err)
 }
 protected[headerNonce] = nonce
 }

@@ -279,7 +278,7 @@ func (ctx *genericSigner) Sign(payload []byte) (*JSONWebSignature, error) {
 
 if b64, ok := protected[headerB64]; ok {
 if needsBase64, ok = b64.(bool); !ok {
-return nil, errors.New("square/go-jose: Invalid b64 header parameter")
+return nil, errors.New("go-jose/go-jose: Invalid b64 header parameter")
 }
 }
 

@@ -303,7 +302,7 @@ func (ctx *genericSigner) Sign(payload []byte) (*JSONWebSignature, error) {
 for k, v := range protected {
 b, err := json.Marshal(v)
 if err != nil {
-return nil, fmt.Errorf("square/go-jose: Error marshalling item %#v: %v", k, err)
+return nil, fmt.Errorf("go-jose/go-jose: Error marshalling item %#v: %v", k, err)
 }
 (*signatureInfo.protected)[k] = makeRawMessage(b)
 }

@@ -348,13 +347,14 @@ func (obj JSONWebSignature) UnsafePayloadWithoutVerification() []byte {
 // is only useful if you have a payload and signature that are separated from
 // each other.
 func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error {
-verifier, err := newVerifier(verificationKey)
+key := tryJWKS(verificationKey, obj.headers()...)
+verifier, err := newVerifier(key)
 if err != nil {
 return err
 }
 
 if len(obj.Signatures) > 1 {
-return errors.New("square/go-jose: too many signatures in payload; expecting only one")
+return errors.New("go-jose/go-jose: too many signatures in payload; expecting only one")
 }
 
 signature := obj.Signatures[0]

@@ -406,7 +406,8 @@ func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signa
 // separated from each other, and the signature can have multiple signers at the
 // same time.
 func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) {
-verifier, err := newVerifier(verificationKey)
+key := tryJWKS(verificationKey, obj.headers()...)
+verifier, err := newVerifier(key)
 if err != nil {
 return -1, Signature{}, err
 }

@@ -439,3 +440,11 @@ outer:
 
 return -1, Signature{}, ErrCryptoFailure
 }
+
+func (obj JSONWebSignature) headers() []Header {
+headers := make([]Header, len(obj.Signatures))
+for i, sig := range obj.Signatures {
+headers[i] = sig.Header
+}
+return headers
+}
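The functional change in this file is the new tryJWKS/headers() pair: DetachedVerify and DetachedVerifyMulti now resolve the verification key from a JSONWebKeySet by the signature's "kid" header before building a verifier. A sketch of what that enables for callers, assuming the go-jose v3 API as vendored here; key IDs and payload are illustrative:

package main

import (
    "crypto/ed25519"
    "crypto/rand"
    "fmt"

    jose "github.com/go-jose/go-jose/v3"
)

func main() {
    pub, priv, err := ed25519.GenerateKey(rand.Reader)
    if err != nil {
        panic(err)
    }

    // Sign with an explicit key ID so the verifier can locate the key in a set.
    signingKey := jose.SigningKey{Algorithm: jose.EdDSA, Key: jose.JSONWebKey{Key: priv, KeyID: "kid-1"}}
    signer, err := jose.NewSigner(signingKey, nil)
    if err != nil {
        panic(err)
    }
    obj, _ := signer.Sign([]byte("payload"))

    // With tryJWKS in place, a JSONWebKeySet can be handed to Verify directly;
    // the key is selected by the signature's "kid" header.
    jwks := jose.JSONWebKeySet{Keys: []jose.JSONWebKey{{Key: pub, KeyID: "kid-1"}}}
    payload, err := obj.Verify(jwks)
    fmt.Println(string(payload), err)
}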
@@ -31,10 +31,11 @@ import (
 "io"
 
 "golang.org/x/crypto/pbkdf2"
-"gopkg.in/square/go-jose.v2/cipher"
+
+josecipher "github.com/go-jose/go-jose/v3/cipher"
 )
 
-// Random reader (stubbed out in tests)
+// RandReader is a cryptographically secure random number generator (stubbed out in tests).
 var RandReader = rand.Reader
 
 const (

@@ -278,8 +279,14 @@ func (ctx *symmetricKeyCipher) encryptKey(cek []byte, alg KeyAlgorithm) (recipie
 }
 
 header := &rawHeader{}
-header.set(headerIV, newBuffer(parts.iv))
-header.set(headerTag, newBuffer(parts.tag))
+
+if err = header.set(headerIV, newBuffer(parts.iv)); err != nil {
+return recipientInfo{}, err
+}
+
+if err = header.set(headerTag, newBuffer(parts.tag)); err != nil {
+return recipientInfo{}, err
+}
 
 return recipientInfo{
 header: header,

@@ -332,8 +339,14 @@ func (ctx *symmetricKeyCipher) encryptKey(cek []byte, alg KeyAlgorithm) (recipie
 }
 
 header := &rawHeader{}
-header.set(headerP2C, ctx.p2c)
-header.set(headerP2S, newBuffer(ctx.p2s))
+
+if err = header.set(headerP2C, ctx.p2c); err != nil {
+return recipientInfo{}, err
+}
+
+if err = header.set(headerP2S, newBuffer(ctx.p2s)); err != nil {
+return recipientInfo{}, err
+}
 
 return recipientInfo{
 encryptedKey: jek,

@@ -356,11 +369,11 @@ func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipien
 
 iv, err := headers.getIV()
 if err != nil {
-return nil, fmt.Errorf("square/go-jose: invalid IV: %v", err)
+return nil, fmt.Errorf("go-jose/go-jose: invalid IV: %v", err)
 }
 tag, err := headers.getTag()
 if err != nil {
-return nil, fmt.Errorf("square/go-jose: invalid tag: %v", err)
+return nil, fmt.Errorf("go-jose/go-jose: invalid tag: %v", err)
 }
 
 parts := &aeadParts{

@@ -389,18 +402,18 @@ func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipien
 case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
 p2s, err := headers.getP2S()
 if err != nil {
-return nil, fmt.Errorf("square/go-jose: invalid P2S: %v", err)
+return nil, fmt.Errorf("go-jose/go-jose: invalid P2S: %v", err)
 }
 if p2s == nil || len(p2s.data) == 0 {
-return nil, fmt.Errorf("square/go-jose: invalid P2S: must be present")
+return nil, fmt.Errorf("go-jose/go-jose: invalid P2S: must be present")
 }
 
 p2c, err := headers.getP2C()
 if err != nil {
-return nil, fmt.Errorf("square/go-jose: invalid P2C: %v", err)
+return nil, fmt.Errorf("go-jose/go-jose: invalid P2C: %v", err)
 }
 if p2c <= 0 {
-return nil, fmt.Errorf("square/go-jose: invalid P2C: must be a positive integer")
+return nil, fmt.Errorf("go-jose/go-jose: invalid P2C: must be a positive integer")
 }
 
 // salt is UTF8(Alg) || 0x00 || Salt Input

@@ -431,7 +444,7 @@ func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipien
 func (ctx symmetricMac) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
 mac, err := ctx.hmac(payload, alg)
 if err != nil {
-return Signature{}, errors.New("square/go-jose: failed to compute hmac")
+return Signature{}, errors.New("go-jose/go-jose: failed to compute hmac")
 }
 
 return Signature{

@@ -444,16 +457,16 @@ func (ctx symmetricMac) signPayload(payload []byte, alg SignatureAlgorithm) (Sig
 func (ctx symmetricMac) verifyPayload(payload []byte, mac []byte, alg SignatureAlgorithm) error {
 expected, err := ctx.hmac(payload, alg)
 if err != nil {
-return errors.New("square/go-jose: failed to compute hmac")
+return errors.New("go-jose/go-jose: failed to compute hmac")
 }
 
 if len(mac) != len(expected) {
-return errors.New("square/go-jose: invalid hmac")
+return errors.New("go-jose/go-jose: invalid hmac")
 }
 
 match := subtle.ConstantTimeCompare(mac, expected)
 if match != 1 {
-return errors.New("square/go-jose: invalid hmac")
+return errors.New("go-jose/go-jose: invalid hmac")
 }
 
 return nil
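Beyond the prefix rename, the hunks above stop ignoring header.set errors when the iv/tag and p2s/p2c headers are built, and the decrypt path now rejects a missing p2s and a non-positive p2c. The sketch below shows the PBES2 password-based flow whose headers those checks guard; it assumes the Recipient fields used here (PBES2Count in particular) behave as in upstream go-jose v3, and all values are illustrative:

package main

import (
    "fmt"

    jose "github.com/go-jose/go-jose/v3"
)

func main() {
    password := []byte("correct horse battery staple") // example only

    // PBES2 key wrap: the iteration count and salt become the p2c/p2s headers
    // validated by the stricter decrypt-side checks above.
    recipient := jose.Recipient{
        Algorithm:  jose.PBES2_HS256_A128KW,
        Key:        password,
        PBES2Count: 100000,
    }
    enc, err := jose.NewEncrypter(jose.A128GCM, recipient, nil)
    if err != nil {
        panic(err)
    }

    obj, _ := enc.Encrypt([]byte("secret payload"))
    serialized, _ := obj.CompactSerialize()

    parsed, err := jose.ParseEncrypted(serialized)
    if err != nil {
        panic(err)
    }
    plaintext, err := parsed.Decrypt(password)
    fmt.Println(string(plaintext), err)
}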
2 vendor/github.com/go-openapi/errors/api.go (generated, vendored)
@@ -112,7 +112,7 @@ func flattenComposite(errs *CompositeError) *CompositeError {
 for _, er := range errs.Errors {
 switch e := er.(type) {
 case *CompositeError:
-if len(e.Errors) > 0 {
+if e != nil && len(e.Errors) > 0 {
 flat := flattenComposite(e)
 if len(flat.Errors) > 0 {
 res = append(res, flat.Errors...)
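The go-openapi fix is a one-line nil guard. It matters because a typed nil pointer still matches its case in a Go type switch, and dereferencing it through len(e.Errors) would panic. A self-contained sketch of the pitfall, using a stand-in type rather than the real go-openapi one:

package main

import "fmt"

// Minimal stand-in for the go-openapi type; only what the example needs.
type CompositeError struct {
    Errors []error
}

func (c *CompositeError) Error() string { return "composite" }

func main() {
    var typedNil *CompositeError // nil pointer with a concrete type
    var err error = typedNil     // non-nil interface wrapping a nil pointer

    switch e := err.(type) {
    case *CompositeError:
        // Without the added "e != nil" guard, reading e.Errors here would
        // dereference a nil pointer and panic.
        if e != nil && len(e.Errors) > 0 {
            fmt.Println("has nested errors")
        } else {
            fmt.Println("nil or empty composite error")
        }
    default:
        fmt.Println("other error")
    }
}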
Some files were not shown because too many files have changed in this diff.